From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.0 commit in: /
Date: Tue, 23 Jun 2015 14:01:40 +0000 (UTC)
Message-ID: <1435067550.bac443972d6de3c565d4d103ca34dda24d258876.mpagano@gentoo>

commit:     bac443972d6de3c565d4d103ca34dda24d258876
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 23 13:52:30 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun 23 13:52:30 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bac44397

Linux patch 4.0.6

 0000_README            |    4 +
 1005_linux-4.0.6.patch | 3730 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3734 insertions(+)

diff --git a/0000_README b/0000_README
index 0f63559..8761846 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.0.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.0.5
 
+Patch:  1005_linux-4.0.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.0.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.0.6.patch b/1005_linux-4.0.6.patch
new file mode 100644
index 0000000..15519e7
--- /dev/null
+++ b/1005_linux-4.0.6.patch
@@ -0,0 +1,3730 @@
+diff --git a/Makefile b/Makefile
+index 1880cf77059b..af6da040b952 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+ 
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index c3255e0c90aa..dbb3f4d2bf84 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -223,6 +223,25 @@
+ /include/ "tps65217.dtsi"
+ 
+ &tps {
++	/*
++	 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
++	 * mode") at poweroff.  Most BeagleBone versions do not support RTC-only
++	 * mode and risk hardware damage if this mode is entered.
++	 *
++	 * For details, see linux-omap mailing list May 2015 thread
++	 *	[PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
++	 * In particular, messages:
++	 *	http://www.spinics.net/lists/linux-omap/msg118585.html
++	 *	http://www.spinics.net/lists/linux-omap/msg118615.html
++	 *
++	 * You can override this later with
++	 *	&tps {  /delete-property/ ti,pmic-shutdown-controller;  }
++	 * if you want to use RTC-only mode and made sure you are not affected
++	 * by the hardware problems. (Tip: double-check by performing a current
++	 * measurement after shutdown: it should be less than 1 mA.)
++	 */
++	ti,pmic-shutdown-controller;
++
+ 	regulators {
+ 		dcdc1_reg: regulator@0 {
+ 			regulator-name = "vdds_dpr";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 43d54017b779..d0ab012fa379 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -16,7 +16,8 @@
+ #include "mt8173.dtsi"
+ 
+ / {
+-	model = "mediatek,mt8173-evb";
++	model = "MediaTek MT8173 evaluation board";
++	compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
+ 
+ 	aliases {
+ 		serial0 = &uart0;
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index d2bfbc2e8995..be15e52a47a0 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -109,7 +109,7 @@ void __init init_IRQ(void)
+ #endif
+ }
+ 
+-#ifdef DEBUG_STACKOVERFLOW
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ static inline void check_stack_overflow(void)
+ {
+ 	unsigned long sp;
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index 838d3a6a5b7d..cea02968a908 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2101,7 +2101,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ 		if (vcpu->mmio_needed == 2)
+ 			*gpr = *(int16_t *) run->mmio.data;
+ 		else
+-			*gpr = *(int16_t *) run->mmio.data;
++			*gpr = *(uint16_t *)run->mmio.data;
+ 
+ 		break;
+ 	case 1:
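
A note on the hunk above: both branches of the old code performed the same
sign-extending int16_t load, so unsigned 16-bit MMIO reads came back
sign-extended. A minimal userspace sketch of the difference between the two
casts (buffer contents invented for illustration, little-endian assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mmio_data[2] = { 0x34, 0xf2 };   /* made-up raw MMIO bytes */
	long sign_ext = *(int16_t *)mmio_data;   /* old code: sign-extends */
	long zero_ext = *(uint16_t *)mmio_data;  /* fixed code: zero-extends */

	printf("signed=%ld unsigned=%ld\n", sign_ext, zero_ext); /* -3532 vs 62004 */
	return 0;
}
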
+diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
+index e20b02e3ae28..e10d10b9e82a 100644
+--- a/arch/mips/ralink/ill_acc.c
++++ b/arch/mips/ralink/ill_acc.c
+@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
+ 		addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
+ 		type & ILL_ACC_LEN_M);
+ 
+-	rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
++	rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index db257a58571f..e657b7ba3292 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -200,10 +200,21 @@
+ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+ 
+ #ifdef __KERNEL__
++
++/*
++ * early_idt_handler_array is an array of entry points referenced in the
++ * early IDT.  For simplicity, it's a real array with one entry point
++ * every nine bytes.  That leaves room for an optional 'push $0' if the
++ * vector has no error code (two bytes), a 'push $vector_number' (two
++ * bytes), and a jump to the common entry code (up to five bytes).
++ */
++#define EARLY_IDT_HANDLER_SIZE 9
++
+ #ifndef __ASSEMBLY__
+-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
++
++extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
+ #ifdef CONFIG_TRACING
+-#define trace_early_idt_handlers early_idt_handlers
++# define trace_early_idt_handler_array early_idt_handler_array
+ #endif
+ 
+ /*
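
The comment added above pins down the stub layout: each early-IDT entry gets a
fixed 9-byte slot (2 bytes for an optional 'push $0', 2 for 'push
$vector_number', up to 5 for the jump), and the head_32.S/head_64.S hunks below
pad each slot with 0xcc via .fill. Because every stub is the same size, the
entry point for a vector is plain array arithmetic; a standalone sketch
(constants copied from the patch, everything else hypothetical):

#include <stdio.h>

#define EARLY_IDT_HANDLER_SIZE	9
#define NUM_EXCEPTION_VECTORS	32	/* x86 value */

int main(void)
{
	/* Stand-in for the stub array the .rept loop emits. */
	static unsigned char stubs[NUM_EXCEPTION_VECTORS * EARLY_IDT_HANDLER_SIZE];
	int vec;

	for (vec = 0; vec < NUM_EXCEPTION_VECTORS; vec++) {
		const unsigned char *entry = &stubs[vec * EARLY_IDT_HANDLER_SIZE];

		if (vec < 3 || vec == NUM_EXCEPTION_VECTORS - 1)
			printf("vector %2d -> byte offset %3td\n",
			       vec, entry - stubs);
	}
	return 0;
}
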
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index c4f8d4659070..b111ab5c4509 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+ 	clear_bss();
+ 
+ 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+-		set_intr_gate(i, early_idt_handlers[i]);
++		set_intr_gate(i, early_idt_handler_array[i]);
+ 	load_idt((const struct desc_ptr *)&idt_descr);
+ 
+ 	copy_bootdata(__va(real_mode_data));
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index f36bd42d6f0c..30a2aa3782fa 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -477,21 +477,22 @@ is486:
+ __INIT
+ setup_once:
+ 	/*
+-	 * Set up a idt with 256 entries pointing to ignore_int,
+-	 * interrupt gates. It doesn't actually load idt - that needs
+-	 * to be done on each CPU. Interrupts are enabled elsewhere,
+-	 * when we can be relatively sure everything is ok.
++	 * Set up a idt with 256 interrupt gates that push zero if there
++	 * is no error code and then jump to early_idt_handler_common.
++	 * It doesn't actually load the idt - that needs to be done on
++	 * each CPU. Interrupts are enabled elsewhere, when we can be
++	 * relatively sure everything is ok.
+ 	 */
+ 
+ 	movl $idt_table,%edi
+-	movl $early_idt_handlers,%eax
++	movl $early_idt_handler_array,%eax
+ 	movl $NUM_EXCEPTION_VECTORS,%ecx
+ 1:
+ 	movl %eax,(%edi)
+ 	movl %eax,4(%edi)
+ 	/* interrupt gate, dpl=0, present */
+ 	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
+-	addl $9,%eax
++	addl $EARLY_IDT_HANDLER_SIZE,%eax
+ 	addl $8,%edi
+ 	loop 1b
+ 
+@@ -523,26 +524,28 @@ setup_once:
+ 	andl $0,setup_once_ref	/* Once is enough, thanks */
+ 	ret
+ 
+-ENTRY(early_idt_handlers)
++ENTRY(early_idt_handler_array)
+ 	# 36(%esp) %eflags
+ 	# 32(%esp) %cs
+ 	# 28(%esp) %eip
+ 	# 24(%rsp) error code
+ 	i = 0
+ 	.rept NUM_EXCEPTION_VECTORS
+-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+-	ASM_NOP2
+-	.else
++	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ 	pushl $0		# Dummy error code, to make stack frame uniform
+ 	.endif
+ 	pushl $i		# 20(%esp) Vector number
+-	jmp early_idt_handler
++	jmp early_idt_handler_common
+ 	i = i + 1
++	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ 	.endr
+-ENDPROC(early_idt_handlers)
++ENDPROC(early_idt_handler_array)
+ 	
+-	/* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++	/*
++	 * The stack is the hardware frame, an error code or zero, and the
++	 * vector number.
++	 */
+ 	cld
+ 
+ 	cmpl $2,(%esp)		# X86_TRAP_NMI
+@@ -602,7 +605,7 @@ ex_entry:
+ is_nmi:
+ 	addl $8,%esp		/* drop vector number and error code */
+ 	iret
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+ 
+ /* This is the default interrupt "handler" :-) */
+ 	ALIGN
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index 6fd514d9f69a..f8a8406033c3 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -321,26 +321,28 @@ bad_address:
+ 	jmp bad_address
+ 
+ 	__INIT
+-	.globl early_idt_handlers
+-early_idt_handlers:
++ENTRY(early_idt_handler_array)
+ 	# 104(%rsp) %rflags
+ 	#  96(%rsp) %cs
+ 	#  88(%rsp) %rip
+ 	#  80(%rsp) error code
+ 	i = 0
+ 	.rept NUM_EXCEPTION_VECTORS
+-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
+-	ASM_NOP2
+-	.else
++	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ 	pushq $0		# Dummy error code, to make stack frame uniform
+ 	.endif
+ 	pushq $i		# 72(%rsp) Vector number
+-	jmp early_idt_handler
++	jmp early_idt_handler_common
+ 	i = i + 1
++	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ 	.endr
++ENDPROC(early_idt_handler_array)
+ 
+-/* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++	/*
++	 * The stack is the hardware frame, an error code or zero, and the
++	 * vector number.
++	 */
+ 	cld
+ 
+ 	cmpl $2,(%rsp)		# X86_TRAP_NMI
+@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
+ is_nmi:
+ 	addq $16,%rsp		# drop vector number and error code
+ 	INTERRUPT_RETURN
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+ 
+ 	__INITDATA
+ 
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 987514396c1e..ddeff4844a10 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 				if (is_ereg(dst_reg))
+ 					EMIT1(0x41);
+ 				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
++
++				/* emit 'movzwl eax, ax' */
++				if (is_ereg(dst_reg))
++					EMIT3(0x45, 0x0F, 0xB7);
++				else
++					EMIT2(0x0F, 0xB7);
++				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+ 				break;
+ 			case 32:
+ 				/* emit 'bswap eax' to swap lower 4 bytes */
+@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 			break;
+ 
+ 		case BPF_ALU | BPF_END | BPF_FROM_LE:
++			switch (imm32) {
++			case 16:
++				/* emit 'movzwl eax, ax' to zero extend 16-bit
++				 * into 64 bit
++				 */
++				if (is_ereg(dst_reg))
++					EMIT3(0x45, 0x0F, 0xB7);
++				else
++					EMIT2(0x0F, 0xB7);
++				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
++				break;
++			case 32:
++				/* emit 'mov eax, eax' to clear upper 32-bits */
++				if (is_ereg(dst_reg))
++					EMIT1(0x45);
++				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
++				break;
++			case 64:
++				/* nop */
++				break;
++			}
+ 			break;
+ 
+ 			/* ST: *(u8*)(dst_reg + off) = imm */
+@@ -938,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+ 	}
+ 	ctx.cleanup_addr = proglen;
+ 
+-	for (pass = 0; pass < 10; pass++) {
++	/* JITed image shrinks with every pass and the loop iterates
++	 * until the image stops shrinking. Very large bpf programs
++	 * may converge on the last pass. In such case do one more
++	 * pass to emit the final image
++	 */
++	for (pass = 0; pass < 10 || image; pass++) {
+ 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ 		if (proglen <= 0) {
+ 			image = NULL;
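
The loop bound change above encodes a fixed-point iteration: every pass may
shrink the JITed image (branch offsets shorten as code packs tighter), and the
final image is only written once the size stops changing, so a very large
program that converges exactly on pass 10 still gets one emitting pass. A toy
model of the same convergence scheme (the shrink function is invented):

#include <stdio.h>

/* Stand-in for do_jit(): emitted size shrinks until it hits a fixed point. */
static int emit_pass(int prev_len)
{
	int len = prev_len / 2;

	return len < 100 ? 100 : len;
}

int main(void)
{
	int oldlen = 1024, len, pass = 0;

	do {
		len = emit_pass(oldlen);
		printf("pass %d: %d bytes\n", ++pass, len);
		if (len == oldlen)
			break;	/* converged; the real code now emits for real */
		oldlen = len;
	} while (pass < 10);
	return 0;
}
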
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index 7b9be9822724..8533c96bab13 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ 	$(call if_changed,vdso)
+ 
+-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
+ hostprogs-y			+= vdso2c
+ 
+ quiet_cmd_vdso2c = VDSO2C  $@
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 5c39703e644f..b2e73e1ef8a4 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1589,6 +1589,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
+ 	return NOTIFY_OK;
+ }
+ 
++/* hctx->ctxs will be freed in queue's release handler */
+ static void blk_mq_exit_hctx(struct request_queue *q,
+ 		struct blk_mq_tag_set *set,
+ 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+@@ -1607,7 +1608,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ 
+ 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ 	blk_free_flush_queue(hctx->fq);
+-	kfree(hctx->ctxs);
+ 	blk_mq_free_bitmap(&hctx->ctx_map);
+ }
+ 
+@@ -1873,8 +1873,12 @@ void blk_mq_release(struct request_queue *q)
+ 	unsigned int i;
+ 
+ 	/* hctx kobj stays in hctx */
+-	queue_for_each_hw_ctx(q, hctx, i)
++	queue_for_each_hw_ctx(q, hctx, i) {
++		if (!hctx)
++			continue;
++		kfree(hctx->ctxs);
+ 		kfree(hctx);
++	}
+ 
+ 	kfree(q->queue_hw_ctx);
+ 
+diff --git a/block/genhd.c b/block/genhd.c
+index 0a536dc05f3b..ea982eadaf63 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ 	/* allocate ext devt */
+ 	idr_preload(GFP_KERNEL);
+ 
+-	spin_lock(&ext_devt_lock);
++	spin_lock_bh(&ext_devt_lock);
+ 	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+-	spin_unlock(&ext_devt_lock);
++	spin_unlock_bh(&ext_devt_lock);
+ 
+ 	idr_preload_end();
+ 	if (idx < 0)
+@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
+ 		return;
+ 
+ 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+-		spin_lock(&ext_devt_lock);
++		spin_lock_bh(&ext_devt_lock);
+ 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+-		spin_unlock(&ext_devt_lock);
++		spin_unlock_bh(&ext_devt_lock);
+ 	}
+ }
+ 
+@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
+ 	disk->flags &= ~GENHD_FL_UP;
+ 
+ 	sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+-	bdi_unregister(&disk->queue->backing_dev_info);
+ 	blk_unregister_queue(disk);
+ 	blk_unregister_region(disk_devt(disk), disk->minors);
+ 
+@@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
+ 	} else {
+ 		struct hd_struct *part;
+ 
+-		spin_lock(&ext_devt_lock);
++		spin_lock_bh(&ext_devt_lock);
+ 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ 		if (part && get_disk(part_to_disk(part))) {
+ 			*partno = part->partno;
+ 			disk = part_to_disk(part);
+ 		}
+-		spin_unlock(&ext_devt_lock);
++		spin_unlock_bh(&ext_devt_lock);
+ 	}
+ 
+ 	return disk;
+diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
+index 23716dd8a7ec..5928d0746a27 100644
+--- a/drivers/ata/ahci_mvebu.c
++++ b/drivers/ata/ahci_mvebu.c
+@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
+ 		writel((cs->mbus_attr << 8) |
+ 		       (dram->mbus_dram_target_id << 4) | 1,
+ 		       hpriv->mmio + AHCI_WINDOW_CTRL(i));
+-		writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
++		writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ 		writel(((cs->size - 1) & 0xffff0000),
+ 		       hpriv->mmio + AHCI_WINDOW_SIZE(i));
+ 	}
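
The one-liner above appears to match the register layout: AHCI_WINDOW_BASE
holds only the upper 16 bits of the window base address, so the value must be
written shifted right by 16, mirroring how the size register keeps
(size - 1) & 0xffff0000. A quick sketch of the 64K-granular encoding
(addresses invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x40000000;	/* hypothetical window base */
	uint32_t size = 0x08000000;	/* hypothetical window size */

	uint32_t base_reg = base >> 16;			/* address bits [31:16] */
	uint32_t size_reg = (size - 1) & 0xffff0000;	/* same 64K granularity */

	printf("base reg 0x%08x, size reg 0x%08x\n", base_reg, size_reg);
	return 0;
}
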
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
+index 80a80548ad0a..27245957eee3 100644
+--- a/drivers/ata/pata_octeon_cf.c
++++ b/drivers/ata/pata_octeon_cf.c
+@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
+ 	},
+ 	{},
+ };
+-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
++MODULE_DEVICE_TABLE(of, octeon_cf_match);
+ 
+ static struct platform_driver octeon_cf_driver = {
+ 	.probe		= octeon_cf_probe,
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 9c2ba1c97c42..df0c66cb7ad3 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
+ {
+ 	int ret;
+ 
+-	if (init_cache_level(cpu))
++	if (init_cache_level(cpu) || !cache_leaves(cpu))
+ 		return -ENOENT;
+ 
+ 	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index fb9ec6221730..6f047dcb94c2 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -58,7 +58,6 @@
+ #include <linux/debugfs.h>
+ #include <linux/log2.h>
+ #include <linux/syscore_ops.h>
+-#include <linux/memblock.h>
+ 
+ /*
+  * DDR target is the same on all platforms.
+@@ -70,6 +69,7 @@
+  */
+ #define WIN_CTRL_OFF		0x0000
+ #define   WIN_CTRL_ENABLE       BIT(0)
++/* Only on HW I/O coherency capable platforms */
+ #define   WIN_CTRL_SYNCBARRIER  BIT(1)
+ #define   WIN_CTRL_TGT_MASK     0xf0
+ #define   WIN_CTRL_TGT_SHIFT    4
+@@ -102,9 +102,7 @@
+ 
+ /* Relative to mbusbridge_base */
+ #define MBUS_BRIDGE_CTRL_OFF	0x0
+-#define  MBUS_BRIDGE_SIZE_MASK  0xffff0000
+ #define MBUS_BRIDGE_BASE_OFF	0x4
+-#define  MBUS_BRIDGE_BASE_MASK  0xffff0000
+ 
+ /* Maximum number of windows, for all known platforms */
+ #define MBUS_WINS_MAX           20
+@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ 	ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ 		(attr << WIN_CTRL_ATTR_SHIFT)    |
+ 		(target << WIN_CTRL_TGT_SHIFT)   |
+-		WIN_CTRL_SYNCBARRIER             |
+ 		WIN_CTRL_ENABLE;
++	if (mbus->hw_io_coherency)
++		ctrl |= WIN_CTRL_SYNCBARRIER;
+ 
+ 	writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ 	writel(ctrl, addr + WIN_CTRL_OFF);
+@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
+ 		return MVEBU_MBUS_NO_REMAP;
+ }
+ 
+-/*
+- * Use the memblock information to find the MBus bridge hole in the
+- * physical address space.
+- */
+-static void __init
+-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
+-{
+-	struct memblock_region *r;
+-	uint64_t s = 0;
+-
+-	for_each_memblock(memory, r) {
+-		/*
+-		 * This part of the memory is above 4 GB, so we don't
+-		 * care for the MBus bridge hole.
+-		 */
+-		if (r->base >= 0x100000000)
+-			continue;
+-
+-		/*
+-		 * The MBus bridge hole is at the end of the RAM under
+-		 * the 4 GB limit.
+-		 */
+-		if (r->base + r->size > s)
+-			s = r->base + r->size;
+-	}
+-
+-	*start = s;
+-	*end = 0x100000000;
+-}
+-
+ static void __init
+ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+ {
+ 	int i;
+ 	int cs;
+-	uint64_t mbus_bridge_base, mbus_bridge_end;
+ 
+ 	mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+ 
+-	mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
+-
+ 	for (i = 0, cs = 0; i < 4; i++) {
+-		u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+-		u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+-		u64 end;
+-		struct mbus_dram_window *w;
+-
+-		/* Ignore entries that are not enabled */
+-		if (!(size & DDR_SIZE_ENABLED))
+-			continue;
+-
+-		/*
+-		 * Ignore entries whose base address is above 2^32,
+-		 * since devices cannot DMA to such high addresses
+-		 */
+-		if (base & DDR_BASE_CS_HIGH_MASK)
+-			continue;
+-
+-		base = base & DDR_BASE_CS_LOW_MASK;
+-		size = (size | ~DDR_SIZE_MASK) + 1;
+-		end = base + size;
+-
+-		/*
+-		 * Adjust base/size of the current CS to make sure it
+-		 * doesn't overlap with the MBus bridge hole. This is
+-		 * particularly important for devices that do DMA from
+-		 * DRAM to a SRAM mapped in a MBus window, such as the
+-		 * CESA cryptographic engine.
+-		 */
++		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
++		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+ 
+ 		/*
+-		 * The CS is fully enclosed inside the MBus bridge
+-		 * area, so ignore it.
++		 * We only take care of entries for which the chip
++		 * select is enabled, and that don't have high base
++		 * address bits set (devices can only access the first
++		 * 32 bits of the memory).
+ 		 */
+-		if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+-			continue;
++		if ((size & DDR_SIZE_ENABLED) &&
++		    !(base & DDR_BASE_CS_HIGH_MASK)) {
++			struct mbus_dram_window *w;
+ 
+-		/*
+-		 * Beginning of CS overlaps with end of MBus, raise CS
+-		 * base address, and shrink its size.
+-		 */
+-		if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+-			size -= mbus_bridge_end - base;
+-			base = mbus_bridge_end;
++			w = &mvebu_mbus_dram_info.cs[cs++];
++			w->cs_index = i;
++			w->mbus_attr = 0xf & ~(1 << i);
++			if (mbus->hw_io_coherency)
++				w->mbus_attr |= ATTR_HW_COHERENCY;
++			w->base = base & DDR_BASE_CS_LOW_MASK;
++			w->size = (size | ~DDR_SIZE_MASK) + 1;
+ 		}
+-
+-		/*
+-		 * End of CS overlaps with beginning of MBus, shrink
+-		 * CS size.
+-		 */
+-		if (base < mbus_bridge_base && end > mbus_bridge_base)
+-			size -= end - mbus_bridge_base;
+-
+-		w = &mvebu_mbus_dram_info.cs[cs++];
+-		w->cs_index = i;
+-		w->mbus_attr = 0xf & ~(1 << i);
+-		if (mbus->hw_io_coherency)
+-			w->mbus_attr |= ATTR_HW_COHERENCY;
+-		w->base = base;
+-		w->size = size;
+ 	}
+ 	mvebu_mbus_dram_info.num_cs = cs;
+ }
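
The rewritten loop above keeps the long-standing decode of the SDRAM window
registers: the size field uses a standard mask trick, where the low register
bits are reserved for flags and (size | ~DDR_SIZE_MASK) + 1 recovers the byte
count of the power-of-two window. A quick demonstration (mask and register
values hypothetical, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define DDR_SIZE_MASK	0xff000000u	/* hypothetical: high byte holds the size field */

int main(void)
{
	uint32_t reg = 0x0f000007;	/* low bits are enable/attribute flags */
	uint32_t size = (reg | ~DDR_SIZE_MASK) + 1;

	printf("decoded size = 0x%08x (%u MB)\n", size, size >> 20);
	return 0;
}
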
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index d9891d3461f6..7992164ea9ec 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -174,6 +174,8 @@
+ #define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */
+ 
+ #define AT_XDMAC_MAX_CHAN	0x20
++#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
++#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
+ 
+ #define AT_XDMAC_DMA_BUSWIDTHS\
+ 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+@@ -192,20 +194,17 @@ struct at_xdmac_chan {
+ 	struct dma_chan			chan;
+ 	void __iomem			*ch_regs;
+ 	u32				mask;		/* Channel Mask */
+-	u32				cfg[2];		/* Channel Configuration Register */
+-	#define	AT_XDMAC_DEV_TO_MEM_CFG	0		/* Predifined dev to mem channel conf */
+-	#define	AT_XDMAC_MEM_TO_DEV_CFG	1		/* Predifined mem to dev channel conf */
++	u32				cfg;		/* Channel Configuration Register */
+ 	u8				perid;		/* Peripheral ID */
+ 	u8				perif;		/* Peripheral Interface */
+ 	u8				memif;		/* Memory Interface */
+-	u32				per_src_addr;
+-	u32				per_dst_addr;
+ 	u32				save_cc;
+ 	u32				save_cim;
+ 	u32				save_cnda;
+ 	u32				save_cndc;
+ 	unsigned long			status;
+ 	struct tasklet_struct		tasklet;
++	struct dma_slave_config		sconfig;
+ 
+ 	spinlock_t			lock;
+ 
+@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+ 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
+ 	dma_cookie_t		cookie;
++	unsigned long		irqflags;
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, irqflags);
+ 	cookie = dma_cookie_assign(tx);
+ 
+ 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
+@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+ 	if (list_is_singular(&atchan->xfers_list))
+ 		at_xdmac_start_xfer(atchan, desc);
+ 
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, irqflags);
+ 	return cookie;
+ }
+ 
+@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
+ 	return chan;
+ }
+ 
++static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
++				      enum dma_transfer_direction direction)
++{
++	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
++	int			csize, dwidth;
++
++	if (direction == DMA_DEV_TO_MEM) {
++		atchan->cfg =
++			AT91_XDMAC_DT_PERID(atchan->perid)
++			| AT_XDMAC_CC_DAM_INCREMENTED_AM
++			| AT_XDMAC_CC_SAM_FIXED_AM
++			| AT_XDMAC_CC_DIF(atchan->memif)
++			| AT_XDMAC_CC_SIF(atchan->perif)
++			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
++			| AT_XDMAC_CC_DSYNC_PER2MEM
++			| AT_XDMAC_CC_MBSIZE_SIXTEEN
++			| AT_XDMAC_CC_TYPE_PER_TRAN;
++		csize = ffs(atchan->sconfig.src_maxburst) - 1;
++		if (csize < 0) {
++			dev_err(chan2dev(chan), "invalid src maxburst value\n");
++			return -EINVAL;
++		}
++		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
++		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
++		if (dwidth < 0) {
++			dev_err(chan2dev(chan), "invalid src addr width value\n");
++			return -EINVAL;
++		}
++		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
++	} else if (direction == DMA_MEM_TO_DEV) {
++		atchan->cfg =
++			AT91_XDMAC_DT_PERID(atchan->perid)
++			| AT_XDMAC_CC_DAM_FIXED_AM
++			| AT_XDMAC_CC_SAM_INCREMENTED_AM
++			| AT_XDMAC_CC_DIF(atchan->perif)
++			| AT_XDMAC_CC_SIF(atchan->memif)
++			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
++			| AT_XDMAC_CC_DSYNC_MEM2PER
++			| AT_XDMAC_CC_MBSIZE_SIXTEEN
++			| AT_XDMAC_CC_TYPE_PER_TRAN;
++		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
++		if (csize < 0) {
++			dev_err(chan2dev(chan), "invalid src maxburst value\n");
++			return -EINVAL;
++		}
++		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
++		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
++		if (dwidth < 0) {
++			dev_err(chan2dev(chan), "invalid dst addr width value\n");
++			return -EINVAL;
++		}
++		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
++	}
++
++	dev_dbg(chan2dev(chan),	"%s: cfg=0x%08x\n", __func__, atchan->cfg);
++
++	return 0;
++}
++
++/*
++ * Only check that maxburst and addr width values are supported by the
++ * the controller but not that the configuration is good to perform the
++ * transfer since we don't know the direction at this stage.
++ */
++static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
++{
++	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
++	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
++		return -EINVAL;
++
++	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
++	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
++		return -EINVAL;
++
++	return 0;
++}
++
+ static int at_xdmac_set_slave_config(struct dma_chan *chan,
+ 				      struct dma_slave_config *sconfig)
+ {
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+-	u8 dwidth;
+-	int csize;
+ 
+-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
+-		AT91_XDMAC_DT_PERID(atchan->perid)
+-		| AT_XDMAC_CC_DAM_INCREMENTED_AM
+-		| AT_XDMAC_CC_SAM_FIXED_AM
+-		| AT_XDMAC_CC_DIF(atchan->memif)
+-		| AT_XDMAC_CC_SIF(atchan->perif)
+-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+-		| AT_XDMAC_CC_DSYNC_PER2MEM
+-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
+-		| AT_XDMAC_CC_TYPE_PER_TRAN;
+-	csize = at_xdmac_csize(sconfig->src_maxburst);
+-	if (csize < 0) {
+-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
++	if (at_xdmac_check_slave_config(sconfig)) {
++		dev_err(chan2dev(chan), "invalid slave configuration\n");
+ 		return -EINVAL;
+ 	}
+-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+-	dwidth = ffs(sconfig->src_addr_width) - 1;
+-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+-
+-
+-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
+-		AT91_XDMAC_DT_PERID(atchan->perid)
+-		| AT_XDMAC_CC_DAM_FIXED_AM
+-		| AT_XDMAC_CC_SAM_INCREMENTED_AM
+-		| AT_XDMAC_CC_DIF(atchan->perif)
+-		| AT_XDMAC_CC_SIF(atchan->memif)
+-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+-		| AT_XDMAC_CC_DSYNC_MEM2PER
+-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
+-		| AT_XDMAC_CC_TYPE_PER_TRAN;
+-	csize = at_xdmac_csize(sconfig->dst_maxburst);
+-	if (csize < 0) {
+-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
+-		return -EINVAL;
+-	}
+-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+-	dwidth = ffs(sconfig->dst_addr_width) - 1;
+-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+-
+-	/* Src and dst addr are needed to configure the link list descriptor. */
+-	atchan->per_src_addr = sconfig->src_addr;
+-	atchan->per_dst_addr = sconfig->dst_addr;
+ 
+-	dev_dbg(chan2dev(chan),
+-		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
+-		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
+-		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
+-		atchan->per_src_addr, atchan->per_dst_addr);
++	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
+ 
+ 	return 0;
+ }
+@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 	struct scatterlist	*sg;
+ 	int			i;
+ 	unsigned int		xfer_size = 0;
++	unsigned long		irqflags;
++	struct dma_async_tx_descriptor	*ret = NULL;
+ 
+ 	if (!sgl)
+ 		return NULL;
+@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 		 flags);
+ 
+ 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, irqflags);
++
++	if (at_xdmac_compute_chan_conf(chan, direction))
++		goto spin_unlock;
+ 
+ 	/* Prepare descriptors. */
+ 	for_each_sg(sgl, sg, sg_len, i) {
+@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 		mem = sg_dma_address(sg);
+ 		if (unlikely(!len)) {
+ 			dev_err(chan2dev(chan), "sg data length is zero\n");
+-			spin_unlock_bh(&atchan->lock);
+-			return NULL;
++			goto spin_unlock;
+ 		}
+ 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
+ 			 __func__, i, len, mem);
+@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			dev_err(chan2dev(chan), "can't get descriptor\n");
+ 			if (first)
+ 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+-			spin_unlock_bh(&atchan->lock);
+-			return NULL;
++			goto spin_unlock;
+ 		}
+ 
+ 		/* Linked list descriptor setup. */
+ 		if (direction == DMA_DEV_TO_MEM) {
+-			desc->lld.mbr_sa = atchan->per_src_addr;
++			desc->lld.mbr_sa = atchan->sconfig.src_addr;
+ 			desc->lld.mbr_da = mem;
+-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ 		} else {
+ 			desc->lld.mbr_sa = mem;
+-			desc->lld.mbr_da = atchan->per_dst_addr;
+-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
++			desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ 		}
++		desc->lld.mbr_cfg = atchan->cfg;
+ 		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+ 		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+ 			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 		xfer_size += len;
+ 	}
+ 
+-	spin_unlock_bh(&atchan->lock);
+ 
+ 	first->tx_dma_desc.flags = flags;
+ 	first->xfer_size = xfer_size;
+ 	first->direction = direction;
++	ret = &first->tx_dma_desc;
+ 
+-	return &first->tx_dma_desc;
++spin_unlock:
++	spin_unlock_irqrestore(&atchan->lock, irqflags);
++	return ret;
+ }
+ 
+ static struct dma_async_tx_descriptor *
+@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
+ 	unsigned int		periods = buf_len / period_len;
+ 	int			i;
++	unsigned long		irqflags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
+ 		__func__, &buf_addr, buf_len, period_len,
+@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ 		return NULL;
+ 	}
+ 
++	if (at_xdmac_compute_chan_conf(chan, direction))
++		return NULL;
++
+ 	for (i = 0; i < periods; i++) {
+ 		struct at_xdmac_desc	*desc = NULL;
+ 
+-		spin_lock_bh(&atchan->lock);
++		spin_lock_irqsave(&atchan->lock, irqflags);
+ 		desc = at_xdmac_get_desc(atchan);
+ 		if (!desc) {
+ 			dev_err(chan2dev(chan), "can't get descriptor\n");
+ 			if (first)
+ 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
+-			spin_unlock_bh(&atchan->lock);
++			spin_unlock_irqrestore(&atchan->lock, irqflags);
+ 			return NULL;
+ 		}
+-		spin_unlock_bh(&atchan->lock);
++		spin_unlock_irqrestore(&atchan->lock, irqflags);
+ 		dev_dbg(chan2dev(chan),
+ 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
+ 			__func__, desc, &desc->tx_dma_desc.phys);
+ 
+ 		if (direction == DMA_DEV_TO_MEM) {
+-			desc->lld.mbr_sa = atchan->per_src_addr;
++			desc->lld.mbr_sa = atchan->sconfig.src_addr;
+ 			desc->lld.mbr_da = buf_addr + i * period_len;
+-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ 		} else {
+ 			desc->lld.mbr_sa = buf_addr + i * period_len;
+-			desc->lld.mbr_da = atchan->per_dst_addr;
+-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
++			desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ 		}
++		desc->lld.mbr_cfg = atchan->cfg;
+ 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
+ 			| AT_XDMAC_MBR_UBC_NDEN
+ 			| AT_XDMAC_MBR_UBC_NSEN
+@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ 					| AT_XDMAC_CC_SIF(0)
+ 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
+ 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
++	unsigned long		irqflags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
+ 		__func__, &src, &dest, len, flags);
+@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ 
+ 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
+ 
+-		spin_lock_bh(&atchan->lock);
++		spin_lock_irqsave(&atchan->lock, irqflags);
+ 		desc = at_xdmac_get_desc(atchan);
+-		spin_unlock_bh(&atchan->lock);
++		spin_unlock_irqrestore(&atchan->lock, irqflags);
+ 		if (!desc) {
+ 			dev_err(chan2dev(chan), "can't get descriptor\n");
+ 			if (first)
+@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	int			residue;
+ 	u32			cur_nda, mask, value;
+ 	u8			dwidth = 0;
++	unsigned long		flags;
+ 
+ 	ret = dma_cookie_status(chan, cookie, txstate);
+ 	if (ret == DMA_COMPLETE)
+@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	if (!txstate)
+ 		return ret;
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 
+ 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+ 
+@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	 */
+ 	if (!desc->active_xfer) {
+ 		dma_set_residue(txstate, desc->xfer_size);
+-		spin_unlock_bh(&atchan->lock);
+-		return ret;
++		goto spin_unlock;
+ 	}
+ 
+ 	residue = desc->xfer_size;
+@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	}
+ 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+ 
+-	spin_unlock_bh(&atchan->lock);
+-
+ 	dma_set_residue(txstate, residue);
+ 
+ 	dev_dbg(chan2dev(chan),
+ 		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
+ 		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+ 
++spin_unlock:
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 	return ret;
+ }
+ 
+@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
+ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+ {
+ 	struct at_xdmac_desc	*desc;
++	unsigned long		flags;
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 
+ 	/*
+ 	 * If channel is enabled, do nothing, advance_work will be triggered
+@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+ 			at_xdmac_start_xfer(atchan, desc);
+ 	}
+ 
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ }
+ 
+ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
+@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
+ {
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	int ret;
++	unsigned long		flags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s\n", __func__);
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 	ret = at_xdmac_set_slave_config(chan, config);
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+ 	return ret;
+ }
+@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
+ {
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
++	unsigned long		flags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s\n", __func__);
+ 
+ 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+ 		return 0;
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+ 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+ 		cpu_relax();
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1150,16 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
+ {
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
++	unsigned long		flags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s\n", __func__);
+ 
+-	spin_lock_bh(&atchan->lock);
+-	if (!at_xdmac_chan_is_paused(atchan))
++	spin_lock_irqsave(&atchan->lock, flags);
++	if (!at_xdmac_chan_is_paused(atchan)) {
++		spin_unlock_irqrestore(&atchan->lock, flags);
+ 		return 0;
++	}
+ 
+ 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1169,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ 	struct at_xdmac_desc	*desc, *_desc;
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
++	unsigned long		flags;
+ 
+ 	dev_dbg(chan2dev(chan), "%s\n", __func__);
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ 		cpu_relax();
+@@ -1182,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ 		at_xdmac_remove_xfer(atchan, desc);
+ 
+ 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1192,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	struct at_xdmac_desc	*desc;
+ 	int			i;
++	unsigned long		flags;
+ 
+-	spin_lock_bh(&atchan->lock);
++	spin_lock_irqsave(&atchan->lock, flags);
+ 
+ 	if (at_xdmac_chan_is_enabled(atchan)) {
+ 		dev_err(chan2dev(chan),
+@@ -1224,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+ 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+ 
+ spin_unlock:
+-	spin_unlock_bh(&atchan->lock);
++	spin_unlock_irqrestore(&atchan->lock, flags);
+ 	return i;
+ }
+ 
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index ac336a961dea..8e70e580c98a 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -505,7 +505,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+ 	caps->directions = device->directions;
+ 	caps->residue_granularity = device->residue_granularity;
+ 
+-	caps->cmd_pause = !!device->device_pause;
++	/*
++	 * Some devices implement only pause (e.g. to get residuum) but no
++	 * resume. However cmd_pause is advertised as pause AND resume.
++	 */
++	caps->cmd_pause = !!(device->device_pause && device->device_resume);
+ 	caps->cmd_terminate = !!device->device_terminate_all;
+ 
+ 	return 0;
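
The capability fix above is a plain conjunction: advertise cmd_pause only when
the driver implements both pause and resume, since consumers take the flag to
mean both operations. A minimal model of the guard (struct and names
hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dma_dev_model {
	int (*pause)(void);
	int (*resume)(void);
};

/* Advertise pause only if both operations exist. */
static bool cmd_pause(const struct dma_dev_model *d)
{
	return d->pause && d->resume;
}

static int stub(void) { return 0; }

int main(void)
{
	struct dma_dev_model pause_only = { stub, NULL };
	struct dma_dev_model full = { stub, stub };

	printf("pause-only: %d, pause+resume: %d\n",
	       cmd_pause(&pause_only), cmd_pause(&full));
	return 0;
}
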
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 0e1f56772855..a2771a8d4377 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ 	struct pl330_dmac *pl330 = pch->dmac;
+ 	LIST_HEAD(list);
+ 
++	pm_runtime_get_sync(pl330->ddma.dev);
+ 	spin_lock_irqsave(&pch->lock, flags);
+ 	spin_lock(&pl330->lock);
+ 	_stop(pch->thread);
+@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ 	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+ 	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+ 	spin_unlock_irqrestore(&pch->lock, flags);
++	pm_runtime_mark_last_busy(pl330->ddma.dev);
++	pm_runtime_put_autosuspend(pl330->ddma.dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 406624a0b201..340e21918f33 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ 			dev->node_props.cpu_core_id_base);
+ 	sysfs_show_32bit_prop(buffer, "simd_id_base",
+ 			dev->node_props.simd_id_base);
+-	sysfs_show_32bit_prop(buffer, "capability",
+-			dev->node_props.capability);
+ 	sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ 			dev->node_props.max_waves_per_simd);
+ 	sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+@@ -735,6 +733,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ 				kfd2kgd->get_fw_version(
+ 						dev->gpu->kgd,
+ 						KGD_ENGINE_MEC1));
++		sysfs_show_32bit_prop(buffer, "capability",
++				dev->node_props.capability);
+ 	}
+ 
+ 	return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 27ea6bdebce7..7a628e4cb27a 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2732,9 +2732,6 @@ void i915_gem_reset(struct drm_device *dev)
+ void
+ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+ {
+-	if (list_empty(&ring->request_list))
+-		return;
+-
+ 	WARN_ON(i915_verify_lists(ring->dev));
+ 
+ 	/* Retire requests first as we use it above for the early return.
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 88b36a9173c9..336e8b63ca08 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -881,10 +881,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 				      DP_AUX_CH_CTL_RECEIVE_ERROR))
+ 				continue;
+ 			if (status & DP_AUX_CH_CTL_DONE)
+-				break;
++				goto done;
+ 		}
+-		if (status & DP_AUX_CH_CTL_DONE)
+-			break;
+ 	}
+ 
+ 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+@@ -893,6 +891,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 		goto out;
+ 	}
+ 
++done:
+ 	/* Check for timeout or receive error.
+ 	 * Timeouts occur when the sink is not connected
+ 	 */
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index 56e437e31580..ae628001fd97 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 					       struct intel_gmbus,
+ 					       adapter);
+ 	struct drm_i915_private *dev_priv = bus->dev_priv;
+-	int i, reg_offset;
++	int i = 0, inc, try = 0, reg_offset;
+ 	int ret = 0;
+ 
+ 	intel_aux_display_runtime_get(dev_priv);
+@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 
+ 	reg_offset = dev_priv->gpio_mmio_base;
+ 
++retry:
+ 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+ 
+-	for (i = 0; i < num; i++) {
++	for (; i < num; i += inc) {
++		inc = 1;
+ 		if (gmbus_is_index_read(msgs, i, num)) {
+ 			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+-			i += 1;  /* set i to the index of the read xfer */
++			inc = 2; /* an index read is two msgs */
+ 		} else if (msgs[i].flags & I2C_M_RD) {
+ 			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ 		} else {
+@@ -525,6 +527,18 @@ clear_err:
+ 			 adapter->name, msgs[i].addr,
+ 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ 
++	/*
++	 * Passive adapters sometimes NAK the first probe. Retry the first
++	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
++	 * has retries internally. See also the retry loop in
++	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
++	 */
++	if (ret == -ENXIO && i == 0 && try++ == 0) {
++		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
++			      adapter->name);
++		goto retry;
++	}
++
+ 	goto out;
+ 
+ timeout:
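
The retry added above is the classic retry-once pattern: keep a counter across
a backwards goto and only loop when the first message NAKed. A compact
standalone rendering of the same control flow (the transfer function is a
stand-in, not the real GMBUS path):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stand-in transfer: NAK (-ENXIO) on the first probe only. */
static int do_xfer(void)
{
	return attempts++ == 0 ? -ENXIO : 0;
}

int main(void)
{
	int try = 0, ret;

retry:
	ret = do_xfer();
	if (ret == -ENXIO && try++ == 0) {
		fprintf(stderr, "NAK on first message, retrying\n");
		goto retry;
	}
	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return 0;
}
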
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 965a45619f6b..9bd56116fd5a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 		else
+ 			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ 
+-		/* if there is no audio, set MINM_OVER_MAXP  */
+-		if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
+-			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ 		if (rdev->family < CHIP_RV770)
+ 			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ 		/* use frac fb div on APUs */
+@@ -1789,9 +1786,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ 			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+ 			    (adjusted_clock == test_adjusted_clock) &&
+ 			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+-			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
+-			    (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
+-			     drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
++			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+ 				return test_radeon_crtc->pll_id;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+index f04205170b8a..cfa3a84a2af0 100644
+--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
++++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ 	struct drm_device *dev = encoder->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+ 
+-	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
++	WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
+ 		HDMI0_ACR_SOURCE |		/* select SW CTS value */
+ 		HDMI0_ACR_AUTO_SEND);	/* allow hw to sent ACR packets when required */
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index bd7519fdd3f4..aa232fd25992 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1458,6 +1458,21 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	if (r)
+ 		DRM_ERROR("ib ring test failed (%d).\n", r);
+ 
++	/*
++	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
++	 * after the CP ring have chew one packet at least. Hence here we stop
++	 * and restart DPM after the radeon_ib_ring_tests().
++	 */
++	if (rdev->pm.dpm_enabled &&
++	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
++	    (rdev->family == CHIP_TURKS) &&
++	    (rdev->flags & RADEON_IS_MOBILITY)) {
++		mutex_lock(&rdev->pm.mutex);
++		radeon_dpm_disable(rdev);
++		radeon_dpm_enable(rdev);
++		mutex_unlock(&rdev->pm.mutex);
++	}
++
+ 	if ((radeon_testing & 1)) {
+ 		if (rdev->accel_working)
+ 			radeon_test_moves(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index de42fc4a22b8..9c3377ca17b7 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 		/* make sure object fit at this offset */
+ 		eoffset = soffset + size;
+ 		if (soffset >= eoffset) {
+-			return -EINVAL;
++			r = -EINVAL;
++			goto error_unreserve;
+ 		}
+ 
+ 		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ 		if (last_pfn > rdev->vm_manager.max_pfn) {
+ 			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ 				last_pfn, rdev->vm_manager.max_pfn);
+-			return -EINVAL;
++			r = -EINVAL;
++			goto error_unreserve;
+ 		}
+ 
+ 	} else {
+@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ 				soffset, tmp->bo, tmp->it.start, tmp->it.last);
+ 			mutex_unlock(&vm->mutex);
+-			return -EINVAL;
++			r = -EINVAL;
++			goto error_unreserve;
+ 		}
+ 	}
+ 
+@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ 			if (!tmp) {
+ 				mutex_unlock(&vm->mutex);
+-				return -ENOMEM;
++				r = -ENOMEM;
++				goto error_unreserve;
+ 			}
+ 			tmp->it.start = bo_va->it.start;
+ 			tmp->it.last = bo_va->it.last;
+@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 		r = radeon_vm_clear_bo(rdev, pt);
+ 		if (r) {
+ 			radeon_bo_unref(&pt);
+-			radeon_bo_reserve(bo_va->bo, false);
+ 			return r;
+ 		}
+ 
+@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 
+ 	mutex_unlock(&vm->mutex);
+ 	return 0;
++
++error_unreserve:
++	radeon_bo_unreserve(bo_va->bo);
++	return r;
+ }
+ 
+ /**
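
The error-path rework above is the standard single-exit cleanup idiom: every
failure funnels through one error_unreserve label so the buffer reservation is
always dropped on failure (the old paths returned without it, and one even
re-reserved). The shape of the idiom, standalone (resources and errno values
invented):

#include <stdio.h>

static void unreserve(void)
{
	puts("unreserved");
}

static int set_addr(int valid_offset, int have_memory)
{
	int r;

	/* A reservation is held on entry, as in the real function. */
	if (!valid_offset) {
		r = -22;	/* -EINVAL */
		goto error_unreserve;
	}
	if (!have_memory) {
		r = -12;	/* -ENOMEM */
		goto error_unreserve;
	}
	return 0;	/* success: caller keeps and later drops the reservation */

error_unreserve:
	unreserve();
	return r;
}

int main(void)
{
	printf("ok=%d bad=%d\n", set_addr(1, 1), set_addr(0, 1));
	return 0;
}
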
+diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
+index 8fe78d08e01c..7c6966434ee7 100644
+--- a/drivers/i2c/busses/i2c-hix5hd2.c
++++ b/drivers/i2c/busses/i2c-hix5hd2.c
+@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
+ MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
+ MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform:i2c-hix5hd2");
++MODULE_ALIAS("platform:hix5hd2-i2c");
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 958c8db4ec30..297e9c9ac943 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	i2c->quirks = s3c24xx_get_device_quirks(pdev);
++	i2c->sysreg = ERR_PTR(-ENOENT);
+ 	if (pdata)
+ 		memcpy(i2c->pdata, pdata, sizeof(*pdata));
+ 	else
+diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
+index 89d8aa1d2818..df12c57e6ce0 100644
+--- a/drivers/iio/adc/twl6030-gpadc.c
++++ b/drivers/iio/adc/twl6030-gpadc.c
+@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
+ 
+ module_platform_driver(twl6030_gpadc_driver);
+ 
+-MODULE_ALIAS("platform: " DRIVER_NAME);
++MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
+ MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+ MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
+diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
+index 0916bf6b6c31..73b189c1c0fb 100644
+--- a/drivers/iio/imu/adis16400.h
++++ b/drivers/iio/imu/adis16400.h
+@@ -139,6 +139,7 @@
+ #define ADIS16400_NO_BURST		BIT(1)
+ #define ADIS16400_HAS_SLOW_MODE		BIT(2)
+ #define ADIS16400_HAS_SERIAL_NUMBER	BIT(3)
++#define ADIS16400_BURST_DIAG_STAT	BIT(4)
+ 
+ struct adis16400_state;
+ 
+@@ -165,6 +166,7 @@ struct adis16400_state {
+ 	int				filt_int;
+ 
+ 	struct adis adis;
++	unsigned long avail_scan_mask[2];
+ };
+ 
+ /* At the moment triggers are only used for ring buffer
+diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
+index 6e727ffe5262..90c24a23c679 100644
+--- a/drivers/iio/imu/adis16400_buffer.c
++++ b/drivers/iio/imu/adis16400_buffer.c
+@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ {
+ 	struct adis16400_state *st = iio_priv(indio_dev);
+ 	struct adis *adis = &st->adis;
+-	uint16_t *tx;
++	unsigned int burst_length;
++	u8 *tx;
+ 
+ 	if (st->variant->flags & ADIS16400_NO_BURST)
+ 		return adis_update_scan_mode(indio_dev, scan_mask);
+@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ 	kfree(adis->xfer);
+ 	kfree(adis->buffer);
+ 
++	/* All but the timestamp channel */
++	burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
++	if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
++		burst_length += sizeof(u16);
++
+ 	adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
+ 	if (!adis->xfer)
+ 		return -ENOMEM;
+ 
+-	adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
+-		GFP_KERNEL);
++	adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+ 	if (!adis->buffer)
+ 		return -ENOMEM;
+ 
+-	tx = adis->buffer + indio_dev->scan_bytes;
+-
++	tx = adis->buffer + burst_length;
+ 	tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
+ 	tx[1] = 0;
+ 
+ 	adis->xfer[0].tx_buf = tx;
+ 	adis->xfer[0].bits_per_word = 8;
+ 	adis->xfer[0].len = 2;
+-	adis->xfer[1].tx_buf = tx;
++	adis->xfer[1].rx_buf = adis->buffer;
+ 	adis->xfer[1].bits_per_word = 8;
+-	adis->xfer[1].len = indio_dev->scan_bytes;
++	adis->xfer[1].len = burst_length;
+ 
+ 	spi_message_init(&adis->msg);
+ 	spi_message_add_tail(&adis->xfer[0], &adis->msg);
+@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
+ 	struct adis16400_state *st = iio_priv(indio_dev);
+ 	struct adis *adis = &st->adis;
+ 	u32 old_speed_hz = st->adis.spi->max_speed_hz;
++	void *buffer;
+ 	int ret;
+ 
+ 	if (!adis->buffer)
+@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
+ 		spi_setup(st->adis.spi);
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
++	if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
++		buffer = adis->buffer + sizeof(u16);
++	else
++		buffer = adis->buffer;
++
++	iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ 		pf->timestamp);
+ 
+ 	iio_trigger_notify_done(indio_dev->trig);
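
The sizing logic above allocates one 16-bit word per hardware channel — every
channel except the software timestamp — plus a leading extra word when the
burst also carries DIAG_STAT, which the trigger handler then skips before
pushing samples to the buffer. The arithmetic, spelled out (channel count
hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned num_channels = 13;	/* hypothetical: 12 data channels + timestamp */
	int has_diag_stat = 1;		/* chip flags ADIS16400_BURST_DIAG_STAT */

	size_t burst = (num_channels - 1) * sizeof(uint16_t);
	if (has_diag_stat)
		burst += sizeof(uint16_t);	/* leading DIAG_STAT word */

	printf("burst length = %zu bytes\n", burst);	/* 26 here */
	return 0;
}
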
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index fa795dcd5f75..2fd68f2219a7 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 			*val = st->variant->temp_scale_nano / 1000000;
+ 			*val2 = (st->variant->temp_scale_nano % 1000000);
+ 			return IIO_VAL_INT_PLUS_MICRO;
++		case IIO_PRESSURE:
++			/* 20 uBar = 0.002kPascal */
++			*val = 0;
++			*val2 = 2000;
++			return IIO_VAL_INT_PLUS_MICRO;
+ 		default:
+ 			return -EINVAL;
+ 		}
+@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 	}
+ }
+ 
+-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
++#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
+ 	.type = IIO_VOLTAGE, \
+ 	.indexed = 1, \
+-	.channel = 0, \
++	.channel = chn, \
+ 	.extend_name = name, \
+ 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ 		BIT(IIO_CHAN_INFO_SCALE), \
+@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ }
+ 
+ #define ADIS16400_SUPPLY_CHAN(addr, bits) \
+-	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
++	ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
+ 
+ #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
+-	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
++	ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
+ 
+ #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
+ 	.type = IIO_ANGL_VEL, \
+@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
+ 		.channels = adis16448_channels,
+ 		.num_channels = ARRAY_SIZE(adis16448_channels),
+ 		.flags = ADIS16400_HAS_PROD_ID |
+-				ADIS16400_HAS_SERIAL_NUMBER,
++				ADIS16400_HAS_SERIAL_NUMBER |
++				ADIS16400_BURST_DIAG_STAT,
+ 		.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ 		.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
+ 		.temp_scale_nano = 73860000, /* 0.07386 C */
+@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
+ 	.debugfs_reg_access = adis_debugfs_reg_access,
+ };
+ 
+-static const unsigned long adis16400_burst_scan_mask[] = {
+-	~0UL,
+-	0,
+-};
+-
+ static const char * const adis16400_status_error_msgs[] = {
+ 	[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ 	[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
+ 		BIT(ADIS16400_DIAG_STAT_POWER_LOW),
+ };
+ 
++static void adis16400_setup_chan_mask(struct adis16400_state *st)
++{
++	const struct adis16400_chip_info *chip_info = st->variant;
++	unsigned i;
++
++	for (i = 0; i < chip_info->num_channels; i++) {
++		const struct iio_chan_spec *ch = &chip_info->channels[i];
++
++		if (ch->scan_index >= 0 &&
++		    ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
++			st->avail_scan_mask[0] |= BIT(ch->scan_index);
++	}
++}
++
+ static int adis16400_probe(struct spi_device *spi)
+ {
+ 	struct adis16400_state *st;
+@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
+ 	indio_dev->info = &adis16400_info;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 
+-	if (!(st->variant->flags & ADIS16400_NO_BURST))
+-		indio_dev->available_scan_masks = adis16400_burst_scan_mask;
++	if (!(st->variant->flags & ADIS16400_NO_BURST)) {
++		adis16400_setup_chan_mask(st);
++		indio_dev->available_scan_masks = st->avail_scan_mask;
++	}
+ 
+ 	ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ 	if (ret)
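
The dropped adis16400_burst_scan_mask (a blanket ~0UL) claimed every scan bit was valid in burst mode; that stops holding once variants differ, and this same patch marks the adis16448 as carrying an extra DIAG_STAT word in its burst frame (ADIS16400_BURST_DIAG_STAT). adis16400_setup_chan_mask() instead ORs together only the scan indices the variant really exposes. A minimal userspace sketch of that computation, with the channel table and timestamp sentinel invented for illustration:

	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define SCAN_TIMESTAMP	31	/* assumed sentinel index */

	struct chan {
		const char *name;
		int scan_index;
	};

	static unsigned long build_scan_mask(const struct chan *ch, int n)
	{
		unsigned long mask = 0;
		int i;

		for (i = 0; i < n; i++) {
			/* skip channels with no scan element and the timestamp */
			if (ch[i].scan_index >= 0 &&
			    ch[i].scan_index != SCAN_TIMESTAMP)
				mask |= BIT(ch[i].scan_index);
		}
		return mask;
	}

	int main(void)
	{
		const struct chan chans[] = {
			{ "supply",    0 },
			{ "gyro_x",    1 },
			{ "pressure", 10 },
			{ "timestamp", SCAN_TIMESTAMP },
		};

		printf("mask = %#lx\n", build_scan_mask(chans, 4)); /* 0x403 */
		return 0;
	}
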
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index ea6cb64dfb28..d5335e664240 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1042,9 +1042,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
+ 	right = (packet[1] & 0x02) >> 1;
+ 	middle = (packet[1] & 0x04) >> 2;
+ 
+-	/* Divide 2 since trackpoint's speed is too fast */
+-	input_report_rel(dev2, REL_X, (char)x / 2);
+-	input_report_rel(dev2, REL_Y, -((char)y / 2));
++	input_report_rel(dev2, REL_X, (char)x);
++	input_report_rel(dev2, REL_Y, -((char)y));
+ 
+ 	input_report_key(dev2, BTN_LEFT, left);
+ 	input_report_key(dev2, BTN_RIGHT, right);
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 79363b687195..ce3d40004458 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
+ 		return true;
+ 
+ 	/*
+-	 * Some models have a revision higher then 20. Meaning param[2] may
+-	 * be 10 or 20, skip the rates check for these.
++	 * Some hw_version >= 4 models have a revision higher than 20, meaning
++	 * that param[2] may be 10 or 20; skip the rates check for these.
+ 	 */
+-	if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
++	if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
++	    param[2] < 40)
+ 		return true;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(rates); i++)
+@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+ 		case 9:
+ 		case 10:
+ 		case 13:
++		case 14:
+ 			etd->hw_version = 4;
+ 			break;
+ 		default:
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 3b06c8a360b6..907ac9bdd763 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -148,6 +148,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
+ 		1024, 5112, 2024, 4832
+ 	},
+ 	{
++		(const char * const []){"LEN2000", NULL},
++		{ANY_BOARD_ID, ANY_BOARD_ID},
++		1024, 5113, 2021, 4832
++	},
++	{
+ 		(const char * const []){"LEN2001", NULL},
+ 		{ANY_BOARD_ID, ANY_BOARD_ID},
+ 		1024, 5022, 2508, 4832
+@@ -188,7 +193,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
+ 	"LEN0045",
+ 	"LEN0047",
+ 	"LEN0049",
+-	"LEN2000",
++	"LEN2000", /* S540 */
+ 	"LEN2001", /* Edge E431 */
+ 	"LEN2002", /* Edge E531 */
+ 	"LEN2003",
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 2d1e05bdbb53..272149d66f5b 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -50,6 +50,7 @@
+ #define CONTEXT_SIZE		VTD_PAGE_SIZE
+ 
+ #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
++#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
+ #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+ #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
+ 
+@@ -672,6 +673,11 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
+ 	domain->iommu_superpage = domain_update_iommu_superpage(NULL);
+ }
+ 
++static int iommu_dummy(struct device *dev)
++{
++	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
++}
++
+ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+ {
+ 	struct dmar_drhd_unit *drhd = NULL;
+@@ -681,6 +687,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
+ 	u16 segment = 0;
+ 	int i;
+ 
++	if (iommu_dummy(dev))
++		return NULL;
++
+ 	if (dev_is_pci(dev)) {
+ 		pdev = to_pci_dev(dev);
+ 		segment = pci_domain_nr(pdev->bus);
+@@ -2554,6 +2563,10 @@ static bool device_has_rmrr(struct device *dev)
+  * In both cases we assume that PCI USB devices with RMRRs have them largely
+  * for historical reasons and that the RMRR space is not actively used post
+  * boot.  This exclusion may change if vendors begin to abuse it.
++ *
++ * The same exception is made for graphics devices, with the requirement that
++ * any use of the RMRR regions will be torn down before assigning the device
++ * to a guest.
+  */
+ static bool device_is_rmrr_locked(struct device *dev)
+ {
+@@ -2563,7 +2576,7 @@ static bool device_is_rmrr_locked(struct device *dev)
+ 	if (dev_is_pci(dev)) {
+ 		struct pci_dev *pdev = to_pci_dev(dev);
+ 
+-		if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
++		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
+ 			return false;
+ 	}
+ 
+@@ -2969,11 +2982,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
+ 	return __get_valid_domain_for_dev(dev);
+ }
+ 
+-static int iommu_dummy(struct device *dev)
+-{
+-	return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+-}
+-
+ /* Check if the dev needs to go through non-identity map and unmap process.*/
+ static int iommu_no_mapping(struct device *dev)
+ {
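
The new IS_USB_DEVICE() follows the existing IS_GFX_DEVICE()/IS_ISA_DEVICE() helpers: pdev->class holds the 24-bit PCI class code, so shifting by 16 isolates the base class and shifting by 8 the base/sub-class pair. A standalone illustration of the two tests used by device_is_rmrr_locked() (class values are examples):

	#include <stdio.h>

	#define PCI_BASE_CLASS_DISPLAY	0x03
	#define PCI_CLASS_SERIAL_USB	0x0c03

	static int is_gfx(unsigned int class)
	{
		return (class >> 16) == PCI_BASE_CLASS_DISPLAY;
	}

	static int is_usb(unsigned int class)
	{
		return (class >> 8) == PCI_CLASS_SERIAL_USB;
	}

	int main(void)
	{
		unsigned int xhci = 0x0c0330;	/* serial bus / USB / xHCI prog-if */
		unsigned int vga  = 0x030000;	/* display / VGA */

		printf("xhci: usb=%d gfx=%d\n", is_usb(xhci), is_gfx(xhci));
		printf("vga:  usb=%d gfx=%d\n", is_usb(vga),  is_gfx(vga));
		return 0;
	}
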
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index 4a9ce5b50c5b..6b2b582433bd 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+ 	irqd_set_trigger_type(data, flow_type);
+ 	irq_setup_alt_chip(data, flow_type);
+ 
+-	for (i = 0; i <= gc->num_ct; i++, ct++)
++	for (i = 0; i < gc->num_ct; i++, ct++)
+ 		if (ct->type & flow_type)
+ 			ctrl_off = ct->regs.type;
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 907534b7f40d..b7bf8ee857fa 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3765,7 +3765,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ 				err = -EBUSY;
+ 		}
+ 		spin_unlock(&mddev->lock);
+-		return err;
++		return err ?: len;
+ 	}
+ 	err = mddev_lock(mddev);
+ 	if (err)
+@@ -4144,13 +4144,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ 			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ 		else
+ 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+-		flush_workqueue(md_misc_wq);
+-		if (mddev->sync_thread) {
+-			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+-			if (mddev_lock(mddev) == 0) {
++		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
++		    mddev_lock(mddev) == 0) {
++			flush_workqueue(md_misc_wq);
++			if (mddev->sync_thread) {
++				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 				md_reap_sync_thread(mddev);
+-				mddev_unlock(mddev);
+ 			}
++			mddev_unlock(mddev);
+ 		}
+ 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
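
The `return err ?: len;` uses GCC's conditional with an omitted middle operand: if err is nonzero it is returned, otherwise len is. That matters here because a sysfs ->store handler signals success by returning the number of bytes it consumed; the old `return err;` returned 0 on success, which userspace treats as "nothing written" and retries. A userspace demo (GNU C extension, so GCC or Clang):

	#include <stdio.h>

	static long store_result(int err, long len)
	{
		return err ?: len;	/* err ? err : len, with err evaluated once */
	}

	int main(void)
	{
		printf("%ld\n", store_result(0, 42));	/* 42: success, bytes consumed */
		printf("%ld\n", store_result(-16, 42));	/* -16: the error (-EBUSY) */
		return 0;
	}
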
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 4df28943d222..e8d3c1d35453 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
+ out:
+ 	if (ret)
+ 		bond_opt_error_interpret(bond, opt, ret, val);
+-	else
++	else if (bond->dev->reg_state == NETREG_REGISTERED)
+ 		call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
+ 
+ 	return ret;
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index 7f05f309e935..da36bcf32404 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -1773,9 +1773,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+ 	total_size = buf_len;
+ 
+ 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+-	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
+-					      get_fat_cmd.size,
+-					      &get_fat_cmd.dma);
++	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					     get_fat_cmd.size,
++					     &get_fat_cmd.dma, GFP_ATOMIC);
+ 	if (!get_fat_cmd.va) {
+ 		dev_err(&adapter->pdev->dev,
+ 			"Memory allocation failure while reading FAT data\n");
+@@ -1820,8 +1820,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+ 		log_offset += buf_size;
+ 	}
+ err:
+-	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
+-			    get_fat_cmd.va, get_fat_cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
++			  get_fat_cmd.va, get_fat_cmd.dma);
+ 	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+ }
+@@ -2272,12 +2272,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ 		return -EINVAL;
+ 
+ 	cmd.size = sizeof(struct be_cmd_resp_port_type);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
+ 		return -ENOMEM;
+ 	}
+-	memset(cmd.va, 0, cmd.size);
+ 
+ 	spin_lock_bh(&adapter->mcc_lock);
+ 
+@@ -2302,7 +2302,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ 	}
+ err:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ 	return status;
+ }
+ 
+@@ -2777,7 +2777,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
+ 		goto err;
+ 	}
+ 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ 		status = -ENOMEM;
+@@ -2811,7 +2812,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
+ 				BE_SUPPORTED_SPEED_1GBPS;
+ 		}
+ 	}
+-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ err:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+ 	return status;
+@@ -2862,8 +2863,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+ 
+ 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+-	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+-					      &attribs_cmd.dma);
++	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					     attribs_cmd.size,
++					     &attribs_cmd.dma, GFP_ATOMIC);
+ 	if (!attribs_cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ 		status = -ENOMEM;
+@@ -2890,8 +2892,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+ err:
+ 	mutex_unlock(&adapter->mbox_lock);
+ 	if (attribs_cmd.va)
+-		pci_free_consistent(adapter->pdev, attribs_cmd.size,
+-				    attribs_cmd.va, attribs_cmd.dma);
++		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
++				  attribs_cmd.va, attribs_cmd.dma);
+ 	return status;
+ }
+ 
+@@ -3029,9 +3031,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ 
+ 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
+ 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
+-	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
+-						   get_mac_list_cmd.size,
+-						   &get_mac_list_cmd.dma);
++	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++						  get_mac_list_cmd.size,
++						  &get_mac_list_cmd.dma,
++						  GFP_ATOMIC);
+ 
+ 	if (!get_mac_list_cmd.va) {
+ 		dev_err(&adapter->pdev->dev,
+@@ -3104,8 +3107,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ 
+ out:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+-	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
+-			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
++			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ 	return status;
+ }
+ 
+@@ -3158,8 +3161,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+-	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+-				    &cmd.dma, GFP_KERNEL);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_KERNEL);
+ 	if (!cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -3348,7 +3351,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ 		status = -ENOMEM;
+@@ -3383,7 +3387,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
+ err:
+ 	mutex_unlock(&adapter->mbox_lock);
+ 	if (cmd.va)
+-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++				  cmd.dma);
+ 	return status;
+ 
+ }
+@@ -3397,8 +3402,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+ 
+ 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+-					     &extfat_cmd.dma);
++	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					    extfat_cmd.size, &extfat_cmd.dma,
++					    GFP_ATOMIC);
+ 	if (!extfat_cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -3420,8 +3426,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
+ 
+ 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+ err:
+-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+-			    extfat_cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
++			  extfat_cmd.dma);
+ 	return status;
+ }
+ 
+@@ -3434,8 +3440,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+ 
+ 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+-	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+-					     &extfat_cmd.dma);
++	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					    extfat_cmd.size, &extfat_cmd.dma,
++					    GFP_ATOMIC);
+ 
+ 	if (!extfat_cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+@@ -3453,8 +3460,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
+ 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ 		}
+ 	}
+-	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+-			    extfat_cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
++			  extfat_cmd.dma);
+ err:
+ 	return level;
+ }
+@@ -3652,7 +3659,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va) {
+ 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ 		status = -ENOMEM;
+@@ -3692,7 +3700,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
+ err:
+ 	mutex_unlock(&adapter->mbox_lock);
+ 	if (cmd.va)
+-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++				  cmd.dma);
+ 	return status;
+ }
+ 
+@@ -3713,7 +3722,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -3752,7 +3762,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
+ 		res->vf_if_cap_flags = vf_res->cap_flags;
+ err:
+ 	if (cmd.va)
+-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++				  cmd.dma);
+ 	return status;
+ }
+ 
+@@ -3767,7 +3778,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ 
+ 	memset(&cmd, 0, sizeof(struct be_dma_mem));
+ 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++				     GFP_ATOMIC);
+ 	if (!cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -3783,7 +3795,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ 	status = be_cmd_notify_wait(adapter, &wrb);
+ 
+ 	if (cmd.va)
+-		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++				  cmd.dma);
+ 	return status;
+ }
+ 
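
Every hunk in this file follows one conversion: pci_alloc_consistent() is a legacy wrapper that calls dma_alloc_coherent() with a hard-coded GFP_ATOMIC and hands back memory that still needs clearing. dma_zalloc_coherent() makes the atomic allocation explicit (most of these paths run under adapter->mcc_lock) and zeroes the buffer itself, which is why the stand-alone memset() in be_cmd_read_port_transceiver_data() disappears. A userspace analogue of folding allocate-plus-clear into a single helper (names invented):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* old pattern: allocate, then remember to clear at every call site */
	static void *alloc_cmd_old(size_t size)
	{
		void *va = malloc(size);

		if (va)
			memset(va, 0, size);	/* easy to forget */
		return va;
	}

	/* new pattern: one call that always returns zeroed memory */
	static void *alloc_cmd_zeroed(size_t size)
	{
		return calloc(1, size);
	}

	int main(void)
	{
		unsigned char *a = alloc_cmd_old(64);
		unsigned char *b = alloc_cmd_zeroed(64);

		if (!a || !b)
			return 1;
		printf("a[0]=%u b[0]=%u\n", a[0], b[0]);	/* both 0 */
		free(a);
		free(b);
		return 0;
	}

calloc() plays the role dma_zalloc_coherent() plays in the kernel: zeroing becomes a property of the allocator rather than a duty of each caller.
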
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 4d2de4700769..22ffcd81a6b5 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ 	int status = 0;
+ 
+ 	read_cmd.size = LANCER_READ_FILE_CHUNK;
+-	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+-					   &read_cmd.dma);
++	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
++					  &read_cmd.dma, GFP_ATOMIC);
+ 
+ 	if (!read_cmd.va) {
+ 		dev_err(&adapter->pdev->dev,
+@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ 			break;
+ 		}
+ 	}
+-	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+-			    read_cmd.dma);
++	dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
++			  read_cmd.dma);
+ 
+ 	return status;
+ }
+@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
+ 	};
+ 
+ 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+-	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+-					   &ddrdma_cmd.dma, GFP_KERNEL);
++	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					    ddrdma_cmd.size, &ddrdma_cmd.dma,
++					    GFP_KERNEL);
+ 	if (!ddrdma_cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
+ 
+ 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+-	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+-					   &eeprom_cmd.dma, GFP_KERNEL);
++	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++					    eeprom_cmd.size, &eeprom_cmd.dma,
++					    GFP_KERNEL);
+ 
+ 	if (!eeprom_cmd.va)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index e6b790f0d9dc..893753f18098 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -4392,8 +4392,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
+ 
+ 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ 				+ LANCER_FW_DOWNLOAD_CHUNK;
+-	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
+-					  &flash_cmd.dma, GFP_KERNEL);
++	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
++					   &flash_cmd.dma, GFP_KERNEL);
+ 	if (!flash_cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -4526,8 +4526,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
+ 	}
+ 
+ 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+-	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+-					  GFP_KERNEL);
++	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
++					   GFP_KERNEL);
+ 	if (!flash_cmd.va)
+ 		return -ENOMEM;
+ 
+@@ -4941,10 +4941,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
+ 		goto done;
+ 
+ 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+-	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+-						mbox_mem_alloc->size,
+-						&mbox_mem_alloc->dma,
+-						GFP_KERNEL);
++	mbox_mem_alloc->va = dma_zalloc_coherent(&adapter->pdev->dev,
++						 mbox_mem_alloc->size,
++						 &mbox_mem_alloc->dma,
++						 GFP_KERNEL);
+ 	if (!mbox_mem_alloc->va) {
+ 		status = -ENOMEM;
+ 		goto unmap_pci_bars;
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index e22e602beef3..c5789cdf7778 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -47,7 +47,7 @@
+ #define PSF_TX		0x1000
+ #define EXT_EVENT	1
+ #define CAL_EVENT	7
+-#define CAL_TRIGGER	7
++#define CAL_TRIGGER	1
+ #define DP83640_N_PINS	12
+ 
+ #define MII_DP83640_MICR 0x11
+@@ -495,7 +495,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
+ 			else
+ 				evnt |= EVNT_RISE;
+ 		}
++		mutex_lock(&clock->extreg_lock);
+ 		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
++		mutex_unlock(&clock->extreg_lock);
+ 		return 0;
+ 
+ 	case PTP_CLK_REQ_PEROUT:
+@@ -531,6 +533,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
+ 
+ static void enable_status_frames(struct phy_device *phydev, bool on)
+ {
++	struct dp83640_private *dp83640 = phydev->priv;
++	struct dp83640_clock *clock = dp83640->clock;
+ 	u16 cfg0 = 0, ver;
+ 
+ 	if (on)
+@@ -538,9 +542,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
+ 
+ 	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
+ 
++	mutex_lock(&clock->extreg_lock);
++
+ 	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
+ 	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
+ 
++	mutex_unlock(&clock->extreg_lock);
++
+ 	if (!phydev->attached_dev) {
+ 		pr_warn("expected to find an attached netdevice\n");
+ 		return;
+@@ -837,7 +845,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ 	list_del_init(&rxts->list);
+ 	phy2rxts(phy_rxts, rxts);
+ 
+-	spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
++	spin_lock(&dp83640->rx_queue.lock);
+ 	skb_queue_walk(&dp83640->rx_queue, skb) {
+ 		struct dp83640_skb_info *skb_info;
+ 
+@@ -852,7 +860,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ 			break;
+ 		}
+ 	}
+-	spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
++	spin_unlock(&dp83640->rx_queue.lock);
+ 
+ 	if (!shhwtstamps)
+ 		list_add_tail(&rxts->list, &dp83640->rxts);
+@@ -1172,11 +1180,18 @@ static int dp83640_config_init(struct phy_device *phydev)
+ 
+ 	if (clock->chosen && !list_empty(&clock->phylist))
+ 		recalibrate(clock);
+-	else
++	else {
++		mutex_lock(&clock->extreg_lock);
+ 		enable_broadcast(phydev, clock->page, 1);
++		mutex_unlock(&clock->extreg_lock);
++	}
+ 
+ 	enable_status_frames(phydev, true);
++
++	mutex_lock(&clock->extreg_lock);
+ 	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
++	mutex_unlock(&clock->extreg_lock);
++
+ 	return 0;
+ }
+ 
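
The dp83640's extended registers sit behind a shared page-select register, so a select/write pair from one context can be split by another and land a value on the wrong page; the added extreg_lock sections close that window for the event configuration, the status-frame setup, and config_init. A simplified userspace model of the hazard (register layout hypothetical; compile with -pthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t extreg_lock = PTHREAD_MUTEX_INITIALIZER;
	static int current_page;	/* the shared page-select register */
	static int regs[8][32];		/* [page][register] */

	static void ext_write(int page, int reg, int val)
	{
		pthread_mutex_lock(&extreg_lock);
		current_page = page;		/* step 1: select the page */
		regs[current_page][reg] = val;	/* step 2: write the register */
		pthread_mutex_unlock(&extreg_lock);
		/* without the lock, a concurrent caller could move
		 * current_page between the two steps and this write
		 * would land on its page instead */
	}

	int main(void)
	{
		ext_write(5, 3, 0x1234);
		printf("page 5, reg 3 = %#x\n", regs[5][3]);
		return 0;
	}
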
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 52cd8db2c57d..757f28a4284c 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -1053,13 +1053,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ {
+ 	/* According to 802.3az, the EEE is supported only in full-duplex mode.
+ 	 * Also EEE feature is active when core is operating with MII, GMII
+-	 * or RGMII. Internal PHYs are also allowed to proceed and should
+-	 * return an error if they do not support EEE.
++	 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
++	 * should return an error if they do not support EEE.
+ 	 */
+ 	if ((phydev->duplex == DUPLEX_FULL) &&
+ 	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
+ 	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
+-	    (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
++	    (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
++	     phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+ 	     phy_is_internal(phydev))) {
+ 		int eee_lp, eee_cap, eee_adv;
+ 		u32 lp, cap, adv;
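
The four RGMII variants are adjacent values in the phy_interface_t enum, so the pair of comparisons admits RGMII with any internal-delay flavor rather than only the base mode. A self-contained illustration with local copies of the enum values:

	#include <stdbool.h>
	#include <stdio.h>

	enum phy_mode {
		MODE_MII,
		MODE_GMII,
		MODE_RGMII,		/* no internal delay */
		MODE_RGMII_ID,		/* RX and TX delay */
		MODE_RGMII_RXID,	/* RX delay only */
		MODE_RGMII_TXID,	/* TX delay only */
	};

	static bool is_rgmii(enum phy_mode m)
	{
		return m >= MODE_RGMII && m <= MODE_RGMII_TXID;
	}

	int main(void)
	{
		printf("RGMII_RXID: %d\n", is_rgmii(MODE_RGMII_RXID));	/* 1 */
		printf("GMII:       %d\n", is_rgmii(MODE_GMII));	/* 0 */
		return 0;
	}

Mainline later wrapped this same comparison in a phy_interface_is_rgmii() helper, since the range trick quietly breaks if a non-RGMII mode is ever inserted between the endpoints.
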
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index c3e4da9e79ca..8067b8fbb0ee 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 	 * payload data instead.
+ 	 */
+ 	usbnet_set_skb_tx_stats(skb_out, n,
+-				ctx->tx_curr_frame_payload - skb_out->len);
++				(long)ctx->tx_curr_frame_payload - skb_out->len);
+ 
+ 	return skb_out;
+ 
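
tx_curr_frame_payload counts only real payload bytes while skb_out->len also includes NCM framing and padding, so their difference is legitimately negative; computed in unsigned arithmetic it wraps to a huge positive byte count in the tx statistics. The cast makes the subtraction signed (casting the first operand suffices where long is wider than u32). A two-variable demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int payload = 100;	/* real payload bytes in the NTB */
		unsigned int sent    = 164;	/* frame length incl. headers/padding */

		unsigned int wrapped = payload - sent;		/* wraps: 4294967232 */
		long delta = (long)payload - (long)sent;	/* -64, as intended */

		printf("unsigned: %u\n", wrapped);
		printf("signed:   %ld\n", delta);
		return 0;
	}
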
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 794204e34fba..152131a10047 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -34,6 +34,8 @@ struct backend_info {
+ 	enum xenbus_state frontend_state;
+ 	struct xenbus_watch hotplug_status_watch;
+ 	u8 have_hotplug_status_watch:1;
++
++	const char *hotplug_script;
+ };
+ 
+ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+@@ -236,6 +238,7 @@ static int netback_remove(struct xenbus_device *dev)
+ 		xenvif_free(be->vif);
+ 		be->vif = NULL;
+ 	}
++	kfree(be->hotplug_script);
+ 	kfree(be);
+ 	dev_set_drvdata(&dev->dev, NULL);
+ 	return 0;
+@@ -253,6 +256,7 @@ static int netback_probe(struct xenbus_device *dev,
+ 	struct xenbus_transaction xbt;
+ 	int err;
+ 	int sg;
++	const char *script;
+ 	struct backend_info *be = kzalloc(sizeof(struct backend_info),
+ 					  GFP_KERNEL);
+ 	if (!be) {
+@@ -345,6 +349,15 @@ static int netback_probe(struct xenbus_device *dev,
+ 	if (err)
+ 		pr_debug("Error writing multi-queue-max-queues\n");
+ 
++	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
++	if (IS_ERR(script)) {
++		err = PTR_ERR(script);
++		xenbus_dev_fatal(dev, err, "reading script");
++		goto fail;
++	}
++
++	be->hotplug_script = script;
++
+ 	err = xenbus_switch_state(dev, XenbusStateInitWait);
+ 	if (err)
+ 		goto fail;
+@@ -377,22 +390,14 @@ static int netback_uevent(struct xenbus_device *xdev,
+ 			  struct kobj_uevent_env *env)
+ {
+ 	struct backend_info *be = dev_get_drvdata(&xdev->dev);
+-	char *val;
+ 
+-	val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+-	if (IS_ERR(val)) {
+-		int err = PTR_ERR(val);
+-		xenbus_dev_fatal(xdev, err, "reading script");
+-		return err;
+-	} else {
+-		if (add_uevent_var(env, "script=%s", val)) {
+-			kfree(val);
+-			return -ENOMEM;
+-		}
+-		kfree(val);
+-	}
++	if (!be)
++		return 0;
++
++	if (add_uevent_var(env, "script=%s", be->hotplug_script))
++		return -ENOMEM;
+ 
+-	if (!be || !be->vif)
++	if (!be->vif)
+ 		return 0;
+ 
+ 	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
+@@ -736,6 +741,7 @@ static void connect(struct backend_info *be)
+ 			goto err;
+ 		}
+ 
++		queue->credit_bytes = credit_bytes;
+ 		queue->remaining_credit = credit_bytes;
+ 		queue->credit_usec = credit_usec;
+ 
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 3351ef408125..53826b84e0ec 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
+ 	phandle = __of_get_property(np, "phandle", &sz);
+ 	if (!phandle)
+ 		phandle = __of_get_property(np, "linux,phandle", &sz);
+-	if (IS_ENABLED(PPC_PSERIES) && !phandle)
++	if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
+ 		phandle = __of_get_property(np, "ibm,phandle", &sz);
+ 	np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
+ 
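
IS_ENABLED() works by token pasting, so it cannot warn when the CONFIG_ prefix is missing: IS_ENABLED(PPC_PSERIES) pastes against an undefined placeholder and quietly evaluates to 0, leaving the ibm,phandle fallback dead on every configuration. An abridged userspace model of the macro (the real one also accepts the _MODULE variant):

	#include <stdio.h>

	#define CONFIG_PPC_PSERIES 1	/* pretend the option is =y */

	/* abridged copy of the kernel's kconfig.h machinery */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(a, b, ...) b
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)
	#define IS_ENABLED(option) __is_defined(option)

	int main(void)
	{
		printf("IS_ENABLED(CONFIG_PPC_PSERIES) = %d\n",
		       IS_ENABLED(CONFIG_PPC_PSERIES));	/* 1 */
		printf("IS_ENABLED(PPC_PSERIES)        = %d\n",
		       IS_ENABLED(PPC_PSERIES));	/* 0: no such token defined */
		return 0;
	}
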
+diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
+index 8543bb29a138..9737a979b8db 100644
+--- a/drivers/staging/ozwpan/ozhcd.c
++++ b/drivers/staging/ozwpan/ozhcd.c
+@@ -743,8 +743,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
+ /*
+  * Context: softirq
+  */
+-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
+-			int length, int offset, int total_size)
++void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
++			u8 length, u16 offset, u16 total_size)
+ {
+ 	struct oz_port *port = hport;
+ 	struct urb *urb;
+@@ -756,8 +756,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
+ 	if (!urb)
+ 		return;
+ 	if (status == 0) {
+-		int copy_len;
+-		int required_size = urb->transfer_buffer_length;
++		unsigned int copy_len;
++		unsigned int required_size = urb->transfer_buffer_length;
+ 
+ 		if (required_size > total_size)
+ 			required_size = total_size;
+diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
+index 4249fa374012..d2a6085345be 100644
+--- a/drivers/staging/ozwpan/ozusbif.h
++++ b/drivers/staging/ozwpan/ozusbif.h
+@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
+ 
+ /* Confirmation functions.
+  */
+-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
+-	const u8 *desc, int length, int offset, int total_size);
++void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
++	const u8 *desc, u8 length, u16 offset, u16 total_size);
+ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
+ 	const u8 *data, int data_len);
+ 
+diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
+index d434d8c6fff6..f660bb198c65 100644
+--- a/drivers/staging/ozwpan/ozusbsvc1.c
++++ b/drivers/staging/ozwpan/ozusbsvc1.c
+@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+ 			struct oz_multiple_fixed *body =
+ 				(struct oz_multiple_fixed *)data_hdr;
+ 			u8 *data = body->data;
+-			int n = (len - sizeof(struct oz_multiple_fixed)+1)
++			unsigned int n;
++			if (!body->unit_size ||
++				len < sizeof(struct oz_multiple_fixed) - 1)
++				break;
++			n = (len - (sizeof(struct oz_multiple_fixed) - 1))
+ 				/ body->unit_size;
+ 			while (n--) {
+ 				oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
+@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
+ 	case OZ_GET_DESC_RSP: {
+ 			struct oz_get_desc_rsp *body =
+ 				(struct oz_get_desc_rsp *)usb_hdr;
+-			int data_len = elt->length -
+-					sizeof(struct oz_get_desc_rsp) + 1;
+-			u16 offs = le16_to_cpu(get_unaligned(&body->offset));
+-			u16 total_size =
++			u16 offs, total_size;
++			u8 data_len;
++
++			if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
++				break;
++			data_len = elt->length -
++					(sizeof(struct oz_get_desc_rsp) - 1);
++			offs = le16_to_cpu(get_unaligned(&body->offset));
++			total_size =
+ 				le16_to_cpu(get_unaligned(&body->total_size));
+ 			oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
+ 			oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
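
Both ozwpan hunks harden parsing of lengths that arrive off the air: subtracting the header size from an unchecked element length can underflow, and body->unit_size must be rejected when zero before it is used as a divisor, so a malformed frame cannot force either failure mode. A standalone sketch of the validation order, with simplified stand-ins for the oz_* headers:

	#include <stdint.h>
	#include <stdio.h>

	struct multi_fixed_hdr {
		uint8_t endpoint;
		uint8_t unit_size;
		uint8_t data[1];	/* payload; contributes 1 byte to sizeof */
	};

	static int count_units(const struct multi_fixed_hdr *h, unsigned int len)
	{
		/* header is sizeof(*h) - 1 bytes; the remainder is payload */
		if (!h->unit_size || len < sizeof(*h) - 1)
			return -1;	/* malformed element: drop it */

		return (len - (sizeof(*h) - 1)) / h->unit_size;
	}

	int main(void)
	{
		struct multi_fixed_hdr h = { .endpoint = 1, .unit_size = 8 };

		printf("%d\n", count_units(&h, 2 + 8 * 3));	/* 3 units */
		printf("%d\n", count_units(&h, 1));		/* -1: too short */
		h.unit_size = 0;
		printf("%d\n", count_units(&h, 26));		/* -1: zero divisor */
		return 0;
	}
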
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index cc57a3a6b02b..eee40b5cb025 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
+ 	return put_user(x, ptr);
+ }
+ 
++static inline int tty_copy_to_user(struct tty_struct *tty,
++					void __user *to,
++					const void *from,
++					unsigned long n)
++{
++	struct n_tty_data *ldata = tty->disc_data;
++
++	tty_audit_add_data(tty, to, n, ldata->icanon);
++	return copy_to_user(to, from, n);
++}
++
+ /**
+  *	n_tty_kick_worker - start input worker (if required)
+  *	@tty: terminal
+@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ 		    __func__, eol, found, n, c, size, more);
+ 
+ 	if (n > size) {
+-		ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
++		ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
+ 		if (ret)
+ 			return -EFAULT;
+-		ret = copy_to_user(*b + size, ldata->read_buf, n - size);
++		ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
+ 	} else
+-		ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
++		ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
+ 
+ 	if (ret)
+ 		return -EFAULT;
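
canon_copy_from_read_buf() previously copied canonical-mode input straight to userspace without feeding the tty audit subsystem, so audited systems missed terminal input; funnelling every copy through tty_copy_to_user() makes the audit side effect impossible to skip at a call site. The shape of that pattern in plain C, with stand-ins for copy_to_user() and tty_audit_add_data():

	#include <stdio.h>
	#include <string.h>

	static void audit_add_data(const void *buf, size_t n)
	{
		(void)buf;
		printf("audited %zu bytes\n", n);	/* stand-in for tty_audit_add_data() */
	}

	static int tty_copy(void *to, const void *from, size_t n)
	{
		audit_add_data(from, n);	/* side effect applied in one place */
		memcpy(to, from, n);		/* stand-in for copy_to_user() */
		return 0;
	}

	int main(void)
	{
		char dst[8];

		return tty_copy(dst, "hello\n", 6);
	}
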
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 23061918b0e4..f74f400fcb57 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -959,6 +959,14 @@ static void dma_rx_callback(void *data)
+ 
+ 	status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
+ 	count = RX_BUF_SIZE - state.residue;
++
++	if (readl(sport->port.membase + USR2) & USR2_IDLE) {
++		/* In condition [3] the SDMA counted up too early */
++		count--;
++
++		writel(USR2_IDLE, sport->port.membase + USR2);
++	}
++
+ 	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
+ 
+ 	if (count) {
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index d201910b892f..f176941a92dd 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -339,7 +339,7 @@
+ #define DWC3_DGCMD_SET_ENDPOINT_NRDY	0x0c
+ #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK	0x10
+ 
+-#define DWC3_DGCMD_STATUS(n)		(((n) >> 15) & 1)
++#define DWC3_DGCMD_STATUS(n)		(((n) >> 12) & 0x0F)
+ #define DWC3_DGCMD_CMDACT		(1 << 10)
+ #define DWC3_DGCMD_CMDIOC		(1 << 8)
+ 
+@@ -355,7 +355,7 @@
+ #define DWC3_DEPCMD_PARAM_SHIFT		16
+ #define DWC3_DEPCMD_PARAM(x)		((x) << DWC3_DEPCMD_PARAM_SHIFT)
+ #define DWC3_DEPCMD_GET_RSC_IDX(x)	(((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
+-#define DWC3_DEPCMD_STATUS(x)		(((x) >> 15) & 1)
++#define DWC3_DEPCMD_STATUS(x)		(((x) >> 12) & 0x0F)
+ #define DWC3_DEPCMD_HIPRI_FORCERM	(1 << 11)
+ #define DWC3_DEPCMD_CMDACT		(1 << 10)
+ #define DWC3_DEPCMD_CMDIOC		(1 << 8)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index ec8ac1674854..36bf089b708f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ 	unsigned long flags;
+-	int ret;
++	int ret, slot_id;
+ 	struct xhci_command *command;
+ 
+ 	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ 	if (!command)
+ 		return 0;
+ 
++	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
++	mutex_lock(&xhci->mutex);
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	command->completion = &xhci->addr_dev;
+ 	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
+ 	if (ret) {
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
++		mutex_unlock(&xhci->mutex);
+ 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ 		kfree(command);
+ 		return 0;
+@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 
+ 	wait_for_completion(command->completion);
++	slot_id = xhci->slot_id;
++	mutex_unlock(&xhci->mutex);
+ 
+-	if (!xhci->slot_id || command->status != COMP_SUCCESS) {
++	if (!slot_id || command->status != COMP_SUCCESS) {
+ 		xhci_err(xhci, "Error while assigning device slot ID\n");
+ 		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
+ 				HCS_MAX_SLOTS(
+@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 	 * xhci_discover_or_reset_device(), which may be called as part of
+ 	 * mass storage driver error handling.
+ 	 */
+-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
++	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
+ 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+ 		goto disable_slot;
+ 	}
+-	udev->slot_id = xhci->slot_id;
++	udev->slot_id = slot_id;
+ 
+ #ifndef CONFIG_USB_DEFAULT_PERSIST
+ 	/*
+@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 	struct xhci_slot_ctx *slot_ctx;
+ 	struct xhci_input_control_ctx *ctrl_ctx;
+ 	u64 temp_64;
+-	struct xhci_command *command;
++	struct xhci_command *command = NULL;
++
++	mutex_lock(&xhci->mutex);
+ 
+ 	if (!udev->slot_id) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ 				"Bad Slot ID %d", udev->slot_id);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	virt_dev = xhci->devs[udev->slot_id];
+@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		 */
+ 		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
+ 			udev->slot_id);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	if (setup == SETUP_CONTEXT_ONLY) {
+@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ 		    SLOT_STATE_DEFAULT) {
+ 			xhci_dbg(xhci, "Slot already in default state\n");
+-			return 0;
++			goto out;
+ 		}
+ 	}
+ 
+ 	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+-	if (!command)
+-		return -ENOMEM;
++	if (!command) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	command->in_ctx = virt_dev->in_ctx;
+ 	command->completion = &xhci->addr_dev;
+@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 	if (!ctrl_ctx) {
+ 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+ 				__func__);
+-		kfree(command);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 	/*
+ 	 * If this is the first Set Address since device plug-in or
+@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ 				"FIXME: allocate a command ring segment");
+-		kfree(command);
+-		return ret;
++		goto out;
+ 	}
+ 	xhci_ring_cmd_db(xhci);
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-	if (ret) {
+-		kfree(command);
+-		return ret;
+-	}
++	if (ret)
++		goto out;
+ 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ 			"Op regs DCBAA ptr = %#016llx", temp_64);
+@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ 		       "Internal device address = %d",
+ 		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
++out:
++	mutex_unlock(&xhci->mutex);
+ 	kfree(command);
+-	return 0;
++	return ret;
+ }
+ 
+ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		return 0;
+ 	}
+ 
++	mutex_init(&xhci->mutex);
+ 	xhci->cap_regs = hcd->regs;
+ 	xhci->op_regs = hcd->regs +
+ 		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
+@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
+ 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ 	return 0;
+ }
++
++/*
++ * If an init function is provided, an exit function must also be provided
++ * to allow module unload.
++ */
++static void __exit xhci_hcd_fini(void) { }
++
+ module_init(xhci_hcd_init);
++module_exit(xhci_hcd_fini);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ea75e8ccd3c1..6977f8491fa7 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1497,6 +1497,8 @@ struct xhci_hcd {
+ 	struct list_head	lpm_failed_devs;
+ 
+ 	/* slot enabling and address device helpers */
++	/* these are not thread-safe, so use the mutex */
++	struct mutex mutex;
+ 	struct completion	addr_dev;
+ 	int slot_id;
+ 	/* For USB 3.0 LPM enable/disable. */
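
xhci->slot_id and the addr_dev completion are single per-controller fields shared by every Enable Slot and Address Device command, so two devices enumerating concurrently could consume each other's results between queueing a command and reading the field. The new xhci->mutex covers the whole queue/wait/read window, and the result is snapshotted into a local before the lock drops. A userspace model of the pattern (names illustrative; compile with -pthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t hcd_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int shared_slot_id;	/* the one-per-controller result field */

	static int queue_enable_slot_and_wait(int request)
	{
		/* stand-in for queueing the command + wait_for_completion() */
		return request + 100;
	}

	static int alloc_dev(int request)
	{
		int slot_id;

		pthread_mutex_lock(&hcd_mutex);
		shared_slot_id = queue_enable_slot_and_wait(request);
		slot_id = shared_slot_id;	/* snapshot while still locked */
		pthread_mutex_unlock(&hcd_mutex);

		/* another enumeration may overwrite shared_slot_id now,
		 * but we already hold our own copy */
		return slot_id;
	}

	int main(void)
	{
		printf("slot = %d\n", alloc_dev(1));
		return 0;
	}
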
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9031750e7404..ffd739e31bfc 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+ 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
++	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8eb68a31cab6..4c8b3b82103d 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
++	{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
+ 	{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ 	{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 4e4f46f3c89c..792e054126de 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -155,6 +155,7 @@
+ #define XSENS_AWINDA_STATION_PID 0x0101
+ #define XSENS_AWINDA_DONGLE_PID 0x0102
+ #define XSENS_MTW_PID		0x0200	/* Xsens MTw */
++#define XSENS_MTDEVBOARD_PID	0x0300	/* Motion Tracker Development Board */
+ #define XSENS_CONVERTER_PID	0xD00D	/* Xsens USB-serial converter */
+ 
+ /* Xsens devices using FTDI VID */
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index e894eb278d83..eba1b7ac7294 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+ 		if (cpu == -1)
+ 			irq_set_affinity_hint(irq, NULL);
+ 		else {
++			cpumask_clear(mask);
+ 			cpumask_set_cpu(cpu, mask);
+ 			irq_set_affinity_hint(irq, mask);
+ 		}
+diff --git a/fs/aio.c b/fs/aio.c
+index a793f7023755..a1736e98c278 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -77,6 +77,11 @@ struct kioctx_cpu {
+ 	unsigned		reqs_available;
+ };
+ 
++struct ctx_rq_wait {
++	struct completion comp;
++	atomic_t count;
++};
++
+ struct kioctx {
+ 	struct percpu_ref	users;
+ 	atomic_t		dead;
+@@ -115,7 +120,7 @@ struct kioctx {
+ 	/*
+ 	 * signals when all in-flight requests are done
+ 	 */
+-	struct completion *requests_done;
++	struct ctx_rq_wait	*rq_wait;
+ 
+ 	struct {
+ 		/*
+@@ -539,8 +544,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+ 
+ 	/* At this point we know that there are no in-flight requests */
+-	if (ctx->requests_done)
+-		complete(ctx->requests_done);
++	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
++		complete(&ctx->rq_wait->comp);
+ 
+ 	INIT_WORK(&ctx->free_work, free_ioctx);
+ 	schedule_work(&ctx->free_work);
+@@ -751,7 +756,7 @@ err:
+  *	the rapid destruction of the kioctx.
+  */
+ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+-		struct completion *requests_done)
++		      struct ctx_rq_wait *wait)
+ {
+ 	struct kioctx_table *table;
+ 
+@@ -781,7 +786,7 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ 	if (ctx->mmap_size)
+ 		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ 
+-	ctx->requests_done = requests_done;
++	ctx->rq_wait = wait;
+ 	percpu_ref_kill(&ctx->users);
+ 	return 0;
+ }
+@@ -813,18 +818,24 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
+ void exit_aio(struct mm_struct *mm)
+ {
+ 	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
+-	int i;
++	struct ctx_rq_wait wait;
++	int i, skipped;
+ 
+ 	if (!table)
+ 		return;
+ 
++	atomic_set(&wait.count, table->nr);
++	init_completion(&wait.comp);
++
++	skipped = 0;
+ 	for (i = 0; i < table->nr; ++i) {
+ 		struct kioctx *ctx = table->table[i];
+-		struct completion requests_done =
+-			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+ 
+-		if (!ctx)
++		if (!ctx) {
++			skipped++;
+ 			continue;
++		}
++
+ 		/*
+ 		 * We don't need to bother with munmap() here - exit_mmap(mm)
+ 		 * is coming and it'll unmap everything. And we simply can't,
+@@ -833,10 +844,12 @@ void exit_aio(struct mm_struct *mm)
+ 		 * that it needs to unmap the area, just set it to 0.
+ 		 */
+ 		ctx->mmap_size = 0;
+-		kill_ioctx(mm, ctx, &requests_done);
++		kill_ioctx(mm, ctx, &wait);
++	}
+ 
++	if (!atomic_sub_and_test(skipped, &wait.count)) {
+ 		/* Wait until all IO for the context are done. */
+-		wait_for_completion(&requests_done);
++		wait_for_completion(&wait.comp);
+ 	}
+ 
+ 	RCU_INIT_POINTER(mm->ioctx_table, NULL);
+@@ -1321,15 +1334,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ {
+ 	struct kioctx *ioctx = lookup_ioctx(ctx);
+ 	if (likely(NULL != ioctx)) {
+-		struct completion requests_done =
+-			COMPLETION_INITIALIZER_ONSTACK(requests_done);
++		struct ctx_rq_wait wait;
+ 		int ret;
+ 
++		init_completion(&wait.comp);
++		atomic_set(&wait.count, 1);
++
+ 		/* Pass requests_done to kill_ioctx() where it can be set
+ 		 * in a thread-safe way. If we try to set it here then we have
+ 	 * a race condition if two io_destroy() calls run simultaneously.
+ 		 */
+-		ret = kill_ioctx(current->mm, ioctx, &requests_done);
++		ret = kill_ioctx(current->mm, ioctx, &wait);
+ 		percpu_ref_put(&ioctx->users);
+ 
+ 		/* Wait until all IO for the context are done. Otherwise kernel
+@@ -1337,7 +1352,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ 		 * is destroyed.
+ 		 */
+ 		if (!ret)
+-			wait_for_completion(&requests_done);
++			wait_for_completion(&wait.comp);
+ 
+ 		return ret;
+ 	}
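
exit_aio() used to declare a fresh on-stack completion per kioctx and sleep once per table slot, serializing teardown. ctx_rq_wait pairs one completion with an atomic count so that only the last context to drain fires it, and the NULL table slots counted as `skipped` are subtracted up front so the waiter isn't left hanging. A userspace sketch with a condition variable standing in for struct completion (compile with -pthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct rq_wait {
		pthread_mutex_t lock;
		pthread_cond_t  comp;	/* stands in for struct completion */
		atomic_int      count;
		int             done;
	};

	static void rq_done(struct rq_wait *w)
	{
		if (atomic_fetch_sub(&w->count, 1) == 1) {	/* last one out */
			pthread_mutex_lock(&w->lock);
			w->done = 1;
			pthread_cond_signal(&w->comp);
			pthread_mutex_unlock(&w->lock);
		}
	}

	static void *ctx_teardown(void *arg)
	{
		rq_done(arg);	/* each context calls this once drained */
		return NULL;
	}

	int main(void)
	{
		struct rq_wait w = {
			PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 3, 0
		};
		pthread_t t[3];
		int i;

		for (i = 0; i < 3; i++)
			pthread_create(&t[i], NULL, ctx_teardown, &w);

		pthread_mutex_lock(&w.lock);
		while (!w.done)		/* wait_for_completion(&wait.comp) */
			pthread_cond_wait(&w.comp, &w.lock);
		pthread_mutex_unlock(&w.lock);
		printf("all contexts drained\n");

		for (i = 0; i < 3; i++)
			pthread_join(t[i], NULL);
		return 0;
	}
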
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8b33da6ec3dd..63be2a96ed6a 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -8535,6 +8535,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
+ 	trans = btrfs_join_transaction(root);
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
++	/*
++	 * if we are changing raid levels, try to allocate a corresponding
++	 * block group with the new raid level.
++	 */
++	alloc_flags = update_block_group_flags(root, cache->flags);
++	if (alloc_flags != cache->flags) {
++		ret = do_chunk_alloc(trans, root, alloc_flags,
++				     CHUNK_ALLOC_FORCE);
++		/*
++		 * ENOSPC is allowed here, we may have enough space
++		 * already allocated at the new raid level to
++		 * carry on
++		 */
++		if (ret == -ENOSPC)
++			ret = 0;
++		if (ret < 0)
++			goto out;
++	}
+ 
+ 	ret = set_block_group_ro(cache, 0);
+ 	if (!ret)
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index d688cfe5d496..782f3bc4651d 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4514,8 +4514,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		}
+ 		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ 					      em_len, flags);
+-		if (ret)
++		if (ret) {
++			if (ret == 1)
++				ret = 0;
+ 			goto out_free;
++		}
+ 	}
+ out_free:
+ 	free_extent_map(em);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2b4c5423672d..64e8fb639f72 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3206,6 +3206,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
+ 	key.offset = off;
+ 
+ 	while (1) {
++		u64 next_key_min_offset = key.offset + 1;
++
+ 		/*
+ 		 * note the key will change type as we walk through the
+ 		 * tree.
+@@ -3286,7 +3288,7 @@ process_slot:
+ 			} else if (key.offset >= off + len) {
+ 				break;
+ 			}
+-
++			next_key_min_offset = key.offset + datal;
+ 			size = btrfs_item_size_nr(leaf, slot);
+ 			read_extent_buffer(leaf, buf,
+ 					   btrfs_item_ptr_offset(leaf, slot),
+@@ -3501,7 +3503,7 @@ process_slot:
+ 				break;
+ 		}
+ 		btrfs_release_path(path);
+-		key.offset++;
++		key.offset = next_key_min_offset;
+ 	}
+ 	ret = 0;
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index d6033f540cc7..571de5a08fe7 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5852,19 +5852,20 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
+ 				ret = PTR_ERR(clone_root);
+ 				goto out;
+ 			}
+-			clone_sources_to_rollback = i + 1;
+ 			spin_lock(&clone_root->root_item_lock);
+-			clone_root->send_in_progress++;
+-			if (!btrfs_root_readonly(clone_root)) {
++			if (!btrfs_root_readonly(clone_root) ||
++			    btrfs_root_dead(clone_root)) {
+ 				spin_unlock(&clone_root->root_item_lock);
+ 				srcu_read_unlock(&fs_info->subvol_srcu, index);
+ 				ret = -EPERM;
+ 				goto out;
+ 			}
++			clone_root->send_in_progress++;
+ 			spin_unlock(&clone_root->root_item_lock);
+ 			srcu_read_unlock(&fs_info->subvol_srcu, index);
+ 
+ 			sctx->clone_roots[i].root = clone_root;
++			clone_sources_to_rollback = i + 1;
+ 		}
+ 		vfree(clone_sources_tmp);
+ 		clone_sources_tmp = NULL;
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 05fef198ff94..e477ed67a49a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -901,6 +901,15 @@ find_root:
+ 	if (IS_ERR(new_root))
+ 		return ERR_CAST(new_root);
+ 
++	if (!(sb->s_flags & MS_RDONLY)) {
++		int ret;
++		down_read(&fs_info->cleanup_work_sem);
++		ret = btrfs_orphan_cleanup(new_root);
++		up_read(&fs_info->cleanup_work_sem);
++		if (ret)
++			return ERR_PTR(ret);
++	}
++
+ 	dir_id = btrfs_root_dirid(&new_root->root_item);
+ setup_root:
+ 	location.objectid = dir_id;
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index aff923ae8c4b..d87d8eced064 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -116,7 +116,6 @@ __printf(3, 4)
+ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+ 		const char *fmt, ...);
+ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+-void bdi_unregister(struct backing_dev_info *bdi);
+ int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ 			enum wb_reason reason);
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 5976bdecf58b..9fe865ccc3f3 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -98,7 +98,8 @@ struct inet_connection_sock {
+ 	const struct tcp_congestion_ops *icsk_ca_ops;
+ 	const struct inet_connection_sock_af_ops *icsk_af_ops;
+ 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
+-	__u8			  icsk_ca_state:7,
++	__u8			  icsk_ca_state:6,
++				  icsk_ca_setsockopt:1,
+ 				  icsk_ca_dst_locked:1;
+ 	__u8			  icsk_retransmits;
+ 	__u8			  icsk_pending;
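
Congestion-avoidance states only need values 0-4, so icsk_ca_state can shrink from 7 bits to 6 and donate a bit to the new icsk_ca_setsockopt flag without growing struct inet_connection_sock; all three members still pack into one byte. A compile-and-run check of the packing (GCC/Clang behaviour for char bitfields):

	#include <stdio.h>

	struct icsk_bits {
		unsigned char ca_state:6,	/* was :7; 6 bits still fit all states */
			      ca_setsockopt:1,
			      ca_dst_locked:1;
	};

	int main(void)
	{
		printf("sizeof = %zu\n", sizeof(struct icsk_bits));	/* 1 */
		return 0;
	}
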
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 856f01cb51dd..230775f5952a 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -571,11 +571,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
+ /* Map v4 address to v4-mapped v6 address */
+ static inline void sctp_v4_map_v6(union sctp_addr *addr)
+ {
++	__be16 port;
++
++	port = addr->v4.sin_port;
++	addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
++	addr->v6.sin6_port = port;
+ 	addr->v6.sin6_family = AF_INET6;
+ 	addr->v6.sin6_flowinfo = 0;
+ 	addr->v6.sin6_scope_id = 0;
+-	addr->v6.sin6_port = addr->v4.sin_port;
+-	addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ 	addr->v6.sin6_addr.s6_addr32[0] = 0;
+ 	addr->v6.sin6_addr.s6_addr32[1] = 0;
+ 	addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
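
The reordering matters because union sctp_addr overlays sockaddr_in and sockaddr_in6: sin6_flowinfo occupies the same bytes as sin_addr, so the old sequence zeroed the IPv4 address before the line that read it (the port survives only because sin6_port happens to alias sin_port). Reading every v4 field before writing any v6 field removes the trap. A userspace demonstration using the glibc layouts:

	#include <netinet/in.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	union addr_demo {
		struct sockaddr_in  v4;
		struct sockaddr_in6 v6;
	};

	int main(void)
	{
		union addr_demo a;
		in_port_t port;
		uint32_t v4addr;

		memset(&a, 0, sizeof(a));
		a.v4.sin_family = AF_INET;
		a.v4.sin_port = htons(9999);
		a.v4.sin_addr.s_addr = htonl(0x7f000001);	/* 127.0.0.1 */

		/* fixed order: read all v4 fields before writing any v6 field */
		port = a.v4.sin_port;
		v4addr = a.v4.sin_addr.s_addr;

		a.v6.sin6_family = AF_INET6;
		a.v6.sin6_flowinfo = 0;		/* overlays (and destroys) sin_addr */
		a.v6.sin6_scope_id = 0;
		a.v6.sin6_port = port;
		memset(&a.v6.sin6_addr, 0, sizeof(a.v6.sin6_addr));
		a.v6.sin6_addr.s6_addr[10] = 0xff;
		a.v6.sin6_addr.s6_addr[11] = 0xff;
		memcpy(&a.v6.sin6_addr.s6_addr[12], &v4addr, 4);	/* ::ffff:127.0.0.1 */

		printf("port %u survived the mapping\n", ntohs(a.v6.sin6_port));
		return 0;
	}
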
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 5a14ead59696..885d3a380451 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -233,7 +233,6 @@ DEFINE_EVENT(writeback_class, name, \
+ DEFINE_WRITEBACK_EVENT(writeback_nowork);
+ DEFINE_WRITEBACK_EVENT(writeback_wake_background);
+ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
+ 
+ DECLARE_EVENT_CLASS(wbc_class,
+ 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 241213be507c..486d00c408b0 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2166,7 +2166,7 @@ void task_numa_work(struct callback_head *work)
+ 	}
+ 	for (; vma; vma = vma->vm_next) {
+ 		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+-			is_vm_hugetlb_page(vma)) {
++			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
+ 			continue;
+ 		}
+ 
+diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
+index 13d945c0d03f..1b28df2d9104 100644
+--- a/kernel/trace/ring_buffer_benchmark.c
++++ b/kernel/trace/ring_buffer_benchmark.c
+@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
+ 
+ 	if (producer_fifo >= 0) {
+ 		struct sched_param param = {
+-			.sched_priority = consumer_fifo
++			.sched_priority = producer_fifo
+ 		};
+ 		sched_setscheduler(producer, SCHED_FIFO, &param);
+ 	} else
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 6dc4580df2af..000e7b3b9896 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+ 	flush_delayed_work(&bdi->wb.dwork);
+ }
+ 
+-/*
+- * Called when the device behind @bdi has been removed or ejected.
+- *
+- * We can't really do much here except for reducing the dirty ratio at
+- * the moment.  In the future we should be able to set a flag so that
+- * the filesystem can handle errors at mark_inode_dirty time instead
+- * of only at writeback time.
+- */
+-void bdi_unregister(struct backing_dev_info *bdi)
+-{
+-	if (WARN_ON_ONCE(!bdi->dev))
+-		return;
+-
+-	bdi_set_min_ratio(bdi, 0);
+-}
+-EXPORT_SYMBOL(bdi_unregister);
+-
+ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+ {
+ 	memset(wb, 0, sizeof(*wb));
+@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
+ 	int i;
+ 
+ 	bdi_wb_shutdown(bdi);
++	bdi_set_min_ratio(bdi, 0);
+ 
+ 	WARN_ON(!list_empty(&bdi->work_list));
+ 	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 65842d688b7c..93caba791cde 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1978,8 +1978,10 @@ void try_offline_node(int nid)
+ 		 * wait_table may be allocated from boot memory,
+ 		 * here only free if it's allocated by vmalloc.
+ 		 */
+-		if (is_vmalloc_addr(zone->wait_table))
++		if (is_vmalloc_addr(zone->wait_table)) {
+ 			vfree(zone->wait_table);
++			zone->wait_table = NULL;
++		}
+ 	}
+ }
+ EXPORT_SYMBOL(try_offline_node);
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index e0670d7054f9..659fb96672e4 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
+ 	int err = 0;
+ 
+ 	if (ndm->ndm_flags & NTF_USE) {
++		local_bh_disable();
+ 		rcu_read_lock();
+ 		br_fdb_update(p->br, p, addr, vid, true);
+ 		rcu_read_unlock();
++		local_bh_enable();
+ 	} else {
+ 		spin_lock_bh(&p->br->hash_lock);
+ 		err = fdb_add_entry(p, addr, ndm->ndm_state,
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index c465876c7861..b0aee78dba41 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1071,7 +1071,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ 
+ 		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
+ 						 vid);
+-		if (!err)
++		if (err)
+ 			break;
+ 	}
+ 
+@@ -1821,7 +1821,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
+ 	if (query->startup_sent < br->multicast_startup_query_count)
+ 		query->startup_sent++;
+ 
+-	RCU_INIT_POINTER(querier, NULL);
++	RCU_INIT_POINTER(querier->port, NULL);
+ 	br_multicast_send_query(br, NULL, query);
+ 	spin_unlock(&br->multicast_lock);
+ }
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index a6e2da0bc718..982101c12258 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
+ 		release_sock(sk);
+ 		timeo = schedule_timeout(timeo);
+ 		lock_sock(sk);
++
++		if (sock_flag(sk, SOCK_DEAD))
++			break;
++
+ 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ 	}
+ 
+@@ -374,6 +378,10 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		struct sk_buff *skb;
+ 
+ 		lock_sock(sk);
++		if (sock_flag(sk, SOCK_DEAD)) {
++			err = -ECONNRESET;
++			goto unlock;
++		}
+ 		skb = skb_dequeue(&sk->sk_receive_queue);
+ 		caif_check_flow_release(sk);
+ 
+diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
+index a1ef53c04415..b1f2d1f44d37 100644
+--- a/net/ceph/crush/mapper.c
++++ b/net/ceph/crush/mapper.c
+@@ -290,6 +290,7 @@ static int is_out(const struct crush_map *map,
+  * @type: the type of item to choose
+  * @out: pointer to output vector
+  * @outpos: our position in that vector
++ * @out_size: size of the out vector
+  * @tries: number of attempts to make
+  * @recurse_tries: number of attempts to have recursive chooseleaf make
+  * @local_retries: localized retries
+@@ -304,6 +305,7 @@ static int crush_choose_firstn(const struct crush_map *map,
+ 			       const __u32 *weight, int weight_max,
+ 			       int x, int numrep, int type,
+ 			       int *out, int outpos,
++			       int out_size,
+ 			       unsigned int tries,
+ 			       unsigned int recurse_tries,
+ 			       unsigned int local_retries,
+@@ -322,6 +324,7 @@ static int crush_choose_firstn(const struct crush_map *map,
+ 	int item = 0;
+ 	int itemtype;
+ 	int collide, reject;
++	int count = out_size;
+ 
+ 	dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
+ 		recurse_to_leaf ? "_LEAF" : "",
+@@ -329,7 +332,7 @@ static int crush_choose_firstn(const struct crush_map *map,
+ 		tries, recurse_tries, local_retries, local_fallback_retries,
+ 		parent_r);
+ 
+-	for (rep = outpos; rep < numrep; rep++) {
++	for (rep = outpos; rep < numrep && count > 0 ; rep++) {
+ 		/* keep trying until we get a non-out, non-colliding item */
+ 		ftotal = 0;
+ 		skip_rep = 0;
+@@ -403,7 +406,7 @@ static int crush_choose_firstn(const struct crush_map *map,
+ 							 map->buckets[-1-item],
+ 							 weight, weight_max,
+ 							 x, outpos+1, 0,
+-							 out2, outpos,
++							 out2, outpos, count,
+ 							 recurse_tries, 0,
+ 							 local_retries,
+ 							 local_fallback_retries,
+@@ -463,6 +466,7 @@ reject:
+ 		dprintk("CHOOSE got %d\n", item);
+ 		out[outpos] = item;
+ 		outpos++;
++		count--;
+ 	}
+ 
+ 	dprintk("CHOOSE returns %d\n", outpos);
+@@ -654,6 +658,7 @@ int crush_do_rule(const struct crush_map *map,
+ 	__u32 step;
+ 	int i, j;
+ 	int numrep;
++	int out_size;
+ 	/*
+ 	 * the original choose_total_tries value was off by one (it
+ 	 * counted "retries" and not "tries").  add one.
+@@ -761,6 +766,7 @@ int crush_do_rule(const struct crush_map *map,
+ 						x, numrep,
+ 						curstep->arg2,
+ 						o+osize, j,
++						result_max-osize,
+ 						choose_tries,
+ 						recurse_tries,
+ 						choose_local_retries,
+@@ -770,11 +776,13 @@ int crush_do_rule(const struct crush_map *map,
+ 						c+osize,
+ 						0);
+ 				} else {
++					out_size = ((numrep < (result_max-osize)) ?
++						    numrep : (result_max-osize));
+ 					crush_choose_indep(
+ 						map,
+ 						map->buckets[-1-w[i]],
+ 						weight, weight_max,
+-						x, numrep, numrep,
++						x, out_size, numrep,
+ 						curstep->arg2,
+ 						o+osize, j,
+ 						choose_tries,
+@@ -783,7 +791,7 @@ int crush_do_rule(const struct crush_map *map,
+ 						recurse_to_leaf,
+ 						c+osize,
+ 						0);
+-					osize += numrep;
++					osize += out_size;
+ 				}
+ 			}
+ 
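
The crush changes thread an explicit out_size through
crush_choose_firstn() so the selection loop can never emit more items
than the caller's result buffer holds. A fragment showing the clamp and
the bounded loop (choose_item() is a hypothetical stand-in for the
actual CRUSH selection):

	int out_size = numrep < (result_max - osize) ?
		       numrep : (result_max - osize);
	int count = out_size;	/* free slots left in out[] */
	int rep;

	for (rep = outpos; rep < numrep && count > 0; rep++) {
		out[outpos++] = choose_item(rep);
		count--;
	}
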
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 22a53acdb5bb..e977e15c2ac0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5170,7 +5170,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ 	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
+ 		return -EBUSY;
+ 
+-	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
++	if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
+ 		return -EEXIST;
+ 
+ 	if (master && netdev_master_upper_dev_get(dev))
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 7ebed55b5f7d..a2b90e1fc115 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2337,6 +2337,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
+ {
+ 	struct sk_buff *skb;
+ 
++	if (dev->reg_state != NETREG_REGISTERED)
++		return;
++
+ 	skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
+ 	if (skb)
+ 		rtmsg_ifinfo_send(skb, dev, flags);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 20fc0202cbbe..e262a087050b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -903,6 +903,10 @@ static int ip_error(struct sk_buff *skb)
+ 	bool send;
+ 	int code;
+ 
++	/* IP on this device is disabled. */
++	if (!in_dev)
++		goto out;
++
+ 	net = dev_net(rt->dst.dev);
+ 	if (!IN_DEV_FORWARD(in_dev)) {
+ 		switch (rt->dst.error) {
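
__in_dev_get_rcu() returns NULL when IP has been disabled on the
ingress device, and ip_error() previously went on to dereference in_dev
through IN_DEV_FORWARD() regardless. The added guard is the plain
check-before-use shape:

	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

	/* IP disabled on this device: drop the packet, send no ICMP. */
	if (!in_dev)
		goto out;
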
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 62856e185a93..9d2fbd88df93 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
+ 
+ 	tcp_cleanup_congestion_control(sk);
+ 	icsk->icsk_ca_ops = ca;
++	icsk->icsk_ca_setsockopt = 1;
+ 
+ 	if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
+ 		icsk->icsk_ca_ops->init(sk);
+@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
+ 	rcu_read_lock();
+ 	ca = __tcp_ca_find_autoload(name);
+ 	/* No change asking for existing value */
+-	if (ca == icsk->icsk_ca_ops)
++	if (ca == icsk->icsk_ca_ops) {
++		icsk->icsk_ca_setsockopt = 1;
+ 		goto out;
++	}
+ 	if (!ca)
+ 		err = -ENOENT;
+ 	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
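
icsk_ca_setsockopt records that the congestion control algorithm was
chosen explicitly via setsockopt(), including a setsockopt() that names
the already-active algorithm. The tcp_minisocks.c hunk that follows
consults the flag so that sockets accepted from a listener fall back to
the system default unless the listener really opted in:

	/* Shape of the child-socket decision (from the next hunk). */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);
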
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index dd11ac7798c6..50277af92485 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ 			tw->tw_v6_daddr = sk->sk_v6_daddr;
+ 			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+ 			tw->tw_tclass = np->tclass;
+-			tw->tw_flowlabel = np->flow_label >> 12;
++			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
+ 			tw->tw_ipv6only = sk->sk_ipv6only;
+ 		}
+ #endif
+@@ -437,7 +437,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
+ 		rcu_read_unlock();
+ 	}
+ 
+-	if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
++	/* If no valid choice made yet, assign current system default ca. */
++	if (!ca_got_dst &&
++	    (!icsk->icsk_ca_setsockopt ||
++	     !try_module_get(icsk->icsk_ca_ops->owner)))
+ 		tcp_assign_congestion_control(sk);
+ 
+ 	tcp_set_ca_state(sk, TCP_CA_Open);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 97ef1f8b7be8..51f17454bd7b 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -90,6 +90,7 @@
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
++#include <linux/inetdevice.h>
+ #include <linux/in.h>
+ #include <linux/errno.h>
+ #include <linux/timer.h>
+@@ -1348,10 +1349,8 @@ csum_copy_err:
+ 	}
+ 	unlock_sock_fast(sk, slow);
+ 
+-	if (noblock)
+-		return -EAGAIN;
+-
+-	/* starting over for a new packet */
++	/* starting over for a new packet, but check if we need to yield */
++	cond_resched();
+ 	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+@@ -1968,6 +1967,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 	struct sock *sk;
+ 	struct dst_entry *dst;
+ 	int dif = skb->dev->ifindex;
++	int ours;
+ 
+ 	/* validate the packet */
+ 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+@@ -1977,14 +1977,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 	uh = udp_hdr(skb);
+ 
+ 	if (skb->pkt_type == PACKET_BROADCAST ||
+-	    skb->pkt_type == PACKET_MULTICAST)
++	    skb->pkt_type == PACKET_MULTICAST) {
++		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
++
++		if (!in_dev)
++			return;
++
++		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++				       iph->protocol);
++		if (!ours)
++			return;
+ 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ 						   uh->source, iph->saddr, dif);
+-	else if (skb->pkt_type == PACKET_HOST)
++	} else if (skb->pkt_type == PACKET_HOST) {
+ 		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+ 					     uh->source, iph->saddr, dif);
+-	else
++	} else {
+ 		return;
++	}
+ 
+ 	if (!sk)
+ 		return;
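
Two udp fixes above: the checksum-failure path no longer returns
-EAGAIN to a blocking caller but loops back after cond_resched(), so a
stream of corrupt packets cannot pin the CPU in a tight retry loop, and
early demux now verifies via ip_check_mc_rcu() that the multicast group
is actually joined on the ingress device before short-circuiting the
socket lookup. A sketch of the yielding retry (receive_one(),
csum_bad() and deliver() are hypothetical stand-ins):

	static int recv_retry(struct sock *sk)
	{
		for (;;) {
			struct sk_buff *skb = receive_one(sk);

			if (!csum_bad(skb))
				return deliver(skb);
			kfree_skb(skb);
			cond_resched();	/* yield between retries */
		}
	}
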
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 1f5e62229aaa..5ca3bc880fef 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -975,7 +975,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ 			tcp_time_stamp + tcptw->tw_ts_offset,
+ 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
+-			tw->tw_tclass, (tw->tw_flowlabel << 12));
++			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
+ 
+ 	inet_twsk_put(tw);
+ }
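
This hunk and the tcp_minisocks.c one above fix the same endianness
bug: shifting the on-wire __be32 flow label by 12 bits only isolates
the label on big-endian hosts. The fix masks the label in network
order, stores it in host order, and converts back when the timewait ACK
is built, so the value round-trips on any architecture:

	__be32 wire = np->flow_label & IPV6_FLOWLABEL_MASK;
	u32 host = be32_to_cpu(wire);	 /* what tw_flowlabel stores now */
	__be32 back = cpu_to_be32(host); /* equals wire when ACK is sent */
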
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index d048d46779fc..1c9512aba77e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -528,10 +528,8 @@ csum_copy_err:
+ 	}
+ 	unlock_sock_fast(sk, slow);
+ 
+-	if (noblock)
+-		return -EAGAIN;
+-
+-	/* starting over for a new packet */
++	/* starting over for a new packet, but check if we need to yield */
++	cond_resched();
+ 	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+@@ -734,7 +732,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
+ 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
+ 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+ 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
+-	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
++	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
++	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
++		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
+ 		return false;
+ 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
+ 		return false;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index d1d7a8166f46..0e9c28dc86b7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1052,7 +1052,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 	struct netlink_table *table = &nl_table[sk->sk_protocol];
+ 	int err;
+ 
+-	lock_sock(sk);
++	mutex_lock(&table->hash.mutex);
+ 
+ 	err = -EBUSY;
+ 	if (nlk_sk(sk)->portid)
+@@ -1069,11 +1069,12 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 	err = 0;
+ 	if (!__netlink_insert(table, sk)) {
+ 		err = -EADDRINUSE;
++		nlk_sk(sk)->portid = 0;
+ 		sock_put(sk);
+ 	}
+ 
+ err:
+-	release_sock(sk);
++	mutex_unlock(&table->hash.mutex);
+ 	return err;
+ }
+ 
+@@ -1082,10 +1083,12 @@ static void netlink_remove(struct sock *sk)
+ 	struct netlink_table *table;
+ 
+ 	table = &nl_table[sk->sk_protocol];
++	mutex_lock(&table->hash.mutex);
+ 	if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
+ 		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+ 		__sock_put(sk);
+ 	}
++	mutex_unlock(&table->hash.mutex);
+ 
+ 	netlink_table_grab();
+ 	if (nlk_sk(sk)->subscriptions) {
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index baef987fe2c0..d3328a19f5b2 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
+ 	struct tcf_proto_ops *t;
+ 	int rc = -ENOENT;
+ 
++	/* Wait for outstanding call_rcu()s, if any, from a
++	 * tcf_proto_ops's destroy() handler.
++	 */
++	rcu_barrier();
++
+ 	write_lock(&cls_mod_lock);
+ 	list_for_each_entry(t, &tcf_proto_base, head) {
+ 		if (t == ops) {
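
A classifier's destroy() handler may queue call_rcu() callbacks whose
functions live in module text; unloading the module before they run
would leave RCU calling into freed code. rcu_barrier() waits for every
already-queued callback to finish, and the hunk places that wait inside
unregister_tcf_proto_ops() itself. The same idiom in a hypothetical
module exit path:

	static void __exit my_cls_exit(void)
	{
		unregister_tcf_proto_ops(&my_cls_ops);
		/* Belt and braces: wait out any call_rcu() callbacks this
		 * module queued before its text is freed.
		 */
		rcu_barrier();
	}
	module_exit(my_cls_exit);
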
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 243b7d169d61..d9c2ee6d2959 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 		if (dev->flags & IFF_UP)
+ 			dev_deactivate(dev);
+ 
+-		if (new && new->ops->attach) {
+-			new->ops->attach(new);
+-			num_q = 0;
+-		}
++		if (new && new->ops->attach)
++			goto skip;
+ 
+ 		for (i = 0; i < num_q; i++) {
+ 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
+@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+ 				qdisc_destroy(old);
+ 		}
+ 
++skip:
+ 		if (!ingress) {
+ 			notify_and_destroy(net, skb, n, classid,
+ 					   dev->qdisc, new);
+ 			if (new && !new->ops->attach)
+ 				atomic_inc(&new->refcnt);
+ 			dev->qdisc = new ? : &noop_qdisc;
++
++			if (new && new->ops->attach)
++				new->ops->attach(new);
+ 		} else {
+ 			notify_and_destroy(net, skb, n, classid, old, new);
+ 		}
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 526b6edab018..146881f068e2 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1887,6 +1887,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
+ 		unix_state_unlock(sk);
+ 		timeo = freezable_schedule_timeout(timeo);
+ 		unix_state_lock(sk);
++
++		if (sock_flag(sk, SOCK_DEAD))
++			break;
++
+ 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ 	}
+ 
+@@ -1947,6 +1951,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		struct sk_buff *skb, *last;
+ 
+ 		unix_state_lock(sk);
++		if (sock_flag(sk, SOCK_DEAD)) {
++			err = -ECONNRESET;
++			goto unlock;
++		}
+ 		last = skb = skb_peek(&sk->sk_receive_queue);
+ again:
+ 		if (skb == NULL) {
+diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
+index 5b24d39d7903..318026617b57 100644
+--- a/net/wireless/wext-compat.c
++++ b/net/wireless/wext-compat.c
+@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
+ 	memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ 	wdev_unlock(wdev);
+ 
++	memset(&sinfo, 0, sizeof(sinfo));
++
+ 	if (rdev_get_station(rdev, dev, bssid, &sinfo))
+ 		return NULL;
+ 
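
rdev_get_station() fills only the station_info fields whose presence
flags the driver sets, so a caller that reads the rest must zero the
struct first; without the memset the wext statistics were computed from
uninitialized stack memory. The zero-before-fill idiom from the hunk:

	struct station_info sinfo;

	memset(&sinfo, 0, sizeof(sinfo));	/* driver may skip fields */
	if (rdev_get_station(rdev, dev, bssid, &sinfo))
		return NULL;
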
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 93c78c3c4b95..a556d63564e6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2167,6 +2167,7 @@ static const struct hda_fixup alc882_fixups[] = {
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
++	SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+ 	SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 3e2ef61c627b..8b7e391dd0b8 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
++	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+ 	case USB_ID(0x046d, 0x0991):
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.
+@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
+ 			      unitid);
+ 		return -EINVAL;
+ 	}
+-	/* no bmControls field (e.g. Maya44) -> ignore */
+-	if (desc->bLength <= 10 + input_pins) {
+-		usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
+-			      unitid);
+-		return 0;
+-	}
+ 
+ 	num_ins = 0;
+ 	ich = 0;
+@@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
+ 		err = parse_audio_unit(state, desc->baSourceID[pin]);
+ 		if (err < 0)
+ 			continue;
++		/* no bmControls field (e.g. Maya44) -> ignore */
++		if (desc->bLength <= 10 + input_pins)
++			continue;
+ 		err = check_input_term(state, desc->baSourceID[pin], &iterm);
+ 		if (err < 0)
+ 			return err;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index b703cb3cda19..e5000da9e9d7 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -437,6 +437,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = ebox44_map,
+ 	},
+ 	{
++		/* MAYA44 USB+ */
++		.id = USB_ID(0x2573, 0x0008),
++		.map = maya44_map,
++	},
++	{
+ 		/* KEF X300A */
+ 		.id = USB_ID(0x27ac, 0x1000),
+ 		.map = scms_usb3318_map,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index e21ec5abcc3a..2a408c60114b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1120,6 +1120,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
++	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ 		return true;
+ 	}
+ 	return false;
+@@ -1266,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+-	/* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+-	case USB_ID(0x20b1, 0x2009):
++
++	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
++	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
+ 		if (fp->altsetting == 3)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;


