From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 30 Sep 2016 19:07:21 +0000 (UTC)
Message-ID: <1475262431.e3a35f50a5f087b5d20a534a6df48f097ab67201.mpagano@gentoo>

commit:     e3a35f50a5f087b5d20a534a6df48f097ab67201
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 30 19:07:11 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 30 19:07:11 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e3a35f50

Linux patch 4.4.23

 0000_README             |    4 +
 1022_linux-4.4.23.patch | 2907 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2911 insertions(+)

diff --git a/0000_README b/0000_README
index d60af0e..7ee3b9f 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  1021_linux-4.4.22.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.22
 
+Patch:  1022_linux-4.4.23.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.23
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-4.4.23.patch b/1022_linux-4.4.23.patch
new file mode 100644
index 0000000..5ea7450
--- /dev/null
+++ b/1022_linux-4.4.23.patch
@@ -0,0 +1,2907 @@
+diff --git a/Makefile b/Makefile
+index a6512f4eec9f..95421b688f23 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+@@ -128,6 +128,10 @@ _all:
+ # Cancel implicit rules on top Makefile
+ $(CURDIR)/Makefile Makefile: ;
+ 
++ifneq ($(words $(subst :, ,$(CURDIR))), 1)
++  $(error main directory cannot contain spaces nor colons)
++endif
++
+ ifneq ($(KBUILD_OUTPUT),)
+ # Invoke a second make in the output directory, passing relevant variables
+ # check that the output directory actually exists
+@@ -495,6 +499,12 @@ ifeq ($(KBUILD_EXTMOD),)
+                 endif
+         endif
+ endif
++# install and module_install need also be processed one by one
++ifneq ($(filter install,$(MAKECMDGOALS)),)
++        ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
++	        mixed-targets := 1
++        endif
++endif
+ 
+ ifeq ($(mixed-targets),1)
+ # ===========================================================================
+@@ -606,11 +616,16 @@ ARCH_CFLAGS :=
+ include arch/$(SRCARCH)/Makefile
+ 
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
++KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
+ 
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS	+= -Os
+ else
++ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS	+= -O2
++else
++KBUILD_CFLAGS   += -O2
++endif
+ endif
+ 
+ # Tell gcc to never replace conditional load with a non-conditional one
+@@ -1260,7 +1275,7 @@ help:
+ 	@echo  '  firmware_install- Install all firmware to INSTALL_FW_PATH'
+ 	@echo  '                    (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
+ 	@echo  '  dir/            - Build all files in dir and below'
+-	@echo  '  dir/file.[oisS] - Build specified target only'
++	@echo  '  dir/file.[ois]  - Build specified target only'
+ 	@echo  '  dir/file.lst    - Build specified mixed source/assembly target only'
+ 	@echo  '                    (requires a recent binutils and recent build (System.map))'
+ 	@echo  '  dir/file.ko     - Build module including final link'
+@@ -1500,11 +1515,11 @@ image_name:
+ # Clear a bunch of variables before executing the submake
+ tools/: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
++	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
+ 
+ tools/%: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
++	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
+ 
+ # Single targets
+ # ---------------------------------------------------------------------------
+diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
+index b445a5d56f43..593da7ffb449 100644
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ 		err = blkcipher_walk_done(desc, &walk,
+ 					  walk.nbytes % AES_BLOCK_SIZE);
+ 	}
+-	if (nbytes) {
++	if (walk.nbytes % AES_BLOCK_SIZE) {
+ 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
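[Annotation: the hunk above (and its arm64 twin further down) makes the cipher-text
tail path key off walk.nbytes % AES_BLOCK_SIZE, i.e. an actual trailing partial
block, instead of the loop-carried nbytes, which could still be non-zero when the
request ended exactly on a block boundary. A minimal userspace sketch of the
predicate; ctr_tail_len() is a hypothetical helper, not a kernel function:]

#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Only the remainder of the final walk chunk identifies a partial tail;
 * a stale byte count from an earlier iteration does not. */
static unsigned int ctr_tail_len(unsigned int walk_nbytes)
{
	return walk_nbytes % AES_BLOCK_SIZE;
}

int main(void)
{
	printf("tail for 48 bytes: %u\n", ctr_tail_len(48));	/* 0, no tail  */
	printf("tail for 50 bytes: %u\n", ctr_tail_len(50));	/* 2-byte tail */
	return 0;
}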
+diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
+index f6d02e4cbcda..5c87dff5d46e 100644
+--- a/arch/arm/mach-pxa/idp.c
++++ b/arch/arm/mach-pxa/idp.c
+@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
+ };
+ 
+ static struct smc91x_platdata smc91x_platdata = {
+-	.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
++	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++		 SMC91X_USE_DMA | SMC91X_NOWAIT,
+ };
+ 
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
+index 13b1d4586d7d..9001312710f7 100644
+--- a/arch/arm/mach-pxa/xcep.c
++++ b/arch/arm/mach-pxa/xcep.c
+@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
+ };
+ 
+ static struct smc91x_platdata xcep_smc91x_info = {
+-	.flags	= SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
++	.flags	= SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++		  SMC91X_NOWAIT | SMC91X_USE_DMA,
+ };
+ 
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
+index 44575edc44b1..cf0a7c2359f0 100644
+--- a/arch/arm/mach-realview/core.c
++++ b/arch/arm/mach-realview/core.c
+@@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = {
+ };
+ 
+ static struct smc91x_platdata smc91x_platdata = {
+-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++		 SMC91X_NOWAIT,
+ };
+ 
+ static struct platform_device realview_eth_device = {
+diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
+index 1525d7b5f1b7..88149f85bc49 100644
+--- a/arch/arm/mach-sa1100/pleb.c
++++ b/arch/arm/mach-sa1100/pleb.c
+@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
+ };
+ 
+ static struct smc91x_platdata smc91x_platdata = {
+-	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
++	.flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
+ };
+ 
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 05d9e16c0dfd..6a51dfccfe71 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ 		err = blkcipher_walk_done(desc, &walk,
+ 					  walk.nbytes % AES_BLOCK_SIZE);
+ 	}
+-	if (nbytes) {
++	if (walk.nbytes % AES_BLOCK_SIZE) {
+ 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+index c6db52ba3a06..10c57771822d 100644
+--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
++++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
+ #include <linux/smc91x.h>
+ 
+ static struct smc91x_platdata smc91x_info = {
+-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++		 SMC91X_NOWAIT,
+ 	.leda = RPC_LED_100_10,
+ 	.ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
+index 2de71e8c104b..93c22468cc14 100644
+--- a/arch/blackfin/mach-bf561/boards/ezkit.c
++++ b/arch/blackfin/mach-bf561/boards/ezkit.c
+@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
+ #include <linux/smc91x.h>
+ 
+ static struct smc91x_platdata smc91x_info = {
+-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++		 SMC91X_NOWAIT,
+ 	.leda = RPC_LED_100_10,
+ 	.ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
+index f0e314ceb8ba..7f975b20b20c 100644
+--- a/arch/mips/Kconfig.debug
++++ b/arch/mips/Kconfig.debug
+@@ -113,42 +113,6 @@ config SPINLOCK_TEST
+ 	help
+ 	  Add several files to the debugfs to test spinlock speed.
+ 
+-if CPU_MIPSR6
+-
+-choice
+-	prompt "Compact branch policy"
+-	default MIPS_COMPACT_BRANCHES_OPTIMAL
+-
+-config MIPS_COMPACT_BRANCHES_NEVER
+-	bool "Never (force delay slot branches)"
+-	help
+-	  Pass the -mcompact-branches=never flag to the compiler in order to
+-	  force it to always emit branches with delay slots, and make no use
+-	  of the compact branch instructions introduced by MIPSr6. This is
+-	  useful if you suspect there may be an issue with compact branches in
+-	  either the compiler or the CPU.
+-
+-config MIPS_COMPACT_BRANCHES_OPTIMAL
+-	bool "Optimal (use where beneficial)"
+-	help
+-	  Pass the -mcompact-branches=optimal flag to the compiler in order for
+-	  it to make use of compact branch instructions where it deems them
+-	  beneficial, and use branches with delay slots elsewhere. This is the
+-	  default compiler behaviour, and should be used unless you have a
+-	  reason to choose otherwise.
+-
+-config MIPS_COMPACT_BRANCHES_ALWAYS
+-	bool "Always (force compact branches)"
+-	help
+-	  Pass the -mcompact-branches=always flag to the compiler in order to
+-	  force it to always emit compact branches, making no use of branch
+-	  instructions with delay slots. This can result in more compact code
+-	  which may be beneficial in some scenarios.
+-
+-endchoice
+-
+-endif # CPU_MIPSR6
+-
+ config SCACHE_DEBUGFS
+ 	bool "L2 cache debugfs entries"
+ 	depends on DEBUG_FS
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 3f70ba54ae21..252e347958f3 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -204,10 +204,6 @@ toolchain-msa				:= $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(
+ cflags-$(toolchain-msa)			+= -DTOOLCHAIN_SUPPORTS_MSA
+ endif
+ 
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)	+= -mcompact-branches=never
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL)	+= -mcompact-branches=optimal
+-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS)	+= -mcompact-branches=always
+-
+ #
+ # Firmware support
+ #
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index e689b894353c..8dedee1def83 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -135,6 +135,7 @@
+ 	ldc1	$f28, THREAD_FPR28(\thread)
+ 	ldc1	$f30, THREAD_FPR30(\thread)
+ 	ctc1	\tmp, fcr31
++	.set	pop
+ 	.endm
+ 
+ 	.macro	fpu_restore_16odd thread
+diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+index 2f82bfa3a773..c9f5769dfc8f 100644
+--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
++++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+@@ -11,11 +11,13 @@
+ #define CP0_EBASE $15, 1
+ 
+ 	.macro  kernel_entry_setup
++#ifdef CONFIG_SMP
+ 	mfc0	t0, CP0_EBASE
+ 	andi	t0, t0, 0x3ff		# CPUNum
+ 	beqz	t0, 1f
+ 	# CPUs other than zero goto smp_bootstrap
+ 	j	smp_bootstrap
++#endif /* CONFIG_SMP */
+ 
+ 1:
+ 	.endm
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index 4674a74a08b5..af27334d6809 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -1164,7 +1164,9 @@ fpu_emul:
+ 		regs->regs[31] = r31;
+ 		regs->cp0_epc = epc;
+ 		if (!used_math()) {     /* First time FPU user.  */
++			preempt_disable();
+ 			err = init_fpu();
++			preempt_enable();
+ 			set_used_math();
+ 		}
+ 		lose_fpu(1);    /* Save FPU state for the emulator. */
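[Annotation: init_fpu() programs the FPU of the CPU the task is currently running
on, so the first-use path must not be migrated mid-call; the hunk brackets it with
preempt_disable()/preempt_enable(). Kernel-style sketch of the pattern, not a
standalone program; init_fpu_state() is a hypothetical stand-in for init_fpu():]

#include <linux/preempt.h>

static int init_fpu_state(void);	/* hypothetical, mirrors init_fpu() */

static int first_fpu_use(void)
{
	int err;

	preempt_disable();	/* no migration while owning this CPU's FPU */
	err = init_fpu_state();
	preempt_enable();
	return err;
}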
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 89847bee2b53..44a6f25e902e 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Avoid inadvertently triggering emulation */
+-	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+-	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
++	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
++	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ 		return -EOPNOTSUPP;
+-	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
++	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* FR = 0 not supported in MIPS R6 */
+-	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
++	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Proceed with the mode switch */
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 2b521e07b860..7fef02a9eb85 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
+ 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ 	notify_cpu_starting(cpu);
+ 
++	cpumask_set_cpu(cpu, &cpu_callin_map);
++	synchronise_count_slave(cpu);
++
+ 	set_cpu_online(cpu, true);
+ 
+ 	set_cpu_sibling_map(cpu);
+@@ -181,10 +184,6 @@ asmlinkage void start_secondary(void)
+ 
+ 	calculate_cpu_foreign_map();
+ 
+-	cpumask_set_cpu(cpu, &cpu_callin_map);
+-
+-	synchronise_count_slave(cpu);
+-
+ 	/*
+ 	 * irq will be enabled in ->smp_finish(), enabling it too early
+ 	 * is dangerous.
+diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
+index 975e99759bab..5649a9e429e0 100644
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
+ static void __init init_vdso_image(struct mips_vdso_image *image)
+ {
+ 	unsigned long num_pages, i;
++	unsigned long data_pfn;
+ 
+ 	BUG_ON(!PAGE_ALIGNED(image->data));
+ 	BUG_ON(!PAGE_ALIGNED(image->size));
+ 
+ 	num_pages = image->size / PAGE_SIZE;
+ 
+-	for (i = 0; i < num_pages; i++) {
+-		image->mapping.pages[i] =
+-			virt_to_page(image->data + (i * PAGE_SIZE));
+-	}
++	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
++	for (i = 0; i < num_pages; i++)
++		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
+ }
+ 
+ static int __init init_vdso(void)
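[Annotation: virt_to_page() is only valid for linear-map addresses, but the vDSO
image data is a kernel-image symbol, so the hunk derives the first page frame
number from __pa_symbol() and walks pfn_to_page() from there. A compilable sketch
of just the address arithmetic, with a hypothetical physical base in place of
__pa_symbol(image->data):]

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t phys_base = 0x80001000ULL;	/* hypothetical, page aligned */
	size_t size = 4 * PAGE_SIZE;

	/* In the kernel, each pfn would become a struct page via pfn_to_page(). */
	for (uint64_t pfn = phys_base >> PAGE_SHIFT;
	     pfn < (phys_base + size) >> PAGE_SHIFT; pfn++)
		printf("pfn %#llx\n", (unsigned long long)pfn);
	return 0;
}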
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 8cc1622b2ee0..dca7bc87dad9 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		return blkcipher_walk_done(desc, walk, -EINVAL);
+ 	}
+ 
++	bsize = min(walk->walk_blocksize, n);
++
+ 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ 			 BLKCIPHER_WALK_DIFF);
+ 	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+@@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		}
+ 	}
+ 
+-	bsize = min(walk->walk_blocksize, n);
+ 	n = scatterwalk_clamp(&walk->in, n);
+ 	n = scatterwalk_clamp(&walk->out, n);
+ 
+diff --git a/crypto/echainiv.c b/crypto/echainiv.c
+index b96a84560b67..343a74e96e2a 100644
+--- a/crypto/echainiv.c
++++ b/crypto/echainiv.c
+@@ -1,8 +1,8 @@
+ /*
+  * echainiv: Encrypted Chain IV Generator
+  *
+- * This generator generates an IV based on a sequence number by xoring it
+- * with a salt and then encrypting it with the same key as used to encrypt
++ * This generator generates an IV based on a sequence number by multiplying
++ * it with a salt and then encrypting it with the same key as used to encrypt
+  * the plain text.  This algorithm requires that the block size be equal
+  * to the IV size.  It is mainly useful for CBC.
+  *
+@@ -23,81 +23,17 @@
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+-#include <linux/mm.h>
+ #include <linux/module.h>
+-#include <linux/percpu.h>
+-#include <linux/spinlock.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+ 
+-#define MAX_IV_SIZE 16
+-
+-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
+-
+-/* We don't care if we get preempted and read/write IVs from the next CPU. */
+-static void echainiv_read_iv(u8 *dst, unsigned size)
+-{
+-	u32 *a = (u32 *)dst;
+-	u32 __percpu *b = echainiv_iv;
+-
+-	for (; size >= 4; size -= 4) {
+-		*a++ = this_cpu_read(*b);
+-		b++;
+-	}
+-}
+-
+-static void echainiv_write_iv(const u8 *src, unsigned size)
+-{
+-	const u32 *a = (const u32 *)src;
+-	u32 __percpu *b = echainiv_iv;
+-
+-	for (; size >= 4; size -= 4) {
+-		this_cpu_write(*b, *a);
+-		a++;
+-		b++;
+-	}
+-}
+-
+-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
+-{
+-	struct aead_request *subreq = aead_request_ctx(req);
+-	struct crypto_aead *geniv;
+-	unsigned int ivsize;
+-
+-	if (err == -EINPROGRESS)
+-		return;
+-
+-	if (err)
+-		goto out;
+-
+-	geniv = crypto_aead_reqtfm(req);
+-	ivsize = crypto_aead_ivsize(geniv);
+-
+-	echainiv_write_iv(subreq->iv, ivsize);
+-
+-	if (req->iv != subreq->iv)
+-		memcpy(req->iv, subreq->iv, ivsize);
+-
+-out:
+-	if (req->iv != subreq->iv)
+-		kzfree(subreq->iv);
+-}
+-
+-static void echainiv_encrypt_complete(struct crypto_async_request *base,
+-					 int err)
+-{
+-	struct aead_request *req = base->data;
+-
+-	echainiv_encrypt_complete2(req, err);
+-	aead_request_complete(req, err);
+-}
+-
+ static int echainiv_encrypt(struct aead_request *req)
+ {
+ 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ 	struct aead_request *subreq = aead_request_ctx(req);
+-	crypto_completion_t compl;
+-	void *data;
++	__be64 nseqno;
++	u64 seqno;
+ 	u8 *info;
+ 	unsigned int ivsize = crypto_aead_ivsize(geniv);
+ 	int err;
+@@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req)
+ 
+ 	aead_request_set_tfm(subreq, ctx->child);
+ 
+-	compl = echainiv_encrypt_complete;
+-	data = req;
+ 	info = req->iv;
+ 
+ 	if (req->src != req->dst) {
+@@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req)
+ 			return err;
+ 	}
+ 
+-	if (unlikely(!IS_ALIGNED((unsigned long)info,
+-				 crypto_aead_alignmask(geniv) + 1))) {
+-		info = kmalloc(ivsize, req->base.flags &
+-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+-								  GFP_ATOMIC);
+-		if (!info)
+-			return -ENOMEM;
+-
+-		memcpy(info, req->iv, ivsize);
+-	}
+-
+-	aead_request_set_callback(subreq, req->base.flags, compl, data);
++	aead_request_set_callback(subreq, req->base.flags,
++				  req->base.complete, req->base.data);
+ 	aead_request_set_crypt(subreq, req->dst, req->dst,
+ 			       req->cryptlen, info);
+ 	aead_request_set_ad(subreq, req->assoclen);
+ 
+-	crypto_xor(info, ctx->salt, ivsize);
++	memcpy(&nseqno, info + ivsize - 8, 8);
++	seqno = be64_to_cpu(nseqno);
++	memset(info, 0, ivsize);
++
+ 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+-	echainiv_read_iv(info, ivsize);
+ 
+-	err = crypto_aead_encrypt(subreq);
+-	echainiv_encrypt_complete2(req, err);
+-	return err;
++	do {
++		u64 a;
++
++		memcpy(&a, ctx->salt + ivsize - 8, 8);
++
++		a |= 1;
++		a *= seqno;
++
++		memcpy(info + ivsize - 8, &a, 8);
++	} while ((ivsize -= 8));
++
++	return crypto_aead_encrypt(subreq);
+ }
+ 
+ static int echainiv_decrypt(struct aead_request *req)
+@@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
+ 	alg = crypto_spawn_aead_alg(spawn);
+ 
+ 	err = -EINVAL;
+-	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
+-	    inst->alg.ivsize > MAX_IV_SIZE)
++	if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
+ 		goto free_inst;
+ 
+ 	inst->alg.encrypt = echainiv_encrypt;
+@@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
+ 	inst->alg.init = aead_init_geniv;
+ 	inst->alg.exit = aead_exit_geniv;
+ 
+-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+ 	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+ 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
+ 
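[Annotation: per the rewritten header comment, the IV is now derived from the
sequence number by multiplying it, 64 bits at a time, with a salt-derived odd
constant; the creation check above correspondingly requires a non-zero ivsize
that is a multiple of sizeof(u64). A compilable sketch of that derivation,
mirroring the do/while in the hunk (buffers and main() are illustrative only):]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Each 8-byte lane of the IV becomes seqno * (salt_lane | 1). OR-ing in 1
 * keeps the multiplier odd, hence invertible mod 2^64, so distinct sequence
 * numbers map to distinct IV lanes. */
static void derive_iv(uint8_t *iv, const uint8_t *salt,
		      unsigned int ivsize, uint64_t seqno)
{
	memset(iv, 0, ivsize);
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;
		a *= seqno;
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}

int main(void)
{
	uint8_t salt[16] = { 0x42 }, iv[16];

	derive_iv(iv, salt, sizeof(iv), 7);
	printf("%02x %02x\n", iv[0], iv[8]);	/* host-endian lane bytes */
	return 0;
}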
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+index 4bef72a9d106..3fda594700e0 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+@@ -59,9 +59,11 @@ static void
+ nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
+ {
+ 	struct nvkm_device *device = pm->engine.subdev.device;
+-	if (pm->sequence != pm->sequence) {
++	struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
++
++	if (nv40pm->sequence != pm->sequence) {
+ 		nvkm_wr32(device, 0x400084, 0x00000020);
+-		pm->sequence = pm->sequence;
++		nv40pm->sequence = pm->sequence;
+ 	}
+ }
+ 
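[Annotation: the old test compared pm->sequence against itself, so it could never
be true and the counter-reset write to 0x400084 never ran; the fix recovers the
nv40-specific copy from the containing nv40_pm via container_of(). Userspace
sketch of the container_of pattern with hypothetical stand-in types:]

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_pm { int sequence; };

/* Hypothetical wrapper mirroring nv40_pm: the base object is embedded,
 * so a pointer to it recovers the outer object and its own counter. */
struct nv40_pm {
	struct nvkm_pm base;
	int sequence;
};

int main(void)
{
	struct nv40_pm pm = { .base.sequence = 3, .sequence = 0 };
	struct nvkm_pm *basep = &pm.base;
	struct nv40_pm *outer = container_of(basep, struct nv40_pm, base);

	if (outer->sequence != basep->sequence)	/* two distinct fields now */
		outer->sequence = basep->sequence;
	printf("%d\n", outer->sequence);	/* 3 */
	return 0;
}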
+diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
+index 56e1d633875e..6e6c76080d6a 100644
+--- a/drivers/gpu/drm/qxl/qxl_draw.c
++++ b/drivers/gpu/drm/qxl/qxl_draw.c
+@@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ 				 * correctly globaly, since that would require
+ 				 * tracking all of our palettes. */
+ 	ret = qxl_bo_kmap(palette_bo, (void **)&pal);
++	if (ret)
++		return ret;
+ 	pal->num_ents = 2;
+ 	pal->unique = unique++;
+ 	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 76e699f9ed97..eef3aa6007f1 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ 	/* Set the number of I2C channel instance */
+ 	adap_info->ch_num = id->driver_data;
+ 
+-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+-		  KBUILD_MODNAME, adap_info);
+-	if (ret) {
+-		pch_pci_err(pdev, "request_irq FAILED\n");
+-		goto err_request_irq;
+-	}
+-
+ 	for (i = 0; i < adap_info->ch_num; i++) {
+ 		pch_adap = &adap_info->pch_data[i].pch_adapter;
+ 		adap_info->pch_i2c_suspended = false;
+@@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
+ 		adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
+ 
+ 		pch_adap->dev.parent = &pdev->dev;
++	}
++
++	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
++		  KBUILD_MODNAME, adap_info);
++	if (ret) {
++		pch_pci_err(pdev, "request_irq FAILED\n");
++		goto err_request_irq;
++	}
++
++	for (i = 0; i < adap_info->ch_num; i++) {
++		pch_adap = &adap_info->pch_data[i].pch_adapter;
+ 
+ 		pch_i2c_init(&adap_info->pch_data[i]);
+ 
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index fdcbdab808e9..33b11563cde7 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
+ #ifdef CONFIG_PM_SLEEP
+ static int qup_i2c_suspend(struct device *device)
+ {
+-	qup_i2c_pm_suspend_runtime(device);
++	if (!pm_runtime_suspended(device))
++		return qup_i2c_pm_suspend_runtime(device);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 7ede941e9301..131b434af994 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -433,16 +433,15 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+ 		scale_db = true;
+ 	case IIO_VAL_INT_PLUS_MICRO:
+ 		if (vals[1] < 0)
+-			return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]),
+-					-vals[1],
+-				scale_db ? " dB" : "");
++			return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
++				       -vals[1], scale_db ? " dB" : "");
+ 		else
+ 			return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
+ 				scale_db ? " dB" : "");
+ 	case IIO_VAL_INT_PLUS_NANO:
+ 		if (vals[1] < 0)
+-			return sprintf(buf, "-%ld.%09u\n", abs(vals[0]),
+-					-vals[1]);
++			return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
++				       -vals[1]);
+ 		else
+ 			return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ 	case IIO_VAL_FRACTIONAL:
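[Annotation: abs() applied to an int yields an int, so the old "%ld" specifier
consumed the wrong vararg width on 64-bit builds; the hunk switches to "%d".
Minimal sketch of formatting a negative fixed-point reading from an
(integer, micro) pair, mirroring the corrected code:]

#include <stdio.h>
#include <stdlib.h>

/* value = vals0 + vals1/1e6; a negative reading arrives with vals1 < 0 */
static void fmt(int vals0, int vals1)
{
	if (vals1 < 0)
		printf("-%d.%06u\n", abs(vals0), -vals1);	/* %d matches abs(int) */
	else
		printf("%d.%06u\n", vals0, vals1);
}

int main(void)
{
	fmt(0, -500000);	/* -0.500000 */
	fmt(1, 250000);		/* 1.250000  */
	return 0;
}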
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 3821c4786662..565bb2c140ed 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1858,10 +1858,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+ 	/*
+ 	 * All PCI devices managed by this unit should have been destroyed.
+ 	 */
+-	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
++	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
+ 		for_each_active_dev_scope(dmaru->devices,
+ 					  dmaru->devices_cnt, i, dev)
+ 			return -EBUSY;
++	}
+ 
+ 	ret = dmar_ir_hotplug(dmaru, false);
+ 	if (ret == 0)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 24d81308a1a6..b7f852d824a3 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4182,10 +4182,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+ 	if (!atsru)
+ 		return 0;
+ 
+-	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
++	if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
+ 		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ 					  i, dev)
+ 			return -EBUSY;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index f0480d687f17..ba780c45f645 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
+ 		sdinfo = &cfg->sub_devs[i];
+ 		client = v4l2_get_subdevdata(sdinfo->sd);
+ 		if (client->addr == curr_client->addr &&
+-		    client->adapter->nr == client->adapter->nr) {
++		    client->adapter->nr == curr_client->adapter->nr) {
+ 			if (vpfe->current_input >= 1)
+ 				return -1;
+ 			*app_input_index = j + vpfe->current_input;
+diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
+index 744ca5cacc9b..f9fa3fad728e 100644
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
+ 
+ 	printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
+ 
+-	msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
++	msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
+ 	if (!msp_flash)
+ 		return -ENOMEM;
+ 
+-	msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
++	msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
+ 	if (!msp_parts)
+ 		goto free_msp_flash;
+ 
+-	msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
++	msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
+ 	if (!msp_maps)
+ 		goto free_msp_parts;
+ 
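[Annotation: the hunk replaces open-coded kmalloc(n * size) with kcalloc(), which
zeroes the memory and refuses overflowing multiplications, and switches to the
sizeof(*ptr) idiom so the element size tracks the pointer's type; the msp_maps
line was apparently being sized by the wrong structure before. Userspace sketch
of an overflow-checked calloc-style wrapper; checked_calloc() is a hypothetical
name:]

#include <stdlib.h>
#include <stdint.h>

static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* zeroed, like kcalloc */
}

int main(void)
{
	struct map { void *virt; size_t len; } **tbl;

	/* sizeof(*tbl) stays correct even if tbl's type changes later */
	tbl = checked_calloc(16, sizeof(*tbl));
	free(tbl);
	return 0;
}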
+diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
+index 142fc3d79463..784c6e1a0391 100644
+--- a/drivers/mtd/maps/sa1100-flash.c
++++ b/drivers/mtd/maps/sa1100-flash.c
+@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
+ 
+ 		info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ 					      plat->name);
+-		if (info->mtd == NULL)
++		if (info->mtd == NULL) {
+ 			ret = -ENXIO;
++			goto err;
++		}
+ 	}
+ 	info->mtd->dev.parent = &pdev->dev;
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b3d70a7a5262..5dca77e0ffed 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 			    slave_dev->name);
+ 	}
+ 
+-	/* already enslaved */
+-	if (slave_dev->flags & IFF_SLAVE) {
+-		netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
++	/* already in-use? */
++	if (netdev_is_rx_handler_busy(slave_dev)) {
++		netdev_err(bond_dev,
++			   "Error: Device is in use and cannot be enslaved\n");
+ 		return -EBUSY;
+ 	}
+ 
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 41c0fc9f3b14..16f7cadda5c3 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 	int err;
+ 
+-	err = flexcan_chip_disable(priv);
+-	if (err)
+-		return err;
+-
+ 	if (netif_running(dev)) {
++		err = flexcan_chip_disable(priv);
++		if (err)
++			return err;
+ 		netif_stop_queue(dev);
+ 		netif_device_detach(dev);
+ 	}
+@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ {
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
++	int err;
+ 
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 	if (netif_running(dev)) {
+ 		netif_device_attach(dev);
+ 		netif_start_queue(dev);
++		err = flexcan_chip_enable(priv);
++		if (err)
++			return err;
+ 	}
+-	return flexcan_chip_enable(priv);
++	return 0;
+ }
+ 
+ static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
+diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
+index 6bba1c98d764..c7994e372284 100644
+--- a/drivers/net/dsa/bcm_sf2.h
++++ b/drivers/net/dsa/bcm_sf2.h
+@@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,	\
+ static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
+ 						u32 mask)		\
+ {									\
+-	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
+ 	priv->irq##which##_mask &= ~(mask);				\
++	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
+ }									\
+ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
+ 						u32 mask)		\
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 037fc4cdf5af..cc199063612a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ 	return cmd->cmd_buf + (idx << cmd->log_stride);
+ }
+ 
+-static u8 xor8_buf(void *buf, int len)
++static u8 xor8_buf(void *buf, size_t offset, int len)
+ {
+ 	u8 *ptr = buf;
+ 	u8 sum = 0;
+ 	int i;
++	int end = len + offset;
+ 
+-	for (i = 0; i < len; i++)
++	for (i = offset; i < end; i++)
+ 		sum ^= ptr[i];
+ 
+ 	return sum;
+@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
+ 
+ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
++	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
++
++	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
+ 		return -EINVAL;
+ 
+-	if (xor8_buf(block, sizeof(*block)) != 0xff)
++	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
+ 		return -EINVAL;
+ 
+ 	return 0;
+ }
+ 
+-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+-			   int csum)
++static void calc_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+-	block->token = token;
+-	if (csum) {
+-		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+-					    sizeof(block->data) - 2);
+-		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+-	}
++	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
++	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++
++	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
++	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
+ }
+ 
+-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
++static void calc_chain_sig(struct mlx5_cmd_msg *msg)
+ {
+ 	struct mlx5_cmd_mailbox *next = msg->next;
+-
+-	while (next) {
+-		calc_block_sig(next->buf, token, csum);
++	int size = msg->len;
++	int blen = size - min_t(int, sizeof(msg->first.data), size);
++	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++		/ MLX5_CMD_DATA_BLOCK_SIZE;
++	int i = 0;
++
++	for (i = 0; i < n && next; i++)  {
++		calc_block_sig(next->buf);
+ 		next = next->next;
+ 	}
+ }
+ 
+ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+ {
+-	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
+-	calc_chain_sig(ent->in, ent->token, csum);
+-	calc_chain_sig(ent->out, ent->token, csum);
++	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
++	if (csum) {
++		calc_chain_sig(ent->in);
++		calc_chain_sig(ent->out);
++	}
+ }
+ 
+ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
+ 	struct mlx5_cmd_mailbox *next = ent->out->next;
+ 	int err;
+ 	u8 sig;
++	int size = ent->out->len;
++	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
++	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++		/ MLX5_CMD_DATA_BLOCK_SIZE;
++	int i = 0;
+ 
+-	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
++	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ 	if (sig != 0xff)
+ 		return -EINVAL;
+ 
+-	while (next) {
++	for (i = 0; i < n && next; i++) {
+ 		err = verify_block_sig(next->buf);
+ 		if (err)
+ 			return err;
+@@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
+ 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ 	}
+ 
+-	ent->token = alloc_token(cmd);
+ 	cmd->ent_arr[ent->idx] = ent;
+ 	lay = get_inst(cmd, ent->idx);
+ 	ent->lay = lay;
+@@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ 			   mlx5_cmd_cbk_t callback,
+-			   void *context, int page_queue, u8 *status)
++			   void *context, int page_queue, u8 *status,
++			   u8 token)
+ {
+ 	struct mlx5_cmd *cmd = &dev->cmd;
+ 	struct mlx5_cmd_work_ent *ent;
+@@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	if (IS_ERR(ent))
+ 		return PTR_ERR(ent);
+ 
++	ent->token = token;
++
+ 	if (!callback)
+ 		init_completion(&ent->done);
+ 
+@@ -844,7 +860,8 @@ static const struct file_operations fops = {
+ 	.write	= dbg_write,
+ };
+ 
+-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
++static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
++			    u8 token)
+ {
+ 	struct mlx5_cmd_prot_block *block;
+ 	struct mlx5_cmd_mailbox *next;
+@@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+ 		memcpy(block->data, from, copy);
+ 		from += copy;
+ 		size -= copy;
++		block->token = token;
+ 		next = next->next;
+ 	}
+ 
+@@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
+ }
+ 
+ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+-					       gfp_t flags, int size)
++					       gfp_t flags, int size,
++					       u8 token)
+ {
+ 	struct mlx5_cmd_mailbox *tmp, *head = NULL;
+ 	struct mlx5_cmd_prot_block *block;
+@@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+ 		tmp->next = head;
+ 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
+ 		block->block_num = cpu_to_be32(n - i - 1);
++		block->token = token;
+ 		head = tmp;
+ 	}
+ 	msg->next = head;
+@@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ 	}
+ 
+ 	if (IS_ERR(msg))
+-		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
++		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+ 
+ 	return msg;
+ }
+@@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 	int err;
+ 	u8 status = 0;
+ 	u32 drv_synd;
++	u8 token;
+ 
+ 	if (pci_channel_offline(dev->pdev) ||
+ 	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+@@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ 		return err;
+ 	}
+ 
+-	err = mlx5_copy_to_msg(inb, in, in_size);
++	token = alloc_token(&dev->cmd);
++
++	err = mlx5_copy_to_msg(inb, in, in_size, token);
+ 	if (err) {
+ 		mlx5_core_warn(dev, "err %d\n", err);
+ 		goto out_in;
+ 	}
+ 
+-	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
++	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
+ 	if (IS_ERR(outb)) {
+ 		err = PTR_ERR(outb);
+ 		goto out_in;
+ 	}
+ 
+ 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+-			      pages_queue, &status);
++			      pages_queue, &status, token);
+ 	if (err)
+ 		goto out_out;
+ 
+@@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ 	INIT_LIST_HEAD(&cmd->cache.med.head);
+ 
+ 	for (i = 0; i < NUM_LONG_LISTS; i++) {
+-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
++		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
+ 		if (IS_ERR(msg)) {
+ 			err = PTR_ERR(msg);
+ 			goto ex_err;
+@@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ 	}
+ 
+ 	for (i = 0; i < NUM_MED_LISTS; i++) {
+-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
++		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
+ 		if (IS_ERR(msg)) {
+ 			err = PTR_ERR(msg);
+ 			goto ex_err;
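[Annotation: two coordinated fixes in the mlx5 hunks above: the command token is
now allocated once per command and copied into every mailbox block when the
message is built, and xor8_buf() gains an offset so the control signature can
skip the data field while still covering the reserved bytes. Compilable sketch
of the range-XOR helper and its validity invariant (the demo block layout is
illustrative only):]

#include <stdint.h>
#include <stdio.h>

/* XOR all bytes in buf[offset, offset + len). A signed block is valid
 * when XOR-ing every byte, signature included, yields 0xff. */
static uint8_t xor8_buf(const void *buf, size_t offset, int len)
{
	const uint8_t *ptr = buf;
	uint8_t sum = 0;

	for (int i = offset; i < len + (int)offset; i++)
		sum ^= ptr[i];
	return sum;
}

int main(void)
{
	uint8_t block[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

	block[7] = ~xor8_buf(block, 0, 7);	/* sign all but the last byte */
	printf("%s\n", xor8_buf(block, 0, 8) == 0xff ? "ok" : "bad");
	return 0;
}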
+diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
+index 0e2fc1a844ab..8c44cf6ff7a2 100644
+--- a/drivers/net/ethernet/smsc/smc91x.c
++++ b/drivers/net/ethernet/smsc/smc91x.c
+@@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
+ 	if (pd) {
+ 		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
+ 		lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
++
++		if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
++			dev_err(&pdev->dev,
++				"at least one of 8-bit or 16-bit access support is required.\n");
++			ret = -ENXIO;
++			goto out_free_netdev;
++		}
+ 	}
+ 
+ #if IS_BUILTIN(CONFIG_OF)
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index a3c129e1e40a..29df0465daf4 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -37,6 +37,27 @@
+ #include <linux/smc91x.h>
+ 
+ /*
++ * Any 16-bit access is performed with two 8-bit accesses if the hardware
++ * can't do it directly. Most registers are 16-bit so those are mandatory.
++ */
++#define SMC_outw_b(x, a, r)						\
++	do {								\
++		unsigned int __val16 = (x);				\
++		unsigned int __reg = (r);				\
++		SMC_outb(__val16, a, __reg);				\
++		SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT));	\
++	} while (0)
++
++#define SMC_inw_b(a, r)							\
++	({								\
++		unsigned int __val16;					\
++		unsigned int __reg = r;					\
++		__val16  = SMC_inb(a, __reg);				\
++		__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
++		__val16;						\
++	})
++
++/*
+  * Define your architecture specific bus configuration parameters here.
+  */
+ 
+@@ -55,10 +76,30 @@
+ #define SMC_IO_SHIFT		(lp->io_shift)
+ 
+ #define SMC_inb(a, r)		readb((a) + (r))
+-#define SMC_inw(a, r)		readw((a) + (r))
++#define SMC_inw(a, r)							\
++	({								\
++		unsigned int __smc_r = r;				\
++		SMC_16BIT(lp) ? readw((a) + __smc_r) :			\
++		SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :			\
++		({ BUG(); 0; });					\
++	})
++
+ #define SMC_inl(a, r)		readl((a) + (r))
+ #define SMC_outb(v, a, r)	writeb(v, (a) + (r))
++#define SMC_outw(v, a, r)						\
++	do {								\
++		unsigned int __v = v, __smc_r = r;			\
++		if (SMC_16BIT(lp))					\
++			__SMC_outw(__v, a, __smc_r);			\
++		else if (SMC_8BIT(lp))					\
++			SMC_outw_b(__v, a, __smc_r);			\
++		else							\
++			BUG();						\
++	} while (0)
++
+ #define SMC_outl(v, a, r)	writel(v, (a) + (r))
++#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, l)
++#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, l)
+ #define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
+ #define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
+ #define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
+@@ -66,7 +107,7 @@
+ #define SMC_IRQ_FLAGS		(-1)	/* from resource */
+ 
+ /* We actually can't write halfwords properly if not word aligned */
+-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
++static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ {
+ 	if ((machine_is_mainstone() || machine_is_stargate2() ||
+ 	     machine_is_pxa_idp()) && reg & 2) {
+@@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
+ 
+ #if ! SMC_CAN_USE_16BIT
+ 
+-/*
+- * Any 16-bit access is performed with two 8-bit accesses if the hardware
+- * can't do it directly. Most registers are 16-bit so those are mandatory.
+- */
+-#define SMC_outw(x, ioaddr, reg)					\
+-	do {								\
+-		unsigned int __val16 = (x);				\
+-		SMC_outb( __val16, ioaddr, reg );			\
+-		SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+-	} while (0)
+-#define SMC_inw(ioaddr, reg)						\
+-	({								\
+-		unsigned int __val16;					\
+-		__val16 =  SMC_inb( ioaddr, reg );			\
+-		__val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+-		__val16;						\
+-	})
+-
++#define SMC_outw(x, ioaddr, reg)	SMC_outw_b(x, ioaddr, reg)
++#define SMC_inw(ioaddr, reg)		SMC_inw_b(ioaddr, reg)
+ #define SMC_insw(a, r, p, l)		BUG()
+ #define SMC_outsw(a, r, p, l)		BUG()
+ 
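[Annotation: per the relocated comment, a 16-bit register access is synthesized
from two 8-bit accesses whenever the bus cannot do it natively, which is why the
probe change further up now rejects platform data that enables neither 8-bit nor
16-bit access. Userspace sketch of the byte composition in SMC_outw_b/SMC_inw_b,
little-endian with an IO shift of 0 assumed; the register array and accessor
names are hypothetical:]

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[4];	/* hypothetical 8-bit register window */

static uint8_t inb(unsigned int r)             { return regs[r]; }
static void    outb(uint8_t v, unsigned int r) { regs[r] = v; }

static void outw_b(uint16_t v, unsigned int r)
{
	outb(v & 0xff, r);	/* low byte at reg     */
	outb(v >> 8, r + 1);	/* high byte at reg + 1 */
}

static uint16_t inw_b(unsigned int r)
{
	return inb(r) | (uint16_t)(inb(r + 1) << 8);
}

int main(void)
{
	outw_b(0xbeef, 0);
	printf("%#x\n", inw_b(0));	/* 0xbeef */
	return 0;
}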
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 47cd306dbb3c..bba0ca786aaa 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -640,8 +640,10 @@ phy_err:
+ int phy_start_interrupts(struct phy_device *phydev)
+ {
+ 	atomic_set(&phydev->irq_disable, 0);
+-	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+-			phydev) < 0) {
++	if (request_irq(phydev->irq, phy_interrupt,
++				IRQF_SHARED,
++				"phy_interrupt",
++				phydev) < 0) {
+ 		pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ 			phydev->bus->name, phydev->irq);
+ 		phydev->irq = PHY_POLL;
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 1bdeacf7b257..bc70ce62bc03 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -869,8 +869,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ 			hw->wiphy->interface_modes |=
+ 					BIT(NL80211_IFTYPE_P2P_DEVICE);
+ 
+-			hw->wiphy->iface_combinations = if_comb;
+-			hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
++		hw->wiphy->iface_combinations = if_comb;
++		hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ 	}
+ 
+ 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
+index 93bdf684babe..ae047ab7a4df 100644
+--- a/drivers/net/wireless/iwlegacy/3945.c
++++ b/drivers/net/wireless/iwlegacy/3945.c
+@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
+ 	int txq_id;
+ 
+ 	/* Tx queues */
+-	if (il->txq)
++	if (il->txq) {
+ 		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ 			if (txq_id == IL39_CMD_QUEUE_NUM)
+ 				il_cmd_queue_free(il);
+ 			else
+ 				il_tx_queue_free(il, txq_id);
++	}
+ 
+ 	/* free tx queue structure */
+ 	il_free_txq_mem(il);
+diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
+index 20e6aa910700..c148085742a0 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
+@@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
+ 		/* bound gain by 2 bits value max, 3rd bit is sign */
+ 		data->delta_gain_code[i] =
+ 			min(abs(delta_g),
+-			(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
++			(s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+ 
+ 		if (delta_g < 0)
+ 			/*
+diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
+index 9c65f134d447..da7a75f82489 100644
+--- a/drivers/power/max17042_battery.c
++++ b/drivers/power/max17042_battery.c
+@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
+ }
+ 
+ static inline void max17042_read_model_data(struct max17042_chip *chip,
+-					u8 addr, u32 *data, int size)
++					u8 addr, u16 *data, int size)
+ {
+ 	struct regmap *map = chip->regmap;
+ 	int i;
++	u32 tmp;
+ 
+-	for (i = 0; i < size; i++)
+-		regmap_read(map, addr + i, &data[i]);
++	for (i = 0; i < size; i++) {
++		regmap_read(map, addr + i, &tmp);
++		data[i] = (u16)tmp;
++	}
+ }
+ 
+ static inline int max17042_model_data_compare(struct max17042_chip *chip,
+@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
+ {
+ 	int ret;
+ 	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
+-	u32 *temp_data;
++	u16 *temp_data;
+ 
+ 	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+ 	if (!temp_data)
+@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
+ 	ret = max17042_model_data_compare(
+ 		chip,
+ 		chip->pdata->config_data->cell_char_tbl,
+-		(u16 *)temp_data,
++		temp_data,
+ 		table_size);
+ 
+ 	max10742_lock_model(chip);
+@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
+ {
+ 	int i;
+ 	int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
+-	u32 *temp_data;
++	u16 *temp_data;
+ 	int ret = 0;
+ 
+ 	temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
+diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
+index 9ab7f562a83b..f69387e12c1e 100644
+--- a/drivers/power/reset/hisi-reboot.c
++++ b/drivers/power/reset/hisi-reboot.c
+@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
+ 
+ 	if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
+ 		pr_err("failed to find reboot-offset property\n");
++		iounmap(base);
+ 		return -EINVAL;
+ 	}
+ 
+ 	err = register_restart_handler(&hisi_restart_nb);
+-	if (err)
++	if (err) {
+ 		dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
+ 			err);
++		iounmap(base);
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
+index d9f56730c735..040a40b4b173 100644
+--- a/drivers/power/tps65217_charger.c
++++ b/drivers/power/tps65217_charger.c
+@@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
+ 	if (!charger)
+ 		return -ENOMEM;
+ 
++	platform_set_drvdata(pdev, charger);
+ 	charger->tps = tps;
+ 	charger->dev = &pdev->dev;
+ 
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index d24ca5f281b4..7831bc6b51dd 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
+   */
+ bool pwm_can_sleep(struct pwm_device *pwm)
+ {
+-	return pwm->chip->can_sleep;
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(pwm_can_sleep);
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 3f8d357b1bac..278e10cd771f 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5941,11 +5941,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
+ 			if (fusion->ld_drv_map[i])
+ 				free_pages((ulong)fusion->ld_drv_map[i],
+ 					fusion->drv_map_pages);
+-				if (fusion->pd_seq_sync)
+-					dma_free_coherent(&instance->pdev->dev,
+-						pd_seq_map_sz,
+-						fusion->pd_seq_sync[i],
+-						fusion->pd_seq_phys[i]);
++			if (fusion->pd_seq_sync[i])
++				dma_free_coherent(&instance->pdev->dev,
++					pd_seq_map_sz,
++					fusion->pd_seq_sync[i],
++					fusion->pd_seq_phys[i]);
+ 		}
+ 		free_pages((ulong)instance->ctrl_context,
+ 			instance->ctrl_context_pages);
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index bb40f3728742..20314ff08be0 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st,
+ 			st->mclk = pdata->ext_clk_hz;
+ 		else
+ 			st->mclk = AD7192_INT_FREQ_MHZ;
+-			break;
++		break;
+ 	default:
+ 		ret = -EINVAL;
+ 		goto out;
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index c37149b929be..502d3892d8a4 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -79,9 +79,13 @@ struct autofs_info {
+ };
+ 
+ #define AUTOFS_INF_EXPIRING	(1<<0) /* dentry is in the process of expiring */
+-#define AUTOFS_INF_NO_RCU	(1<<1) /* the dentry is being considered
++#define AUTOFS_INF_WANT_EXPIRE	(1<<1) /* the dentry is being considered
+ 					* for expiry, so RCU_walk is
+-					* not permitted
++					* not permitted.  If it progresses to
++					* actual expiry attempt, the flag is
++					* not cleared when EXPIRING is set -
++					* in that case it gets cleared only
++					* when it comes to clearing EXPIRING.
+ 					*/
+ #define AUTOFS_INF_PENDING	(1<<2) /* dentry pending mount */
+ 
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 1cebc3c52fa5..7a5a598a2d94 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
+ 	if (ino->flags & AUTOFS_INF_PENDING)
+ 		goto out;
+ 	if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+-		ino->flags |= AUTOFS_INF_NO_RCU;
++		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
+ 		synchronize_rcu();
+ 		spin_lock(&sbi->fs_lock);
+ 		if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+ 			ino->flags |= AUTOFS_INF_EXPIRING;
+-			smp_mb();
+-			ino->flags &= ~AUTOFS_INF_NO_RCU;
+ 			init_completion(&ino->expire_complete);
+ 			spin_unlock(&sbi->fs_lock);
+ 			return root;
+ 		}
+-		ino->flags &= ~AUTOFS_INF_NO_RCU;
++		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+ 	}
+ out:
+ 	spin_unlock(&sbi->fs_lock);
+@@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
+ 	}
+ 	return NULL;
+ }
++
+ /*
+  * Find an eligible tree to time-out
+  * A tree is eligible if :-
+@@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
+ 	struct dentry *root = sb->s_root;
+ 	struct dentry *dentry;
+ 	struct dentry *expired;
++	struct dentry *found;
+ 	struct autofs_info *ino;
+ 
+ 	if (!root)
+@@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
+ 
+ 	dentry = NULL;
+ 	while ((dentry = get_next_positive_subdir(dentry, root))) {
++		int flags = how;
++
+ 		spin_lock(&sbi->fs_lock);
+ 		ino = autofs4_dentry_ino(dentry);
+-		if (ino->flags & AUTOFS_INF_NO_RCU)
+-			expired = NULL;
+-		else
+-			expired = should_expire(dentry, mnt, timeout, how);
+-		if (!expired) {
++		if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
+ 			spin_unlock(&sbi->fs_lock);
+ 			continue;
+ 		}
++		spin_unlock(&sbi->fs_lock);
++
++		expired = should_expire(dentry, mnt, timeout, flags);
++		if (!expired)
++			continue;
++
++		spin_lock(&sbi->fs_lock);
+ 		ino = autofs4_dentry_ino(expired);
+-		ino->flags |= AUTOFS_INF_NO_RCU;
++		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
+ 		synchronize_rcu();
+-		spin_lock(&sbi->fs_lock);
+-		if (should_expire(expired, mnt, timeout, how)) {
+-			if (expired != dentry)
+-				dput(dentry);
+-			goto found;
+-		}
+ 
+-		ino->flags &= ~AUTOFS_INF_NO_RCU;
++		/* Make sure a reference is not taken on found if
++		 * things have changed.
++		 */
++		flags &= ~AUTOFS_EXP_LEAVES;
++		found = should_expire(expired, mnt, timeout, how);
++		if (!found || found != expired)
++			/* Something has changed, continue */
++			goto next;
++
+ 		if (expired != dentry)
+-			dput(expired);
++			dput(dentry);
++
++		spin_lock(&sbi->fs_lock);
++		goto found;
++next:
++		spin_lock(&sbi->fs_lock);
++		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
++		if (expired != dentry)
++			dput(expired);
+ 	}
+ 	return NULL;
+ 
+ found:
+ 	DPRINTK("returning %p %pd", expired, expired);
+ 	ino->flags |= AUTOFS_INF_EXPIRING;
+-	smp_mb();
+-	ino->flags &= ~AUTOFS_INF_NO_RCU;
+ 	init_completion(&ino->expire_complete);
+ 	spin_unlock(&sbi->fs_lock);
+-	spin_lock(&sbi->lookup_lock);
+-	spin_lock(&expired->d_parent->d_lock);
+-	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+-	list_move(&expired->d_parent->d_subdirs, &expired->d_child);
+-	spin_unlock(&expired->d_lock);
+-	spin_unlock(&expired->d_parent->d_lock);
+-	spin_unlock(&sbi->lookup_lock);
+ 	return expired;
+ }
+ 
+@@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
+ 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ 	int status;
++	int state;
+ 
+ 	/* Block on any pending expire */
+-	if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
++	if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
+ 		return 0;
+ 	if (rcu_walk)
+ 		return -ECHILD;
+ 
++retry:
+ 	spin_lock(&sbi->fs_lock);
+-	if (ino->flags & AUTOFS_INF_EXPIRING) {
++	state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
++	if (state == AUTOFS_INF_WANT_EXPIRE) {
++		spin_unlock(&sbi->fs_lock);
++		/*
++		 * Possibly being selected for expire, wait until
++		 * it's selected or not.
++		 */
++		schedule_timeout_uninterruptible(HZ/10);
++		goto retry;
++	}
++	if (state & AUTOFS_INF_EXPIRING) {
+ 		spin_unlock(&sbi->fs_lock);
+ 
+ 		DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
+@@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
+ 	ino = autofs4_dentry_ino(dentry);
+ 	/* avoid rapid-fire expire attempts if expiry fails */
+ 	ino->last_used = now;
+-	ino->flags &= ~AUTOFS_INF_EXPIRING;
++	ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
+ 	complete_all(&ino->expire_complete);
+ 	spin_unlock(&sbi->fs_lock);
+ 
+@@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+ 		spin_lock(&sbi->fs_lock);
+ 		/* avoid rapid-fire expire attempts if expiry fails */
+ 		ino->last_used = now;
+-		ino->flags &= ~AUTOFS_INF_EXPIRING;
++		ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
+ 		complete_all(&ino->expire_complete);
+ 		spin_unlock(&sbi->fs_lock);
+ 		dput(dentry);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index c6d7d3dbd52a..7a54c6a867c8 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
+ 		 * a mount-trap.
+ 		 */
+ 		struct inode *inode;
+-		if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
++		if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
+ 			return 0;
+ 		if (d_mountpoint(dentry))
+ 			return 0;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 65f30b3b04f9..a7e18dbadf74 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 	int namelen;
+ 	int ret = 0;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	ret = mnt_want_write_file(file);
+ 	if (ret)
+ 		goto out;
+@@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
+ 	struct btrfs_ioctl_vol_args *vol_args;
+ 	int ret;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 	bool readonly = false;
+ 	struct btrfs_qgroup_inherit *inherit = NULL;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 	int ret;
+ 	int err = 0;
+ 
++	if (!S_ISDIR(dir->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
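[Annotation: the four btrfs hunks above add the same early guard so that the
snapshot and subvolume ioctls, which only make sense on directories, fail with
-ENOTDIR instead of proceeding on a regular-file fd. Userspace sketch of the
guard pattern; in the kernel the mode comes from file_inode(file)->i_mode:]

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

static int snap_create(mode_t mode)	/* hypothetical stand-in */
{
	if (!S_ISDIR(mode))
		return -ENOTDIR;	/* refuse non-directory targets early */
	/* ... proceed with directory-only snapshot work ... */
	return 0;
}

int main(void)
{
	printf("%d\n", snap_create(S_IFREG | 0644));	/* -ENOTDIR */
	printf("%d\n", snap_create(S_IFDIR | 0755));	/* 0        */
	return 0;
}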
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 5a7b3229b956..f34d6f5a5aca 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
+ 
+ 	if (S_ISLNK(root_inode->i_mode)) {
+ 		char *name = follow_link(host_root_path);
+-		if (IS_ERR(name))
++		if (IS_ERR(name)) {
+ 			err = PTR_ERR(name);
+-		else
+-			err = read_name(root_inode, name);
++			goto out_put;
++		}
++		err = read_name(root_inode, name);
+ 		kfree(name);
+ 		if (err)
+ 			goto out_put;
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index d2f97ecca6a5..e0e5f7c3c99f 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
+ 
+ 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+ 
+-	wait_event(group->fanotify_data.access_waitq, event->response ||
+-				atomic_read(&group->fanotify_data.bypass_perm));
+-
+-	if (!event->response) {	/* bypass_perm set */
+-		/*
+-		 * Event was canceled because group is being destroyed. Remove
+-		 * it from group's event list because we are responsible for
+-		 * freeing the permission event.
+-		 */
+-		fsnotify_remove_event(group, &event->fae.fse);
+-		return 0;
+-	}
++	wait_event(group->fanotify_data.access_waitq, event->response);
+ 
+ 	/* userspace responded, convert to something usable */
+ 	switch (event->response) {
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 8e8e6bcd1d43..a64313868d3a 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ 
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ 	struct fanotify_perm_event_info *event, *next;
++	struct fsnotify_event *fsn_event;
+ 
+ 	/*
+-	 * There may be still new events arriving in the notification queue
+-	 * but since userspace cannot use fanotify fd anymore, no event can
+-	 * enter or leave access_list by now.
++	 * Stop new events from arriving in the notification queue. Since
++	 * userspace cannot use the fanotify fd anymore, no event can enter or
++	 * leave access_list by now either.
+ 	 */
+-	spin_lock(&group->fanotify_data.access_lock);
+-
+-	atomic_inc(&group->fanotify_data.bypass_perm);
++	fsnotify_group_stop_queueing(group);
+ 
++	/*
++	 * Process all permission events on access_list and notification queue
++	 * and simulate reply from userspace.
++	 */
++	spin_lock(&group->fanotify_data.access_lock);
+ 	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
+ 				 fae.fse.list) {
+ 		pr_debug("%s: found group=%p event=%p\n", __func__, group,
+@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ 	spin_unlock(&group->fanotify_data.access_lock);
+ 
+ 	/*
+-	 * Since bypass_perm is set, newly queued events will not wait for
+-	 * access response. Wake up the already sleeping ones now.
+-	 * synchronize_srcu() in fsnotify_destroy_group() will wait for all
+-	 * processes sleeping in fanotify_handle_event() waiting for access
+-	 * response and thus also for all permission events to be freed.
++	 * Destroy all non-permission events. For permission events just
++	 * dequeue them and set the response. They will be freed once the
++	 * response is consumed and fanotify_get_response() returns.
+ 	 */
++	mutex_lock(&group->notification_mutex);
++	while (!fsnotify_notify_queue_is_empty(group)) {
++		fsn_event = fsnotify_remove_first_event(group);
++		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
++			fsnotify_destroy_event(group, fsn_event);
++		else
++			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
++	}
++	mutex_unlock(&group->notification_mutex);
++
++	/* Response for all permission events is set, wake up waiters */
+ 	wake_up(&group->fanotify_data.access_waitq);
+ #endif
+ 
+@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
+ 	spin_lock_init(&group->fanotify_data.access_lock);
+ 	init_waitqueue_head(&group->fanotify_data.access_waitq);
+ 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
+-	atomic_set(&group->fanotify_data.bypass_perm, 0);
+ #endif
+ 	switch (flags & FAN_ALL_CLASS_BITS) {
+ 	case FAN_CLASS_NOTIF:
+diff --git a/fs/notify/group.c b/fs/notify/group.c
+index d16b62cb2854..18eb30c6bd8f 100644
+--- a/fs/notify/group.c
++++ b/fs/notify/group.c
+@@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
+ }
+ 
+ /*
++ * Stop queueing new events for this group. Once this function returns
++ * fsnotify_add_event() will not add any new events to the group's queue.
++ */
++void fsnotify_group_stop_queueing(struct fsnotify_group *group)
++{
++	mutex_lock(&group->notification_mutex);
++	group->shutdown = true;
++	mutex_unlock(&group->notification_mutex);
++}
++
++/*
+  * Trying to get rid of a group. Remove all marks, flush all events and release
+  * the group reference.
+  * Note that another thread calling fsnotify_clear_marks_by_group() may still
+@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
+  */
+ void fsnotify_destroy_group(struct fsnotify_group *group)
+ {
++	/*
++	 * Stop queueing new events. The code below is careful enough to not
++	 * require this but fanotify needs to stop queuing events even before
++	 * fsnotify_destroy_group() is called, and this makes the other callers
++	 * of fsnotify_destroy_group() see the same behavior.
++	 */
++	fsnotify_group_stop_queueing(group);
++
+ 	/* clear all inode marks for this group */
+ 	fsnotify_clear_marks_by_group(group);
+ 
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index a95d8e037aeb..e455e83ceeeb 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
+  * Add an event to the group notification queue.  The group can later pull this
+  * event off the queue to deal with.  The function returns 0 if the event was
+  * added to the queue, 1 if the event was merged with some other queued event,
+- * 2 if the queue of events has overflown.
++ * 2 if the event was not queued - either the queue of events has overflown
++ * or the group is shutting down.
+  */
+ int fsnotify_add_event(struct fsnotify_group *group,
+ 		       struct fsnotify_event *event,
+@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
+ 
+ 	mutex_lock(&group->notification_mutex);
+ 
++	if (group->shutdown) {
++		mutex_unlock(&group->notification_mutex);
++		return 2;
++	}
++
+ 	if (group->q_len >= group->max_events) {
+ 		ret = 2;
+ 		/* Queue overflow event only if it isn't already queued */
+@@ -126,21 +132,6 @@ queue:
+ }
+ 
+ /*
+- * Remove @event from group's notification queue. It is the responsibility of
+- * the caller to destroy the event.
+- */
+-void fsnotify_remove_event(struct fsnotify_group *group,
+-			   struct fsnotify_event *event)
+-{
+-	mutex_lock(&group->notification_mutex);
+-	if (!list_empty(&event->list)) {
+-		list_del_init(&event->list);
+-		group->q_len--;
+-	}
+-	mutex_unlock(&group->notification_mutex);
+-}
+-
+-/*
+  * Remove and return the first event from the notification list.  It is the
+  * responsibility of the caller to destroy the obtained event
+  */
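
Taken together, the fanotify/fsnotify hunks above replace the old bypass_perm counter with a group-wide shutdown flag: fsnotify_group_stop_queueing() sets group->shutdown under notification_mutex, and fsnotify_add_event() checks it under the same mutex, so once the flag is observed no new event can be queued (the function reports 2, the same "not queued" code used for overflow). A compact pthread model of that handshake, with illustrative names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static bool shut_down;
    static int qlen;

    /* Models fsnotify_group_stop_queueing(): the flag is set under
     * the queue mutex, so producers cannot race past it. */
    static void stop_queueing(void)
    {
            pthread_mutex_lock(&qlock);
            shut_down = true;
            pthread_mutex_unlock(&qlock);
    }

    /* Models fsnotify_add_event(): 0 = queued, 2 = not queued. */
    static int add_event(void)
    {
            int ret = 0;

            pthread_mutex_lock(&qlock);
            if (shut_down)
                    ret = 2;
            else
                    qlen++;
            pthread_mutex_unlock(&qlock);
            return ret;
    }

    int main(void)
    {
            printf("%d ", add_event());     /* 0: queued */
            stop_queueing();
            printf("%d\n", add_event());    /* 2: rejected */
            return 0;
    }
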
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index f90931335c6b..2e11658676eb 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
+-	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+-	lock->convert_pending = 0;
+ 	/* if it failed, move it back to granted queue.
+ 	 * if master returns DLM_NORMAL and then down before sending ast,
+ 	 * it may have already been moved to granted queue, reset to
+@@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
+-	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+-			(old_owner != res->owner)) {
+-		mlog(0, "res %.*s is in recovering or has been recovered.\n",
+-				res->lockname.len, res->lockname.name);
++	} else if (!lock->convert_pending) {
++		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
++				"to granted list, retry convert.\n",
++				dlm->name, res->lockname.len, res->lockname.name);
+ 		status = DLM_RECOVERING;
+ 	}
++
++	lock->convert_pending = 0;
+ bail:
+ 	spin_unlock(&res->spinlock);
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 77d30cbd944d..56dd3957cc91 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 				       u64 start, u64 len)
+ {
+ 	int ret = 0;
+-	u64 tmpend, end = start + len;
++	u64 tmpend = 0;
++	u64 end = start + len;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 	unsigned int csize = osb->s_clustersize;
+ 	handle_t *handle;
+@@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 	}
+ 
+ 	/*
+-	 * We want to get the byte offset of the end of the 1st cluster.
++	 * If start is on a cluster boundary and end is somewhere in another
++	 * cluster, we have not COWed the cluster starting at start, unless
++	 * end is also within the same cluster. So, in this case, we skip this
++	 * first call to ocfs2_zero_range_for_truncate() and move on
++	 * to the next one.
+ 	 */
+-	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
+-	if (tmpend > end)
+-		tmpend = end;
++	if ((start & (csize - 1)) != 0) {
++		/*
++		 * We want to get the byte offset of the end of the 1st
++		 * cluster.
++		 */
++		tmpend = (u64)osb->s_clustersize +
++			(start & ~(osb->s_clustersize - 1));
++		if (tmpend > end)
++			tmpend = end;
+ 
+-	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+-						 (unsigned long long)tmpend);
++		trace_ocfs2_zero_partial_clusters_range1(
++			(unsigned long long)start,
++			(unsigned long long)tmpend);
+ 
+-	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
+-	if (ret)
+-		mlog_errno(ret);
++		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
++						    tmpend);
++		if (ret)
++			mlog_errno(ret);
++	}
+ 
+ 	if (tmpend < end) {
+ 		/*
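
The ocfs2_zero_partial_clusters() change above is pure cluster arithmetic: the first zeroing pass only makes sense when start sits inside a cluster, so it is now skipped when start is cluster-aligned. The tmpend computation, extracted into a standalone program (csize must be a power of two, as a cluster size always is):

    #include <stdint.h>
    #include <stdio.h>

    /* Byte offset of the end of the cluster containing start, clamped
     * to end -- the tmpend computation from the hunk above. */
    static uint64_t first_cluster_end(uint64_t start, uint64_t end,
                                      uint64_t csize)
    {
            uint64_t tmpend = csize + (start & ~(csize - 1));

            return tmpend > end ? end : tmpend;
    }

    int main(void)
    {
            uint64_t csize = 4096;

            /* start mid-cluster: first pass zeroes up to the boundary */
            printf("%llu\n", (unsigned long long)
                   first_cluster_end(1000, 9000, csize));     /* 4096 */
            /* start aligned: (start & (csize - 1)) == 0, so the patch
             * skips the first pass entirely */
            printf("%d\n", (int)((8192 & (csize - 1)) != 0)); /* 0 */
            return 0;
    }
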
+diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
+index b751eea32e20..5db6f45b3fed 100644
+--- a/fs/reiserfs/ibalance.c
++++ b/fs/reiserfs/ibalance.c
+@@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
+ 				       insert_ptr);
+ 	}
+ 
+-	memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
+ 	insert_ptr[0] = new_insert_ptr;
++	if (new_insert_ptr)
++		memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
+ 
+ 	return order;
+ }
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 39090fc56f09..eb1b8c8acfcb 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1535,7 +1535,7 @@ xfs_wait_buftarg(
+ 	 * ensure here that all reference counts have been dropped before we
+ 	 * start walking the LRU list.
+ 	 */
+-	drain_workqueue(btp->bt_mount->m_buf_workqueue);
++	flush_workqueue(btp->bt_mount->m_buf_workqueue);
+ 
+ 	/* loop until there is nothing left on the lru list. */
+ 	while (list_lru_count(&btp->bt_lru)) {
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 533c4408529a..850d8822e8ff 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -148,6 +148,7 @@ struct fsnotify_group {
+ 	#define FS_PRIO_1	1 /* fanotify content based access control */
+ 	#define FS_PRIO_2	2 /* fanotify pre-content access */
+ 	unsigned int priority;
++	bool shutdown;		/* group is being shut down, don't queue more events */
+ 
+ 	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ 	struct mutex mark_mutex;	/* protect marks_list */
+@@ -179,7 +180,6 @@ struct fsnotify_group {
+ 			spinlock_t access_lock;
+ 			struct list_head access_list;
+ 			wait_queue_head_t access_waitq;
+-			atomic_t bypass_perm;
+ #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
+ 			int f_flags;
+ 			unsigned int max_marks;
+@@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
+ extern void fsnotify_get_group(struct fsnotify_group *group);
+ /* drop reference on a group from fsnotify_alloc_group */
+ extern void fsnotify_put_group(struct fsnotify_group *group);
++/* group destruction begins, stop queuing new events */
++extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
+ /* destroy group */
+ extern void fsnotify_destroy_group(struct fsnotify_group *group);
+ /* fasync handler function */
+@@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
+ 			      struct fsnotify_event *event,
+ 			      int (*merge)(struct list_head *,
+ 					   struct fsnotify_event *));
+-/* Remove passed event from groups notification queue */
+-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
+ /* true if the group notification queue is empty */
+ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+ /* return, but do not dequeue the first event on the notification queue */
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 924853d33a13..e571e592e53a 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -202,26 +202,26 @@ extern int _cond_resched(void);
+ 
+ /**
+  * abs - return absolute value of an argument
+- * @x: the value.  If it is unsigned type, it is converted to signed type first
+- *   (s64, long or int depending on its size).
++ * @x: the value.  If it is unsigned type, it is converted to signed type first.
++ *     char is treated as if it were signed (regardless of whether it really is)
++ *     but the macro's return type is preserved as char.
+  *
+- * Return: an absolute value of x.  If x is 64-bit, macro's return type is s64,
+- *   otherwise it is signed long.
++ * Return: an absolute value of x.
+  */
+-#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({	\
+-		s64 __x = (x);						\
+-		(__x < 0) ? -__x : __x;					\
+-	}), ({								\
+-		long ret;						\
+-		if (sizeof(x) == sizeof(long)) {			\
+-			long __x = (x);					\
+-			ret = (__x < 0) ? -__x : __x;			\
+-		} else {						\
+-			int __x = (x);					\
+-			ret = (__x < 0) ? -__x : __x;			\
+-		}							\
+-		ret;							\
+-	}))
++#define abs(x)	__abs_choose_expr(x, long long,				\
++		__abs_choose_expr(x, long,				\
++		__abs_choose_expr(x, int,				\
++		__abs_choose_expr(x, short,				\
++		__abs_choose_expr(x, char,				\
++		__builtin_choose_expr(					\
++			__builtin_types_compatible_p(typeof(x), char),	\
++			(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
++			((void)0)))))))
++
++#define __abs_choose_expr(x, type, other) __builtin_choose_expr(	\
++	__builtin_types_compatible_p(typeof(x),   signed type) ||	\
++	__builtin_types_compatible_p(typeof(x), unsigned type),		\
++	({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+ 
+ /**
+  * reciprocal_scale - "scale" a value into range [0, ep_ro)
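
The kernel.h rewrite above dispatches abs() on the argument's exact type rather than its size, which preserves the result type (including char) instead of always widening to long/s64. A reduced userspace version of the same __builtin_choose_expr / __builtin_types_compatible_p technique, covering just int and long (build with gcc; typeof and statement expressions are GNU extensions):

    #include <stdio.h>

    /* Pick the int branch when x is (un)signed int, otherwise fall
     * back to long -- the same dispatch the new kernel abs() performs
     * across long long/long/int/short/char. */
    #define my_abs(x) __builtin_choose_expr(                           \
            __builtin_types_compatible_p(typeof(x), signed int) ||     \
            __builtin_types_compatible_p(typeof(x), unsigned int),     \
            ({ signed int  __i = (x); __i < 0 ? -__i : __i; }),        \
            ({ signed long __l = (x); __l < 0 ? -__l : __l; }))

    int main(void)
    {
            int  i = -5;
            long l = -7L;

            printf("%d %ld\n", my_abs(i), my_abs(l));   /* 5 7 */
            return 0;
    }
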
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index b97d6823ef3c..4e9c75226f07 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3036,6 +3036,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
+ 	napi->skb = NULL;
+ }
+ 
++bool netdev_is_rx_handler_busy(struct net_device *dev);
+ int netdev_rx_handler_register(struct net_device *dev,
+ 			       rx_handler_func_t *rx_handler,
+ 			       void *rx_handler_data);
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 26eabf5ec718..fbfadba81c5a 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+  */
+ static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+ {
+-	int ret = 0;
+ 	char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
++	if (unlikely(uaddr > end))
++		return -EFAULT;
+ 	/*
+ 	 * Writing zeroes into userspace here is OK, because we know that if
+ 	 * the zero gets there, we'll be overwriting it.
+ 	 */
+-	while (uaddr <= end) {
+-		ret = __put_user(0, uaddr);
+-		if (ret != 0)
+-			return ret;
++	do {
++		if (unlikely(__put_user(0, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK))
+-		ret = __put_user(0, end);
++		return __put_user(0, end);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static inline int fault_in_multipages_readable(const char __user *uaddr,
+ 					       int size)
+ {
+ 	volatile char c;
+-	int ret = 0;
+ 	const char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
+-	while (uaddr <= end) {
+-		ret = __get_user(c, uaddr);
+-		if (ret != 0)
+-			return ret;
++	if (unlikely(uaddr > end))
++		return -EFAULT;
++
++	do {
++		if (unlikely(__get_user(c, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK)) {
+-		ret = __get_user(c, end);
+-		(void)c;
++		return __get_user(c, end);
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
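
Both fault_in_multipages_*() helpers above gain the same two fixes: an explicit -EFAULT when uaddr + size - 1 wraps (previously the while loop could fall through without probing anything), and a do/while so the first page is always touched. The loop shape as a self-contained sketch, with a dummy probe() standing in for __put_user()/__get_user() (the final spilled-into-next-page probe is omitted for brevity):

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static int probe(const char *p) { (void)p; return 0; }

    /* Guard against address wrap first, then touch one byte per page. */
    static int fault_in_range(const char *uaddr, unsigned long size)
    {
            const char *end = uaddr + size - 1;

            if (size == 0)
                    return 0;
            if (uaddr > end)        /* size wrapped the pointer */
                    return -EFAULT;
            do {
                    if (probe(uaddr) != 0)
                            return -EFAULT;
                    uaddr += PAGE_SIZE;
            } while (uaddr <= end);
            return 0;
    }

    int main(void)
    {
            static char buf[3 * PAGE_SIZE];

            printf("%d\n", fault_in_range(buf, sizeof(buf)));  /* 0 */
            return 0;
    }
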
+diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
+index 76199b75d584..e302c447e057 100644
+--- a/include/linux/smc91x.h
++++ b/include/linux/smc91x.h
+@@ -1,6 +1,16 @@
+ #ifndef __SMC91X_H__
+ #define __SMC91X_H__
+ 
++/*
++ * These bits define which access sizes a platform can support, rather
++ * than the maximal access size.  So, if your platform can do 16-bit
++ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
++ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
++ *
++ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
++ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
++ * an invalid configuration.
++ */
+ #define SMC91X_USE_8BIT (1 << 0)
+ #define SMC91X_USE_16BIT (1 << 1)
+ #define SMC91X_USE_32BIT (1 << 2)
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 9b4c418bebd8..fd60eccb59a6 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -52,7 +52,7 @@ struct unix_sock {
+ 	struct sock		sk;
+ 	struct unix_address     *addr;
+ 	struct path		path;
+-	struct mutex		readlock;
++	struct mutex		iolock, bindlock;
+ 	struct sock		*peer;
+ 	struct list_head	link;
+ 	atomic_long_t		inflight;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 414d822bc1db..9c3ab544d3a8 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1510,6 +1510,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ {
+ 	if (sk->sk_send_head == skb_unlinked)
+ 		sk->sk_send_head = NULL;
++	if (tcp_sk(sk)->highest_sack == skb_unlinked)
++		tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
+ static inline void tcp_init_send_head(struct sock *sk)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index e120bd983ad0..b9279a2844d8 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2079,7 +2079,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+  * which could have been changed by cpuset just after it inherits the
+  * state from the parent and before it sits on the cgroup's task list.
+  */
+-void cpuset_fork(struct task_struct *task)
++void cpuset_fork(struct task_struct *task, void *priv)
+ {
+ 	if (task_css_is_root(task, cpuset_cgrp_id))
+ 		return;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index b7dd5718836e..3124cebaec31 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -299,12 +299,12 @@ static int create_image(int platform_mode)
+ 	save_processor_state();
+ 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
+ 	error = swsusp_arch_suspend();
++	/* Restore control flow magically appears here */
++	restore_processor_state();
+ 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
+ 	if (error)
+ 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
+ 			error);
+-	/* Restore control flow magically appears here */
+-	restore_processor_state();
+ 	if (!in_suspend)
+ 		events_check_enabled = false;
+ 
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 3a970604308f..f155c62f1f2c 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
+  */
+ static bool rtree_next_node(struct memory_bitmap *bm)
+ {
+-	bm->cur.node = list_entry(bm->cur.node->list.next,
+-				  struct rtree_node, list);
+-	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
++	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
++		bm->cur.node = list_entry(bm->cur.node->list.next,
++					  struct rtree_node, list);
+ 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
+ 		bm->cur.node_bit  = 0;
+ 		touch_softlockup_watchdog();
+@@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
+ 	}
+ 
+ 	/* No more nodes, goto next zone */
+-	bm->cur.zone = list_entry(bm->cur.zone->list.next,
++	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
++		bm->cur.zone = list_entry(bm->cur.zone->list.next,
+ 				  struct mem_zone_bm_rtree, list);
+-	if (&bm->cur.zone->list != &bm->zones) {
+ 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+ 					  struct rtree_node, list);
+ 		bm->cur.node_pfn = 0;
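
The rtree_next_node() fix above reorders a classic list-walking hazard: the cursor was advanced first and validated afterwards, so bm->cur.node could transiently point at the list head reinterpreted as the wrong type. With list_is_last() the check happens before the advance. The same discipline on a plain index cursor:

    #include <stdbool.h>
    #include <stdio.h>

    /* Advance only after confirming we are not on the last element --
     * the list_is_last()-before-list_entry() ordering from the hunk. */
    static bool next_index(int *cur, int len)
    {
            if (*cur == len - 1)
                    return false;   /* already last: do not move */
            (*cur)++;
            return true;
    }

    int main(void)
    {
            int cur = 0;

            while (next_index(&cur, 3))
                    printf("%d ", cur);     /* 1 2 */
            putchar('\n');
            return 0;
    }
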
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index 9b1044e936a6..05ea5167e6bb 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -1,4 +1,8 @@
+ 
++# We are fully aware of the dangers of __builtin_return_address()
++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
++KBUILD_CFLAGS += $(FRAME_CFLAGS)
++
+ # Do not instrument the tracer itself:
+ 
+ ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8305cbb2d5a2..059233abcfcf 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4727,19 +4727,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	struct trace_iterator *iter = filp->private_data;
+ 	ssize_t sret;
+ 
+-	/* return any leftover data */
+-	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+-	if (sret != -EBUSY)
+-		return sret;
+-
+-	trace_seq_init(&iter->seq);
+-
+ 	/*
+ 	 * Avoid more than one consumer on a single file descriptor
+ 	 * This is just a matter of traces coherency, the ring buffer itself
+ 	 * is protected.
+ 	 */
+ 	mutex_lock(&iter->mutex);
++
++	/* return any leftover data */
++	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
++	if (sret != -EBUSY)
++		goto out;
++
++	trace_seq_init(&iter->seq);
++
+ 	if (iter->trace->read) {
+ 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ 		if (sret)
+@@ -5766,9 +5767,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		return -EBUSY;
+ #endif
+ 
+-	if (splice_grow_spd(pipe, &spd))
+-		return -ENOMEM;
+-
+ 	if (*ppos & (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+@@ -5778,6 +5776,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		len &= PAGE_MASK;
+ 	}
+ 
++	if (splice_grow_spd(pipe, &spd))
++		return -ENOMEM;
++
+  again:
+ 	trace_access_lock(iter->cpu_file);
+ 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+@@ -5835,19 +5836,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 	/* did we read anything? */
+ 	if (!spd.nr_pages) {
+ 		if (ret)
+-			return ret;
++			goto out;
+ 
++		ret = -EAGAIN;
+ 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+-			return -EAGAIN;
++			goto out;
+ 
+ 		ret = wait_on_pipe(iter, true);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 
+ 		goto again;
+ 	}
+ 
+ 	ret = splice_to_pipe(pipe, &spd);
++out:
+ 	splice_shrink_spd(&spd);
+ 
+ 	return ret;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 0c114e2b01d3..0838e9f02b11 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2159,23 +2159,6 @@ out:
+ 	}
+ }
+ 
+-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+-static void init_tlb_ubc(void)
+-{
+-	/*
+-	 * This deliberately does not clear the cpumask as it's expensive
+-	 * and unnecessary. If there happens to be data in there then the
+-	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
+-	 * then will be cleared.
+-	 */
+-	current->tlb_ubc.flush_required = false;
+-}
+-#else
+-static inline void init_tlb_ubc(void)
+-{
+-}
+-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+-
+ /*
+  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+  */
+@@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
+ 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
+ 			 sc->priority == DEF_PRIORITY);
+ 
+-	init_tlb_ubc();
+-
+ 	blk_start_plug(&plug);
+ 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+ 					nr[LRU_INACTIVE_FILE]) {
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 7173a685309a..9542e84a9455 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1113,7 +1113,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ 		} else {
+ 			err = br_ip6_multicast_add_group(br, port,
+ 							 &grec->grec_mca, vid);
+-			if (!err)
++			if (err)
+ 				break;
+ 		}
+ 	}
+diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
+index f6c3b2137eea..59ce1fcc220c 100644
+--- a/net/caif/cfpkt_skbuff.c
++++ b/net/caif/cfpkt_skbuff.c
+@@ -286,7 +286,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
+ 		else
+ 			skb_trim(skb, len);
+ 
+-			return cfpkt_getlen(pkt);
++		return cfpkt_getlen(pkt);
+ 	}
+ 
+ 	/* Need to expand SKB */
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9efbdb3ff78a..de4ed2b5a221 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3722,6 +3722,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
+ }
+ 
+ /**
++ *	netdev_is_rx_handler_busy - check if receive handler is registered
++ *	@dev: device to check
++ *
++ *	Check if a receive handler is already registered for a given device.
++ *	Return true if there is one.
++ *
++ *	The caller must hold the rtnl_mutex.
++ */
++bool netdev_is_rx_handler_busy(struct net_device *dev)
++{
++	ASSERT_RTNL();
++	return dev && rtnl_dereference(dev->rx_handler);
++}
++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
++
++/**
+  *	netdev_rx_handler_register - register receive handler
+  *	@dev: device to register a handler for
+  *	@rx_handler: receive handler to register
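
net/core/dev.c above exports a small query, netdev_is_rx_handler_busy(), so callers holding the RTNL can test for an existing rx_handler before attempting registration rather than decoding -EBUSY afterwards. A userspace model of the register/busy pair, using a single handler slot as struct net_device has:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef void (*rx_handler_t)(void *pkt);

    static rx_handler_t rx_handler;     /* one slot per "device" */

    static bool rx_handler_busy(void)
    {
            return rx_handler != NULL;
    }

    /* Mirrors netdev_rx_handler_register(): one handler at a time. */
    static int rx_handler_register(rx_handler_t h)
    {
            if (rx_handler_busy())
                    return -EBUSY;
            rx_handler = h;
            return 0;
    }

    static void drop(void *pkt) { (void)pkt; }

    int main(void)
    {
            printf("%d ", rx_handler_register(drop));   /* 0 */
            printf("%d\n", rx_handler_register(drop));  /* -EBUSY */
            return 0;
    }
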
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 744e5936c10d..e5a3ff210fec 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2453,9 +2453,7 @@ struct fib_route_iter {
+ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ 					    loff_t pos)
+ {
+-	struct fib_table *tb = iter->main_tb;
+ 	struct key_vector *l, **tp = &iter->tnode;
+-	struct trie *t;
+ 	t_key key;
+ 
+ 	/* use cache location of next-to-find key */
+@@ -2463,8 +2461,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ 		pos -= iter->pos;
+ 		key = iter->key;
+ 	} else {
+-		t = (struct trie *)tb->tb_data;
+-		iter->tnode = t->kv;
+ 		iter->pos = 0;
+ 		key = 0;
+ 	}
+@@ -2505,12 +2501,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
+ 		return NULL;
+ 
+ 	iter->main_tb = tb;
++	t = (struct trie *)tb->tb_data;
++	iter->tnode = t->kv;
+ 
+ 	if (*pos != 0)
+ 		return fib_route_get_idx(iter, *pos);
+ 
+-	t = (struct trie *)tb->tb_data;
+-	iter->tnode = t->kv;
+ 	iter->pos = 0;
+ 	iter->key = 0;
+ 
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 4d8f0b698777..65036891e080 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
+ 	.get_link_net	= ip_tunnel_get_link_net,
+ };
+ 
++static bool is_vti_tunnel(const struct net_device *dev)
++{
++	return dev->netdev_ops == &vti_netdev_ops;
++}
++
++static int vti_device_event(struct notifier_block *unused,
++			    unsigned long event, void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++	struct ip_tunnel *tunnel = netdev_priv(dev);
++
++	if (!is_vti_tunnel(dev))
++		return NOTIFY_DONE;
++
++	switch (event) {
++	case NETDEV_DOWN:
++		if (!net_eq(tunnel->net, dev_net(dev)))
++			xfrm_garbage_collect(tunnel->net);
++		break;
++	}
++	return NOTIFY_DONE;
++}
++
++static struct notifier_block vti_notifier_block __read_mostly = {
++	.notifier_call = vti_device_event,
++};
++
+ static int __init vti_init(void)
+ {
+ 	const char *msg;
+@@ -547,6 +574,8 @@ static int __init vti_init(void)
+ 
+ 	pr_info("IPv4 over IPsec tunneling driver\n");
+ 
++	register_netdevice_notifier(&vti_notifier_block);
++
+ 	msg = "tunnel device";
+ 	err = register_pernet_device(&vti_net_ops);
+ 	if (err < 0)
+@@ -579,6 +608,7 @@ xfrm_proto_ah_failed:
+ xfrm_proto_esp_failed:
+ 	unregister_pernet_device(&vti_net_ops);
+ pernet_dev_failed:
++	unregister_netdevice_notifier(&vti_notifier_block);
+ 	pr_err("vti init: failed to register %s\n", msg);
+ 	return err;
+ }
+@@ -590,6 +620,7 @@ static void __exit vti_fini(void)
+ 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+ 	unregister_pernet_device(&vti_net_ops);
++	unregister_netdevice_notifier(&vti_notifier_block);
+ }
+ 
+ module_init(vti_init);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 048418b049d8..b5853cac3269 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+ 					     tcp_sk(sk)->snd_nxt;
+ 
++	/* RFC 7323 2.3
++	 * The window field (SEG.WND) of every outgoing segment, with the
++	 * exception of <SYN> segments, MUST be right-shifted by
++	 * Rcv.Wind.Shift bits:
++	 */
+ 	tcp_v4_send_ack(sock_net(sk), skb, seq,
+-			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
++			tcp_rsk(req)->rcv_nxt,
++			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+ 			0,
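
The tcp_v4_reqsk_send_ack() fix above (and its IPv6 twin further down) applies RFC 7323 to ACKs sent on behalf of a request socket: every non-SYN segment must carry the receive window right-shifted by the negotiated scale factor. The arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* SEG.WND for a non-SYN segment: the true window shifted right by
     * Rcv.Wind.Shift so it fits the 16-bit header field (RFC 7323 2.3). */
    static uint16_t advertised_window(uint32_t rcv_wnd, int rcv_wscale)
    {
            return (uint16_t)(rcv_wnd >> rcv_wscale);
    }

    int main(void)
    {
            /* a 512 KiB window with wscale 4 goes on the wire as 32768 */
            printf("%u\n", advertised_window(512 * 1024, 4));
            return 0;
    }
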
+diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
+index 3e6a472e6b88..92ab5bc91592 100644
+--- a/net/ipv4/tcp_yeah.c
++++ b/net/ipv4/tcp_yeah.c
+@@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+ 	if (!tcp_is_cwnd_limited(sk))
+ 		return;
+ 
+-	if (tp->snd_cwnd <= tp->snd_ssthresh)
++	if (tcp_in_slow_start(tp))
+ 		tcp_slow_start(tp, acked);
+ 
+ 	else if (!yeah->doing_reno_now) {
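
The tcp_yeah change swaps an open-coded `tp->snd_cwnd <= tp->snd_ssthresh` for the shared tcp_in_slow_start() helper; in this kernel the helper uses a strict less-than, so the boundary case cwnd == ssthresh now falls through to congestion avoidance like the other congestion modules. The predicate on its own:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* tcp_in_slow_start() in miniature: strictly below ssthresh. */
    static bool in_slow_start(uint32_t snd_cwnd, uint32_t snd_ssthresh)
    {
            return snd_cwnd < snd_ssthresh;
    }

    int main(void)
    {
            printf("%d %d\n", in_slow_start(9, 10),   /* 1: slow start */
                              in_slow_start(10, 10)); /* 0: avoidance  */
            return 0;
    }
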
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 263a5164a6f5..3e55447b63a4 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -150,8 +150,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	rt = (struct rt6_info *) dst;
+ 
+ 	np = inet6_sk(sk);
+-	if (!np)
+-		return -EBADF;
++	if (!np) {
++		err = -EBADF;
++		goto dst_err_out;
++	}
+ 
+ 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ 		fl6.flowi6_oif = np->mcast_oif;
+@@ -186,6 +188,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	}
+ 	release_sock(sk);
+ 
++dst_err_out:
++	dst_release(dst);
++
+ 	if (err)
+ 		return err;
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 1a1cd3938fd0..2d81e2f33ef2 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -932,9 +932,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ 	 */
++	/* RFC 7323 2.3
++	 * The window field (SEG.WND) of every outgoing segment, with the
++	 * exception of <SYN> segments, MUST be right-shifted by
++	 * Rcv.Wind.Shift bits:
++	 */
+ 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
+ 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+-			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
++			tcp_rsk(req)->rcv_nxt,
++			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+ 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+ 			0, 0);
+diff --git a/net/irda/iriap.c b/net/irda/iriap.c
+index 4a7ae32afa09..1138eaf5c682 100644
+--- a/net/irda/iriap.c
++++ b/net/irda/iriap.c
+@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+ 
+ 	self->magic = IAS_MAGIC;
+ 	self->mode = mode;
+-	if (mode == IAS_CLIENT)
+-		iriap_register_lsap(self, slsap_sel, mode);
++	if (mode == IAS_CLIENT) {
++		if (iriap_register_lsap(self, slsap_sel, mode)) {
++			kfree(self);
++			return NULL;
++		}
++	}
+ 
+ 	self->confirm = callback;
+ 	self->priv = priv;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 9b713e0ce00d..b26b7a127773 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2111,7 +2111,8 @@ restart:
+ 					      TIPC_CONN_MSG, SHORT_H_SIZE,
+ 					      0, dnode, onode, dport, oport,
+ 					      TIPC_CONN_SHUTDOWN);
+-			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
++			if (skb)
++				tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+ 		}
+ 		tsk->connected = 0;
+ 		sock->state = SS_DISCONNECTING;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 6579fd6e7459..824cc1e160bc 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+ 
+-	if (mutex_lock_interruptible(&u->readlock))
++	if (mutex_lock_interruptible(&u->iolock))
+ 		return -EINTR;
+ 
+ 	sk->sk_peek_off = val;
+-	mutex_unlock(&u->readlock);
++	mutex_unlock(&u->iolock);
+ 
+ 	return 0;
+ }
+@@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
+ 	spin_lock_init(&u->lock);
+ 	atomic_long_set(&u->inflight, 0);
+ 	INIT_LIST_HEAD(&u->link);
+-	mutex_init(&u->readlock); /* single task reading lock */
++	mutex_init(&u->iolock); /* single task reading lock */
++	mutex_init(&u->bindlock); /* single task binding lock */
+ 	init_waitqueue_head(&u->peer_wait);
+ 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
+ 	unix_insert_socket(unix_sockets_unbound(sk), sk);
+@@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
+ 	int err;
+ 	unsigned int retries = 0;
+ 
+-	err = mutex_lock_interruptible(&u->readlock);
++	err = mutex_lock_interruptible(&u->bindlock);
+ 	if (err)
+ 		return err;
+ 
+@@ -894,7 +895,7 @@ retry:
+ 	spin_unlock(&unix_table_lock);
+ 	err = 0;
+ 
+-out:	mutex_unlock(&u->readlock);
++out:	mutex_unlock(&u->bindlock);
+ 	return err;
+ }
+ 
+@@ -953,20 +954,32 @@ fail:
+ 	return NULL;
+ }
+ 
+-static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
+-		      struct path *res)
++static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+ {
+-	int err;
++	struct dentry *dentry;
++	struct path path;
++	int err = 0;
++	/*
++	 * Get the parent directory, calculate the hash for last
++	 * component.
++	 */
++	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
++	err = PTR_ERR(dentry);
++	if (IS_ERR(dentry))
++		return err;
+ 
+-	err = security_path_mknod(path, dentry, mode, 0);
++	/*
++	 * All right, let's create it.
++	 */
++	err = security_path_mknod(&path, dentry, mode, 0);
+ 	if (!err) {
+-		err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
++		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
+ 		if (!err) {
+-			res->mnt = mntget(path->mnt);
++			res->mnt = mntget(path.mnt);
+ 			res->dentry = dget(dentry);
+ 		}
+ 	}
+-
++	done_path_create(&path, dentry);
+ 	return err;
+ }
+ 
+@@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct unix_sock *u = unix_sk(sk);
+ 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
+ 	char *sun_path = sunaddr->sun_path;
+-	int err, name_err;
++	int err;
+ 	unsigned int hash;
+ 	struct unix_address *addr;
+ 	struct hlist_head *list;
+-	struct path path;
+-	struct dentry *dentry;
+ 
+ 	err = -EINVAL;
+ 	if (sunaddr->sun_family != AF_UNIX)
+@@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 		goto out;
+ 	addr_len = err;
+ 
+-	name_err = 0;
+-	dentry = NULL;
+-	if (sun_path[0]) {
+-		/* Get the parent directory, calculate the hash for last
+-		 * component.
+-		 */
+-		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+-
+-		if (IS_ERR(dentry)) {
+-			/* delay report until after 'already bound' check */
+-			name_err = PTR_ERR(dentry);
+-			dentry = NULL;
+-		}
+-	}
+-
+-	err = mutex_lock_interruptible(&u->readlock);
++	err = mutex_lock_interruptible(&u->bindlock);
+ 	if (err)
+-		goto out_path;
++		goto out;
+ 
+ 	err = -EINVAL;
+ 	if (u->addr)
+ 		goto out_up;
+ 
+-	if (name_err) {
+-		err = name_err == -EEXIST ? -EADDRINUSE : name_err;
+-		goto out_up;
+-	}
+-
+ 	err = -ENOMEM;
+ 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
+ 	if (!addr)
+@@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	addr->hash = hash ^ sk->sk_type;
+ 	atomic_set(&addr->refcnt, 1);
+ 
+-	if (dentry) {
+-		struct path u_path;
++	if (sun_path[0]) {
++		struct path path;
+ 		umode_t mode = S_IFSOCK |
+ 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
+-		err = unix_mknod(dentry, &path, mode, &u_path);
++		err = unix_mknod(sun_path, mode, &path);
+ 		if (err) {
+ 			if (err == -EEXIST)
+ 				err = -EADDRINUSE;
+@@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 			goto out_up;
+ 		}
+ 		addr->hash = UNIX_HASH_SIZE;
+-		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
++		hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ 		spin_lock(&unix_table_lock);
+-		u->path = u_path;
++		u->path = path;
+ 		list = &unix_socket_table[hash];
+ 	} else {
+ 		spin_lock(&unix_table_lock);
+@@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ out_unlock:
+ 	spin_unlock(&unix_table_lock);
+ out_up:
+-	mutex_unlock(&u->readlock);
+-out_path:
+-	if (dentry)
+-		done_path_create(&path, dentry);
+-
++	mutex_unlock(&u->bindlock);
+ out:
+ 	return err;
+ }
+@@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+ 	if (false) {
+ alloc_skb:
+ 		unix_state_unlock(other);
+-		mutex_unlock(&unix_sk(other)->readlock);
++		mutex_unlock(&unix_sk(other)->iolock);
+ 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+ 					      &err, 0);
+ 		if (!newskb)
+ 			goto err;
+ 	}
+ 
+-	/* we must acquire readlock as we modify already present
++	/* we must acquire iolock as we modify already present
+ 	 * skbs in the sk_receive_queue and mess with skb->len
+ 	 */
+-	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
++	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
+ 	if (err) {
+ 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+ 		goto err;
+@@ -2048,7 +2035,7 @@ alloc_skb:
+ 	}
+ 
+ 	unix_state_unlock(other);
+-	mutex_unlock(&unix_sk(other)->readlock);
++	mutex_unlock(&unix_sk(other)->iolock);
+ 
+ 	other->sk_data_ready(other);
+ 	scm_destroy(&scm);
+@@ -2057,7 +2044,7 @@ alloc_skb:
+ err_state_unlock:
+ 	unix_state_unlock(other);
+ err_unlock:
+-	mutex_unlock(&unix_sk(other)->readlock);
++	mutex_unlock(&unix_sk(other)->iolock);
+ err:
+ 	kfree_skb(newskb);
+ 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+@@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	if (flags&MSG_OOB)
+ 		goto out;
+ 
+-	err = mutex_lock_interruptible(&u->readlock);
++	err = mutex_lock_interruptible(&u->iolock);
+ 	if (unlikely(err)) {
+ 		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
+ 		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+@@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ out_free:
+ 	skb_free_datagram(sk, skb);
+ out_unlock:
+-	mutex_unlock(&u->readlock);
++	mutex_unlock(&u->iolock);
+ out:
+ 	return err;
+ }
+@@ -2293,7 +2280,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
+ 	/* Lock the socket to prevent queue disordering
+ 	 * while sleeps in memcpy_tomsg
+ 	 */
+-	mutex_lock(&u->readlock);
++	mutex_lock(&u->iolock);
+ 
+ 	if (flags & MSG_PEEK)
+ 		skip = sk_peek_offset(sk, flags);
+@@ -2334,7 +2321,7 @@ again:
+ 				break;
+ 			}
+ 
+-			mutex_unlock(&u->readlock);
++			mutex_unlock(&u->iolock);
+ 
+ 			timeo = unix_stream_data_wait(sk, timeo, last,
+ 						      last_len);
+@@ -2345,7 +2332,7 @@ again:
+ 				goto out;
+ 			}
+ 
+-			mutex_lock(&u->readlock);
++			mutex_lock(&u->iolock);
+ 			continue;
+ unlock:
+ 			unix_state_unlock(sk);
+@@ -2448,7 +2435,7 @@ unlock:
+ 		}
+ 	} while (size);
+ 
+-	mutex_unlock(&u->readlock);
++	mutex_unlock(&u->iolock);
+ 	if (state->msg)
+ 		scm_recv(sock, state->msg, &scm, flags);
+ 	else
+@@ -2489,9 +2476,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
+ 	int ret;
+ 	struct unix_sock *u = unix_sk(sk);
+ 
+-	mutex_unlock(&u->readlock);
++	mutex_unlock(&u->iolock);
+ 	ret = splice_to_pipe(pipe, spd);
+-	mutex_lock(&u->readlock);
++	mutex_lock(&u->iolock);
+ 
+ 	return ret;
+ }
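
The af_unix rework above splits the old readlock into two mutexes with distinct jobs: iolock serializes the receive/sendpage paths, while bindlock covers bind() and autobind. The filesystem work in unix_mknod() (kern_path_create() through done_path_create()) now runs under bindlock only, which the data paths never take, removing the lock-ordering deadlock between bind and the splice/read paths. A minimal model of the split, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    /* One mutex per concern, as in the patched struct unix_sock. */
    struct usock {
            pthread_mutex_t iolock;     /* recv/send paths */
            pthread_mutex_t bindlock;   /* bind and autobind */
    };

    int main(void)
    {
            struct usock u;

            pthread_mutex_init(&u.iolock, NULL);
            pthread_mutex_init(&u.bindlock, NULL);

            /* A thread "binding" holds only bindlock... */
            pthread_mutex_lock(&u.bindlock);
            /* ...so the I/O path can still take iolock unimpeded. */
            pthread_mutex_lock(&u.iolock);
            puts("io path not blocked by bind");
            pthread_mutex_unlock(&u.iolock);
            pthread_mutex_unlock(&u.bindlock);
            return 0;
    }
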
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 5d89f13a98db..bf65f31bd55e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		params.n_counter_offsets_presp = len / sizeof(u16);
+ 		if (rdev->wiphy.max_num_csa_counters &&
+-		    (params.n_counter_offsets_beacon >
++		    (params.n_counter_offsets_presp >
+ 		     rdev->wiphy.max_num_csa_counters))
+ 			return -EINVAL;
+ 

