From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.12 commit in: /
Date: Wed, 23 Jun 2021 15:15:04 +0000 (UTC)
Message-ID: <1624461293.1a7f084fb13953ad56900d4a19ac2e2aecf413af.mpagano@gentoo>

commit:     1a7f084fb13953ad56900d4a19ac2e2aecf413af
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 23 15:14:53 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 23 15:14:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a7f084f

Linux patch 5.12.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1012_linux-5.12.13.patch | 6477 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6481 insertions(+)
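
As context for the diffstat above, here is a minimal sketch of how an
incremental release patch like this one is applied on top of the previous
release's sources. The tree path below is an assumption for illustration;
on Gentoo the kernel eclasses apply the patches listed in 0000_README
automatically during emerge, so applying it by hand is only needed for a
vanilla tree:

  # assumes a pristine 5.12.12 tree; -p1 strips the a/ and b/ path prefixes
  cd /usr/src/linux-5.12.12
  patch -p1 --dry-run < 1012_linux-5.12.13.patch  # verify it applies cleanly
  patch -p1 < 1012_linux-5.12.13.patch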

diff --git a/0000_README b/0000_README
index 07044b3..34c90d1 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-5.12.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.12
 
+Patch:  1012_linux-5.12.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.12.13.patch b/1012_linux-5.12.13.patch
new file mode 100644
index 0000000..ef75d57
--- /dev/null
+++ b/1012_linux-5.12.13.patch
@@ -0,0 +1,6477 @@
+diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
+index 03f294a638bd8..d3028554b1e9c 100644
+--- a/Documentation/vm/slub.rst
++++ b/Documentation/vm/slub.rst
+@@ -181,7 +181,7 @@ SLUB Debug output
+ Here is a sample of slub debug output::
+ 
+  ====================================================================
+- BUG kmalloc-8: Redzone overwritten
++ BUG kmalloc-8: Right Redzone overwritten
+  --------------------------------------------------------------------
+ 
+  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
+  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+ 
+- Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+-   Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
+-  Redzone 0xc90f6d28:  00 cc cc cc                                     .
+-  Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
++ Object   (0xc90f6d20): 31 30 31 39 2e 30 30 35                         1019.005
++ Redzone  (0xc90f6d28): 00 cc cc cc                                     .
++ Padding  (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+ 
+    [<c010523d>] dump_trace+0x63/0x1eb
+    [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+diff --git a/Makefile b/Makefile
+index e0a252b644630..d2fe36db78aed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+@@ -913,11 +913,14 @@ CC_FLAGS_LTO	+= -fvisibility=hidden
+ # Limit inlining across translation units to reduce binary size
+ KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+ 
+-# Check for frame size exceeding threshold during prolog/epilog insertion.
++# Check for frame size exceeding threshold during prolog/epilog insertion
++# when using lld < 13.0.0.
+ ifneq ($(CONFIG_FRAME_WARN),0)
++ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+ KBUILD_LDFLAGS	+= -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+ endif
+ endif
++endif
+ 
+ ifdef CONFIG_LTO
+ KBUILD_CFLAGS	+= -fno-lto $(CC_FLAGS_LTO)
+diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
+index 95f8a4380e110..7a5449dfcb290 100644
+--- a/arch/arc/include/uapi/asm/sigcontext.h
++++ b/arch/arc/include/uapi/asm/sigcontext.h
+@@ -18,6 +18,7 @@
+  */
+ struct sigcontext {
+ 	struct user_regs_struct regs;
++	struct user_regs_arcv2 v2abi;
+ };
+ 
+ #endif /* _ASM_ARC_SIGCONTEXT_H */
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index fdbe06c98895e..4868bdebf586d 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -61,6 +61,41 @@ struct rt_sigframe {
+ 	unsigned int sigret_magic;
+ };
+ 
++static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++	int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++	struct user_regs_arcv2 v2abi;
++
++	v2abi.r30 = regs->r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++	v2abi.r58 = regs->r58;
++	v2abi.r59 = regs->r59;
++#else
++	v2abi.r58 = v2abi.r59 = 0;
++#endif
++	err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++#endif
++	return err;
++}
++
++static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++	int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++	struct user_regs_arcv2 v2abi;
++
++	err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
++
++	regs->r30 = v2abi.r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++	regs->r58 = v2abi.r58;
++	regs->r59 = v2abi.r59;
++#endif
++#endif
++	return err;
++}
++
+ static int
+ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 	       sigset_t *set)
+@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 
+ 	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
+ 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++	if (is_isa_arcv2())
++		err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+ 	return err ? -EFAULT : 0;
+@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 	err |= __copy_from_user(&uregs.scratch,
+ 				&(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++	if (is_isa_arcv2())
++		err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ 	if (err)
+ 		return -EFAULT;
+ 
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 766f064f00fbf..167fde7f2fce1 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -2242,7 +2242,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ 	bool use_siar = regs_use_siar(regs);
+ 	unsigned long siar = mfspr(SPRN_SIAR);
+ 
+-	if (ppmu->flags & PPMU_P10_DD1) {
++	if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
+ 		if (siar)
+ 			return siar;
+ 		else
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index d9522fc35ca5a..4f116be9152f5 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -54,11 +54,11 @@ config RISCV
+ 	select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ 	select HANDLE_DOMAIN_IRQ
+ 	select HAVE_ARCH_AUDITSYSCALL
+-	select HAVE_ARCH_JUMP_LABEL
+-	select HAVE_ARCH_JUMP_LABEL_RELATIVE
++	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
++	select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
+ 	select HAVE_ARCH_KASAN if MMU && 64BIT
+ 	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+-	select HAVE_ARCH_KGDB
++	select HAVE_ARCH_KGDB if !XIP_KERNEL
+ 	select HAVE_ARCH_KGDB_QXFER_PKT
+ 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+ 	select HAVE_ARCH_SECCOMP_FILTER
+@@ -73,9 +73,9 @@ config RISCV
+ 	select HAVE_GCC_PLUGINS
+ 	select HAVE_GENERIC_VDSO if MMU && 64BIT
+ 	select HAVE_IRQ_TIME_ACCOUNTING
+-	select HAVE_KPROBES
+-	select HAVE_KPROBES_ON_FTRACE
+-	select HAVE_KRETPROBES
++	select HAVE_KPROBES if !XIP_KERNEL
++	select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
++	select HAVE_KRETPROBES if !XIP_KERNEL
+ 	select HAVE_PCI
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+@@ -227,11 +227,11 @@ config ARCH_RV64I
+ 	bool "RV64I"
+ 	select 64BIT
+ 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+-	select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
++	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+-	select HAVE_FTRACE_MCOUNT_RECORD
++	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ 	select HAVE_FUNCTION_GRAPH_TRACER
+-	select HAVE_FUNCTION_TRACER
++	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ 	select SWIOTLB if MMU
+ 
+ endchoice
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 12de7a9c85b35..9cc71ca9a88f9 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
+ .Lcleanup_sie_mcck:
+ 	larl	%r13,.Lsie_entry
+ 	slgr	%r9,%r13
+-	larl	%r13,.Lsie_skip
++	lghi	%r13,.Lsie_skip - .Lsie_entry
+ 	clgr	%r9,%r13
+-	jh	.Lcleanup_sie_int
++	jhe	.Lcleanup_sie_int
+ 	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+ .Lcleanup_sie_int:
+ 	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index ceeba9f631722..fdee23ea4e173 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ 	 * PKRU state is switched eagerly because it needs to be valid before we
+ 	 * return to userland e.g. for a copy_to_user() operation.
+ 	 */
+-	if (current->mm) {
++	if (!(current->flags & PF_KTHREAD)) {
++		/*
++		 * If the PKRU bit in xsave.header.xfeatures is not set,
++		 * then the PKRU component was in init state, which means
++		 * XRSTOR will set PKRU to 0. If the bit is not set then
++		 * get_xsave_addr() will return NULL because the PKRU value
++		 * in memory is not valid. This means pkru_val has to be
++		 * set to 0 and not to init_pkru_value.
++		 */
+ 		pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
+-		if (pk)
+-			pkru_val = pk->pkru;
++		pkru_val = pk ? pk->pkru : 0;
+ 	}
+ 	__write_pkru(pkru_val);
+ }
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index a4ec65317a7fa..ec3ae30547920 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		return 0;
+ 	}
+ 
+-	if (!access_ok(buf, size))
+-		return -EACCES;
++	if (!access_ok(buf, size)) {
++		ret = -EACCES;
++		goto out;
++	}
+ 
+-	if (!static_cpu_has(X86_FEATURE_FPU))
+-		return fpregs_soft_set(current, NULL,
+-				       0, sizeof(struct user_i387_ia32_struct),
+-				       NULL, buf) != 0;
++	if (!static_cpu_has(X86_FEATURE_FPU)) {
++		ret = fpregs_soft_set(current, NULL, 0,
++				      sizeof(struct user_i387_ia32_struct),
++				      NULL, buf);
++		goto out;
++	}
+ 
+ 	if (use_xsave()) {
+ 		struct _fpx_sw_bytes fx_sw_user;
+@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 			fpregs_unlock();
+ 			return 0;
+ 		}
++
++		/*
++		 * The above did an FPU restore operation, restricted to
++		 * the user portion of the registers, and failed, but the
++		 * microcode might have modified the FPU registers
++		 * nevertheless.
++		 *
++		 * If the FPU registers do not belong to current, then
++		 * invalidate the FPU register state otherwise the task might
++		 * preempt current and return to user space with corrupted
++		 * FPU registers.
++		 *
++		 * In case current owns the FPU registers then no further
++		 * action is required. The fixup below will handle it
++		 * correctly.
++		 */
++		if (test_thread_flag(TIF_NEED_FPU_LOAD))
++			__cpu_invalidate_fpregs_state();
++
+ 		fpregs_unlock();
+ 	} else {
+ 		/*
+@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 */
+ 		ret = __copy_from_user(&env, buf, sizeof(env));
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 		envp = &env;
+ 	}
+ 
+@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 	if (use_xsave() && !fx_only) {
+ 		u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
+ 
+-		if (using_compacted_format()) {
+-			ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+-		} else {
+-			ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+-
+-			if (!ret && state_size > offsetof(struct xregs_state, header))
+-				ret = validate_user_xstate_header(&fpu->state.xsave.header);
+-		}
++		ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 
+ 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+ 					      fx_only);
+@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
+ 		if (ret) {
+ 			ret = -EFAULT;
+-			goto err_out;
++			goto out;
+ 		}
+ 
+ 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 	} else {
+ 		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
+ 		if (ret)
+-			goto err_out;
++			goto out;
+ 
+ 		fpregs_lock();
+ 		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
+@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		fpregs_deactivate(fpu);
+ 	fpregs_unlock();
+ 
+-err_out:
++out:
+ 	if (ret)
+ 		fpu__clear_user_states(fpu);
+ 	return ret;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index fa023f3feb25d..43013ac0fd4d9 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+ 	if (!apic_x2apic_mode(apic))
+ 		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ 
++	if (alignment + len > 4)
++		return 1;
++
+ 	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
+ 		return 1;
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index cd0faa1876743..676ec0d1e6be4 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4726,9 +4726,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ }
+ 
++static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
++{
++	union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
++
++	/*
++	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
++	 * shadow pages of their own and so "direct" has no meaning.   Set it
++	 * to "true" to try to detect bogus usage of the nested MMU.
++	 */
++	role.base.direct = true;
++
++	if (!is_paging(vcpu))
++		role.base.level = 0;
++	else if (is_long_mode(vcpu))
++		role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
++						       PT64_ROOT_4LEVEL;
++	else if (is_pae(vcpu))
++		role.base.level = PT32E_ROOT_LEVEL;
++	else
++		role.base.level = PT32_ROOT_LEVEL;
++
++	return role;
++}
++
+ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+ {
+-	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
++	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+ 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ 
+ 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cf37205784297..a6ca7e657af27 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6991,7 +6991,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+ 
+ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+ {
+-	emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
++	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++	vcpu->arch.hflags = emul_flags;
++	kvm_mmu_reset_context(vcpu);
+ }
+ 
+ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+@@ -8147,6 +8150,7 @@ void kvm_arch_exit(void)
+ 	kvm_x86_ops.hardware_enable = NULL;
+ 	kvm_mmu_module_exit();
+ 	free_percpu(user_return_msrs);
++	kmem_cache_destroy(x86_emulator_cache);
+ 	kmem_cache_destroy(x86_fpu_cache);
+ #ifdef CONFIG_KVM_XEN
+ 	static_key_deferred_flush(&kvm_xen_enabled);
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 9e5ccc56f8e07..356b746dfbe7a 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
+ 	if (!IS_ENABLED(CONFIG_EFI))
+ 		return;
+ 
+-	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
++	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
++	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
++	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
+ 		desc->flags |= IORES_MAP_ENCRYPTED;
+ }
+ 
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 5eb4dc2b97dac..e94da744386f3 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
+ 
+ 		/* make sure all non-reserved blocks are inside the limits */
+ 		bi->start = max(bi->start, low);
+-		bi->end = min(bi->end, high);
++
++		/* preserve info for non-RAM areas above 'max_pfn': */
++		if (bi->end > high) {
++			numa_add_memblk_to(bi->nid, high, bi->end,
++					   &numa_reserved_meminfo);
++			bi->end = high;
++		}
+ 
+ 		/* and there's no empty block */
+ 		if (bi->start >= bi->end)
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 0c2827fd8c195..03b1b03349477 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -59,6 +59,7 @@ config DMA_OF
+ #devices
+ config ALTERA_MSGDMA
+ 	tristate "Altera / Intel mSGDMA Engine"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	help
+ 	  Enable support for Altera / Intel mSGDMA controller.
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index 4ec909e0b8106..4ae057922ef1f 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ 	}
+ 
+ 	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++		err = -EINVAL;
+ 		dev_err(dev, "DPDMAI major version mismatch\n"
+ 			     "Found %u.%u, supported version is %u.%u\n",
+ 				priv->dpdmai_attr.version.major,
+@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ 	}
+ 
+ 	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++		err = -EINVAL;
+ 		dev_err(dev, "DPDMAI minor version mismatch\n"
+ 			     "Found %u.%u, supported version is %u.%u\n",
+ 				priv->dpdmai_attr.version.major,
+@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+ 		ppriv->store =
+ 			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ 		if (!ppriv->store) {
++			err = -ENOMEM;
+ 			dev_err(dev, "dpaa2_io_store_create() failed\n");
+ 			goto err_store;
+ 		}
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 59f2104ffc771..eb41bb9df0fd9 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -218,6 +218,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
+ 		engine->idxd = idxd;
+ 		device_initialize(&engine->conf_dev);
+ 		engine->conf_dev.parent = &idxd->conf_dev;
++		engine->conf_dev.bus = &dsa_bus_type;
+ 		engine->conf_dev.type = &idxd_engine_device_type;
+ 		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ 		if (rc < 0) {
+@@ -718,6 +719,7 @@ module_init(idxd_init_module);
+ 
+ static void __exit idxd_exit_module(void)
+ {
++	idxd_unregister_driver();
+ 	pci_unregister_driver(&idxd_pci_driver);
+ 	idxd_cdev_remove();
+ 	idxd_unregister_bus_type();
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index fd8d2bc3be9f5..110de8a600588 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ 	for (i = 0; i < len / period_len; i++) {
+ 		desc = pl330_get_desc(pch);
+ 		if (!desc) {
++			unsigned long iflags;
++
+ 			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
+ 				__func__, __LINE__);
+ 
+ 			if (!first)
+ 				return NULL;
+ 
+-			spin_lock_irqsave(&pl330->pool_lock, flags);
++			spin_lock_irqsave(&pl330->pool_lock, iflags);
+ 
+ 			while (!list_empty(&first->node)) {
+ 				desc = list_entry(first->node.next,
+@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ 
+ 			list_move_tail(&first->node, &pl330->desc_pool);
+ 
+-			spin_unlock_irqrestore(&pl330->pool_lock, flags);
++			spin_unlock_irqrestore(&pl330->pool_lock, iflags);
+ 
+ 			return NULL;
+ 		}
+diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
+index 365f94eb3b081..3f926a653bd88 100644
+--- a/drivers/dma/qcom/Kconfig
++++ b/drivers/dma/qcom/Kconfig
+@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
+ 
+ config QCOM_HIDMA_MGMT
+ 	tristate "Qualcomm Technologies HIDMA Management support"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	help
+ 	  Enable support for the Qualcomm Technologies HIDMA Management.
+diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
+index f8ffa02e279ff..ba46a0a15a936 100644
+--- a/drivers/dma/sf-pdma/Kconfig
++++ b/drivers/dma/sf-pdma/Kconfig
+@@ -1,5 +1,6 @@
+ config SF_PDMA
+ 	tristate "Sifive PDMA controller driver"
++	depends on HAS_IOMEM
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 265d7c07b348e..e1827393143f1 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
+ 
+ 	kfree(base->lcla_pool.base_unaligned);
+ 
++	if (base->lcpa_base)
++		iounmap(base->lcpa_base);
++
+ 	if (base->phy_lcpa)
+ 		release_mem_region(base->phy_lcpa,
+ 				   base->lcpa_size);
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 70b29bd079c9f..ff7dfb3fdeb47 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -1459,7 +1459,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
+  */
+ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
+ {
+-	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
++	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
+ 	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+ }
+ 
+@@ -1596,6 +1596,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ 	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
+ }
+ 
++static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
++{
++	unsigned int i;
++	void __iomem *reg;
++
++	/* Disable all interrupts */
++	xilinx_dpdma_disable_irq(xdev);
++
++	/* Stop all channels */
++	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
++		reg = xdev->reg + XILINX_DPDMA_CH_BASE
++				+ XILINX_DPDMA_CH_OFFSET * i;
++		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
++	}
++
++	/* Clear the interrupt status registers */
++	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
++	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
++}
++
+ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ {
+ 	struct xilinx_dpdma_device *xdev;
+@@ -1622,6 +1642,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ 	if (IS_ERR(xdev->reg))
+ 		return PTR_ERR(xdev->reg);
+ 
++	dpdma_hw_init(xdev);
++
+ 	xdev->irq = platform_get_irq(pdev, 0);
+ 	if (xdev->irq < 0) {
+ 		dev_err(xdev->dev, "failed to get platform irq\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 2342c5d216f9b..72d23651501d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6769,8 +6769,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 			(adev->doorbell_index.kiq * 2) << 2);
++		/* If GC has entered CGPG, ringing doorbell > first page doesn't
++		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
++		 * this issue.
++		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-			(adev->doorbell_index.userqueue_end * 2) << 2);
++			(adev->doorbell.size - 4));
+ 	}
+ 
+ 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index d2c020a91c0be..1fdfb7783404e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3623,8 +3623,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
+ 	if (ring->use_doorbell) {
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ 					(adev->doorbell_index.kiq * 2) << 2);
++		/* If GC has entered CGPG, ringing doorbell > first page doesn't
++		 * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
++		 * this issue.
++		 */
+ 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+-					(adev->doorbell_index.userqueue_end * 2) << 2);
++					(adev->doorbell.size - 4));
+ 	}
+ 
+ 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index dfa9fdbe98da2..06bb24d7a9fee 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ 	if (rdev->uvd.vcpu_bo == NULL)
+ 		return -EINVAL;
+ 
+-	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
++	memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+ 
+ 	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ 	size -= rdev->uvd_fw->size;
+@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ 	ptr = rdev->uvd.cpu_addr;
+ 	ptr += rdev->uvd_fw->size;
+ 
+-	memset(ptr, 0, size);
++	memset_io((void __iomem *)ptr, 0, size);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+index bbdfd5e26ec88..f75fb157f2ff7 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ 		goto err_disable_clk_tmds;
+ 	}
+ 
+-	ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
++	ret = sun8i_hdmi_phy_get(hdmi, phy_node);
+ 	of_node_put(phy_node);
+ 	if (ret) {
+ 		dev_err(dev, "Couldn't get the HDMI PHY\n");
+@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ 
+ cleanup_encoder:
+ 	drm_encoder_cleanup(encoder);
+-	sun8i_hdmi_phy_remove(hdmi);
+ err_disable_clk_tmds:
+ 	clk_disable_unprepare(hdmi->clk_tmds);
+ err_assert_ctrl_reset:
+@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
+ 	struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
+ 
+ 	dw_hdmi_unbind(hdmi->hdmi);
+-	sun8i_hdmi_phy_remove(hdmi);
+ 	clk_disable_unprepare(hdmi->clk_tmds);
+ 	reset_control_assert(hdmi->rst_ctrl);
+ 	gpiod_set_value(hdmi->ddc_en, 0);
+@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+ 		.of_match_table = sun8i_dw_hdmi_dt_ids,
+ 	},
+ };
+-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
++
++static int __init sun8i_dw_hdmi_init(void)
++{
++	int ret;
++
++	ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
++	if (ret)
++		return ret;
++
++	ret = platform_driver_register(&sun8i_hdmi_phy_driver);
++	if (ret) {
++		platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++		return ret;
++	}
++
++	return ret;
++}
++
++static void __exit sun8i_dw_hdmi_exit(void)
++{
++	platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++	platform_driver_unregister(&sun8i_hdmi_phy_driver);
++}
++
++module_init(sun8i_dw_hdmi_init);
++module_exit(sun8i_dw_hdmi_exit);
+ 
+ MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+ MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+index d4b55af0592f8..74f6ed0e25709 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
+ 	struct gpio_desc		*ddc_en;
+ };
+ 
++extern struct platform_driver sun8i_hdmi_phy_driver;
++
+ static inline struct sun8i_dw_hdmi *
+ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
+ {
+ 	return container_of(encoder, struct sun8i_dw_hdmi, encoder);
+ }
+ 
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+ 
+ void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+ void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+index 9994edf675096..c9239708d398c 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
++++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/delay.h>
+ #include <linux/of_address.h>
++#include <linux/of_platform.h>
+ 
+ #include "sun8i_dw_hdmi.h"
+ 
+@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
+ 	{ /* sentinel */ }
+ };
+ 
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++{
++	struct platform_device *pdev = of_find_device_by_node(node);
++	struct sun8i_hdmi_phy *phy;
++
++	if (!pdev)
++		return -EPROBE_DEFER;
++
++	phy = platform_get_drvdata(pdev);
++	if (!phy)
++		return -EPROBE_DEFER;
++
++	hdmi->phy = phy;
++
++	put_device(&pdev->dev);
++
++	return 0;
++}
++
++static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *match;
+-	struct device *dev = hdmi->dev;
++	struct device *dev = &pdev->dev;
++	struct device_node *node = dev->of_node;
+ 	struct sun8i_hdmi_phy *phy;
+ 	struct resource res;
+ 	void __iomem *regs;
+@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+ 		clk_prepare_enable(phy->clk_phy);
+ 	}
+ 
+-	hdmi->phy = phy;
++	platform_set_drvdata(pdev, phy);
+ 
+ 	return 0;
+ 
+@@ -728,9 +749,9 @@ err_put_clk_bus:
+ 	return ret;
+ }
+ 
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
++static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
+ {
+-	struct sun8i_hdmi_phy *phy = hdmi->phy;
++	struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
+ 
+ 	clk_disable_unprepare(phy->clk_mod);
+ 	clk_disable_unprepare(phy->clk_bus);
+@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+ 	clk_put(phy->clk_pll1);
+ 	clk_put(phy->clk_mod);
+ 	clk_put(phy->clk_bus);
++	return 0;
+ }
++
++struct platform_driver sun8i_hdmi_phy_driver = {
++	.probe  = sun8i_hdmi_phy_probe,
++	.remove = sun8i_hdmi_phy_remove,
++	.driver = {
++		.name = "sun8i-hdmi-phy",
++		.of_match_table = sun8i_hdmi_phy_of_table,
++	},
++};
+diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
+index 25aac40f2764a..919877970ae3b 100644
+--- a/drivers/hwmon/scpi-hwmon.c
++++ b/drivers/hwmon/scpi-hwmon.c
+@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
+ 
+ 	scpi_scale_reading(&value, sensor);
+ 
++	/*
++	 * Temperature sensor values are treated as signed values based on
++	 * observation even though that is not explicitly specified, and
++	 * because an unsigned u64 temperature does not really make practical
++	 * sense especially when the temperature is below zero degrees Celsius.
++	 */
++	if (sensor->info.class == TEMPERATURE)
++		return sprintf(buf, "%lld\n", (s64)value);
++
+ 	return sprintf(buf, "%llu\n", value);
+ }
+ 
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 00404024d7cd5..fea237838bb0a 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+ 		nmi_exit();
+ }
+ 
++static u32 do_read_iar(struct pt_regs *regs)
++{
++	u32 iar;
++
++	if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
++		u64 pmr;
++
++		/*
++		 * We were in a context with IRQs disabled. However, the
++		 * entry code has set PMR to a value that allows any
++		 * interrupt to be acknowledged, and not just NMIs. This can
++		 * lead to surprising effects if the NMI has been retired in
++		 * the meantime, and that there is an IRQ pending. The IRQ
++		 * would then be taken in NMI context, something that nobody
++		 * wants to debug twice.
++		 *
++		 * Until we sort this, drop PMR again to a level that will
++		 * actually only allow NMIs before reading IAR, and then
++		 * restore it to what it was.
++		 */
++		pmr = gic_read_pmr();
++		gic_pmr_mask_irqs();
++		isb();
++
++		iar = gic_read_iar();
++
++		gic_write_pmr(pmr);
++	} else {
++		iar = gic_read_iar();
++	}
++
++	return iar;
++}
++
+ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ {
+ 	u32 irqnr;
+ 
+-	irqnr = gic_read_iar();
++	irqnr = do_read_iar(regs);
+ 
+ 	/* Check for special IDs first */
+ 	if ((irqnr >= 1020 && irqnr <= 1023))
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 1f649d1780107..4484f9c079b75 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -82,6 +82,8 @@ struct mcba_priv {
+ 	bool can_ka_first_pass;
+ 	bool can_speed_check;
+ 	atomic_t free_ctx_cnt;
++	void *rxbuf[MCBA_MAX_RX_URBS];
++	dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ };
+ 
+ /* CAN frame */
+@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 	for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+ 		u8 *buf;
++		dma_addr_t buf_dma;
+ 
+ 		/* create a URB, and a buffer for it */
+ 		urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 		}
+ 
+ 		buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+-					 GFP_KERNEL, &urb->transfer_dma);
++					 GFP_KERNEL, &buf_dma);
+ 		if (!buf) {
+ 			netdev_err(netdev, "No memory left for USB buffer\n");
+ 			usb_free_urb(urb);
+@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ 		if (err) {
+ 			usb_unanchor_urb(urb);
+ 			usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+-					  buf, urb->transfer_dma);
++					  buf, buf_dma);
+ 			usb_free_urb(urb);
+ 			break;
+ 		}
+ 
++		priv->rxbuf[i] = buf;
++		priv->rxbuf_dma[i] = buf_dma;
++
+ 		/* Drop reference, USB core will take care of freeing it */
+ 		usb_free_urb(urb);
+ 	}
+@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
+ 
+ static void mcba_urb_unlink(struct mcba_priv *priv)
+ {
++	int i;
++
+ 	usb_kill_anchored_urbs(&priv->rx_submitted);
++
++	for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
++		usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
++				  priv->rxbuf[i], priv->rxbuf_dma[i]);
++
+ 	usb_kill_anchored_urbs(&priv->tx_submitted);
+ }
+ 
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 102f2c91fdb85..20f8012bbe04a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+ 				struct ena_tx_buffer *tx_info,
+ 				struct xdp_frame *xdpf,
+-				void **push_hdr,
+-				u32 *push_len)
++				struct ena_com_tx_ctx *ena_tx_ctx)
+ {
+ 	struct ena_adapter *adapter = xdp_ring->adapter;
+ 	struct ena_com_buf *ena_buf;
+-	dma_addr_t dma = 0;
++	int push_len = 0;
++	dma_addr_t dma;
++	void *data;
+ 	u32 size;
+ 
+ 	tx_info->xdpf = xdpf;
++	data = tx_info->xdpf->data;
+ 	size = tx_info->xdpf->len;
+-	ena_buf = tx_info->bufs;
+ 
+-	/* llq push buffer */
+-	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+-	*push_hdr = tx_info->xdpf->data;
++	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
++		/* Designate part of the packet for LLQ */
++		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
++
++		ena_tx_ctx->push_header = data;
++
++		size -= push_len;
++		data += push_len;
++	}
++
++	ena_tx_ctx->header_len = push_len;
+ 
+-	if (size - *push_len > 0) {
++	if (size > 0) {
+ 		dma = dma_map_single(xdp_ring->dev,
+-				     *push_hdr + *push_len,
+-				     size - *push_len,
++				     data,
++				     size,
+ 				     DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ 			goto error_report_dma_error;
+ 
+-		tx_info->map_linear_data = 1;
+-		tx_info->num_of_bufs = 1;
+-	}
++		tx_info->map_linear_data = 0;
+ 
+-	ena_buf->paddr = dma;
+-	ena_buf->len = size;
++		ena_buf = tx_info->bufs;
++		ena_buf->paddr = dma;
++		ena_buf->len = size;
++
++		ena_tx_ctx->ena_bufs = ena_buf;
++		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
++	}
+ 
+ 	return 0;
+ 
+@@ -274,10 +286,6 @@ error_report_dma_error:
+ 			  &xdp_ring->syncp);
+ 	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+ 
+-	xdp_return_frame_rx_napi(tx_info->xdpf);
+-	tx_info->xdpf = NULL;
+-	tx_info->num_of_bufs = 0;
+-
+ 	return -EINVAL;
+ }
+ 
+@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ 	struct ena_com_tx_ctx ena_tx_ctx = {};
+ 	struct ena_tx_buffer *tx_info;
+ 	u16 next_to_use, req_id;
+-	void *push_hdr;
+-	u32 push_len;
+ 	int rc;
+ 
+ 	next_to_use = xdp_ring->next_to_use;
+@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ 	tx_info = &xdp_ring->tx_buffer_info[req_id];
+ 	tx_info->num_of_bufs = 0;
+ 
+-	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
++	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+ 	if (unlikely(rc))
+ 		goto error_drop_packet;
+ 
+-	ena_tx_ctx.ena_bufs = tx_info->bufs;
+-	ena_tx_ctx.push_header = push_hdr;
+-	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ 	ena_tx_ctx.req_id = req_id;
+-	ena_tx_ctx.header_len = push_len;
+ 
+ 	rc = ena_xmit_common(dev,
+ 			     xdp_ring,
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 9e02f88645931..5e90df42b2013 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1849,6 +1849,7 @@ out_free_netdev:
+ 	free_netdev(netdev);
+ out_pci_release:
+ 	pci_release_mem_regions(pdev);
++	pci_disable_pcie_error_reporting(pdev);
+ out_pci_disable:
+ 	pci_disable_device(pdev);
+ 	return err;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 027997c711aba..c118de27bc5c3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7295,7 +7295,7 @@ skip_rdma:
+ 	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ 	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+-	entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
++	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
+ 	entries = roundup(entries, ctx->tqm_entries_multiple);
+ 	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+@@ -11573,6 +11573,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
+ 	bnxt_hwrm_coal_params_qcaps(bp);
+ }
+ 
++static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
++
+ static int bnxt_fw_init_one(struct bnxt *bp)
+ {
+ 	int rc;
+@@ -11587,6 +11589,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
+ 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ 		return rc;
+ 	}
++	rc = bnxt_probe_phy(bp, false);
++	if (rc)
++		return rc;
+ 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ 	if (rc)
+ 		return rc;
+@@ -12976,6 +12981,7 @@ init_err_pci_clean:
+ 	bnxt_hwrm_func_drv_unrgtr(bp);
+ 	bnxt_free_hwrm_short_cmd_req(bp);
+ 	bnxt_free_hwrm_resources(bp);
++	bnxt_ethtool_free(bp);
+ 	kfree(bp->fw_health);
+ 	bp->fw_health = NULL;
+ 	bnxt_cleanup_pci(bp);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 61ea3ec5c3fcc..83ed10ac86606 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
+ 		return ret;
+ 	}
+ 
+-	spin_lock_bh(&adap->win0_lock);
++	/* We have to RESET the chip/firmware because we need the
++	 * chip in uninitialized state for loading new PHY image.
++	 * Otherwise, the running firmware will only store the PHY
++	 * image in local RAM which will be lost after next reset.
++	 */
++	ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
++	if (ret < 0) {
++		dev_err(adap->pdev_dev,
++			"Set FW to RESET for flashing PHY FW failed. ret: %d\n",
++			ret);
++		return ret;
++	}
++
+ 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
+-	spin_unlock_bh(&adap->win0_lock);
+-	if (ret)
+-		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
++	if (ret < 0) {
++		dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
++			ret);
++		return ret;
++	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
+@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
+ 						   u32 ftid)
+ {
+ 	struct tid_info *t = &adap->tids;
+-	struct filter_entry *f;
+ 
+-	if (ftid < t->nhpftids)
+-		f = &adap->tids.hpftid_tab[ftid];
+-	else if (ftid < t->nftids)
+-		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
+-	else
+-		f = lookup_tid(&adap->tids, ftid);
++	if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
++		return &t->hpftid_tab[ftid - t->hpftid_base];
+ 
+-	return f;
++	if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
++		return &t->ftid_tab[ftid - t->ftid_base];
++
++	return lookup_tid(t, ftid);
+ }
+ 
+ static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
+@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
+ 	filter_id = filter_info->loc_array[cmd->fs.location];
+ 	f = cxgb4_get_filter_entry(adapter, filter_id);
+ 
++	if (f->fs.prio)
++		filter_id -= adapter->tids.hpftid_base;
++	else if (!f->fs.hash)
++		filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ 	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
+ 	if (ret)
+ 		goto err;
+@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
+ 
+ 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
+ 
++	if (fs.prio)
++		tid += adapter->tids.hpftid_base;
++	else if (!fs.hash)
++		tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ 	filter_info->loc_array[cmd->fs.location] = tid;
+ 	set_bit(cmd->fs.location, filter_info->bmap);
+ 	filter_info->in_use++;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index e664e05b9f026..5fbc087268dbe 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      (u64)f->fs.nat_lip[0] << 25, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 1f601de02e706..762113a04dde6 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
+ 
+ 	/* Load PHY Firmware onto adapter.
+ 	 */
+-	spin_lock_bh(&adap->win0_lock);
+ 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
+ 			     (u8 *)phyf->data, phyf->size);
+-	spin_unlock_bh(&adap->win0_lock);
+ 	if (ret < 0)
+ 		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ 			-ret);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 80882cfc370f5..601853bb34c91 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
+  *	@addr: the start address to write
+  *	@n: length of data to write in bytes
+  *	@data: the data to write
++ *	@byte_oriented: whether to store data as bytes or as words
+  *
+  *	Writes up to a page of data (256 bytes) to the serial flash starting
+  *	at the given address.  All the data must be written to the same page.
++ *	If @byte_oriented is set the write data is stored as byte stream
++ *	(i.e. matches what on disk), otherwise in big-endian.
+  */
+ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+-			  unsigned int n, const u8 *data)
++			  unsigned int n, const u8 *data, bool byte_oriented)
+ {
+-	int ret;
+-	u32 buf[64];
+ 	unsigned int i, c, left, val, offset = addr & 0xff;
++	u32 buf[64];
++	int ret;
+ 
+ 	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+ 		return -EINVAL;
+@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ 	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ 		goto unlock;
+ 
+-	for (left = n; left; left -= c) {
++	for (left = n; left; left -= c, data += c) {
+ 		c = min(left, 4U);
+-		for (val = 0, i = 0; i < c; ++i)
+-			val = (val << 8) + *data++;
++		for (val = 0, i = 0; i < c; ++i) {
++			if (byte_oriented)
++				val = (val << 8) + data[i];
++			else
++				val = (val << 8) + data[c - i - 1];
++		}
+ 
+ 		ret = sf1_write(adapter, c, c != left, 1, val);
+ 		if (ret)
+@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ 	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
+ 
+ 	/* Read the page to verify the write succeeded */
+-	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
++	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
++			    byte_oriented);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ 	 */
+ 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ 	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
+-	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
++	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ 		addr += SF_PAGE_SIZE;
+ 		fw_data += SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
++		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+-	ret = t4_write_flash(adap,
+-			     fw_start + offsetof(struct fw_hdr, fw_ver),
+-			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
++	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
++			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
++			     true);
+ out:
+ 	if (ret)
+ 		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
+ 	/* Copy the supplied PHY Firmware image to the adapter memory location
+ 	 * allocated by the adapter firmware.
+ 	 */
++	spin_lock_bh(&adap->win0_lock);
+ 	ret = t4_memory_rw(adap, win, mtype, maddr,
+ 			   phy_fw_size, (__be32 *)phy_fw_data,
+ 			   T4_MEMORY_WRITE);
++	spin_unlock_bh(&adap->win0_lock);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 			n = size - i;
+ 		else
+ 			n = SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, n, cfg_data);
++		ret = t4_write_flash(adap, addr, n, cfg_data, true);
+ 		if (ret)
+ 			goto out;
+ 
+@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
+ 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ 		addr += SF_PAGE_SIZE;
+ 		boot_data += SF_PAGE_SIZE;
+-		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
++		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
++				     false);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+ 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+-			     (const u8 *)header);
++			     (const u8 *)header, false);
+ 
+ out:
+ 	if (ret)
+@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ 		n = min_t(u32, size - i, SF_PAGE_SIZE);
+ 
+-		ret = t4_write_flash(adap, addr, n, cfg_data);
++		ret = t4_write_flash(adap, addr, n, cfg_data, false);
+ 		if (ret)
+ 			goto out;
+ 
+@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ 	for (i = 0; i < npad; i++) {
+ 		u8 data = 0;
+ 
+-		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
++		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
++				     false);
+ 		if (ret)
+ 			goto out;
+ 	}
+diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
+index 46b0dbab8aadc..7c992172933bc 100644
+--- a/drivers/net/ethernet/ec_bhf.c
++++ b/drivers/net/ethernet/ec_bhf.c
+@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
+ 	struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ 
+ 	unregister_netdev(net_dev);
+-	free_netdev(net_dev);
+ 
+ 	pci_iounmap(dev, priv->dma_io);
+ 	pci_iounmap(dev, priv->io);
++
++	free_netdev(net_dev);
++
+ 	pci_release_regions(dev);
+ 	pci_clear_master(dev);
+ 	pci_disable_device(dev);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index b6eba29d8e99e..7968568bbe214 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -5897,6 +5897,7 @@ drv_cleanup:
+ unmap_bars:
+ 	be_unmap_pci_bars(adapter);
+ free_netdev:
++	pci_disable_pcie_error_reporting(pdev);
+ 	free_netdev(netdev);
+ rel_reg:
+ 	pci_release_regions(pdev);
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 1753807cbf97e..d71eac7e19249 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
+ {
+ 	struct fec_enet_private *fep =
+ 		container_of(cc, struct fec_enet_private, cc);
+-	const struct platform_device_id *id_entry =
+-		platform_get_device_id(fep->pdev);
+ 	u32 tempval;
+ 
+ 	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ 	tempval |= FEC_T_CTRL_CAPTURE;
+ 	writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+ 
+-	if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
++	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ 		udelay(1);
+ 
+ 	return readl(fep->hwp + FEC_ATIME);
+@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ 	fep->ptp_caps.enable = fec_ptp_enable;
+ 
+ 	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
++	if (!fep->cycle_speed) {
++		fep->cycle_speed = NSEC_PER_SEC;
++		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
++	}
+ 	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+ 
+ 	spin_lock_init(&fep->tmreg_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 27e439853c3b0..55432ea360ad4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1715,12 +1715,13 @@ setup_rings:
+  * ice_vsi_cfg_txqs - Configure the VSI for Tx
+  * @vsi: the VSI being configured
+  * @rings: Tx ring array to be configured
++ * @count: number of Tx ring array elements
+  *
+  * Return 0 on success and a negative value on error
+  * Configure the Tx VSI for operation.
+  */
+ static int
+-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
++ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ {
+ 	struct ice_aqc_add_tx_qgrp *qg_buf;
+ 	u16 q_idx = 0;
+@@ -1732,7 +1733,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ 
+ 	qg_buf->num_txqs = 1;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++	for (q_idx = 0; q_idx < count; q_idx++) {
+ 		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ 		if (err)
+ 			goto err_cfg_txqs;
+@@ -1752,7 +1753,7 @@ err_cfg_txqs:
+  */
+ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+ {
+-	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
++	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+ }
+ 
+ /**
+@@ -1767,7 +1768,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+ 	int ret;
+ 	int i;
+ 
+-	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
++	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1965,17 +1966,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
+  * @rst_src: reset source
+  * @rel_vmvf_num: Relative ID of VF/VM
+  * @rings: Tx ring array to be stopped
++ * @count: number of Tx ring array elements
+  */
+ static int
+ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+-		      u16 rel_vmvf_num, struct ice_ring **rings)
++		      u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ {
+ 	u16 q_idx;
+ 
+ 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ 		return -EINVAL;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++	for (q_idx = 0; q_idx < count; q_idx++) {
+ 		struct ice_txq_meta txq_meta = { };
+ 		int status;
+ 
+@@ -2003,7 +2005,7 @@ int
+ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ 			  u16 rel_vmvf_num)
+ {
+-	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
++	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
+ }
+ 
+ /**
+@@ -2012,7 +2014,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+  */
+ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+ {
+-	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
++	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index d821c687f239c..b61cd84be97fd 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2554,6 +2554,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
+ }
+ 
++/**
++ * ice_xdp_safe_mode - XDP handler for safe mode
++ * @dev: netdevice
++ * @xdp: XDP command
++ */
++static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
++			     struct netdev_bpf *xdp)
++{
++	NL_SET_ERR_MSG_MOD(xdp->extack,
++			   "Please provide working DDP firmware package in order to use XDP\n"
++			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
++	return -EOPNOTSUPP;
++}
++
+ /**
+  * ice_xdp - implements XDP handler
+  * @dev: netdevice
+@@ -6805,6 +6819,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
+ 	.ndo_change_mtu = ice_change_mtu,
+ 	.ndo_get_stats64 = ice_get_stats64,
+ 	.ndo_tx_timeout = ice_tx_timeout,
++	.ndo_bpf = ice_xdp_safe_mode,
+ };
+ 
+ static const struct net_device_ops ice_netdev_ops = {
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 135ba5b6ae980..072075bc60ee9 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
+ 
+ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ {
++	struct sk_buff *skb = ch->skb[ch->dma.desc];
+ 	dma_addr_t mapping;
+ 	int ret = 0;
+ 
+@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
+ 				 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ 	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
++		ch->skb[ch->dma.desc] = skb;
+ 		ret = -ENOMEM;
+ 		goto skip;
+ 	}
+@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ 	ch->dma.desc %= LTQ_DESC_NUM;
+ 
+ 	if (ret) {
+-		ch->skb[ch->dma.desc] = skb;
+ 		net_dev->stats.rx_dropped++;
+ 		netdev_err(net_dev, "failed to allocate new rx buffer\n");
+ 		return ret;
+@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
+ 	struct xrx200_chan *ch = ptr;
+ 
+ 	if (napi_schedule_prep(&ch->napi)) {
+-		__napi_schedule(&ch->napi);
+ 		ltq_dma_disable_irq(&ch->dma);
++		__napi_schedule(&ch->napi);
+ 	}
+ 
+ 	ltq_dma_ack_irq(&ch->dma);
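
A note on the xrx200_alloc_skb() hunk above: on a DMA mapping failure the ring slot used to be left pointing at a freed skb (the old code patched it up afterwards in xrx200_hw_receive()); the fix snapshots the previous skb before overwriting the slot and restores it on the error path. Below is a minimal, self-contained sketch of that restore-on-error refill pattern — the names (refill_slot, map_buffer) are illustrative stand-ins, not the driver's API:

  #include <stdio.h>
  #include <stdlib.h>

  struct slot { void *buf; };

  static void *map_buffer(void)        /* stand-in for dma_map_single() */
  {
          return NULL;                 /* simulate a mapping failure */
  }

  static int refill_slot(struct slot *s)
  {
          void *old = s->buf;          /* snapshot before overwriting */
          void *new = malloc(64);

          if (!new)
                  return -1;
          s->buf = new;
          if (!map_buffer()) {         /* mapping failed */
                  free(new);
                  s->buf = old;        /* restore: slot keeps a valid buffer */
                  return -1;
          }
          return 0;
  }

  int main(void)
  {
          struct slot s = { .buf = (void *)0x1 };

          refill_slot(&s);
          printf("slot still holds %p after failed refill\n", s.buf);
          return 0;
  }

After a failed refill the slot still owns a usable buffer, so the ring keeps running in a degraded but safe state instead of dereferencing freed memory.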
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 9153c9bda96fa..897853a68cd03 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -306,6 +306,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 	int ret = 0, i;
+ 
+ 	mutex_lock(&mlx5_intf_mutex);
++	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ 	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
+ 		if (!priv->adev[i]) {
+ 			bool is_supported = false;
+@@ -323,6 +324,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 			}
+ 		} else {
+ 			adev = &priv->adev[i]->adev;
++
++			/* Note that mlx5_core_dev is bound to an auxiliary
++			 * driver here, not to a PCI driver.
++			 *
++			 * Module unload can race with devlink reload here,
++			 * but no extra lock is needed because we are holding
++			 * the global mlx5_intf_mutex.
++			 */
++			if (!adev->dev.driver)
++				continue;
+ 			adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+ 			if (adrv->resume)
+@@ -353,6 +364,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 			continue;
+ 
+ 		adev = &priv->adev[i]->adev;
++		/* Auxiliary driver was unbound manually through sysfs */
++		if (!adev->dev.driver)
++			goto skip_suspend;
++
+ 		adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+ 		if (adrv->suspend) {
+@@ -360,9 +375,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 			continue;
+ 		}
+ 
++skip_suspend:
+ 		del_adev(&priv->adev[i]->adev);
+ 		priv->adev[i] = NULL;
+ 	}
++	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
+ 	mutex_unlock(&mlx5_intf_mutex);
+ }
+ 
+@@ -451,6 +468,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
+ 	struct mlx5_priv *priv = &dev->priv;
+ 
+ 	lockdep_assert_held(&mlx5_intf_mutex);
++	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
++		return 0;
+ 
+ 	delete_drivers(dev);
+ 	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+index be0ee03de7217..2e9bee4e5209b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
+ 							     work);
+ 	struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
+ 	struct neighbour *n = update_work->n;
++	struct mlx5e_encap_entry *e = NULL;
+ 	bool neigh_connected, same_dev;
+-	struct mlx5e_encap_entry *e;
+ 	unsigned char ha[ETH_ALEN];
+-	struct mlx5e_priv *priv;
+ 	u8 nud_state, dead;
+ 
+ 	rtnl_lock();
+@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
+ 	if (!same_dev)
+ 		goto out;
+ 
+-	list_for_each_entry(e, &nhe->encap_list, encap_list) {
+-		if (!mlx5e_encap_take(e))
+-			continue;
++	/* mlx5e_get_next_init_encap() releases previous encap before returning
++	 * the next one.
++	 */
++	while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
++		mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
+ 
+-		priv = netdev_priv(e->out_dev);
+-		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+-		mlx5e_encap_put(priv, e);
+-	}
+ out:
+ 	rtnl_unlock();
+ 	mlx5e_release_neigh_update_work(update_work);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 96ba027dbef3d..9992f94f794b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -93,13 +93,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ 
+ 	ASSERT_RTNL();
+ 
+-	/* wait for encap to be fully initialized */
+-	wait_for_completion(&e->res_ready);
+-
+ 	mutex_lock(&esw->offloads.encap_tbl_lock);
+ 	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+-	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+-				    ether_addr_equal(e->h_dest, ha)))
++	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
+ 		goto unlock;
+ 
+ 	mlx5e_take_all_encap_flows(e, &flow_list);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+index 1560fcbf4ac7c..a17d79effa273 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+@@ -250,9 +250,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
+ 		mlx5e_take_tmp_flow(flow, flow_list, 0);
+ }
+ 
++typedef bool (match_cb)(struct mlx5e_encap_entry *);
++
+ static struct mlx5e_encap_entry *
+-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+-			   struct mlx5e_encap_entry *e)
++mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
++			      struct mlx5e_encap_entry *e,
++			      match_cb match)
+ {
+ 	struct mlx5e_encap_entry *next = NULL;
+ 
+@@ -287,7 +290,7 @@ retry:
+ 	/* wait for encap to be fully initialized */
+ 	wait_for_completion(&next->res_ready);
+ 	/* continue searching if encap entry is not in valid state after completion */
+-	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
++	if (!match(next)) {
+ 		e = next;
+ 		goto retry;
+ 	}
+@@ -295,6 +298,30 @@ retry:
+ 	return next;
+ }
+ 
++static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
++{
++	return e->flags & MLX5_ENCAP_ENTRY_VALID;
++}
++
++static struct mlx5e_encap_entry *
++mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
++			   struct mlx5e_encap_entry *e)
++{
++	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
++}
++
++static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
++{
++	return e->compl_result >= 0;
++}
++
++struct mlx5e_encap_entry *
++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
++			  struct mlx5e_encap_entry *e)
++{
++	return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
++}
++
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ {
+ 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index 3d45341e2216f..26f7fab109d97 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	struct net_device *netdev = priv->netdev;
+ 
+-	if (!priv->ipsec)
+-		return;
+-
+ 	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
+ 	    !MLX5_CAP_ETH(mdev, swp)) {
+ 		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 99dc9f2beed5b..16b8f52450329 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5168,22 +5168,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ 	}
+ 
+ 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
+-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+-					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+-					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+-		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+-					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
++		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
++		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
++		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
+ 	}
+ 
+ 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
+-		netdev->hw_features     |= NETIF_F_GSO_GRE |
+-					   NETIF_F_GSO_GRE_CSUM;
+-		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+-					   NETIF_F_GSO_GRE_CSUM;
+-		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+-						NETIF_F_GSO_GRE_CSUM;
++		netdev->hw_features     |= NETIF_F_GSO_GRE;
++		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
++		netdev->gso_partial_features |= NETIF_F_GSO_GRE;
+ 	}
+ 
+ 	if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index b633f669ea57f..b3b8e44540a5d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4622,7 +4622,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ 	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
+ 		wait_for_completion(&hpe->res_ready);
+ 		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
+-			hpe->hp->pair->peer_gone = true;
++			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
+ 
+ 		mlx5e_hairpin_put(priv, hpe);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index 25c091795bcd8..17027536efbaa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
+ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
+ 
+ struct mlx5e_neigh_hash_entry;
++struct mlx5e_encap_entry *
++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
++			  struct mlx5e_encap_entry *e);
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
+ 
+ void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 1fa9c18563da9..31c6a3b91f4a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
+ 
+ 	eqe = next_eqe_sw(eq);
+ 	if (!eqe)
+-		return 0;
++		goto out;
+ 
+ 	do {
+ 		struct mlx5_core_cq *cq;
+@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
+ 		++eq->cons_index;
+ 
+ 	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
++
++out:
+ 	eq_update_ci(eq, 1);
+ 
+ 	if (cqn != -1)
+@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
+ 		++eq->cons_index;
+ 
+ 	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+-	eq_update_ci(eq, 1);
+ 
+ out:
++	eq_update_ci(eq, 1);
+ 	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
+ 
+ 	return unlikely(recovery) ? num_eqes : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 2c6d95900e3c9..a3edeea4ddd78 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1308,6 +1308,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ 			goto err_vhca_mapping;
+ 	}
+ 
++	/* External controller host PF has factory programmed MAC.
++	 * Read it from the device.
++	 */
++	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
++		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
++
+ 	esw_vport_change_handle_locked(vport);
+ 
+ 	esw->enabled_vports++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index efb93d63e54cb..58b8f75d7a01e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1157,7 +1157,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ 	err = mlx5_core_set_hca_defaults(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "Failed to set hca defaults\n");
+-		goto err_sriov;
++		goto err_set_hca;
+ 	}
+ 
+ 	mlx5_vhca_event_start(dev);
+@@ -1190,6 +1190,7 @@ err_ec:
+ 	mlx5_sf_hw_table_destroy(dev);
+ err_vhca:
+ 	mlx5_vhca_event_stop(dev);
++err_set_hca:
+ 	mlx5_cleanup_fs(dev);
+ err_fs:
+ 	mlx5_accel_tls_cleanup(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+index 50af84e76fb6a..174f71ed52800 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ 	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+ 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+ 	mkey->size = MLX5_GET64(mkc, mkc, len);
+-	mkey->key |= mlx5_idx_to_mkey(mkey_index);
++	mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
+ 	mkey->pd = MLX5_GET(mkc, mkc, pd);
+ 	init_waitqueue_head(&mkey->wait);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 8e0dddc6383f0..2389239acadc9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+ {
+ 	int err;
+ 
++	if (!MLX5_CAP_GEN(dev, roce))
++		return;
++
+ 	err = mlx5_nic_vport_enable_roce(dev);
+ 	if (err) {
+ 		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+index 90b524c59f3c3..c4139f4648bf1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+@@ -153,6 +153,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
+ 	sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ 	sf_dev = xa_load(&table->devices, sf_index);
+ 	switch (event->new_vhca_state) {
++	case MLX5_VHCA_STATE_INVALID:
+ 	case MLX5_VHCA_STATE_ALLOCATED:
+ 		if (sf_dev)
+ 			mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+index f146c618a78e7..46ef45fa91675 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+@@ -712,7 +712,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ 	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ 		return -EINVAL;
+ 
+-	memcpy(padded_data, data, data_sz);
++	inline_data_sz =
++		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
++
++	/* Add alignment padding */
++	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+ 
+ 	/* Remove L2L3 outer headers */
+ 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
+@@ -724,32 +728,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ 	hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ 	used_actions++; /* Remove and NOP are a single double action */
+ 
+-	inline_data_sz =
+-		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
++	/* Point to the last dword of the header */
++	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+ 
+-	/* Add the new header inline + 2 extra bytes */
++	/* Add the new header using the inline action, 4 bytes at a time. The
++	 * header is added in reverse order, to the beginning of the packet,
++	 * to avoid incorrect parsing by the HW. Since the header is 14B or
++	 * 18B, an extra two bytes are padded and later removed.
++	 */
+ 	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ 		void *addr_inline;
+ 
+ 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
+ 			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ 		/* The hardware expects here offset to words (2 bytes) */
+-		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
+-			 i * 2);
++		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
+ 
+ 		/* Copy bytes one by one to avoid endianness problem */
+ 		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
+ 					   hw_action, inline_data);
+-		memcpy(addr_inline, data_ptr, inline_data_sz);
++		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ 		hw_action += DR_STE_ACTION_DOUBLE_SZ;
+-		data_ptr += inline_data_sz;
+ 		used_actions++;
+ 	}
+ 
+-	/* Remove 2 extra bytes */
++	/* Remove first 2 extra bytes */
+ 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
+ 		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+-	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
++	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
+ 	/* The hardware expects here size in words (2 bytes) */
+ 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
+ 	used_actions++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+index 612b0ac31db23..9737565cd8d43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
+ static inline bool
+ mlx5dr_is_supported(struct mlx5_core_dev *dev)
+ {
+-	return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+-	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+-		(MLX5_CAP_GEN(dev, steering_format_version) <=
+-		 MLX5_STEERING_FORMAT_CONNECTX_6DX));
++	return MLX5_CAP_GEN(dev, roce) &&
++	       (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
++		(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
++		 (MLX5_CAP_GEN(dev, steering_format_version) <=
++		  MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+ }
+ 
+ /* buddy functions & structure */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+index 01cc00ad8acf2..b6931bbe52d29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+@@ -424,6 +424,15 @@ err_modify_sq:
+ 	return err;
+ }
+ 
++static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
++{
++	int i;
++
++	for (i = 0; i < hp->num_channels; i++)
++		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
++				       MLX5_SQC_STATE_RST, 0, 0);
++}
++
+ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+ {
+ 	int i;
+@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+ 	for (i = 0; i < hp->num_channels; i++)
+ 		mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ 				       MLX5_RQC_STATE_RST, 0, 0);
+-
+ 	/* unset peer SQs */
+-	if (hp->peer_gone)
+-		return;
+-	for (i = 0; i < hp->num_channels; i++)
+-		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+-				       MLX5_SQC_STATE_RST, 0, 0);
++	if (!hp->peer_gone)
++		mlx5_hairpin_unpair_peer_sq(hp);
+ }
+ 
+ struct mlx5_hairpin *
+@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+ 	mlx5_hairpin_destroy_queues(hp);
+ 	kfree(hp);
+ }
++
++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
++{
++	int i;
++
++	mlx5_hairpin_unpair_peer_sq(hp);
++
++	/* destroy peer SQ */
++	for (i = 0; i < hp->num_channels; i++)
++		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
++
++	hp->peer_gone = true;
++}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index e05c5c0f3ae1d..7d21fbb9192f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ 	void *in;
+ 	int err;
+ 
+-	if (!vport)
+-		return -EINVAL;
+ 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ 		return -EACCES;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index bf85ce9835d7f..42e4437ac3c16 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -708,7 +708,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+ 							MLXSW_THERMAL_TRIP_MASK,
+ 							module_tz,
+ 							&mlxsw_thermal_module_ops,
+-							NULL, 0, 0);
++							NULL, 0,
++							module_tz->parent->polling_delay);
+ 	if (IS_ERR(module_tz->tzdev)) {
+ 		err = PTR_ERR(module_tz->tzdev);
+ 		return err;
+@@ -830,7 +831,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+ 						MLXSW_THERMAL_TRIP_MASK,
+ 						gearbox_tz,
+ 						&mlxsw_thermal_gearbox_ops,
+-						NULL, 0, 0);
++						NULL, 0,
++						gearbox_tz->parent->polling_delay);
+ 	if (IS_ERR(gearbox_tz->tzdev))
+ 		return PTR_ERR(gearbox_tz->tzdev);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index c4adc7f740d3e..769386971ac3b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -3863,7 +3863,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
+ #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS	25
+ #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1	5
+ #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2	11
+-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	5
++#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3	11
+ 
+ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+ 				       enum mlxsw_reg_qeec_hr hr, u8 index,
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 46e5c9136bacd..0c4c976548c85 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -378,6 +378,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+ 
+ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ {
++	unsigned int pause_ena;
+ 	int err, val;
+ 
+ 	/* Disable dequeuing from the egress queues */
+@@ -386,6 +387,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ 		       QSYS_PORT_MODE, port);
+ 
+ 	/* Disable flow control */
++	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
+ 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+ 
+ 	/* Disable priority flow control */
+@@ -421,6 +423,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
+ 	/* Clear flushing again. */
+ 	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+ 
++	/* Re-enable flow control */
++	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
++
+ 	return err;
+ }
+ EXPORT_SYMBOL(ocelot_port_flush);
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 7e6bac85495d3..344ea11434549 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1602,6 +1602,8 @@ err_out_free_netdev:
+ 	free_netdev(netdev);
+ 
+ err_out_free_res:
++	if (NX_IS_REVISION_P3(pdev->revision))
++		pci_disable_pcie_error_reporting(pdev);
+ 	pci_release_regions(pdev);
+ 
+ err_out_disable_pdev:
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+index 96b947fde646b..3beafc60747e6 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
+ 	kfree(ahw);
+ 
+ err_out_free_res:
++	pci_disable_pcie_error_reporting(pdev);
+ 	pci_release_regions(pdev);
+ 
+ err_out_disable_pdev:
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+index 41fbd2ceeede4..ab1e0fcccabb6 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
+ 			      struct rtnl_link_stats64 *s)
+ {
+ 	struct rmnet_priv *priv = netdev_priv(dev);
+-	struct rmnet_vnd_stats total_stats;
++	struct rmnet_vnd_stats total_stats = { };
+ 	struct rmnet_pcpu_stats *pcpu_ptr;
++	struct rmnet_vnd_stats snapshot;
+ 	unsigned int cpu, start;
+ 
+-	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+-
+ 	for_each_possible_cpu(cpu) {
+ 		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+ 
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+-			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+-			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+-			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+-			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
++			snapshot = pcpu_ptr->stats;	/* struct assignment */
+ 		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+ 
+-		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
++		total_stats.rx_pkts += snapshot.rx_pkts;
++		total_stats.rx_bytes += snapshot.rx_bytes;
++		total_stats.tx_pkts += snapshot.tx_pkts;
++		total_stats.tx_bytes += snapshot.tx_bytes;
++		total_stats.tx_drops += snapshot.tx_drops;
+ 	}
+ 
+ 	s->rx_packets = total_stats.rx_pkts;
+@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ 	}
+ 
+ 	return 0;
+-}
+\ No newline at end of file
++}
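
On the rmnet_get_stats64() hunk: values read between u64_stats_fetch_begin_irq() and u64_stats_fetch_retry_irq() may be torn if the loop retries, so they must be copied into a local snapshot and only folded into the totals once the read is known consistent — accumulating directly inside the loop re-adds the fields on every retry. A toy, single-threaded model of that discipline (the seqcount here is simulated, not the kernel's u64_stats_sync):

  #include <stdio.h>

  struct stats { unsigned long rx, tx; };
  struct pcpu { unsigned int seq; struct stats s; };

  static unsigned int fetch_begin(const struct pcpu *p) { return p->seq; }

  static int fetch_retry(const struct pcpu *p, unsigned int start)
  {
          return p->seq != start || (start & 1);  /* odd = writer active */
  }

  int main(void)
  {
          struct pcpu p = { .seq = 2, .s = { 10, 20 } };
          struct stats snapshot, total = { 0, 0 };
          unsigned int start;

          do {
                  start = fetch_begin(&p);
                  snapshot = p.s;          /* struct copy, as in the fix */
          } while (fetch_retry(&p, start));

          total.rx += snapshot.rx;         /* accumulate outside the loop */
          total.tx += snapshot.tx;
          printf("rx=%lu tx=%lu\n", total.rx, total.tx);
          return 0;
  }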
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+index b70d44ac09906..3c73453725f94 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+@@ -76,10 +76,10 @@ enum power_event {
+ #define LPI_CTRL_STATUS_TLPIEN	0x00000001	/* Transmit LPI Entry */
+ 
+ /* GMAC HW ADDR regs */
+-#define GMAC_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
+-				(reg * 8))
+-#define GMAC_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
+-				(reg * 8))
++#define GMAC_ADDR_HIGH(reg)	((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
++				 0x00000040 + (reg * 8))
++#define GMAC_ADDR_LOW(reg)	((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
++				 0x00000044 + (reg * 8))
+ #define GMAC_MAX_PERFECT_ADDRESSES	1
+ 
+ #define GMAC_PCS_BASE		0x000000c0	/* PCS register base */
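
The GMAC_ADDR_HIGH/LOW fix above is easiest to verify numerically: MAC address registers 16 and up live in a second bank at 0x800, so the index has to be rebased to (reg - 16) before scaling by 8; the old formula kept scaling by the raw index and landed 0x80 bytes too high. A quick standalone check of both formulas, lifted straight from the hunk:

  #include <stdio.h>

  #define OLD_HIGH(reg) (((reg) > 15 ? 0x00000800 : 0x00000040) + ((reg) * 8))
  #define NEW_HIGH(reg) ((reg) > 15 ? 0x00000800 + ((reg) - 16) * 8 : \
                                      0x00000040 + ((reg) * 8))

  int main(void)
  {
          for (int reg = 14; reg <= 17; reg++)
                  printf("reg %2d: old 0x%08x new 0x%08x\n",
                         reg, OLD_HIGH(reg), NEW_HIGH(reg));
          return 0;
  }

Registers 0-15 are unaffected; the two formulas diverge exactly at reg = 16.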
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 6dc9f10414e47..7e6bead6429c5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -622,6 +622,8 @@ error_pclk_get:
+ void stmmac_remove_config_dt(struct platform_device *pdev,
+ 			     struct plat_stmmacenet_data *plat)
+ {
++	clk_disable_unprepare(plat->stmmac_clk);
++	clk_disable_unprepare(plat->pclk);
+ 	of_node_put(plat->phy_node);
+ 	of_node_put(plat->mdio_node);
+ }
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 030185301014c..01bb36e7cff0a 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -849,7 +849,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		smp_mb();
+ 
+ 		/* Space might have just been freed - check again */
+-		if (temac_check_tx_bd_space(lp, num_frag))
++		if (temac_check_tx_bd_space(lp, num_frag + 1))
+ 			return NETDEV_TX_BUSY;
+ 
+ 		netif_wake_queue(ndev);
+@@ -876,7 +876,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 	cur_p->phys = cpu_to_be32(skb_dma_addr);
+-	ptr_to_txbd((void *)skb, cur_p);
+ 
+ 	for (ii = 0; ii < num_frag; ii++) {
+ 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
+@@ -915,6 +914,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	}
+ 	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
+ 
++	/* Mark last fragment with skb address, so it can be consumed
++	 * in temac_start_xmit_done()
++	 */
++	ptr_to_txbd((void *)skb, cur_p);
++
+ 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ 	lp->tx_bd_tail++;
+ 	if (lp->tx_bd_tail >= lp->tx_bd_num)
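
On the temac_start_xmit() hunks: a frame consumes num_frag + 1 buffer descriptors (one for the linear head plus one per page fragment), so the re-check after the memory barrier must also reserve num_frag + 1 slots, and the skb pointer is now stored in the last descriptor so temac_start_xmit_done() frees it only once the whole frame has completed. A toy ring model (hypothetical free_slots() helper, not the driver's) showing how the old re-check could wake the queue with one slot too few:

  #include <stdio.h>

  #define RING_SIZE 8

  /* free slots between tail (producer) and ci (consumer), one kept unused */
  static int free_slots(int tail, int ci)
  {
          return (ci - tail - 1 + RING_SIZE) % RING_SIZE;
  }

  int main(void)
  {
          int num_frag = 3;        /* skb with 3 page fragments */
          int tail = 4, ci = 0;    /* exactly 3 slots free */

          printf("check(num_frag)     passes: %d\n",
                 free_slots(tail, ci) >= num_frag);      /* 1: would overrun */
          printf("check(num_frag + 1) passes: %d\n",
                 free_slots(tail, ci) >= num_frag + 1);  /* 0: correctly busy */
          return 0;
  }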
+diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
+index 17be2bb2985cd..920e9f888cc35 100644
+--- a/drivers/net/hamradio/mkiss.c
++++ b/drivers/net/hamradio/mkiss.c
+@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
+ 	ax->tty = NULL;
+ 
+ 	unregister_netdev(ax->dev);
++	free_netdev(ax->dev);
+ }
+ 
+ /* Perform I/O control on an active ax25 channel. */
+diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
+index f59960876083f..8e7f8728998f1 100644
+--- a/drivers/net/mhi/net.c
++++ b/drivers/net/mhi/net.c
+@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
+ 	return 0;
+ }
+ 
+-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ 	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ 	const struct mhi_net_proto *proto = mhi_netdev->proto;
+diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
+index 0eeec80bec311..e4a5703666461 100644
+--- a/drivers/net/usb/cdc_eem.c
++++ b/drivers/net/usb/cdc_eem.c
+@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 	}
+ 
+ 	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
++	dev_kfree_skb_any(skb);
+ 	if (!skb2)
+ 		return NULL;
+ 
+-	dev_kfree_skb_any(skb);
+ 	skb = skb2;
+ 
+ done:
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 8acf301154282..dc3d84b43e4e8 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1902,7 +1902,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+ static const struct driver_info cdc_ncm_info = {
+ 	.description = "CDC NCM",
+ 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+-			| FLAG_LINK_INTR,
++			| FLAG_LINK_INTR | FLAG_ETHER,
+ 	.bind = cdc_ncm_bind,
+ 	.unbind = cdc_ncm_unbind,
+ 	.manage_power = usbnet_manage_power,
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 76ed79bb1e3f1..5281291711aff 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_wait_ready(dev, 0);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
+-		goto err;
++		goto free_pdata;
+ 	}
+ 
+ 	smsc75xx_init_mac_address(dev);
+@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	ret = smsc75xx_reset(dev);
+ 	if (ret < 0) {
+ 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
+-		goto err;
++		goto cancel_work;
+ 	}
+ 
+ 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
+@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ 	return 0;
+ 
+-err:
++cancel_work:
++	cancel_work_sync(&pdata->set_multicast);
++free_pdata:
+ 	kfree(pdata);
++	dev->data[0] = 0;
+ 	return ret;
+ }
+ 
+@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 		cancel_work_sync(&pdata->set_multicast);
+ 		netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ 		kfree(pdata);
+-		pdata = NULL;
+ 		dev->data[0] = 0;
+ 	}
+ }
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 503e2fd7ce518..28a6c4cfe9b8c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
+ 
+ 	dev->flags = IFF_MASTER | IFF_NOARP;
+ 
+-	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
+-	dev->mtu = 64 * 1024;
+-
+ 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
+ 	dev->operstate = IF_OPER_UP;
+ 	netdev_lockdep_set_classes(dev);
+@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
+ 	 * which breaks networking.
+ 	 */
+ 	dev->min_mtu = IPV6_MIN_MTU;
+-	dev->max_mtu = ETH_MAX_MTU;
++	dev->max_mtu = IP6_MAX_MTU;
++	dev->mtu = dev->max_mtu;
+ }
+ 
+ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 051b48bd7985d..e3f5e7ab76063 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+ 		udelay(PIO_RETRY_DELAY);
+ 	}
+ 
+-	dev_err(dev, "config read/write timed out\n");
++	dev_err(dev, "PIO read/write transfer time out\n");
+ 	return -ETIMEDOUT;
+ }
+ 
+@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+ 	return true;
+ }
+ 
++static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
++{
++	struct device *dev = &pcie->pdev->dev;
++
++	/*
++	 * Trying to start a new PIO transfer while the previous one has not
++	 * completed causes an External Abort on the CPU, which results in a
++	 * kernel panic:
++	 *
++	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
++	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
++	 *
++	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
++	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
++	 * concurrent calls. But because a PIO transfer may take about 1.5s
++	 * when the link is down or the card is disconnected,
++	 * advk_pcie_wait_pio() may time out before the transfer completes.
++	 *
++	 * Some versions of ARM Trusted Firmware handle this External Abort at
++	 * EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
++	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
++	 */
++	if (advk_readl(pcie, PIO_START)) {
++		dev_err(dev, "Previous PIO read/write transfer is still running\n");
++		return true;
++	}
++
++	return false;
++}
++
+ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 			     int where, int size, u32 *val)
+ {
+@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 		return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ 						 size, val);
+ 
+-	/* Start PIO */
+-	advk_writel(pcie, 0, PIO_START);
+-	advk_writel(pcie, 1, PIO_ISR);
++	if (advk_pcie_pio_is_running(pcie)) {
++		*val = 0xffffffff;
++		return PCIBIOS_SET_FAILED;
++	}
+ 
+ 	/* Program the control register */
+ 	reg = advk_readl(pcie, PIO_CTRL);
+@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 	/* Program the data strobe */
+ 	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+ 
+-	/* Start the transfer */
++	/* Clear PIO DONE ISR and start the transfer */
++	advk_writel(pcie, 1, PIO_ISR);
+ 	advk_writel(pcie, 1, PIO_START);
+ 
+ 	ret = advk_pcie_wait_pio(pcie);
+@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ 	if (where % size)
+ 		return PCIBIOS_SET_FAILED;
+ 
+-	/* Start PIO */
+-	advk_writel(pcie, 0, PIO_START);
+-	advk_writel(pcie, 1, PIO_ISR);
++	if (advk_pcie_pio_is_running(pcie))
++		return PCIBIOS_SET_FAILED;
+ 
+ 	/* Program the control register */
+ 	reg = advk_readl(pcie, PIO_CTRL);
+@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ 	/* Program the data strobe */
+ 	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+ 
+-	/* Start the transfer */
++	/* Clear PIO DONE ISR and start the transfer */
++	advk_writel(pcie, 1, PIO_ISR);
+ 	advk_writel(pcie, 1, PIO_START);
+ 
+ 	ret = advk_pcie_wait_pio(pcie);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 653660e3ba9ef..7bf76bca888da 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3558,6 +3558,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
+ 	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+ }
+ 
++/*
++ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
++ * prevented for the affected devices.
++ */
++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
++{
++	if ((dev->device & 0xffc0) == 0x2340)
++		quirk_no_bus_reset(dev);
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
++			 quirk_nvidia_no_bus_reset);
++
+ /*
+  * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+  * The device will throw a Link Down error on AER-capable systems and
+@@ -3578,6 +3590,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+  */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+ 
++/*
++ * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
++ * automatically disables LTSSM when Secondary Bus Reset is received and
++ * the device stops working.  Prevent bus reset for these devices.  With
++ * this change, the device can be assigned to VMs with VFIO, but it will
++ * leak state between VMs.  Reference
++ * https://e2e.ti.com/support/processors/f/791/t/954382
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
++
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+ 	/*
+@@ -3913,6 +3935,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
+ 	return 0;
+ }
+ 
++#define PCI_DEVICE_ID_HINIC_VF      0x375E
++#define HINIC_VF_FLR_TYPE           0x1000
++#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
++#define HINIC_VF_OP                 0xE80
++#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
++#define HINIC_OPERATION_TIMEOUT     15000	/* 15 seconds */
++
++/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
++static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
++{
++	unsigned long timeout;
++	void __iomem *bar;
++	u32 val;
++
++	if (probe)
++		return 0;
++
++	bar = pci_iomap(pdev, 0, 0);
++	if (!bar)
++		return -ENOTTY;
++
++	/* Get and check firmware capabilities */
++	val = ioread32be(bar + HINIC_VF_FLR_TYPE);
++	if (!(val & HINIC_VF_FLR_CAP_BIT)) {
++		pci_iounmap(pdev, bar);
++		return -ENOTTY;
++	}
++
++	/* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
++	val = ioread32be(bar + HINIC_VF_OP);
++	val = val | HINIC_VF_FLR_PROC_BIT;
++	iowrite32be(val, bar + HINIC_VF_OP);
++
++	pcie_flr(pdev);
++
++	/*
++	 * The device must recapture its Bus and Device Numbers after FLR
++	 * in order to generate Completions.  Issue a config write to let the
++	 * device capture this information.
++	 */
++	pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
++
++	/* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
++	timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
++	do {
++		val = ioread32be(bar + HINIC_VF_OP);
++		if (!(val & HINIC_VF_FLR_PROC_BIT))
++			goto reset_complete;
++		msleep(20);
++	} while (time_before(jiffies, timeout));
++
++	val = ioread32be(bar + HINIC_VF_OP);
++	if (!(val & HINIC_VF_FLR_PROC_BIT))
++		goto reset_complete;
++
++	pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
++
++reset_complete:
++	pci_iounmap(pdev, bar);
++
++	return 0;
++}
++
+ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+ 		 reset_intel_82599_sfp_virtfn },
+@@ -3924,6 +4009,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ 	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
+ 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ 		reset_chelsio_generic_dev },
++	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
++		reset_hinic_vf_dev },
+ 	{ 0 }
+ };
+ 
+@@ -4764,6 +4851,8 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
+ 	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
++	/* Broadcom multi-function device */
++	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ 	/* Amazon Annapurna Labs */
+ 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+@@ -5165,7 +5254,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
+ {
+ 	if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+-	    (pdev->device == 0x7340 && pdev->revision != 0xc5))
++	    (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
++	    (pdev->device == 0x7341 && pdev->revision != 0x00))
+ 		return;
+ 
+ 	if (pdev->device == 0x15d8) {
+@@ -5192,6 +5282,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+ /* AMD Navi14 dGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+ /* AMD Raven platform iGPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index cdbcc49f71152..731c483a04dea 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
+ 		break;
+ 	default:
+ 		dev_err(tphy->dev, "incompatible PHY type\n");
++		clk_disable_unprepare(instance->ref_clk);
++		clk_disable_unprepare(instance->da_ref_clk);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+index 1f4bca854add5..a9b511c7e8500 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
+ 	if (p->groups[group].enabled) {
+ 		dev_err(p->dev, "%s is already enabled\n",
+ 			p->groups[group].name);
+-		return -EBUSY;
++		return 0;
+ 	}
+ 
+ 	p->groups[group].enabled = 1;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 61f1c91c62de2..3390168ac0793 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8808,6 +8808,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
+ 	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
+ 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
++	TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),	/* X1 Carbon (9th gen) */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 03a246e60fd98..21c4c34c52d8d 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
+ 	spin_unlock_irqrestore(&queue->lock, flags);
+ }
+ 
+-s32 scaled_ppm_to_ppb(long ppm)
++long scaled_ppm_to_ppb(long ppm)
+ {
+ 	/*
+ 	 * The 'freq' field in the 'struct timex' is in parts per
+@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
+ 	s64 ppb = 1 + ppm;
+ 	ppb *= 125;
+ 	ppb >>= 13;
+-	return (s32) ppb;
++	return (long) ppb;
+ }
+ EXPORT_SYMBOL(scaled_ppm_to_ppb);
+ 
+@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
+ 		delta = ktime_to_ns(kt);
+ 		err = ops->adjtime(ops, delta);
+ 	} else if (tx->modes & ADJ_FREQUENCY) {
+-		s32 ppb = scaled_ppm_to_ppb(tx->freq);
++		long ppb = scaled_ppm_to_ppb(tx->freq);
+ 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
+ 			return -ERANGE;
+ 		if (ops->adjfine)
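
On the ptp_clock.c hunk: scaled ppm carries a 16-bit fractional part, so ppb = scaled_ppm * 1000 / 2^16, which the helper computes equivalently as (1 + ppm) * 125 >> 13 (125/8192 == 1000/65536); returning s32 truncated large adjustments, hence the widening to long. A standalone check of the arithmetic, mirroring the patched helper:

  #include <stdio.h>

  static long scaled_ppm_to_ppb(long ppm)
  {
          long long ppb = 1 + ppm;     /* as in the patched helper */
          ppb *= 125;
          ppb >>= 13;
          return (long) ppb;
  }

  int main(void)
  {
          /* 1 ppm in scaled-ppm units is 1 << 16 = 65536 */
          printf("1 ppm  -> %ld ppb\n", scaled_ppm_to_ppb(1L << 16));   /* ~1000 */
          printf("32 ppm -> %ld ppb\n", scaled_ppm_to_ppb(32L << 16));  /* ~32000 */
          return 0;
  }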
+diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
+index eb3fc1db4edc8..c4754f3cf2337 100644
+--- a/drivers/regulator/cros-ec-regulator.c
++++ b/drivers/regulator/cros-ec-regulator.c
+@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
+ 
+ 	drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
+ 	if (IS_ERR(drvdata->dev)) {
++		ret = PTR_ERR(drvdata->dev);
+ 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+-		return PTR_ERR(drvdata->dev);
++		return ret;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, drvdata);
+diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
+index 9edc34981ee0a..6b8be52c3772a 100644
+--- a/drivers/regulator/mt6315-regulator.c
++++ b/drivers/regulator/mt6315-regulator.c
+@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
+ 	REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
+ };
+ 
+-static unsigned int mt6315_map_mode(u32 mode)
++static unsigned int mt6315_map_mode(unsigned int mode)
+ {
+ 	switch (mode) {
+ 	case MT6315_BUCK_MODE_AUTO:
+diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
+index 2055a9cb13ba5..7a87788d3f092 100644
+--- a/drivers/regulator/rt4801-regulator.c
++++ b/drivers/regulator/rt4801-regulator.c
+@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
+ 	struct gpio_descs *gpios = priv->enable_gpios;
+ 	int id = rdev_get_id(rdev), ret;
+ 
+-	if (gpios->ndescs <= id) {
++	if (!gpios || gpios->ndescs <= id) {
+ 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
+ 		goto bypass_gpio;
+ 	}
+@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
+ 	struct gpio_descs *gpios = priv->enable_gpios;
+ 	int id = rdev_get_id(rdev);
+ 
+-	if (gpios->ndescs <= id) {
++	if (!gpios || gpios->ndescs <= id) {
+ 		dev_warn(&rdev->dev, "no dedicated gpio can control\n");
+ 		goto bypass_gpio;
+ 	}
+diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
+index 5adc552dffd58..4bca64de0f672 100644
+--- a/drivers/regulator/rtmv20-regulator.c
++++ b/drivers/regulator/rtmv20-regulator.c
+@@ -27,6 +27,7 @@
+ #define RTMV20_REG_LDIRQ	0x30
+ #define RTMV20_REG_LDSTAT	0x40
+ #define RTMV20_REG_LDMASK	0x50
++#define RTMV20_MAX_REGS		(RTMV20_REG_LDMASK + 1)
+ 
+ #define RTMV20_VID_MASK		GENMASK(7, 4)
+ #define RICHTEK_VID		0x80
+@@ -313,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
+ 	.val_bits = 8,
+ 	.cache_type = REGCACHE_RBTREE,
+ 	.max_register = RTMV20_REG_LDMASK,
++	.num_reg_defaults_raw = RTMV20_MAX_REGS,
+ 
+ 	.writeable_reg = rtmv20_is_accessible_reg,
+ 	.readable_reg = rtmv20_is_accessible_reg,
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index ecefc25eff0c0..337353c9655ed 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+ {
+ 	struct ap_queue_status status;
+ 	struct ap_message *ap_msg;
++	bool found = false;
+ 
+ 	status = ap_dqap(aq->qid, &aq->reply->psmid,
+ 			 aq->reply->msg, aq->reply->len);
+ 	switch (status.response_code) {
+ 	case AP_RESPONSE_NORMAL:
+-		aq->queue_count--;
++		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+ 		if (aq->queue_count > 0)
+ 			mod_timer(&aq->timeout,
+ 				  jiffies + aq->request_timeout);
+@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+ 			list_del_init(&ap_msg->list);
+ 			aq->pendingq_count--;
+ 			ap_msg->receive(aq, ap_msg, aq->reply);
++			found = true;
+ 			break;
+ 		}
++		if (!found) {
++			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
++				    __func__, aq->reply->psmid,
++				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
++		}
+ 		fallthrough;
+ 	case AP_RESPONSE_NO_PENDING_REPLY:
+ 		if (!status.queue_empty || aq->queue_count <= 0)
+@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
+ 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+ 	switch (status.response_code) {
+ 	case AP_RESPONSE_NORMAL:
+-		aq->queue_count++;
++		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
+ 		if (aq->queue_count == 1)
+ 			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+ 		list_move_tail(&ap_msg->list, &aq->pendingq);
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 2786470a52011..4f24f63922126 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -293,7 +293,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
+ 	int err = 0;
+ 
+ 	if (!op->data.nbytes)
+-		return stm32_qspi_wait_nobusy(qspi);
++		goto wait_nobusy;
+ 
+ 	if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ 		goto out;
+@@ -314,6 +314,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
+ out:
+ 	/* clear flags */
+ 	writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
++wait_nobusy:
++	if (!err)
++		err = stm32_qspi_wait_nobusy(qspi);
+ 
+ 	return err;
+ }
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index 2765289028fae..68193db8b2e3c 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
+ 	xqspi->irq = platform_get_irq(pdev, 0);
+ 	if (xqspi->irq <= 0) {
+ 		ret = -ENXIO;
+-		goto remove_master;
++		goto clk_dis_all;
+ 	}
+ 	ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
+ 			       0, pdev->name, xqspi);
+ 	if (ret != 0) {
+ 		ret = -ENXIO;
+ 		dev_err(&pdev->dev, "request_irq failed\n");
+-		goto remove_master;
++		goto clk_dis_all;
+ 	}
+ 
+ 	ret = of_property_read_u32(np, "num-cs",
+@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		ctlr->num_chipselect = 1;
+ 	} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
++		ret = -EINVAL;
+ 		dev_err(&pdev->dev, "only 2 chip selects are available\n");
+-		goto remove_master;
++		goto clk_dis_all;
+ 	} else {
+ 		ctlr->num_chipselect = num_cs;
+ 	}
+diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+index f6a14e9c3cbfe..e10fe3058176d 100644
+--- a/drivers/staging/hikey9xx/hi6421v600-regulator.c
++++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
+@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
+ 			.owner		= THIS_MODULE,			       \
+ 			.volt_table	= vtable,			       \
+ 			.n_voltages	= ARRAY_SIZE(vtable),		       \
+-			.vsel_mask	= (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
++			.vsel_mask	= ARRAY_SIZE(vtable) - 1,	       \
+ 			.vsel_reg	= vreg,				       \
+ 			.enable_reg	= ereg,				       \
+ 			.enable_mask	= emask,			       \
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+index cbec65e5a4645..62ea47f9fee5e 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+@@ -2579,7 +2579,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
+ 	mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
+ 	mon_ndev->ieee80211_ptr = mon_wdev;
+ 
+-	ret = register_netdevice(mon_ndev);
++	ret = cfg80211_register_netdevice(mon_ndev);
+ 	if (ret) {
+ 		goto out;
+ 	}
+@@ -2661,7 +2661,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
+ 	adapter = rtw_netdev_priv(ndev);
+ 	pwdev_priv = adapter_wdev_data(adapter);
+ 
+-	unregister_netdevice(ndev);
++	cfg80211_unregister_netdevice(ndev);
+ 
+ 	if (ndev == pwdev_priv->pmon_ndev) {
+ 		pwdev_priv->pmon_ndev = NULL;
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index 4545b23bda3f1..bac0f5458cab9 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+ 	int val;
+ 	unsigned long flags;
+ 
++	/* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
++	spin_lock_irqsave(&usbmisc->lock, flags);
++	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++	val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
++	writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++	spin_unlock_irqrestore(&usbmisc->lock, flags);
++
++	/* TVDMSRC_DIS */
++	msleep(20);
++
+ 	/* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
+ 	spin_lock_irqsave(&usbmisc->lock, flags);
+ 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+ 				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ 	spin_unlock_irqrestore(&usbmisc->lock, flags);
+ 
+-	usleep_range(1000, 2000);
++	/* TVDMSRC_ON */
++	msleep(40);
+ 
+ 	/*
+ 	 * Per BC 1.2, check voltage of D+:
+@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
+ 				usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ 	spin_unlock_irqrestore(&usbmisc->lock, flags);
+ 
+-	usleep_range(1000, 2000);
++	/* TVDPSRC_ON */
++	msleep(40);
+ 
+ 	/* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
+ 	val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 13fe37fbbd2c8..6ebb8bd92e9df 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -40,6 +40,8 @@
+ #define USB_VENDOR_GENESYS_LOGIC		0x05e3
+ #define USB_VENDOR_SMSC				0x0424
+ #define USB_PRODUCT_USB5534B			0x5534
++#define USB_VENDOR_CYPRESS			0x04b4
++#define USB_PRODUCT_CY7C65632			0x6570
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
+ #define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
+ 
+@@ -5644,6 +5646,11 @@ static const struct usb_device_id hub_id_table[] = {
+       .idProduct = USB_PRODUCT_USB5534B,
+       .bInterfaceClass = USB_CLASS_HUB,
+       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++                   | USB_DEVICE_ID_MATCH_PRODUCT,
++      .idVendor = USB_VENDOR_CYPRESS,
++      .idProduct = USB_PRODUCT_CY7C65632,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ 			| USB_DEVICE_ID_MATCH_INT_CLASS,
+       .idVendor = USB_VENDOR_GENESYS_LOGIC,
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 126f0e10b3ef4..0022039bc2355 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1657,8 +1657,8 @@ static int dwc3_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+ 
+-	dwc3_debugfs_exit(dwc);
+ 	dwc3_core_exit_mode(dwc);
++	dwc3_debugfs_exit(dwc);
+ 
+ 	dwc3_core_exit(dwc);
+ 	dwc3_ulpi_exit(dwc);
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index 8ab3949423604..74d9c2c38193d 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+ 
+ 
+ #ifdef CONFIG_DEBUG_FS
++extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
+ extern void dwc3_debugfs_init(struct dwc3 *d);
+ extern void dwc3_debugfs_exit(struct dwc3 *d);
+ #else
++static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
++{  }
+ static inline void dwc3_debugfs_init(struct dwc3 *d)
+ {  }
+ static inline void dwc3_debugfs_exit(struct dwc3 *d)
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index 5da4f6082d930..3ebe3e6c284d2 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -890,30 +890,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
+ 	}
+ }
+ 
+-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
+-		struct dentry *parent)
++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {
+ 	struct dentry		*dir;
+ 
+-	dir = debugfs_create_dir(dep->name, parent);
++	dir = debugfs_create_dir(dep->name, dep->dwc->root);
+ 	dwc3_debugfs_create_endpoint_files(dep, dir);
+ }
+ 
+-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
+-		struct dentry *parent)
+-{
+-	int			i;
+-
+-	for (i = 0; i < dwc->num_eps; i++) {
+-		struct dwc3_ep	*dep = dwc->eps[i];
+-
+-		if (!dep)
+-			continue;
+-
+-		dwc3_debugfs_create_endpoint_dir(dep, parent);
+-	}
+-}
+-
+ void dwc3_debugfs_init(struct dwc3 *dwc)
+ {
+ 	struct dentry		*root;
+@@ -944,7 +928,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 				&dwc3_testmode_fops);
+ 		debugfs_create_file("link_state", 0644, root, dwc,
+ 				    &dwc3_link_state_fops);
+-		dwc3_debugfs_create_endpoint_dirs(dwc, root);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 1f9454e0d447b..755ab6fc0791f 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2719,6 +2719,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
+ 	INIT_LIST_HEAD(&dep->started_list);
+ 	INIT_LIST_HEAD(&dep->cancelled_list);
+ 
++	dwc3_debugfs_create_endpoint_dir(dep);
++
+ 	return 0;
+ }
+ 
+@@ -2762,6 +2764,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+ 			list_del(&dep->endpoint.ep_list);
+ 		}
+ 
++		debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
+ 		kfree(dep);
+ 	}
+ }
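A note on the dwc3 hunks: endpoint debugfs directories are now created per
endpoint from dwc3_gadget_init_endpoint() and removed per endpoint from
dwc3_gadget_free_endpoints(), which is presumably why dwc3_remove() must run
dwc3_core_exit_mode() before dwc3_debugfs_exit(): the per-endpoint
debugfs_lookup() needs dwc->root to still exist. A rough sketch of the
resulting lifecycle, with names taken from the hunks above:

    dwc3_debugfs_init(dwc);             /* creates dwc->root only */
    dwc3_gadget_init_endpoint(dwc, n);  /* -> dwc3_debugfs_create_endpoint_dir(dep) */
        ...
    dwc3_core_exit_mode(dwc);           /* gadget exit removes each endpoint dir via
                                         * debugfs_remove_recursive(debugfs_lookup(...)) */
    dwc3_debugfs_exit(dwc);             /* remove the debugfs root last */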
+diff --git a/fs/afs/main.c b/fs/afs/main.c
+index b2975256dadbd..179004b15566d 100644
+--- a/fs/afs/main.c
++++ b/fs/afs/main.c
+@@ -203,8 +203,8 @@ static int __init afs_init(void)
+ 		goto error_fs;
+ 
+ 	afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
+-	if (IS_ERR(afs_proc_symlink)) {
+-		ret = PTR_ERR(afs_proc_symlink);
++	if (!afs_proc_symlink) {
++		ret = -ENOMEM;
+ 		goto error_proc;
+ 	}
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a7d9e147dee62..595fd083c4ad1 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2347,16 +2347,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&cache->lock);
+ 	if (!--cache->ro) {
+-		num_bytes = cache->length - cache->reserved -
+-			    cache->pinned - cache->bytes_super -
+-			    cache->zone_unusable - cache->used;
+-		sinfo->bytes_readonly -= num_bytes;
+ 		if (btrfs_is_zoned(cache->fs_info)) {
+ 			/* Migrate zone_unusable bytes back */
+ 			cache->zone_unusable = cache->alloc_offset - cache->used;
+ 			sinfo->bytes_zone_unusable += cache->zone_unusable;
+ 			sinfo->bytes_readonly -= cache->zone_unusable;
+ 		}
++		num_bytes = cache->length - cache->reserved -
++			    cache->pinned - cache->bytes_super -
++			    cache->zone_unusable - cache->used;
++		sinfo->bytes_readonly -= num_bytes;
+ 		list_del_init(&cache->ro_list);
+ 	}
+ 	spin_unlock(&cache->lock);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index c63d0a7f7ba4f..527c972b562dd 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -738,6 +738,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+ 		__SetPageUptodate(page);
+ 		error = huge_add_to_page_cache(page, mapping, index);
+ 		if (unlikely(error)) {
++			restore_reserve_on_error(h, &pseudo_vma, addr, page);
+ 			put_page(page);
+ 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ 			goto out;
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9e0c1afac8bdf..c175523b0a2c1 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -378,7 +378,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 					info_type, fanotify_info_name(info),
+ 					info->name_len, buf, count);
+ 		if (ret < 0)
+-			return ret;
++			goto out_close_fd;
+ 
+ 		buf += ret;
+ 		count -= ret;
+@@ -426,7 +426,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ 					fanotify_event_object_fh(event),
+ 					info_type, dot, dot_len, buf, count);
+ 		if (ret < 0)
+-			return ret;
++			goto out_close_fd;
+ 
+ 		buf += ret;
+ 		count -= ret;
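Both fanotify hunks replace a bare "return ret" with "goto out_close_fd": by
this point copy_event_to_user() has already reserved a descriptor and file
for the event, and returning directly leaked them. The label itself sits
outside this hunk; its shape is roughly the following (illustrative, from the
surrounding function, not part of the diff):

    out_close_fd:
        if (fd != FAN_NOFD) {
            put_unused_fd(fd);  /* release the reserved descriptor */
            fput(f);            /* drop the event file reference */
        }
        return ret;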
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index cccd1aab69dd1..5dae4187210d9 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -145,6 +145,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
+ 						long freed);
+ bool isolate_huge_page(struct page *page, struct list_head *list);
++int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+ void putback_active_hugepage(struct page *page);
+ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
+ void free_huge_page(struct page *page);
+@@ -330,6 +331,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+ 	return false;
+ }
+ 
++static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
++{
++	return 0;
++}
++
+ static inline void putback_active_hugepage(struct page *page)
+ {
+ }
+@@ -591,6 +597,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+ 				unsigned long address);
+ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ 			pgoff_t idx);
++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
++				unsigned long address, struct page *page);
+ 
+ /* arch callback */
+ int __init __alloc_bootmem_huge_page(struct hstate *h);
+diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
+index a57af878fd0cd..4a5966475a35a 100644
+--- a/include/linux/mfd/rohm-bd70528.h
++++ b/include/linux/mfd/rohm-bd70528.h
+@@ -26,9 +26,7 @@ struct bd70528_data {
+ 	struct mutex rtc_timer_lock;
+ };
+ 
+-#define BD70528_BUCK_VOLTS 17
+-#define BD70528_BUCK_VOLTS 17
+-#define BD70528_BUCK_VOLTS 17
++#define BD70528_BUCK_VOLTS 0x10
+ #define BD70528_LDO_VOLTS 0x20
+ 
+ #define BD70528_REG_BUCK1_EN	0x0F
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 133967c40214b..6a31bbba1b6f1 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -541,6 +541,10 @@ struct mlx5_core_roce {
+ enum {
+ 	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ 	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
++	/* Set during device detach to block any further device
++	 * creation/deletion on driver rescan. Unset during device attach.
++	 */
++	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ };
+ 
+ struct mlx5_adev {
+diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
+index 028f442530cf5..60ffeb6b67ae7 100644
+--- a/include/linux/mlx5/transobj.h
++++ b/include/linux/mlx5/transobj.h
+@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ 			 struct mlx5_hairpin_params *params);
+ 
+ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
+ #endif /* __TRANSOBJ_H__ */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5aacc1c10a45a..8f0fb62e8975c 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -445,13 +445,6 @@ struct mm_struct {
+ 		 */
+ 		atomic_t has_pinned;
+ 
+-		/**
+-		 * @write_protect_seq: Locked when any thread is write
+-		 * protecting pages mapped by this mm to enforce a later COW,
+-		 * for instance during page table copying for fork().
+-		 */
+-		seqcount_t write_protect_seq;
+-
+ #ifdef CONFIG_MMU
+ 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
+ #endif
+@@ -460,6 +453,18 @@ struct mm_struct {
+ 		spinlock_t page_table_lock; /* Protects page tables and some
+ 					     * counters
+ 					     */
++		/*
++		 * With some kernel configs, the current offset of mmap_lock
++		 * inside 'mm_struct' is 0x120, which is close to optimal: its
++		 * two hot fields, 'count' and 'owner', sit in two different
++		 * cachelines, and when mmap_lock is highly contended both
++		 * fields are accessed frequently, so the current layout helps
++		 * to reduce cache bouncing.
++		 *
++		 * So please be careful when adding new fields before
++		 * mmap_lock, which can easily push the two fields into one
++		 * cacheline.
++		 */
+ 		struct rw_semaphore mmap_lock;
+ 
+ 		struct list_head mmlist; /* List of maybe swapped mm's.	These
+@@ -480,7 +485,15 @@ struct mm_struct {
+ 		unsigned long stack_vm;	   /* VM_STACK */
+ 		unsigned long def_flags;
+ 
++		/**
++		 * @write_protect_seq: Locked when any thread is write
++		 * protecting pages mapped by this mm to enforce a later COW,
++		 * for instance during page table copying for fork().
++		 */
++		seqcount_t write_protect_seq;
++
+ 		spinlock_t arg_lock; /* protect the below fields */
++
+ 		unsigned long start_code, end_code, start_data, end_data;
+ 		unsigned long start_brk, brk, start_stack;
+ 		unsigned long arg_start, arg_end, env_start, env_end;
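The mm_struct hunk is pure field reordering, guided by the cacheline comment
it adds: write_protect_seq moves below mmap_lock so the lock's hot words stay
split across cachelines. On a real build this can be audited with something
like "pahole -C mm_struct vmlinux"; the toy user-space program below (none of
these structs are the kernel's) shows how inserting one small field ahead of
a lock can pull its two hot words into the same 64-byte line:

    #include <stdio.h>
    #include <stddef.h>

    struct lock_like { long count; long owner; };  /* two hot words */

    struct layout_a { char pad[120]; struct lock_like l; };
    struct layout_b { char pad[120]; int seq; struct lock_like l; };

    int main(void)
    {
        /* 64-byte cachelines assumed */
        printf("A: count in line %zu, owner in line %zu\n",
               offsetof(struct layout_a, l.count) / 64,
               offsetof(struct layout_a, l.owner) / 64);
        printf("B: count in line %zu, owner in line %zu\n",
               offsetof(struct layout_b, l.count) / 64,
               offsetof(struct layout_b, l.owner) / 64);
        return 0;
    }

With layout_a the two words land in different lines (offsets 120 and 128);
the extra int in layout_b pushes the pair to offsets 128 and 136, sharing
one line.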
+diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
+index 0d47fd33b2285..51d7f1b8b32aa 100644
+--- a/include/linux/ptp_clock_kernel.h
++++ b/include/linux/ptp_clock_kernel.h
+@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
+  * @ppm:    Parts per million, but with a 16 bit binary fractional field
+  */
+ 
+-extern s32 scaled_ppm_to_ppb(long ppm);
++extern long scaled_ppm_to_ppb(long ppm);
+ 
+ /**
+  * ptp_find_pin() - obtain the pin index of a given auxiliary function
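scaled_ppm_to_ppb() converts the timex-style frequency offset (ppm with a
16-bit binary fractional field, as the @ppm kerneldoc above says) into parts
per billion: ppb = scaled_ppm * 1000 / 2^16, i.e. scaled_ppm * 125 / 2^13.
The math is done in 64 bits internally, so an s32 return type could truncate
the result; widening the prototype to long avoids that. A standalone sketch
of the arithmetic (this mirrors the conversion, not the exact kernel function
body; a 64-bit long is assumed for the second call):

    #include <stdio.h>

    /* ppb = scaled_ppm * 1000 / 2^16, computed as * 125 >> 13 */
    static long scaled_ppm_to_ppb(long scaled_ppm)
    {
        long long ppb = scaled_ppm;

        ppb *= 125;
        ppb >>= 13;
        return (long)ppb;
    }

    int main(void)
    {
        printf("%ld\n", scaled_ppm_to_ppb(1L << 16));  /* 1 ppm -> 1000 ppb */
        printf("%ld\n", scaled_ppm_to_ppb(1L << 45));  /* result overflows an s32 */
        return 0;
    }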
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 385894b4a8bba..42222a84167f3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
+ 			    int __user *usockvec);
+ extern int __sys_shutdown_sock(struct socket *sock, int how);
+ extern int __sys_shutdown(int fd, int how);
+-
+-extern struct ns_common *get_net_ns(struct ns_common *ns);
+ #endif /* _LINUX_SOCKET_H */
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index d9b7c9132c2f6..6430a94c69818 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -23,6 +23,16 @@
+ #define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
+ #define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
+ 
++/* Clear all flags, keeping only the swp_entry_t related information */
++static inline pte_t pte_swp_clear_flags(pte_t pte)
++{
++	if (pte_swp_soft_dirty(pte))
++		pte = pte_swp_clear_soft_dirty(pte);
++	if (pte_swp_uffd_wp(pte))
++		pte = pte_swp_clear_uffd_wp(pte);
++	return pte;
++}
++
+ /*
+  * Store a type+offset into a swp_entry_t in an arch-independent format
+  */
+@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
+ {
+ 	swp_entry_t arch_entry;
+ 
+-	if (pte_swp_soft_dirty(pte))
+-		pte = pte_swp_clear_soft_dirty(pte);
+-	if (pte_swp_uffd_wp(pte))
+-		pte = pte_swp_clear_uffd_wp(pte);
++	pte = pte_swp_clear_flags(pte);
+ 	arch_entry = __pte_to_swp_entry(pte);
+ 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+ }
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 2d1d629e5d14b..a5ca18cfdb6fb 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6388,7 +6388,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
+ 
+ /**
+  * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
+- *				 of injected frames
++ *				 of injected frames.
++ *
++ * To accurately parse and take into account rate and retransmission fields,
++ * you must initialize the chandef field in the ieee80211_tx_info structure
++ * of the skb before calling this function.
++ *
+  * @skb: packet injected by userspace
+  * @dev: the &struct device of this 802.11 device
+  */
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index dcaee24a4d877..14b6f7f445322 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -197,6 +197,8 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+ void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+ 
+ void net_ns_barrier(void);
++
++struct ns_common *get_net_ns(struct ns_common *ns);
+ #else /* CONFIG_NET_NS */
+ #include <linux/sched.h>
+ #include <linux/nsproxy.h>
+@@ -216,6 +218,11 @@ static inline void net_ns_get_ownership(const struct net *net,
+ }
+ 
+ static inline void net_ns_barrier(void) {}
++
++static inline struct ns_common *get_net_ns(struct ns_common *ns)
++{
++	return ERR_PTR(-EINVAL);
++}
+ #endif /* CONFIG_NET_NS */
+ 
+ 
+diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
+index 7d6687618d808..d1b327036ae43 100644
+--- a/include/uapi/linux/in.h
++++ b/include/uapi/linux/in.h
+@@ -289,6 +289,9 @@ struct sockaddr_in {
+ /* Address indicating an error return. */
+ #define	INADDR_NONE		((unsigned long int) 0xffffffff)
+ 
++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
++#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
++
+ /* Network number for local host loopback. */
+ #define	IN_LOOPBACKNET		127
+ 
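For reference, INADDR_DUMMY expands to 192.0.0.8, the dummy IPv4 address that
RFC 7600 sets aside, kept in host byte order like the neighbouring INADDR_*
constants. A quick standalone check:

    #include <stdio.h>

    #define INADDR_DUMMY ((unsigned long int) 0xc0000008)

    int main(void)
    {
        unsigned long a = INADDR_DUMMY;

        printf("%lu.%lu.%lu.%lu\n",  /* prints 192.0.0.8 */
               (a >> 24) & 0xff, (a >> 16) & 0xff,
               (a >> 8) & 0xff, a & 0xff);
        return 0;
    }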
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9e600767803b5..2423b4e918b90 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5937,6 +5937,27 @@ struct bpf_sanitize_info {
+ 	bool mask_to_left;
+ };
+ 
++static struct bpf_verifier_state *
++sanitize_speculative_path(struct bpf_verifier_env *env,
++			  const struct bpf_insn *insn,
++			  u32 next_idx, u32 curr_idx)
++{
++	struct bpf_verifier_state *branch;
++	struct bpf_reg_state *regs;
++
++	branch = push_stack(env, next_idx, curr_idx, true);
++	if (branch && insn) {
++		regs = branch->frame[branch->curframe]->regs;
++		if (BPF_SRC(insn->code) == BPF_K) {
++			mark_reg_unknown(env, regs, insn->dst_reg);
++		} else if (BPF_SRC(insn->code) == BPF_X) {
++			mark_reg_unknown(env, regs, insn->dst_reg);
++			mark_reg_unknown(env, regs, insn->src_reg);
++		}
++	}
++	return branch;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 			    struct bpf_insn *insn,
+ 			    const struct bpf_reg_state *ptr_reg,
+@@ -6020,12 +6041,26 @@ do_sim:
+ 		tmp = *dst_reg;
+ 		*dst_reg = *ptr_reg;
+ 	}
+-	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
++	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
++					env->insn_idx);
+ 	if (!ptr_is_dst_reg && ret)
+ 		*dst_reg = tmp;
+ 	return !ret ? REASON_STACK : 0;
+ }
+ 
++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
++{
++	struct bpf_verifier_state *vstate = env->cur_state;
++
++	/* If we simulate paths under speculation, we don't update the
++	 * insn as 'seen' such that when we verify unreachable paths in
++	 * the non-speculative domain, sanitize_dead_code() can still
++	 * rewrite/sanitize them.
++	 */
++	if (!vstate->speculative)
++		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++}
++
+ static int sanitize_err(struct bpf_verifier_env *env,
+ 			const struct bpf_insn *insn, int reason,
+ 			const struct bpf_reg_state *off_reg,
+@@ -8204,14 +8239,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 		if (err)
+ 			return err;
+ 	}
++
+ 	if (pred == 1) {
+-		/* only follow the goto, ignore fall-through */
++		/* Only follow the goto, ignore fall-through. If needed, push
++		 * the fall-through branch for simulation under speculative
++		 * execution.
++		 */
++		if (!env->bypass_spec_v1 &&
++		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
++					       *insn_idx))
++			return -EFAULT;
+ 		*insn_idx += insn->off;
+ 		return 0;
+ 	} else if (pred == 0) {
+-		/* only follow fall-through branch, since
+-		 * that's where the program will go
++		/* Only follow the fall-through branch, since that's where the
++		 * program will go. If needed, push the goto branch for
++		 * simulation under speculative execution.
+ 		 */
++		if (!env->bypass_spec_v1 &&
++		    !sanitize_speculative_path(env, insn,
++					       *insn_idx + insn->off + 1,
++					       *insn_idx))
++			return -EFAULT;
+ 		return 0;
+ 	}
+ 
+@@ -10060,7 +10109,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 		}
+ 
+ 		regs = cur_regs(env);
+-		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++		sanitize_mark_insn_seen(env);
+ 		prev_insn_idx = env->insn_idx;
+ 
+ 		if (class == BPF_ALU || class == BPF_ALU64) {
+@@ -10285,7 +10334,7 @@ process_bpf_exit:
+ 					return err;
+ 
+ 				env->insn_idx++;
+-				env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++				sanitize_mark_insn_seen(env);
+ 			} else {
+ 				verbose(env, "invalid BPF_LD mode\n");
+ 				return -EINVAL;
+@@ -10784,6 +10833,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ 	struct bpf_insn *insn = new_prog->insnsi;
++	u32 old_seen = old_data[off].seen;
+ 	u32 prog_len;
+ 	int i;
+ 
+@@ -10804,7 +10854,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ 	memcpy(new_data + off + cnt - 1, old_data + off,
+ 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ 	for (i = off; i < off + cnt - 1; i++) {
+-		new_data[i].seen = env->pass_cnt;
++		/* Expand insnsi[off]'s seen count to the patched range. */
++		new_data[i].seen = old_seen;
+ 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
+ 	}
+ 	env->insn_aux_data = new_data;
+@@ -12060,6 +12111,9 @@ static void free_states(struct bpf_verifier_env *env)
+  * insn_aux_data was touched. These variables are compared to clear temporary
+  * data from failed pass. For testing and experiments do_check_common() can be
+  * run multiple times even when prior attempt to verify is unsuccessful.
++ *
++ * Note that special handling is needed on !env->bypass_spec_v1 if this is
++ * ever called outside of the error path with subsequent program rejection.
+  */
+ static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
+ {
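The verifier hunks above are Spectre v1 hardening: even when a conditional
jump is statically decided (pred == 0 or pred == 1), the architecturally dead
arm is now pushed as a speculative verifier state via
sanitize_speculative_path(), with the compared registers marked unknown, and
sanitize_mark_insn_seen() keeps such speculative-only instructions eligible
for sanitize_dead_code(). A hand-written fragment of the kind of code this
covers (illustrative only, not from the patch):

    /* Architecturally idx is proven in range here, so the verifier
     * would previously skip the other arm entirely ... */
    if (idx >= ARRAY_LEN)
        return 0;
    /* ... but a mispredicting CPU can still reach this load with an
     * out-of-bounds idx; simulating that path with idx unknown forces
     * the usual pointer sanitation to run on it as well. */
    val = array[idx];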
+diff --git a/kernel/crash_core.c b/kernel/crash_core.c
+index 825284baaf466..684a6061a13a4 100644
+--- a/kernel/crash_core.c
++++ b/kernel/crash_core.c
+@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
+ 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ 	VMCOREINFO_STRUCT_SIZE(mem_section);
+ 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
++	VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
+ 	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
+ #endif
+ 	VMCOREINFO_STRUCT_SIZE(page);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 487312a5ceabb..47fcc3fe9dc5a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3760,11 +3760,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+  */
+ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
++	/*
++	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
++	 * See ___update_load_avg() for details.
++	 */
++	u32 divider = get_pelt_divider(&cfs_rq->avg);
++
+ 	dequeue_load_avg(cfs_rq, se);
+ 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
++	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+ 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
+-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
++	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+ 
+ 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
+ 
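The scheduler fix leans on the PELT invariant that a signal's sum and average
are tied together by the divider returned from get_pelt_divider(), roughly

    util_avg = util_sum / divider

Subtracting the detached entity's util_sum and runnable_sum directly could
leave the cfs_rq sums inconsistent with the freshly clamped averages, so the
sums are instead re-derived from the averages, e.g.:

    cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;

which keeps the pair consistent rather than letting the sum drift below what
the average implies.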
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f2d4ee80feb34..7c8151d74faf0 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
+ };
+ static struct saved_cmdlines_buffer *savedcmd;
+ 
+-/* temporary disable recording */
+-static atomic_t trace_record_taskinfo_disabled __read_mostly;
+-
+ static inline char *get_saved_cmdlines(int idx)
+ {
+ 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
+@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
+ {
+ 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
+ 		return true;
+-	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
+-		return true;
+ 	if (!__this_cpu_read(trace_taskinfo_save))
+ 		return true;
+ 	return false;
+@@ -3742,9 +3737,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ 		return ERR_PTR(-EBUSY);
+ #endif
+ 
+-	if (!iter->snapshot)
+-		atomic_inc(&trace_record_taskinfo_disabled);
+-
+ 	if (*pos != iter->pos) {
+ 		iter->ent = NULL;
+ 		iter->cpu = 0;
+@@ -3787,9 +3779,6 @@ static void s_stop(struct seq_file *m, void *p)
+ 		return;
+ #endif
+ 
+-	if (!iter->snapshot)
+-		atomic_dec(&trace_record_taskinfo_disabled);
+-
+ 	trace_access_unlock(iter->cpu_file);
+ 	trace_event_read_unlock();
+ }
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index c1637f90c8a38..4702efb00ff21 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
+ 	prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ 	now = sched_clock_cpu(this_cpu);
+ 
+-	/* Make sure that now is always greater than prev_time */
++	/* Make sure that now is always greater than or equal to prev_time */
+ 	if ((s64)(now - prev_time) < 0)
+-		now = prev_time + 1;
++		now = prev_time;
+ 
+ 	/*
+ 	 * If in an NMI context then dont risk lockups and simply return
+@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
+ 		/* Reread prev_time in case it was already updated */
+ 		prev_time = READ_ONCE(trace_clock_struct.prev_time);
+ 		if ((s64)(now - prev_time) < 0)
+-			now = prev_time + 1;
++			now = prev_time;
+ 
+ 		trace_clock_struct.prev_time = now;
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ce63ec0187c55..3da4817190f3d 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2127,12 +2127,18 @@ out:
+  * be restored when a newly allocated huge page must be freed.  It is
+  * to be called after calling vma_needs_reservation to determine if a
+  * reservation exists.
++ *
++ * vma_del_reservation is used in error paths where an entry in the reserve
++ * map was created during huge page allocation and must be removed.  It is to
++ * be called after calling vma_needs_reservation to determine if a reservation
++ * exists.
+  */
+ enum vma_resv_mode {
+ 	VMA_NEEDS_RESV,
+ 	VMA_COMMIT_RESV,
+ 	VMA_END_RESV,
+ 	VMA_ADD_RESV,
++	VMA_DEL_RESV,
+ };
+ static long __vma_reservation_common(struct hstate *h,
+ 				struct vm_area_struct *vma, unsigned long addr,
+@@ -2176,11 +2182,21 @@ static long __vma_reservation_common(struct hstate *h,
+ 			ret = region_del(resv, idx, idx + 1);
+ 		}
+ 		break;
++	case VMA_DEL_RESV:
++		if (vma->vm_flags & VM_MAYSHARE) {
++			region_abort(resv, idx, idx + 1, 1);
++			ret = region_del(resv, idx, idx + 1);
++		} else {
++			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
++			/* region_add calls of range 1 should never fail. */
++			VM_BUG_ON(ret < 0);
++		}
++		break;
+ 	default:
+ 		BUG();
+ 	}
+ 
+-	if (vma->vm_flags & VM_MAYSHARE)
++	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
+ 		return ret;
+ 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+ 		/*
+@@ -2229,25 +2245,39 @@ static long vma_add_reservation(struct hstate *h,
+ 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+ }
+ 
++static long vma_del_reservation(struct hstate *h,
++			struct vm_area_struct *vma, unsigned long addr)
++{
++	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
++}
++
+ /*
+- * This routine is called to restore a reservation on error paths.  In the
+- * specific error paths, a huge page was allocated (via alloc_huge_page)
+- * and is about to be freed.  If a reservation for the page existed,
+- * alloc_huge_page would have consumed the reservation and set
+- * HPageRestoreReserve in the newly allocated page.  When the page is freed
+- * via free_huge_page, the global reservation count will be incremented if
+- * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+- * reserve map.  Adjust the reserve map here to be consistent with global
+- * reserve count adjustments to be made by free_huge_page.
++ * This routine is called to restore reservation information on error paths.
++ * It should ONLY be called for pages allocated via alloc_huge_page(), and
++ * the hugetlb mutex should remain held when calling this routine.
++ *
++ * It handles two specific cases:
++ * 1) A reservation was in place and the page consumed the reservation.
++ *    HPageRestoreReserve is set in the page.
++ * 2) No reservation was in place for the page, so HPageRestoreReserve is
++ *    not set.  However, alloc_huge_page always updates the reserve map.
++ *
++ * In case 1, free_huge_page later in the error path will increment the
++ * global reserve count.  But, free_huge_page does not have enough context
++ * to adjust the reservation map.  This case deals primarily with private
++ * mappings.  Adjust the reserve map here to be consistent with global
++ * reserve count adjustments to be made by free_huge_page.  Make sure the
++ * reserve map indicates there is a reservation present.
++ *
++ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
+  */
+-static void restore_reserve_on_error(struct hstate *h,
+-			struct vm_area_struct *vma, unsigned long address,
+-			struct page *page)
++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
++			unsigned long address, struct page *page)
+ {
+-	if (unlikely(HPageRestoreReserve(page))) {
+-		long rc = vma_needs_reservation(h, vma, address);
++	long rc = vma_needs_reservation(h, vma, address);
+ 
+-		if (unlikely(rc < 0)) {
++	if (HPageRestoreReserve(page)) {
++		if (unlikely(rc < 0))
+ 			/*
+ 			 * Rare out of memory condition in reserve map
+ 			 * manipulation.  Clear HPageRestoreReserve so that
+@@ -2260,16 +2290,57 @@ static void restore_reserve_on_error(struct hstate *h,
+ 			 * accounting of reserve counts.
+ 			 */
+ 			ClearHPageRestoreReserve(page);
+-		} else if (rc) {
+-			rc = vma_add_reservation(h, vma, address);
+-			if (unlikely(rc < 0))
++		else if (rc)
++			(void)vma_add_reservation(h, vma, address);
++		else
++			vma_end_reservation(h, vma, address);
++	} else {
++		if (!rc) {
++			/*
++			 * This indicates there is an entry in the reserve map
++			 * added by alloc_huge_page.  We know it was added
++			 * before the alloc_huge_page call, otherwise
++			 * HPageRestoreReserve would be set on the page.
++			 * Remove the entry so that a subsequent allocation
++			 * does not consume a reservation.
++			 */
++			rc = vma_del_reservation(h, vma, address);
++			if (rc < 0)
+ 				/*
+-				 * See above comment about rare out of
+-				 * memory condition.
++				 * VERY rare out of memory condition.  Since
++				 * we can not delete the entry, set
++				 * HPageRestoreReserve so that the reserve
++				 * count will be incremented when the page
++				 * is freed.  This reserve will be consumed
++				 * on a subsequent allocation.
+ 				 */
+-				ClearHPageRestoreReserve(page);
++				SetHPageRestoreReserve(page);
++		} else if (rc < 0) {
++			/*
++			 * Rare out of memory condition from
++			 * vma_needs_reservation call.  Memory allocation is
++			 * only attempted if a new entry is needed.  Therefore,
++			 * this implies there is not an entry in the
++			 * reserve map.
++			 *
++			 * For shared mappings, no entry in the map indicates
++			 * no reservation.  We are done.
++			 */
++			if (!(vma->vm_flags & VM_MAYSHARE))
++				/*
++				 * For private mappings, no entry indicates
++				 * a reservation is present.  Since we can
++				 * not add an entry, set HPageRestoreReserve
++				 * on the page so the reserve count will be
++				 * incremented when it is freed.  This reserve will
++				 * be consumed on a subsequent allocation.
++				 */
++				SetHPageRestoreReserve(page);
+ 		} else
+-			vma_end_reservation(h, vma, address);
++			/*
++			 * No reservation present, do nothing
++			 */
++			vma_end_reservation(h, vma, address);
+ 	}
+ }
+ 
+@@ -3886,6 +3957,8 @@ again:
+ 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ 				entry = huge_ptep_get(src_pte);
+ 				if (!pte_same(src_pte_old, entry)) {
++					restore_reserve_on_error(h, vma, addr,
++								new);
+ 					put_page(new);
+ 					/* dst_entry won't change as in child */
+ 					goto again;
+@@ -4820,6 +4893,7 @@ out_release_unlock:
+ 	if (vm_shared)
+ 		unlock_page(page);
+ out_release_nounlock:
++	restore_reserve_on_error(h, dst_vma, dst_addr, page);
+ 	put_page(page);
+ 	goto out;
+ }
+@@ -5664,6 +5738,21 @@ unlock:
+ 	return ret;
+ }
+ 
++int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
++{
++	int ret = 0;
++
++	*hugetlb = false;
++	spin_lock_irq(&hugetlb_lock);
++	if (PageHeadHuge(page)) {
++		*hugetlb = true;
++		if (HPageFreed(page) || HPageMigratable(page))
++			ret = get_page_unless_zero(page);
++	}
++	spin_unlock_irq(&hugetlb_lock);
++	return ret;
++}
++
+ void putback_active_hugepage(struct page *page)
+ {
+ 	spin_lock(&hugetlb_lock);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index bd3945446d47e..704d05057d8c3 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
+ 	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
+ }
+ 
++/*
++ * Return true if the page type of a given page is supported by the
++ * hwpoison mechanism (though handling could still fail), otherwise false.
++ * This function does not return true for hugetlb or device memory pages,
++ * so it is assumed to be called only where such pages never appear.
++ */
++static inline bool HWPoisonHandlable(struct page *page)
++{
++	return PageLRU(page) || __PageMovable(page);
++}
++
+ /**
+  * __get_hwpoison_page() - Get refcount for memory error handling:
+  * @page:	raw error page (hit by memory error)
+@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
+ static int __get_hwpoison_page(struct page *page)
+ {
+ 	struct page *head = compound_head(page);
++	int ret = 0;
++	bool hugetlb = false;
++
++	ret = get_hwpoison_huge_page(head, &hugetlb);
++	if (hugetlb)
++		return ret;
+ 
+-	if (!PageHuge(head) && PageTransHuge(head)) {
++	/*
++	 * This check prevents calling get_page_unless_zero() for any
++	 * unsupported type of page, in order to reduce the risk of
++	 * unexpected races caused by taking a page refcount.
++	 */
++	if (!HWPoisonHandlable(head))
++		return 0;
++
++	if (PageTransHuge(head)) {
+ 		/*
+ 		 * Non anonymous thp exists only in allocation/free time. We
+ 		 * can't handle such a case correctly, so let's give it up.
+@@ -1017,7 +1042,7 @@ try_again:
+ 			ret = -EIO;
+ 		}
+ 	} else {
+-		if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
++		if (PageHuge(p) || HWPoisonHandlable(p)) {
+ 			ret = 1;
+ 		} else {
+ 			/*
+@@ -1527,7 +1552,12 @@ try_again:
+ 		return 0;
+ 	}
+ 
+-	if (!PageTransTail(p) && !PageLRU(p))
++	/*
++	 * __munlock_pagevec may clear a writeback page's LRU flag without
++	 * page_lock. We need to wait for writeback completion for this page
++	 * or it may trigger a vfs BUG while evicting the inode.
++	 */
++	if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
+ 		goto identify_page_state;
+ 
+ 	/*
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 88e833986332e..ba2f4b01920fd 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -89,8 +89,7 @@ EXPORT_SYMBOL(kmem_cache_size);
+ #ifdef CONFIG_DEBUG_VM
+ static int kmem_cache_sanity_check(const char *name, unsigned int size)
+ {
+-	if (!name || in_interrupt() || size < sizeof(void *) ||
+-		size > KMALLOC_MAX_SIZE) {
++	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
+ 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+ 		return -EINVAL;
+ 	}
+diff --git a/mm/slub.c b/mm/slub.c
+index 3021ce9bf1b3d..602f9712ab53d 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -15,6 +15,7 @@
+ #include <linux/module.h>
+ #include <linux/bit_spinlock.h>
+ #include <linux/interrupt.h>
++#include <linux/swab.h>
+ #include <linux/bitops.h>
+ #include <linux/slab.h>
+ #include "slab.h"
+@@ -710,15 +711,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ 	       p, p - addr, get_freepointer(s, p));
+ 
+ 	if (s->flags & SLAB_RED_ZONE)
+-		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
++		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
+ 			      s->red_left_pad);
+ 	else if (p > addr + 16)
+ 		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
+ 
+-	print_section(KERN_ERR, "Object ", p,
++	print_section(KERN_ERR,         "Object   ", p,
+ 		      min_t(unsigned int, s->object_size, PAGE_SIZE));
+ 	if (s->flags & SLAB_RED_ZONE)
+-		print_section(KERN_ERR, "Redzone ", p + s->object_size,
++		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
+ 			s->inuse - s->object_size);
+ 
+ 	off = get_info_end(s);
+@@ -730,7 +731,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
+ 
+ 	if (off != size_from_object(s))
+ 		/* Beginning of the filler is the free pointer */
+-		print_section(KERN_ERR, "Padding ", p + off,
++		print_section(KERN_ERR, "Padding  ", p + off,
+ 			      size_from_object(s) - off);
+ 
+ 	dump_stack();
+@@ -907,11 +908,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
+ 	u8 *endobject = object + s->object_size;
+ 
+ 	if (s->flags & SLAB_RED_ZONE) {
+-		if (!check_bytes_and_report(s, page, object, "Redzone",
++		if (!check_bytes_and_report(s, page, object, "Left Redzone",
+ 			object - s->red_left_pad, val, s->red_left_pad))
+ 			return 0;
+ 
+-		if (!check_bytes_and_report(s, page, object, "Redzone",
++		if (!check_bytes_and_report(s, page, object, "Right Redzone",
+ 			endobject, val, s->inuse - s->object_size))
+ 			return 0;
+ 	} else {
+@@ -926,7 +927,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
+ 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
+ 			(!check_bytes_and_report(s, page, p, "Poison", p,
+ 					POISON_FREE, s->object_size - 1) ||
+-			 !check_bytes_and_report(s, page, p, "Poison",
++			 !check_bytes_and_report(s, page, p, "End Poison",
+ 				p + s->object_size - 1, POISON_END, 1)))
+ 			return 0;
+ 		/*
+@@ -3687,7 +3688,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ {
+ 	slab_flags_t flags = s->flags;
+ 	unsigned int size = s->object_size;
+-	unsigned int freepointer_area;
+ 	unsigned int order;
+ 
+ 	/*
+@@ -3696,13 +3696,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 	 * the possible location of the free pointer.
+ 	 */
+ 	size = ALIGN(size, sizeof(void *));
+-	/*
+-	 * This is the area of the object where a freepointer can be
+-	 * safely written. If redzoning adds more to the inuse size, we
+-	 * can't use that portion for writing the freepointer, so
+-	 * s->offset must be limited within this for the general case.
+-	 */
+-	freepointer_area = size;
+ 
+ #ifdef CONFIG_SLUB_DEBUG
+ 	/*
+@@ -3728,19 +3721,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 
+ 	/*
+ 	 * With that we have determined the number of bytes in actual use
+-	 * by the object. This is the potential offset to the free pointer.
++	 * by the object and redzoning.
+ 	 */
+ 	s->inuse = size;
+ 
+-	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+-		s->ctor)) {
++	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
++	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
++	    s->ctor) {
+ 		/*
+ 		 * Relocate free pointer after the object if it is not
+ 		 * permitted to overwrite the first word of the object on
+ 		 * kmem_cache_free.
+ 		 *
+ 		 * This is the case if we do RCU, have a constructor or
+-		 * destructor or are poisoning the objects.
++		 * destructor, are poisoning the objects, or are
++		 * redzoning an object smaller than sizeof(void *).
+ 		 *
+ 		 * The assumption that s->offset >= s->inuse means free
+ 		 * pointer is outside of the object is used in the
+@@ -3749,13 +3744,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 		 */
+ 		s->offset = size;
+ 		size += sizeof(void *);
+-	} else if (freepointer_area > sizeof(void *)) {
++	} else {
+ 		/*
+ 		 * Store freelist pointer near middle of object to keep
+ 		 * it away from the edges of the object to avoid small
+ 		 * sized over/underflows from neighboring allocations.
+ 		 */
+-		s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
++		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
+ 	}
+ 
+ #ifdef CONFIG_SLUB_DEBUG
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 084a5b9a18e5c..2097648df212d 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
+ 
+ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
+ {
+-	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
++	return pte_same(pte_swp_clear_flags(pte), swp_pte);
+ }
+ 
+ /*
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index a5e313cd6f447..b9dd150f6f01d 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
+ 	if (WARN_ON(!forw_packet->if_outgoing))
+ 		return;
+ 
+-	if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
++	if (forw_packet->if_outgoing->soft_iface != soft_iface) {
++		pr_warn("%s: soft interface switch for queued OGM\n", __func__);
+ 		return;
++	}
+ 
+ 	if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
+ 		return;
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index af3430c2d6ea8..660dec6785ad9 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
+ #endif
+ 
+ struct br_tunnel_info {
+-	__be64			tunnel_id;
+-	struct metadata_dst	*tunnel_dst;
++	__be64				tunnel_id;
++	struct metadata_dst __rcu	*tunnel_dst;
+ };
+ 
+ /* private vlan flags */
+diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
+index 169e005fbda29..debe167202782 100644
+--- a/net/bridge/br_vlan_tunnel.c
++++ b/net/bridge/br_vlan_tunnel.c
+@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
+ 				      br_vlan_tunnel_rht_params);
+ }
+ 
++static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
++{
++	struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
++
++	WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
++	RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
++	dst_release(&tdst->dst);
++}
++
+ void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
+ 			  struct net_bridge_vlan *vlan)
+ {
+-	if (!vlan->tinfo.tunnel_dst)
++	if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
+ 		return;
+ 	rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
+ 			       br_vlan_tunnel_rht_params);
+-	vlan->tinfo.tunnel_id = 0;
+-	dst_release(&vlan->tinfo.tunnel_dst->dst);
+-	vlan->tinfo.tunnel_dst = NULL;
++	vlan_tunnel_info_release(vlan);
+ }
+ 
+ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 				  struct net_bridge_vlan *vlan, u32 tun_id)
+ {
+-	struct metadata_dst *metadata = NULL;
++	struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
+ 	__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
+ 	int err;
+ 
+-	if (vlan->tinfo.tunnel_dst)
++	if (metadata)
+ 		return -EEXIST;
+ 
+ 	metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
+@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 		return -EINVAL;
+ 
+ 	metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
+-	vlan->tinfo.tunnel_dst = metadata;
+-	vlan->tinfo.tunnel_id = key;
++	rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
++	WRITE_ONCE(vlan->tinfo.tunnel_id, key);
+ 
+ 	err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
+ 					    br_vlan_tunnel_rht_params);
+@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+ 
+ 	return 0;
+ out:
+-	dst_release(&vlan->tinfo.tunnel_dst->dst);
+-	vlan->tinfo.tunnel_dst = NULL;
+-	vlan->tinfo.tunnel_id = 0;
++	vlan_tunnel_info_release(vlan);
+ 
+ 	return err;
+ }
+@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 				 struct net_bridge_vlan *vlan)
+ {
++	struct metadata_dst *tunnel_dst;
++	__be64 tunnel_id;
+ 	int err;
+ 
+-	if (!vlan || !vlan->tinfo.tunnel_id)
++	if (!vlan)
+ 		return 0;
+ 
+-	if (unlikely(!skb_vlan_tag_present(skb)))
++	tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
++	if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
+ 		return 0;
+ 
+ 	skb_dst_drop(skb);
+@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 	if (err)
+ 		return err;
+ 
+-	skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
++	tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
++	if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
++		skb_dst_set(skb, &tunnel_dst->dst);
+ 
+ 	return 0;
+ }
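The bridge changes convert tinfo.tunnel_dst into a proper __rcu pointer:
writers (under RTNL) publish with rcu_assign_pointer(), the egress fast path
reads with rcu_dereference(), and dst_hold_safe() refuses to resurrect a dst
whose refcount has already dropped to zero, closing the race with
vlan_tunnel_info_release(). Distilled from the hunks above, the shape is:

    /* writer, under RTNL */
    rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
    WRITE_ONCE(vlan->tinfo.tunnel_id, key);

    /* reader, on the packet path */
    tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
    if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
        skb_dst_set(skb, &tunnel_dst->dst);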
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 909b9e684e043..f3e4d9528fa38 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -125,7 +125,7 @@ struct bcm_sock {
+ 	struct sock sk;
+ 	int bound;
+ 	int ifindex;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	struct list_head rx_ops;
+ 	struct list_head tx_ops;
+ 	unsigned long dropped_usr_msgs;
+@@ -133,6 +133,10 @@ struct bcm_sock {
+ 	char procname [32]; /* inode number in decimal with \0 */
+ };
+ 
++static LIST_HEAD(bcm_notifier_list);
++static DEFINE_SPINLOCK(bcm_notifier_lock);
++static struct bcm_sock *bcm_busy_notifier;
++
+ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
+ {
+ 	return (struct bcm_sock *)sk;
+@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+ 		if (!op->count && (op->flags & TX_COUNTEVT)) {
+ 
+ 			/* create notification to user */
++			memset(&msg_head, 0, sizeof(msg_head));
+ 			msg_head.opcode  = TX_EXPIRED;
+ 			msg_head.flags   = op->flags;
+ 			msg_head.count   = op->count;
+@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
+ 	/* this element is not throttled anymore */
+ 	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ 
++	memset(&head, 0, sizeof(head));
+ 	head.opcode  = RX_CHANGED;
+ 	head.flags   = op->flags;
+ 	head.count   = op->count;
+@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+ 	}
+ 
+ 	/* create notification to user */
++	memset(&msg_head, 0, sizeof(msg_head));
+ 	msg_head.opcode  = RX_TIMEOUT;
+ 	msg_head.flags   = op->flags;
+ 	msg_head.count   = op->count;
+@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ /*
+  * notification handler for netdevice status changes
+  */
+-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+-			void *ptr)
++static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
++		       struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
+ 	struct sock *sk = &bo->sk;
+ 	struct bcm_op *op;
+ 	int notify_enodev = 0;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 
+@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+ 				sk->sk_error_report(sk);
+ 		}
+ 	}
++}
+ 
++static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
++			void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
++
++	spin_lock(&bcm_notifier_lock);
++	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
++		spin_unlock(&bcm_notifier_lock);
++		bcm_notify(bcm_busy_notifier, msg, dev);
++		spin_lock(&bcm_notifier_lock);
++	}
++	bcm_busy_notifier = NULL;
++	spin_unlock(&bcm_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
+ 	INIT_LIST_HEAD(&bo->rx_ops);
+ 
+ 	/* set notifier */
+-	bo->notifier.notifier_call = bcm_notifier;
+-
+-	register_netdevice_notifier(&bo->notifier);
++	spin_lock(&bcm_notifier_lock);
++	list_add_tail(&bo->notifier, &bcm_notifier_list);
++	spin_unlock(&bcm_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
+ 
+ 	/* remove bcm_ops, timer, rx_unregister(), etc. */
+ 
+-	unregister_netdevice_notifier(&bo->notifier);
++	spin_lock(&bcm_notifier_lock);
++	while (bcm_busy_notifier == bo) {
++		spin_unlock(&bcm_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&bcm_notifier_lock);
++	}
++	list_del(&bo->notifier);
++	spin_unlock(&bcm_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
+ 	.exit = canbcm_pernet_exit,
+ };
+ 
++static struct notifier_block canbcm_notifier = {
++	.notifier_call = bcm_notifier
++};
++
+ static int __init bcm_module_init(void)
+ {
+ 	int err;
+@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
+ 	}
+ 
+ 	register_pernet_subsys(&canbcm_pernet_ops);
++	register_netdevice_notifier(&canbcm_notifier);
+ 	return 0;
+ }
+ 
+ static void __exit bcm_module_exit(void)
+ {
+ 	can_proto_unregister(&bcm_can_proto);
++	unregister_netdevice_notifier(&canbcm_notifier);
+ 	unregister_pernet_subsys(&canbcm_pernet_ops);
+ }
+ 
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 253b24417c8e5..be6183f8ca110 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -143,10 +143,14 @@ struct isotp_sock {
+ 	u32 force_tx_stmin;
+ 	u32 force_rx_stmin;
+ 	struct tpcon rx, tx;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	wait_queue_head_t wait;
+ };
+ 
++static LIST_HEAD(isotp_notifier_list);
++static DEFINE_SPINLOCK(isotp_notifier_lock);
++static struct isotp_sock *isotp_busy_notifier;
++
+ static inline struct isotp_sock *isotp_sk(const struct sock *sk)
+ {
+ 	return (struct isotp_sock *)sk;
+@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
+ 	/* wait for complete transmission of current pdu */
+ 	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ 
+-	unregister_netdevice_notifier(&so->notifier);
++	spin_lock(&isotp_notifier_lock);
++	while (isotp_busy_notifier == so) {
++		spin_unlock(&isotp_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&isotp_notifier_lock);
++	}
++	list_del(&so->notifier);
++	spin_unlock(&isotp_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
+ 	return 0;
+ }
+ 
+-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+-			  void *ptr)
++static void isotp_notify(struct isotp_sock *so, unsigned long msg,
++			 struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
+ 	struct sock *sk = &so->sk;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	if (so->ifindex != dev->ifindex)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 	case NETDEV_UNREGISTER:
+@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ 			sk->sk_error_report(sk);
+ 		break;
+ 	}
++}
+ 
++static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
++			  void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
++
++	spin_lock(&isotp_notifier_lock);
++	list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
++		spin_unlock(&isotp_notifier_lock);
++		isotp_notify(isotp_busy_notifier, msg, dev);
++		spin_lock(&isotp_notifier_lock);
++	}
++	isotp_busy_notifier = NULL;
++	spin_unlock(&isotp_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
+ 
+ 	init_waitqueue_head(&so->wait);
+ 
+-	so->notifier.notifier_call = isotp_notifier;
+-	register_netdevice_notifier(&so->notifier);
++	spin_lock(&isotp_notifier_lock);
++	list_add_tail(&so->notifier, &isotp_notifier_list);
++	spin_unlock(&isotp_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
+ 	.prot = &isotp_proto,
+ };
+ 
++static struct notifier_block canisotp_notifier = {
++	.notifier_call = isotp_notifier
++};
++
+ static __init int isotp_module_init(void)
+ {
+ 	int err;
+@@ -1451,6 +1483,8 @@ static __init int isotp_module_init(void)
+ 	err = can_proto_register(&isotp_can_proto);
+ 	if (err < 0)
+ 		pr_err("can: registration of isotp protocol failed\n");
++	else
++		register_netdevice_notifier(&canisotp_notifier);
+ 
+ 	return err;
+ }
+@@ -1458,6 +1492,7 @@ static __init int isotp_module_init(void)
+ static __exit void isotp_module_exit(void)
+ {
+ 	can_proto_unregister(&isotp_can_proto);
++	unregister_netdevice_notifier(&canisotp_notifier);
+ }
+ 
+ module_init(isotp_module_init);
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index e09d087ba2409..c3946c3558826 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
+ 
+ 	if ((do_skcb->offset + do_skb->len) < offset_start) {
+ 		__skb_unlink(do_skb, &session->skb_queue);
++		/* drop ref taken in j1939_session_skb_queue() */
++		skb_unref(do_skb);
++
+ 		kfree_skb(do_skb);
+ 	}
+ 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
+@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
+ 
+ 	skcb->flags |= J1939_ECU_LOCAL_SRC;
+ 
++	skb_get(skb);
+ 	skb_queue_tail(&session->skb_queue, skb);
+ }
+ 
+ static struct
+-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+-					  unsigned int offset_start)
++sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
++					 unsigned int offset_start)
+ {
+ 	struct j1939_priv *priv = session->priv;
+ 	struct j1939_sk_buff_cb *do_skcb;
+@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+ 			skb = do_skb;
+ 		}
+ 	}
++
++	if (skb)
++		skb_get(skb);
++
+ 	spin_unlock_irqrestore(&session->skb_queue.lock, flags);
+ 
+ 	if (!skb)
+@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+ 	return skb;
+ }
+ 
+-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
++static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
+ {
+ 	unsigned int offset_start;
+ 
+ 	offset_start = session->pkt.dpo * 7;
+-	return j1939_session_skb_find_by_offset(session, offset_start);
++	return j1939_session_skb_get_by_offset(session, offset_start);
+ }
+ 
+ /* see if we are receiver
+@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 	int ret = 0;
+ 	u8 dat[8];
+ 
+-	se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
++	se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
+ 	if (!se_skb)
+ 		return -ENOBUFS;
+ 
+@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 			netdev_err_once(priv->ndev,
+ 					"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+ 					__func__, session, skcb->offset, se_skb->len , session->pkt.tx);
+-			return -EOVERFLOW;
++			ret = -EOVERFLOW;
++			goto out_free;
+ 		}
+ 
+ 		if (!len) {
+@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
+ 	if (pkt_done)
+ 		j1939_tp_set_rxtimeout(session, 250);
+ 
++ out_free:
++	if (ret)
++		kfree_skb(se_skb);
++	else
++		consume_skb(se_skb);
++
+ 	return ret;
+ }
+ 
+@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
+ static int j1939_simple_txnext(struct j1939_session *session)
+ {
+ 	struct j1939_priv *priv = session->priv;
+-	struct sk_buff *se_skb = j1939_session_skb_find(session);
++	struct sk_buff *se_skb = j1939_session_skb_get(session);
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
+ 		return 0;
+ 
+ 	skb = skb_clone(se_skb, GFP_ATOMIC);
+-	if (!skb)
+-		return -ENOMEM;
++	if (!skb) {
++		ret = -ENOMEM;
++		goto out_free;
++	}
+ 
+ 	can_skb_set_owner(skb, se_skb->sk);
+ 
+@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
+ 
+ 	ret = j1939_send_one(priv, skb);
+ 	if (ret)
+-		return ret;
++		goto out_free;
+ 
+ 	j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
+ 	j1939_sk_queue_activate_next(session);
+ 
+-	return 0;
++ out_free:
++	if (ret)
++		kfree_skb(se_skb);
++	else
++		consume_skb(se_skb);
++
++	return ret;
+ }
+ 
+ static bool j1939_session_deactivate_locked(struct j1939_session *session)
+@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
+ 	struct sk_buff *skb;
+ 
+ 	if (!session->transmission) {
+-		skb = j1939_session_skb_find(session);
++		skb = j1939_session_skb_get(session);
+ 		/* distribute among j1939 receivers */
+ 		j1939_sk_recv(session->priv, skb);
++		consume_skb(skb);
+ 	}
+ 
+ 	j1939_session_deactivate_activate_next(session);
+@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ {
+ 	struct j1939_priv *priv = session->priv;
+ 	struct j1939_sk_buff_cb *skcb;
+-	struct sk_buff *se_skb;
++	struct sk_buff *se_skb = NULL;
+ 	const u8 *dat;
+ 	u8 *tpdat;
+ 	int offset;
+@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ 		goto out_session_cancel;
+ 	}
+ 
+-	se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
++	se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
+ 	if (!se_skb) {
+ 		netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
+ 			    session);
+@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ 		j1939_tp_set_rxtimeout(session, 250);
+ 	}
+ 	session->last_cmd = 0xff;
++	consume_skb(se_skb);
+ 	j1939_session_put(session);
+ 
+ 	return;
+ 
+  out_session_cancel:
++	kfree_skb(se_skb);
+ 	j1939_session_timers_cancel(session);
+ 	j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
+ 	j1939_session_put(session);
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 139d9471ddcf4..ac96fc2100253 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -83,7 +83,7 @@ struct raw_sock {
+ 	struct sock sk;
+ 	int bound;
+ 	int ifindex;
+-	struct notifier_block notifier;
++	struct list_head notifier;
+ 	int loopback;
+ 	int recv_own_msgs;
+ 	int fd_frames;
+@@ -95,6 +95,10 @@ struct raw_sock {
+ 	struct uniqframe __percpu *uniq;
+ };
+ 
++static LIST_HEAD(raw_notifier_list);
++static DEFINE_SPINLOCK(raw_notifier_lock);
++static struct raw_sock *raw_busy_notifier;
++
+ /* Return pointer to store the extra msg flags for raw_recvmsg().
+  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+  * in skb->cb.
+@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
+ 	return err;
+ }
+ 
+-static int raw_notifier(struct notifier_block *nb,
+-			unsigned long msg, void *ptr)
++static void raw_notify(struct raw_sock *ro, unsigned long msg,
++		       struct net_device *dev)
+ {
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
+ 	struct sock *sk = &ro->sk;
+ 
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+-		return NOTIFY_DONE;
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	if (ro->ifindex != dev->ifindex)
+-		return NOTIFY_DONE;
++		return;
+ 
+ 	switch (msg) {
+ 	case NETDEV_UNREGISTER:
+@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
+ 			sk->sk_error_report(sk);
+ 		break;
+ 	}
++}
++
++static int raw_notifier(struct notifier_block *nb, unsigned long msg,
++			void *ptr)
++{
++	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++
++	if (dev->type != ARPHRD_CAN)
++		return NOTIFY_DONE;
++	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
++		return NOTIFY_DONE;
++	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
++		return NOTIFY_DONE;
+ 
++	spin_lock(&raw_notifier_lock);
++	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
++		spin_unlock(&raw_notifier_lock);
++		raw_notify(raw_busy_notifier, msg, dev);
++		spin_lock(&raw_notifier_lock);
++	}
++	raw_busy_notifier = NULL;
++	spin_unlock(&raw_notifier_lock);
+ 	return NOTIFY_DONE;
+ }
+ 
+@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
+ 		return -ENOMEM;
+ 
+ 	/* set notifier */
+-	ro->notifier.notifier_call = raw_notifier;
+-
+-	register_netdevice_notifier(&ro->notifier);
++	spin_lock(&raw_notifier_lock);
++	list_add_tail(&ro->notifier, &raw_notifier_list);
++	spin_unlock(&raw_notifier_lock);
+ 
+ 	return 0;
+ }
+@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
+ 
+ 	ro = raw_sk(sk);
+ 
+-	unregister_netdevice_notifier(&ro->notifier);
++	spin_lock(&raw_notifier_lock);
++	while (raw_busy_notifier == ro) {
++		spin_unlock(&raw_notifier_lock);
++		schedule_timeout_uninterruptible(1);
++		spin_lock(&raw_notifier_lock);
++	}
++	list_del(&ro->notifier);
++	spin_unlock(&raw_notifier_lock);
+ 
+ 	lock_sock(sk);
+ 
+@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
+ 	.prot       = &raw_proto,
+ };
+ 
++static struct notifier_block canraw_notifier = {
++	.notifier_call = raw_notifier
++};
++
+ static __init int raw_module_init(void)
+ {
+ 	int err;
+@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
+ 	err = can_proto_register(&raw_can_proto);
+ 	if (err < 0)
+ 		pr_err("can: registration of raw protocol failed\n");
++	else
++		register_netdevice_notifier(&canraw_notifier);
+ 
+ 	return err;
+ }
+@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
+ static __exit void raw_module_exit(void)
+ {
+ 	can_proto_unregister(&raw_can_proto);
++	unregister_netdevice_notifier(&canraw_notifier);
+ }
+ 
+ module_init(raw_module_init);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 43b6ac4c44395..cc8dafb25d612 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -641,6 +641,18 @@ void __put_net(struct net *net)
+ }
+ EXPORT_SYMBOL_GPL(__put_net);
+ 
++/**
++ * get_net_ns - increment the refcount of the network namespace
++ * @ns: common namespace (net)
++ *
++ * Returns the net's common namespace.
++ */
++struct ns_common *get_net_ns(struct ns_common *ns)
++{
++	return &get_net(container_of(ns, struct net, ns))->ns;
++}
++EXPORT_SYMBOL_GPL(get_net_ns);
++
+ struct net *get_net_ns_by_fd(int fd)
+ {
+ 	struct file *file;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9ad046917b340..2123427883baa 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -4833,10 +4833,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	if (!skb->len) {
+-		err = -EINVAL;
++	/* Notification info is only filled for bridge ports, not the bridge
++	 * device itself. Therefore, a zero notification length is valid and
++	 * should not result in an error.
++	 */
++	if (!skb->len)
+ 		goto errout;
+-	}
+ 
+ 	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ 	return 0;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c421c8f809256..7997d99afbd8e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1252,6 +1252,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	struct sock *sk = skb->sk;
+ 	struct sk_buff_head *q;
+ 	unsigned long flags;
++	bool is_zerocopy;
+ 	u32 lo, hi;
+ 	u16 len;
+ 
+@@ -1266,6 +1267,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	len = uarg->len;
+ 	lo = uarg->id;
+ 	hi = uarg->id + len - 1;
++	is_zerocopy = uarg->zerocopy;
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 	memset(serr, 0, sizeof(*serr));
+@@ -1273,7 +1275,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+ 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
+ 	serr->ee.ee_data = hi;
+ 	serr->ee.ee_info = lo;
+-	if (!uarg->zerocopy)
++	if (!is_zerocopy)
+ 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
+ 
+ 	q = &sk->sk_error_queue;
+diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
+index c3a5489964cde..9908b922cce8d 100644
+--- a/net/ethtool/strset.c
++++ b/net/ethtool/strset.c
+@@ -328,6 +328,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
+ 	int len = 0;
+ 	int ret;
+ 
++	len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
++
+ 	for (i = 0; i < ETH_SS_COUNT; i++) {
+ 		const struct strset_info *set_info = &data->sets[i];
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index bfaf327e9d121..e0480c6cebaad 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
+ 		kfree(doi_def->map.std->lvl.local);
+ 		kfree(doi_def->map.std->cat.cipso);
+ 		kfree(doi_def->map.std->cat.local);
++		kfree(doi_def->map.std);
+ 		break;
+ 	}
+ 	kfree(doi_def);
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 616e2dc1c8fa4..cd65d3146c300 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 		icmp_param.data_len = room;
+ 	icmp_param.head_len = sizeof(struct icmphdr);
+ 
++	/* if we don't have a source address at this point, fall back to the
++	 * dummy address instead of sending out a packet with a source address
++	 * of 0.0.0.0
++	 */
++	if (!fl4.saddr)
++		fl4.saddr = htonl(INADDR_DUMMY);
++
+ 	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
+ ende:
+ 	ip_rt_put(rt);
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 7b272bbed2b43..6b3c558a4f232 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
+ 	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+ 		in_dev->mc_list = i->next_rcu;
+ 		in_dev->mc_count--;
++		ip_mc_clear_src(i);
+ 		ip_ma_put(i);
+ 	}
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d635b4f32d348..09506203156d1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2081,6 +2081,19 @@ martian_source:
+ 	return err;
+ }
+ 
++/* get device for dst_alloc with local routes */
++static struct net_device *ip_rt_get_dev(struct net *net,
++					const struct fib_result *res)
++{
++	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
++	struct net_device *dev = NULL;
++
++	if (nhc)
++		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
++
++	return dev ? : net->loopback_dev;
++}
++
+ /*
+  *	NOTE. We drop all the packets that has local source
+  *	addresses, because every properly looped back packet
+@@ -2237,7 +2250,7 @@ local_input:
+ 		}
+ 	}
+ 
+-	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
++	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
+ 			   flags | RTCF_LOCAL, res->type,
+ 			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+ 	if (!rth)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c586a6bb8c6d0..3dd340679d096 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2576,6 +2576,9 @@ void udp_destroy_sock(struct sock *sk)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+ 	bool slow = lock_sock_fast(sk);
++
++	/* protects from races with udp_abort() */
++	sock_set_flag(sk, SOCK_DEAD);
+ 	udp_flush_pending_frames(sk);
+ 	unlock_sock_fast(sk, slow);
+ 	if (static_branch_unlikely(&udp_encap_needed_key)) {
+@@ -2826,10 +2829,17 @@ int udp_abort(struct sock *sk, int err)
+ {
+ 	lock_sock(sk);
+ 
++	/* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
++	 * with close()
++	 */
++	if (sock_flag(sk, SOCK_DEAD))
++		goto out;
++
+ 	sk->sk_err = err;
+ 	sk->sk_error_report(sk);
+ 	__udp_disconnect(sk, 0);
+ 
++out:
+ 	release_sock(sk);
+ 
+ 	return 0;
+diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
+index e204163c7036c..92f3235fa2874 100644
+--- a/net/ipv6/netfilter/nft_fib_ipv6.c
++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
+@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
+ }
+ EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
+ 
++static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
++{
++	if (likely(next != IPPROTO_ICMPV6))
++		return false;
++
++	if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
++		return false;
++
++	return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
++}
++
+ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 		   const struct nft_pktinfo *pkt)
+ {
+@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ 
+ 	lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
+ 
+-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+-	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+-		nft_fib_store_result(dest, priv, nft_in(pkt));
+-		return;
++	if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
++	    nft_hook(pkt) == NF_INET_INGRESS) {
++		if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
++		    nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
++			nft_fib_store_result(dest, priv, nft_in(pkt));
++			return;
++		}
+ 	}
+ 
+ 	*dest = 0;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index d25e5a9252fdb..29288f134d7ac 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1597,6 +1597,9 @@ void udpv6_destroy_sock(struct sock *sk)
+ {
+ 	struct udp_sock *up = udp_sk(sk);
+ 	lock_sock(sk);
++
++	/* protects from races with udp_abort() */
++	sock_set_flag(sk, SOCK_DEAD);
+ 	udp_v6_flush_pending_frames(sk);
+ 	release_sock(sk);
+ 
+diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
+index 5296898875ffb..223fbcafd6fce 100644
+--- a/net/mac80211/debugfs.c
++++ b/net/mac80211/debugfs.c
+@@ -4,7 +4,7 @@
+  *
+  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright (C) 2018 - 2019 Intel Corporation
++ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
+  */
+ 
+ #include <linux/debugfs.h>
+@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
+ 			   size_t count, loff_t *ppos)
+ {
+ 	struct ieee80211_local *local = file->private_data;
++	int ret;
+ 
+ 	rtnl_lock();
++	wiphy_lock(local->hw.wiphy);
+ 	__ieee80211_suspend(&local->hw, NULL);
+-	__ieee80211_resume(&local->hw);
++	ret = __ieee80211_resume(&local->hw);
++	wiphy_unlock(local->hw.wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(local->hw.wiphy);
++
+ 	rtnl_unlock();
+ 
+ 	return count;
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 6f8885766cbaa..6ebfd484e61d2 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -475,14 +475,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 				   GFP_KERNEL);
+ 	}
+ 
+-	/* APs need special treatment */
+ 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+-		struct ieee80211_sub_if_data *vlan, *tmpsdata;
+-
+-		/* down all dependent devices, that is VLANs */
+-		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+-					 u.vlan.list)
+-			dev_close(vlan->dev);
+ 		WARN_ON(!list_empty(&sdata->u.ap.vlans));
+ 	} else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ 		/* remove all packets in parent bc_buf pointing to this dev */
+@@ -640,6 +633,15 @@ static int ieee80211_stop(struct net_device *dev)
+ {
+ 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ 
++	/* close all dependent VLAN interfaces before locking wiphy */
++	if (sdata->vif.type == NL80211_IFTYPE_AP) {
++		struct ieee80211_sub_if_data *vlan, *tmpsdata;
++
++		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
++					 u.vlan.list)
++			dev_close(vlan->dev);
++	}
++
+ 	wiphy_lock(sdata->local->hw.wiphy);
+ 	ieee80211_do_stop(sdata, true);
+ 	wiphy_unlock(sdata->local->hw.wiphy);
+@@ -1589,6 +1591,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
+ 
+ 	switch (sdata->vif.type) {
+ 	case NL80211_IFTYPE_AP:
++		if (!list_empty(&sdata->u.ap.vlans))
++			return -EBUSY;
++		break;
+ 	case NL80211_IFTYPE_STATION:
+ 	case NL80211_IFTYPE_ADHOC:
+ 	case NL80211_IFTYPE_OCB:
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 0331f3a3c40e0..9dd741b68f268 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, restart_work);
+ 	struct ieee80211_sub_if_data *sdata;
++	int ret;
+ 
+ 	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+@@ -294,8 +295,12 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	/* wait for all packet processing to be done */
+ 	synchronize_net();
+ 
+-	ieee80211_reconfig(local);
++	ret = ieee80211_reconfig(local);
+ 	wiphy_unlock(local->hw.wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(local->hw.wiphy);
++
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index ecad9b10984ff..e627a11844a9b 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -1516,7 +1516,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+ 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+ 		return;
+ 
+-	if (time_is_before_jiffies(mi->sample_time))
++	if (time_is_after_jiffies(mi->sample_time))
+ 		return;
+ 
+ 	mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index d4cc9ac2d7033..6b50cb5e0e3cc 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ 	struct ieee80211_bss *bss;
+ 	struct ieee80211_channel *channel;
++	size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
++				      u.probe_resp.variable);
++
++	if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
++	    !ieee80211_is_beacon(mgmt->frame_control) &&
++	    !ieee80211_is_s1g_beacon(mgmt->frame_control))
++		return;
+ 
+ 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+-		if (skb->len < 15)
+-			return;
+-	} else if (skb->len < 24 ||
+-		 (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+-		  !ieee80211_is_beacon(mgmt->frame_control)))
++		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
++			min_hdr_len = offsetof(struct ieee80211_ext,
++					       u.s1g_short_beacon.variable);
++		else
++			min_hdr_len = offsetof(struct ieee80211_ext,
++					       u.s1g_beacon);
++	}
++
++	if (skb->len < min_hdr_len)
+ 		return;
+ 
+ 	sdata1 = rcu_dereference(local->scan_sdata);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 28422d6870967..d33dc4e023715 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2002,6 +2002,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ 	ieee80211_tx(sdata, sta, skb, false);
+ }
+ 
++static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
++{
++	struct ieee80211_radiotap_header *rthdr =
++		(struct ieee80211_radiotap_header *)skb->data;
++
++	/* check for not even having the fixed radiotap header part */
++	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
++		return false; /* too short to be possibly valid */
++
++	/* is it a header version we can trust to find length from? */
++	if (unlikely(rthdr->it_version))
++		return false; /* only version 0 is supported */
++
++	/* does the skb contain enough to deliver on the alleged length? */
++	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
++		return false; /* skb too short for claimed rt header extent */
++
++	return true;
++}
++
+ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 				 struct net_device *dev)
+ {
+@@ -2010,8 +2030,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 	struct ieee80211_radiotap_header *rthdr =
+ 		(struct ieee80211_radiotap_header *) skb->data;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+-	struct ieee80211_supported_band *sband =
+-		local->hw.wiphy->bands[info->band];
+ 	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ 						   NULL);
+ 	u16 txflags;
+@@ -2024,17 +2042,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 	u8 vht_mcs = 0, vht_nss = 0;
+ 	int i;
+ 
+-	/* check for not even having the fixed radiotap header part */
+-	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+-		return false; /* too short to be possibly valid */
+-
+-	/* is it a header version we can trust to find length from? */
+-	if (unlikely(rthdr->it_version))
+-		return false; /* only version 0 is supported */
+-
+-	/* does the skb contain enough to deliver on the alleged length? */
+-	if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+-		return false; /* skb too short for claimed rt header extent */
++	if (!ieee80211_validate_radiotap_len(skb))
++		return false;
+ 
+ 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ 		       IEEE80211_TX_CTL_DONTFRAG;
+@@ -2174,6 +2183,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 		return false;
+ 
+ 	if (rate_found) {
++		struct ieee80211_supported_band *sband =
++			local->hw.wiphy->bands[info->band];
++
+ 		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
+ 
+ 		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+@@ -2187,7 +2199,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ 		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+ 			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
+ 					       vht_nss);
+-		} else {
++		} else if (sband) {
+ 			for (i = 0; i < sband->n_bitrates; i++) {
+ 				if (rate * 5 != sband->bitrates[i].bitrate)
+ 					continue;
+@@ -2224,8 +2236,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ 		      IEEE80211_TX_CTL_INJECTED;
+ 
+-	/* Sanity-check and process the injection radiotap header */
+-	if (!ieee80211_parse_tx_radiotap(skb, dev))
++	/* Sanity-check the length of the radiotap header */
++	if (!ieee80211_validate_radiotap_len(skb))
+ 		goto fail;
+ 
+ 	/* we now know there is a radiotap header with a length we can use */
+@@ -2339,6 +2351,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 	ieee80211_select_queue_80211(sdata, skb, hdr);
+ 	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
+ 
++	/*
++	 * Process the radiotap header. This will now take into account the
++	 * selected chandef above to accurately set injection rates and
++	 * retransmissions.
++	 */
++	if (!ieee80211_parse_tx_radiotap(skb, dev))
++		goto fail_rcu;
++
+ 	/* remove the injection radiotap header */
+ 	skb_pull(skb, len_rthdr);
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index c0fa526a45b4d..53755a05f73b5 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2186,8 +2186,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
+ 	list_for_each_entry(ctx, &local->chanctx_list, list)
+ 		ctx->driver_present = false;
+ 	mutex_unlock(&local->chanctx_mtx);
+-
+-	cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+ }
+ 
+ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 8848a9e2a95b1..47d90cf31f125 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -337,6 +337,8 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 			length--;
+ 			continue;
+ 		default:
++			if (length < 2)
++				return;
+ 			opsize = *ptr++;
+ 			if (opsize < 2) /* "silly options" */
+ 				return;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 225b988215171..d8187ac065397 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -287,11 +287,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ 
+ 	/* try to fetch required memory from subflow */
+ 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+-		if (ssk->sk_forward_alloc < skb->truesize)
+-			goto drop;
+-		__sk_mem_reclaim(ssk, skb->truesize);
+-		if (!sk_rmem_schedule(sk, skb, skb->truesize))
++		int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
++
++		if (ssk->sk_forward_alloc < amount)
+ 			goto drop;
++
++		ssk->sk_forward_alloc -= amount;
++		sk->sk_forward_alloc += amount;
+ 	}
+ 
+ 	/* the skb map_seq accounts for the skb offset:
+@@ -687,18 +689,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
+ /* In most cases we will be able to lock the mptcp socket.  If its already
+  * owned, we need to defer to the work queue to avoid ABBA deadlock.
+  */
+-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
++static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct sock *sk = (struct sock *)msk;
+ 	unsigned int moved = 0;
+ 
+ 	if (inet_sk_state_load(sk) == TCP_CLOSE)
+-		return;
+-
+-	mptcp_data_lock(sk);
++		return false;
+ 
+ 	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ 	__mptcp_ofo_queue(msk);
++	if (unlikely(ssk->sk_err)) {
++		if (!sock_owned_by_user(sk))
++			__mptcp_error_report(sk);
++		else
++			set_bit(MPTCP_ERROR_REPORT,  &msk->flags);
++	}
+ 
+ 	/* If the moves have caught up with the DATA_FIN sequence number
+ 	 * it's time to ack the DATA_FIN and change socket state, but
+@@ -707,7 +713,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+ 	 */
+ 	if (mptcp_pending_data_fin(sk, NULL))
+ 		mptcp_schedule_work(sk);
+-	mptcp_data_unlock(sk);
++	return moved > 0;
+ }
+ 
+ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+@@ -715,7 +721,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	int sk_rbuf, ssk_rbuf;
+-	bool wake;
+ 
+ 	/* The peer can send data while we are shutting down this
+ 	 * subflow at msk destruction time, but we must avoid enqueuing
+@@ -724,28 +729,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+ 	if (unlikely(subflow->disposable))
+ 		return;
+ 
+-	/* move_skbs_to_msk below can legitly clear the data_avail flag,
+-	 * but we will need later to properly woke the reader, cache its
+-	 * value
+-	 */
+-	wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
+-	if (wake)
+-		set_bit(MPTCP_DATA_READY, &msk->flags);
+-
+ 	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+ 	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+ 	if (unlikely(ssk_rbuf > sk_rbuf))
+ 		sk_rbuf = ssk_rbuf;
+ 
+-	/* over limit? can't append more skbs to msk */
++	/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
+ 	if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
+-		goto wake;
+-
+-	move_skbs_to_msk(msk, ssk);
++		return;
+ 
+-wake:
+-	if (wake)
++	/* Wake-up the reader only for in-sequence data */
++	mptcp_data_lock(sk);
++	if (move_skbs_to_msk(msk, ssk)) {
++		set_bit(MPTCP_DATA_READY, &msk->flags);
+ 		sk->sk_data_ready(sk);
++	}
++	mptcp_data_unlock(sk);
+ }
+ 
+ void __mptcp_flush_join_list(struct mptcp_sock *msk)
+@@ -848,7 +847,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
+ 	sock_owned_by_me(sk);
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+-		if (subflow->data_avail)
++		if (READ_ONCE(subflow->data_avail))
+ 			return mptcp_subflow_tcp_sock(subflow);
+ 	}
+ 
+@@ -1939,6 +1938,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
+ 		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ 		mptcp_data_unlock(sk);
+ 		tcp_cleanup_rbuf(ssk, moved);
++
++		if (unlikely(ssk->sk_err))
++			__mptcp_error_report(sk);
+ 		unlock_sock_fast(ssk, slowpath);
+ 	} while (!done);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index e21a5bc36cf08..14e89e4bd4a80 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -372,7 +372,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
+ enum mptcp_data_avail {
+ 	MPTCP_SUBFLOW_NODATA,
+ 	MPTCP_SUBFLOW_DATA_AVAIL,
+-	MPTCP_SUBFLOW_OOO_DATA
+ };
+ 
+ struct mptcp_delegated_action {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 8425cd393bf3e..d6d8ad4f918e7 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -754,10 +754,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
+ 	return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
+ }
+ 
+-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
++static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-	WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+-		  ssn, subflow->map_subflow_seq, subflow->map_data_len);
++	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+ 
+ static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
+@@ -782,13 +782,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
+ 		/* Mapping covers data later in the subflow stream,
+ 		 * currently unsupported.
+ 		 */
+-		warn_bad_map(subflow, ssn);
++		dbg_bad_map(subflow, ssn);
+ 		return false;
+ 	}
+ 	if (unlikely(!before(ssn, subflow->map_subflow_seq +
+ 				  subflow->map_data_len))) {
+ 		/* Mapping does covers past subflow data, invalid */
+-		warn_bad_map(subflow, ssn + skb->len);
++		dbg_bad_map(subflow, ssn);
+ 		return false;
+ 	}
+ 	return true;
+@@ -974,7 +974,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
+ 		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
+ 	if (!skb_peek(&ssk->sk_receive_queue))
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 	if (subflow->data_avail)
+ 		return true;
+ 
+@@ -1012,18 +1012,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+ 		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
+ 			 ack_seq);
+-		if (ack_seq == old_ack) {
+-			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+-			break;
+-		} else if (after64(ack_seq, old_ack)) {
+-			subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
+-			break;
++		if (unlikely(before64(ack_seq, old_ack))) {
++			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
++			continue;
+ 		}
+ 
+-		/* only accept in-sequence mapping. Old values are spurious
+-		 * retransmission
+-		 */
+-		mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
++		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
++		break;
+ 	}
+ 	return true;
+ 
+@@ -1038,10 +1033,9 @@ fallback:
+ 		 * subflow_error_report() will introduce the appropriate barriers
+ 		 */
+ 		ssk->sk_err = EBADMSG;
+-		ssk->sk_error_report(ssk);
+ 		tcp_set_state(ssk, TCP_CLOSE);
+ 		tcp_send_active_reset(ssk, GFP_ATOMIC);
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 		return false;
+ 	}
+ 
+@@ -1051,7 +1045,7 @@ fallback:
+ 	subflow->map_seq = READ_ONCE(msk->ack_seq);
+ 	subflow->map_data_len = skb->len;
+ 	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+-	subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
++	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ 	return true;
+ }
+ 
+@@ -1063,7 +1057,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ 	if (subflow->map_valid &&
+ 	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
+ 		subflow->map_valid = 0;
+-		subflow->data_avail = 0;
++		WRITE_ONCE(subflow->data_avail, 0);
+ 
+ 		pr_debug("Done with mapping: seq=%u data_len=%u",
+ 			 subflow->map_subflow_seq,
+@@ -1091,41 +1085,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
+ 	*full_space = tcp_full_space(sk);
+ }
+ 
+-static void subflow_data_ready(struct sock *sk)
+-{
+-	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+-	u16 state = 1 << inet_sk_state_load(sk);
+-	struct sock *parent = subflow->conn;
+-	struct mptcp_sock *msk;
+-
+-	msk = mptcp_sk(parent);
+-	if (state & TCPF_LISTEN) {
+-		/* MPJ subflow are removed from accept queue before reaching here,
+-		 * avoid stray wakeups
+-		 */
+-		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+-			return;
+-
+-		set_bit(MPTCP_DATA_READY, &msk->flags);
+-		parent->sk_data_ready(parent);
+-		return;
+-	}
+-
+-	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+-		     !subflow->mp_join && !(state & TCPF_CLOSE));
+-
+-	if (mptcp_subflow_data_available(sk))
+-		mptcp_data_ready(parent, sk);
+-}
+-
+-static void subflow_write_space(struct sock *ssk)
+-{
+-	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+-
+-	mptcp_propagate_sndbuf(sk, ssk);
+-	mptcp_write_space(sk);
+-}
+-
+ void __mptcp_error_report(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -1166,6 +1125,43 @@ static void subflow_error_report(struct sock *ssk)
+ 	mptcp_data_unlock(sk);
+ }
+ 
++static void subflow_data_ready(struct sock *sk)
++{
++	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
++	u16 state = 1 << inet_sk_state_load(sk);
++	struct sock *parent = subflow->conn;
++	struct mptcp_sock *msk;
++
++	msk = mptcp_sk(parent);
++	if (state & TCPF_LISTEN) {
++		/* MPJ subflow are removed from accept queue before reaching here,
++		 * avoid stray wakeups
++		 */
++		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
++			return;
++
++		set_bit(MPTCP_DATA_READY, &msk->flags);
++		parent->sk_data_ready(parent);
++		return;
++	}
++
++	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
++		     !subflow->mp_join && !(state & TCPF_CLOSE));
++
++	if (mptcp_subflow_data_available(sk))
++		mptcp_data_ready(parent, sk);
++	else if (unlikely(sk->sk_err))
++		subflow_error_report(sk);
++}
++
++static void subflow_write_space(struct sock *ssk)
++{
++	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
++
++	mptcp_propagate_sndbuf(sk, ssk);
++	mptcp_write_space(sk);
++}
++
+ static struct inet_connection_sock_af_ops *
+ subflow_default_af_ops(struct sock *sk)
+ {
+@@ -1474,6 +1470,8 @@ static void subflow_state_change(struct sock *sk)
+ 	 */
+ 	if (mptcp_subflow_data_available(sk))
+ 		mptcp_data_ready(parent, sk);
++	else if (unlikely(sk->sk_err))
++		subflow_error_report(sk);
+ 
+ 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
+ 
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index b100c04a0e435..3d6d49420db8b 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ 	int length = (th->doff * 4) - sizeof(*th);
+ 	u8 buf[40], *ptr;
+ 
++	if (unlikely(length < 0))
++		return false;
++
+ 	ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
+ 	if (ptr == NULL)
+ 		return false;
+@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ 			length--;
+ 			continue;
+ 		default:
++			if (length < 2)
++				return true;
+ 			opsize = *ptr++;
+ 			if (opsize < 2)
+ 				return true;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 31016c144c48b..9d5ea23529657 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4317,13 +4317,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ 	err = nf_tables_set_alloc_name(&ctx, set, name);
+ 	kfree(name);
+ 	if (err < 0)
+-		goto err_set_alloc_name;
++		goto err_set_name;
++
++	udata = NULL;
++	if (udlen) {
++		udata = set->data + size;
++		nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
++	}
++
++	INIT_LIST_HEAD(&set->bindings);
++	set->table = table;
++	write_pnet(&set->net, net);
++	set->ops = ops;
++	set->ktype = ktype;
++	set->klen = desc.klen;
++	set->dtype = dtype;
++	set->objtype = objtype;
++	set->dlen = desc.dlen;
++	set->flags = flags;
++	set->size = desc.size;
++	set->policy = policy;
++	set->udlen = udlen;
++	set->udata = udata;
++	set->timeout = timeout;
++	set->gc_int = gc_int;
++
++	set->field_count = desc.field_count;
++	for (i = 0; i < desc.field_count; i++)
++		set->field_len[i] = desc.field_len[i];
++
++	err = ops->init(set, &desc, nla);
++	if (err < 0)
++		goto err_set_init;
+ 
+ 	if (nla[NFTA_SET_EXPR]) {
+ 		expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
+ 		if (IS_ERR(expr)) {
+ 			err = PTR_ERR(expr);
+-			goto err_set_alloc_name;
++			goto err_set_expr_alloc;
+ 		}
+ 		set->exprs[0] = expr;
+ 		set->num_exprs++;
+@@ -4334,74 +4365,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ 
+ 		if (!(flags & NFT_SET_EXPR)) {
+ 			err = -EINVAL;
+-			goto err_set_alloc_name;
++			goto err_set_expr_alloc;
+ 		}
+ 		i = 0;
+ 		nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+ 			if (i == NFT_SET_EXPR_MAX) {
+ 				err = -E2BIG;
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			if (nla_type(tmp) != NFTA_LIST_ELEM) {
+ 				err = -EINVAL;
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
+ 			if (IS_ERR(expr)) {
+ 				err = PTR_ERR(expr);
+-				goto err_set_init;
++				goto err_set_expr_alloc;
+ 			}
+ 			set->exprs[i++] = expr;
+ 			set->num_exprs++;
+ 		}
+ 	}
+ 
+-	udata = NULL;
+-	if (udlen) {
+-		udata = set->data + size;
+-		nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+-	}
+-
+-	INIT_LIST_HEAD(&set->bindings);
+-	set->table = table;
+-	write_pnet(&set->net, net);
+-	set->ops   = ops;
+-	set->ktype = ktype;
+-	set->klen  = desc.klen;
+-	set->dtype = dtype;
+-	set->objtype = objtype;
+-	set->dlen  = desc.dlen;
+-	set->flags = flags;
+-	set->size  = desc.size;
+-	set->policy = policy;
+-	set->udlen  = udlen;
+-	set->udata  = udata;
+-	set->timeout = timeout;
+-	set->gc_int = gc_int;
+ 	set->handle = nf_tables_alloc_handle(table);
+ 
+-	set->field_count = desc.field_count;
+-	for (i = 0; i < desc.field_count; i++)
+-		set->field_len[i] = desc.field_len[i];
+-
+-	err = ops->init(set, &desc, nla);
+-	if (err < 0)
+-		goto err_set_init;
+-
+ 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+ 	if (err < 0)
+-		goto err_set_trans;
++		goto err_set_expr_alloc;
+ 
+ 	list_add_tail_rcu(&set->list, &table->sets);
+ 	table->use++;
+ 	return 0;
+ 
+-err_set_trans:
+-	ops->destroy(set);
+-err_set_init:
++err_set_expr_alloc:
+ 	for (i = 0; i < set->num_exprs; i++)
+ 		nft_expr_destroy(&ctx, set->exprs[i]);
+-err_set_alloc_name:
++
++	ops->destroy(set);
++err_set_init:
+ 	kfree(set->name);
+ err_set_name:
+ 	kvfree(set);
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 1e4fb568fa841..24f10bf7d8a3f 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -435,7 +435,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 	struct qrtr_sock *ipc;
+ 	struct sk_buff *skb;
+ 	struct qrtr_cb *cb;
+-	unsigned int size;
++	size_t size;
+ 	unsigned int ver;
+ 	size_t hdrlen;
+ 
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index aba4afe4dfedc..967d115f97efd 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 
+ 		if (rds_cmsg_recv(inc, msg, rs)) {
+ 			ret = -EFAULT;
+-			goto out;
++			break;
+ 		}
+ 		rds_recvmsg_zcookie(rs, msg);
+ 
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index ba7f57cb41c30..143786d8cde03 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
+ 	}
+ 
+ 	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+-	if (err == NF_ACCEPT &&
+-	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+-		if (maniptype == NF_NAT_MANIP_SRC)
+-			maniptype = NF_NAT_MANIP_DST;
+-		else
+-			maniptype = NF_NAT_MANIP_SRC;
+-
+-		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
++	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
++		if (ct->status & IPS_SRC_NAT) {
++			if (maniptype == NF_NAT_MANIP_SRC)
++				maniptype = NF_NAT_MANIP_DST;
++			else
++				maniptype = NF_NAT_MANIP_SRC;
++
++			err = ct_nat_execute(skb, ct, ctinfo, range,
++					     maniptype);
++		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
++			err = ct_nat_execute(skb, ct, ctinfo, NULL,
++					     NF_NAT_MANIP_SRC);
++		}
+ 	}
+ 	return err;
+ #else
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 7d37638ee1c7a..5c15968b5155b 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ 	}
+ 
+ 	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+-	if (!tcph)
++	if (!tcph || tcph->doff < 5)
+ 		return NULL;
+ 
+ 	return skb_header_pointer(skb, offset,
+@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
+ 			length--;
+ 			continue;
+ 		}
++		if (length < 2)
++			break;
+ 		opsize = *ptr++;
+ 		if (opsize < 2 || opsize > length)
+ 			break;
+@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
+ 			length--;
+ 			continue;
+ 		}
++		if (length < 2)
++			break;
+ 		opsize = *ptr++;
+ 		if (opsize < 2 || opsize > length)
+ 			break;
+diff --git a/net/socket.c b/net/socket.c
+index 84a8049c2b099..03259cb919f7e 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+  *	what to do with it - that's up to the protocol still.
+  */
+ 
+-/**
+- *	get_net_ns - increment the refcount of the network namespace
+- *	@ns: common namespace (net)
+- *
+- *	Returns the net's common namespace.
+- */
+-
+-struct ns_common *get_net_ns(struct ns_common *ns)
+-{
+-	return &get_net(container_of(ns, struct net, ns))->ns;
+-}
+-EXPORT_SYMBOL_GPL(get_net_ns);
+-
+ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ {
+ 	struct socket *sock;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5a31307ceb76d..5d1192ceb1397 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	u->path.mnt = NULL;
+ 	state = sk->sk_state;
+ 	sk->sk_state = TCP_CLOSE;
++
++	skpair = unix_peer(sk);
++	unix_peer(sk) = NULL;
++
+ 	unix_state_unlock(sk);
+ 
+ 	wake_up_interruptible_all(&u->peer_wait);
+ 
+-	skpair = unix_peer(sk);
+-
+ 	if (skpair != NULL) {
+ 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ 			unix_state_lock(skpair);
+@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 
+ 		unix_dgram_peer_wake_disconnect(sk, skpair);
+ 		sock_put(skpair); /* It may now die */
+-		unix_peer(sk) = NULL;
+ 	}
+ 
+ 	/* Try to flush out this socket. Throw out buffers at least */
+diff --git a/net/wireless/Makefile b/net/wireless/Makefile
+index 2eee93985ab0d..af590ae606b69 100644
+--- a/net/wireless/Makefile
++++ b/net/wireless/Makefile
+@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
+ 	@$(kecho) "  GEN     $@"
+ 	@(echo '#include "reg.h"'; \
+ 	  echo 'const u8 shipped_regdb_certs[] = {'; \
+-	  cat $^ ; \
++	  echo | cat - $^ ; \
+ 	  echo '};'; \
+ 	  echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+ 	 ) > $@
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 589ee5a69a2e5..0e364f32794d3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1339,6 +1339,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
+ 	rdev->devlist_generation++;
+ 	wdev->registered = true;
+ 
++	if (wdev->netdev &&
++	    sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
++			      "phy80211"))
++		pr_err("failed to add phy80211 symlink to netdev!\n");
++
+ 	nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+ }
+ 
+@@ -1364,14 +1369,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
+ 	if (ret)
+ 		goto out;
+ 
+-	if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
+-			      "phy80211")) {
+-		pr_err("failed to add phy80211 symlink to netdev!\n");
+-		unregister_netdevice(dev);
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	cfg80211_register_wdev(rdev, wdev);
+ 	ret = 0;
+ out:
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index a95c79d183492..a817d8e3e4b36 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -324,6 +324,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ 			    gfp_t gfp)
+ {
+ 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
++	struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
+ 	struct sk_buff *msg;
+ 	void *hdr;
+ 
+@@ -354,9 +355,20 @@ free_msg:
+ 	nlmsg_free(msg);
+ free_request:
+ 	spin_lock_bh(&wdev->pmsr_lock);
+-	list_del(&req->list);
++	/*
++	 * cfg80211_pmsr_process_abort() may have already moved this request
++	 * to the free list, and will free it later. In this case, don't free
++	 * it here.
++	 */
++	list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
++		if (tmp == req) {
++			list_del(&req->list);
++			to_free = req;
++			break;
++		}
++	}
+ 	spin_unlock_bh(&wdev->pmsr_lock);
+-	kfree(req);
++	kfree(to_free);
+ }
+ EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
+ 
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 9b959e3b09c6d..0c3f05c9be27a 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
+ 	if (rdev->wiphy.registered && rdev->ops->resume)
+ 		ret = rdev_resume(rdev);
+ 	wiphy_unlock(&rdev->wiphy);
++
++	if (ret)
++		cfg80211_shutdown_all_interfaces(&rdev->wiphy);
++
+ 	rtnl_unlock();
+ 
+ 	return ret;
+diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
+index 91a4ef7f620ca..a9b079d56fd69 100644
+--- a/sound/soc/codecs/rt5659.c
++++ b/sound/soc/codecs/rt5659.c
+@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
+-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
++static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
+ 	SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
+ 		NULL, 0),
+-	SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+-		NULL, 0),
++	SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
++		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
+ 		RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
++};
++
++static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
++	SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
++		NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
+ 		RT5659_PWR_VREF3_BIT, 0, NULL, 0),
+ 
+@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+ 		RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
+ 
+ 	/* Input Side */
+-	SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+-		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
+ 		0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
+@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
+ 
+ static int rt5659_probe(struct snd_soc_component *component)
+ {
++	struct snd_soc_dapm_context *dapm =
++		snd_soc_component_get_dapm(component);
+ 	struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
+ 
+ 	rt5659->component = component;
+ 
++	switch (rt5659->pdata.jd_src) {
++	case RT5659_JD_HDA_HEADER:
++		break;
++
++	default:
++		snd_soc_dapm_new_controls(dapm,
++			rt5659_particular_dapm_widgets,
++			ARRAY_SIZE(rt5659_particular_dapm_widgets));
++		break;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
+index b49f1e16125d4..d1dd7f720ba48 100644
+--- a/sound/soc/codecs/rt5682-sdw.c
++++ b/sound/soc/codecs/rt5682-sdw.c
+@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+ 
+ 	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ 		RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+-	regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
++	regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
++	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
+ 	regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
+ 		RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
+ 	regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
+diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
+index 81866aeb3fbfa..55b2a1f52ca37 100644
+--- a/sound/soc/codecs/tas2562.h
++++ b/sound/soc/codecs/tas2562.h
+@@ -57,13 +57,13 @@
+ #define TAS2562_TDM_CFG0_RAMPRATE_MASK		BIT(5)
+ #define TAS2562_TDM_CFG0_RAMPRATE_44_1		BIT(5)
+ #define TAS2562_TDM_CFG0_SAMPRATE_MASK		GENMASK(3, 1)
+-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ	0x0
+-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ	0x1
+-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ	0x2
+-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ	0x3
+-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ	0x4
+-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ	0x5
+-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ	0x6
++#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ	(0x0 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ	(0x1 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ	(0x2 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ	(0x3 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ	(0x4 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ	(0x5 << 1)
++#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ	(0x6 << 1)
+ 
+ #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY	BIT(6)
+ 
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index f62f81ceab0d2..9dcbe5d5a428c 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -732,6 +732,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	/* Initialize sound card */
+ 	priv->pdev = pdev;
+ 	priv->card.dev = &pdev->dev;
++	priv->card.owner = THIS_MODULE;
+ 	ret = snd_soc_of_parse_card_name(&priv->card, "model");
+ 	if (ret) {
+ 		snprintf(priv->name, sizeof(priv->name), "%s-audio",
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 936384a94f25e..74d3d8c586080 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
+ 		struct snd_soc_dai *dai)
+ {
+ 	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
++	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
++	unsigned int id = dai->driver->id;
+ 
+ 	clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
++	/*
++	 * Ensure LRCLK is disabled even in device node validation.
++	 * Will not impact if disabled in lpass_cpu_daiops_trigger()
++	 * suspend.
++	 */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
++	else
++		regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
++
++	/*
++	 * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
++	 * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
++	 * lpass_cpu_daiops_prepare.
++	 */
++	if (drvdata->mi2s_was_prepared[dai->driver->id]) {
++		drvdata->mi2s_was_prepared[dai->driver->id] = false;
++		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
++	}
++
+ 	clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
+ }
+ 
+@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		/*
++		 * Ensure lpass BCLK/LRCLK is enabled during
++		 * device resume as lpass_cpu_daiops_prepare() is not called
++		 * after the device resumes. We don't check mi2s_was_prepared before
++		 * enable/disable BCLK in trigger events because:
++		 *  1. These trigger events are paired, so the BCLK
++		 *     enable_count is balanced.
++		 *  2. the BCLK can be shared (ex: headset and headset mic),
++		 *     we need to increase the enable_count so that we don't
++		 *     turn off the shared BCLK while other devices are using
++		 *     it.
++		 */
+ 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			ret = regmap_fields_write(i2sctl->spken, id,
+ 						 LPAIF_I2SCTL_SPKEN_ENABLE);
+@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		/*
++		 * To ensure lpass BCLK/LRCLK is disabled during
++		 * device suspend.
++		 */
+ 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			ret = regmap_fields_write(i2sctl->spken, id,
+ 						 LPAIF_I2SCTL_SPKEN_DISABLE);
+@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 	return ret;
+ }
+ 
++static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
++		struct snd_soc_dai *dai)
++{
++	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
++	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
++	unsigned int id = dai->driver->id;
++	int ret;
++
++	/*
++	 * Ensure lpass BCLK/LRCLK is enabled bit before playback/capture
++	 * data flow starts. This allows other codec to have some delay before
++	 * the data flow.
++	 * (ex: to drop start up pop noise before capture starts).
++	 */
++	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
++	else
++		ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
++
++	if (ret) {
++		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
++		return ret;
++	}
++
++	/*
++	 * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
++	 * be called multiple times. It's paired with the clk_disable in
++	 * lpass_cpu_daiops_shutdown.
++	 */
++	if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
++		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
++		if (ret) {
++			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
++			return ret;
++		}
++		drvdata->mi2s_was_prepared[dai->driver->id] = true;
++	}
++	return 0;
++}
++
+ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+ 	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
+ 	.startup	= lpass_cpu_daiops_startup,
+ 	.shutdown	= lpass_cpu_daiops_shutdown,
+ 	.hw_params	= lpass_cpu_daiops_hw_params,
+ 	.trigger	= lpass_cpu_daiops_trigger,
++	.prepare	= lpass_cpu_daiops_prepare,
+ };
+ EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
+ 
+diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
+index 83b2e08ade060..7f72214404baf 100644
+--- a/sound/soc/qcom/lpass.h
++++ b/sound/soc/qcom/lpass.h
+@@ -67,6 +67,10 @@ struct lpass_data {
+ 	/* MI2S SD lines to use for playback/capture */
+ 	unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
+ 	unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
++
++	/* The state of MI2S prepare dai_ops was called */
++	bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
++
+ 	int hdmi_port_enable;
+ 
+ 	/* low-power audio interface (LPAIF) registers */
+diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
+index 7d6687618d808..d1b327036ae43 100644
+--- a/tools/include/uapi/linux/in.h
++++ b/tools/include/uapi/linux/in.h
+@@ -289,6 +289,9 @@ struct sockaddr_in {
+ /* Address indicating an error return. */
+ #define	INADDR_NONE		((unsigned long int) 0xffffffff)
+ 
++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
++#define	INADDR_DUMMY		((unsigned long int) 0xc0000008)
++
+ /* Network number for local host loopback. */
+ #define	IN_LOOPBACKNET		127
+ 
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 007fe5d594386..fe2bec500bf68 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -928,7 +928,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 			goto out_put_ctx;
+ 		}
+ 		if (xsk->fd == umem->fd)
+-			umem->rx_ring_setup_done = true;
++			umem->tx_ring_setup_done = true;
+ 	}
+ 
+ 	err = xsk_get_mmap_offsets(xsk->fd, &off);
+diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
+index 385894b4a8bba..42222a84167f3 100644
+--- a/tools/perf/trace/beauty/include/linux/socket.h
++++ b/tools/perf/trace/beauty/include/linux/socket.h
+@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
+ 			    int __user *usockvec);
+ extern int __sys_shutdown_sock(struct socket *sock, int how);
+ extern int __sys_shutdown(int fd, int how);
+-
+-extern struct ns_common *get_net_ns(struct ns_common *ns);
+ #endif /* _LINUX_SOCKET_H */
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 26c990e323781..939aed36e0c2a 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
+ 	return false;
+ }
+ 
+-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
++static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
+ {
+ 	if (!ev1->pmu_name || !ev2->pmu_name)
+-		return false;
++		return true;
+ 
+ 	return !strcmp(ev1->pmu_name, ev2->pmu_name);
+ }
+@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 			 */
+ 			if (!has_constraint &&
+ 			    ev->leader != metric_events[i]->leader &&
+-			    evsel_same_pmu(ev->leader, metric_events[i]->leader))
++			    evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
+ 				break;
+ 			if (!strcmp(metric_events[i]->name, ev->name)) {
+ 				set_bit(ev->idx, evlist_used);
+@@ -1072,16 +1072,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
+ 
+ 	ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = resolve_metric(d->metric_no_group,
+ 				     d->metric_list, NULL, d->ids);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	*(d->has_match) = true;
+ 
+-	return *d->ret;
++out:
++	*(d->ret) = ret;
++	return ret;
+ }
+ 
+ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 2b5707738609e..6fad54c7ecb4a 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
+ 	ipv4_rt_replace_mpath
+ }
+ 
++# checks that cached input route on VRF port is deleted
++# when VRF is deleted
++ipv4_local_rt_cache()
++{
++	run_cmd "ip addr add 10.0.0.1/32 dev lo"
++	run_cmd "ip netns add test-ns"
++	run_cmd "ip link add veth-outside type veth peer name veth-inside"
++	run_cmd "ip link add vrf-100 type vrf table 1100"
++	run_cmd "ip link set veth-outside master vrf-100"
++	run_cmd "ip link set veth-inside netns test-ns"
++	run_cmd "ip link set veth-outside up"
++	run_cmd "ip link set vrf-100 up"
++	run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
++	run_cmd "ip netns exec test-ns ip link set veth-inside up"
++	run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
++	run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
++	run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
++	run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
++	run_cmd "ip link delete vrf-100"
++
++	# if we do not hang test is a success
++	log_test $? 0 "Cached route removed from VRF port device"
++}
++
+ ipv4_route_test()
+ {
+ 	route_setup
+ 
+ 	ipv4_rt_add
+ 	ipv4_rt_replace
++	ipv4_local_rt_cache
+ 
+ 	route_cleanup
+ }
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 65b3b983efc26..8763706b0d047 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
+ ip -net "$ns4" route add default via 10.0.3.2
+ ip -net "$ns4" route add default via dead:beef:3::2
+ 
+-# use TCP syn cookies, even if no flooding was detected.
+-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+-
+ set_ethtool_flags() {
+ 	local ns="$1"
+ 	local dev="$2"
+@@ -711,6 +708,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
+ 		exit $ret
+ 	fi
+ 
++	# ns1<->ns2 is not subject to reordering/tc delays. Use it to test
++	# mptcp syncookie support.
++	if [ $sender = $ns1 ]; then
++		ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
++	else
++		ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
++	fi
++
+ 	run_tests "$ns2" $sender 10.0.1.2
+ 	run_tests "$ns2" $sender dead:beef:1::2
+ 	run_tests "$ns2" $sender 10.0.2.1


             reply	other threads:[~2021-06-23 15:15 UTC|newest]

Thread overview: 33+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-23 15:15 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2021-07-20 15:49 [gentoo-commits] proj/linux-patches:5.12 commit in: / Alice Ferrazzi
2021-07-19 11:16 Mike Pagano
2021-07-14 16:18 Mike Pagano
2021-07-13 12:36 Mike Pagano
2021-07-11 14:42 Mike Pagano
2021-07-07 13:12 Mike Pagano
2021-07-04 15:43 Mike Pagano
2021-07-01 14:28 Mike Pagano
2021-06-30 14:22 Mike Pagano
2021-06-18 12:21 Mike Pagano
2021-06-18 12:00 Mike Pagano
2021-06-18 11:35 Mike Pagano
2021-06-16 12:25 Mike Pagano
2021-06-11 13:21 Mike Pagano
2021-06-10 12:14 Mike Pagano
2021-06-08 22:15 Mike Pagano
2021-06-08 16:48 Mike Pagano
2021-06-08 16:26 Mike Pagano
2021-06-03 10:22 Alice Ferrazzi
2021-05-28 12:17 Alice Ferrazzi
2021-05-26 12:08 Mike Pagano
2021-05-24 17:26 Mike Pagano
2021-05-22 16:50 Mike Pagano
2021-05-19 12:25 Mike Pagano
2021-05-14 14:02 Alice Ferrazzi
2021-05-12 12:30 Mike Pagano
2021-05-07 13:15 Alice Ferrazzi
2021-05-02 16:05 Mike Pagano
2021-04-30 18:53 Mike Pagano
2021-04-27 11:53 Mike Pagano
2021-04-18 22:03 Mike Pagano
2021-03-23 12:29 Mike Pagano
