From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Wed, 22 Sep 2021 11:37:37 +0000 (UTC)
Message-ID: <1632310643.00a2b84fdf9371e8fc3cfa89c197db0aa7f58939.mpagano@gentoo>

commit:     00a2b84fdf9371e8fc3cfa89c197db0aa7f58939
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 22 11:37:23 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 22 11:37:23 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=00a2b84f

Linux patch 5.14.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1006_linux-5.14.7.patch | 6334 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6338 insertions(+)

diff --git a/0000_README b/0000_README
index df8a957..0c8fa67 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1005_linux-5.14.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.14.6
 
+Patch:  1006_linux-5.14.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.14.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.14.7.patch b/1006_linux-5.14.7.patch
new file mode 100644
index 0000000..a7e8c31
--- /dev/null
+++ b/1006_linux-5.14.7.patch
@@ -0,0 +1,6334 @@
+diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
+index b9f75e20fef5c..b2a645740ffe6 100644
+--- a/Documentation/devicetree/bindings/arm/tegra.yaml
++++ b/Documentation/devicetree/bindings/arm/tegra.yaml
+@@ -54,7 +54,7 @@ properties:
+           - const: toradex,apalis_t30
+           - const: nvidia,tegra30
+       - items:
+-          - const: toradex,apalis_t30-eval-v1.1
++          - const: toradex,apalis_t30-v1.1-eval
+           - const: toradex,apalis_t30-eval
+           - const: toradex,apalis_t30-v1.1
+           - const: toradex,apalis_t30
+diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+index 44919d48d2415..c459f169a9044 100644
+--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+@@ -122,7 +122,7 @@ on various other factors also like;
+ 	so the device should have enough free bytes available its OOB/Spare
+ 	area to accommodate ECC for entire page. In general following expression
+ 	helps in determining if given device can accommodate ECC syndrome:
+-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
++	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
+ 	where
+ 		OOBSIZE		number of bytes in OOB/spare area
+ 		PAGESIZE	number of bytes in main-area of device page
+diff --git a/Makefile b/Makefile
+index f9c8bbf8cf71e..efb603f06e711 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+ 
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a2fbea3ee07c7..102418ac5ff4a 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+ 	clear_page(to);
+ 	clear_bit(PG_dc_clean, &page->flags);
+ }
+-
++EXPORT_SYMBOL(clear_user_page);
+ 
+ /**********************************************************************
+  * Explicit Cache flush request from user space via syscall
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index e57b23f952846..3599b9a2f1dff 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -511,7 +511,7 @@ size_t sve_state_size(struct task_struct const *task)
+ void sve_alloc(struct task_struct *task)
+ {
+ 	if (task->thread.sve_state) {
+-		memset(task->thread.sve_state, 0, sve_state_size(current));
++		memset(task->thread.sve_state, 0, sve_state_size(task));
+ 		return;
+ 	}
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 5d1fc9c4bca5e..45ee8abcf2025 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1220,6 +1220,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 		if (copy_from_user(&reg, argp, sizeof(reg)))
+ 			break;
+ 
++		/*
++		 * We could owe a reset due to PSCI. Handle the pending reset
++		 * here to ensure userspace register accesses are ordered after
++		 * the reset.
++		 */
++		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
++			kvm_reset_vcpu(vcpu);
++
+ 		if (ioctl == KVM_SET_ONE_REG)
+ 			r = kvm_arm_set_reg(vcpu, &reg);
+ 		else
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 6f48336b1d86a..04ebab299aa4e 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -292,11 +292,12 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
+ 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
+ }
+ 
+-void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
++void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
++					      u64 elr_virt, u64 elr_phys,
+ 					      u64 par, uintptr_t vcpu,
+ 					      u64 far, u64 hpfar) {
+-	u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
+-	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
++	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
++	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
+ 	u64 mode = spsr & PSR_MODE_MASK;
+ 
+ 	/*
+@@ -309,20 +310,24 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
+ 		kvm_err("Invalid host exception to nVHE hyp!\n");
+ 	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
+ 		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
+-		struct bug_entry *bug = find_bug(elr_in_kimg);
+ 		const char *file = NULL;
+ 		unsigned int line = 0;
+ 
+ 		/* All hyp bugs, including warnings, are treated as fatal. */
+-		if (bug)
+-			bug_get_file_line(bug, &file, &line);
++		if (!is_protected_kvm_enabled() ||
++		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
++			struct bug_entry *bug = find_bug(elr_in_kimg);
++
++			if (bug)
++				bug_get_file_line(bug, &file, &line);
++		}
+ 
+ 		if (file)
+ 			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
+ 		else
+-			kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset);
++			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
+ 	} else {
+-		kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset);
++		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
+ 	}
+ 
+ 	/*
+@@ -334,5 +339,5 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
+ 	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
+ 
+ 	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
+-	      spsr, elr, esr, far, hpfar, par, vcpu);
++	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
+ }
+diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
+index 2b23400e0fb30..4b652ffb591d4 100644
+--- a/arch/arm64/kvm/hyp/nvhe/host.S
++++ b/arch/arm64/kvm/hyp/nvhe/host.S
+@@ -7,6 +7,7 @@
+ #include <linux/linkage.h>
+ 
+ #include <asm/assembler.h>
++#include <asm/kvm_arm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmu.h>
+ 
+@@ -85,12 +86,24 @@ SYM_FUNC_START(__hyp_do_panic)
+ 
+ 	mov	x29, x0
+ 
++#ifdef CONFIG_NVHE_EL2_DEBUG
++	/* Ensure host stage-2 is disabled */
++	mrs	x0, hcr_el2
++	bic	x0, x0, #HCR_VM
++	msr	hcr_el2, x0
++	isb
++	tlbi	vmalls12e1
++	dsb	nsh
++#endif
++
+ 	/* Load the panic arguments into x0-7 */
+ 	mrs	x0, esr_el2
+-	get_vcpu_ptr x4, x5
+-	mrs	x5, far_el2
+-	mrs	x6, hpfar_el2
+-	mov	x7, xzr			// Unused argument
++	mov	x4, x3
++	mov	x3, x2
++	hyp_pa	x3, x6
++	get_vcpu_ptr x5, x6
++	mrs	x6, far_el2
++	mrs	x7, hpfar_el2
+ 
+ 	/* Enter the host, conditionally restoring the host context. */
+ 	cbz	x29, __host_enter_without_restoring
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index cba7872d69a85..d010778b93ffe 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+  */
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
++	struct vcpu_reset_state reset_state;
+ 	int ret;
+ 	bool loaded;
+ 	u32 pstate;
+ 
++	mutex_lock(&vcpu->kvm->lock);
++	reset_state = vcpu->arch.reset_state;
++	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
++	mutex_unlock(&vcpu->kvm->lock);
++
+ 	/* Reset PMU outside of the non-preemptible section */
+ 	kvm_pmu_vcpu_reset(vcpu);
+ 
+@@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 	 * Additional reset state handling that PSCI may have imposed on us.
+ 	 * Must be done after all the sys_reg reset.
+ 	 */
+-	if (vcpu->arch.reset_state.reset) {
+-		unsigned long target_pc = vcpu->arch.reset_state.pc;
++	if (reset_state.reset) {
++		unsigned long target_pc = reset_state.pc;
+ 
+ 		/* Gracefully handle Thumb2 entry point */
+ 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+@@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 		}
+ 
+ 		/* Propagate caller endianness */
+-		if (vcpu->arch.reset_state.be)
++		if (reset_state.be)
+ 			kvm_vcpu_set_be(vcpu);
+ 
+ 		*vcpu_pc(vcpu) = target_pc;
+-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+-
+-		vcpu->arch.reset_state.reset = false;
++		vcpu_set_reg(vcpu, 0, reset_state.r0);
+ 	}
+ 
+ 	/* Reset timer */
+@@ -317,6 +321,14 @@ int kvm_set_ipa_limit(void)
+ 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ 				ID_AA64MMFR0_PARANGE_SHIFT);
++	/*
++	 * IPA size beyond 48 bits could not be supported
++	 * on either 4K or 16K page size. Hence let's cap
++	 * it to 48 bits, in case it's reported as larger
++	 * on the system.
++	 */
++	if (PAGE_SIZE != SZ_64K)
++		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+ 
+ 	/*
+ 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index 21bbd615ca410..ec4e2d3635077 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -19,6 +19,7 @@
+ #include <asm/switch_to.h>
+ #include <asm/syscall.h>
+ #include <asm/time.h>
++#include <asm/tm.h>
+ #include <asm/unistd.h>
+ 
+ #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
+@@ -138,6 +139,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
+ 	 */
+ 	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+ 
++	/*
++	 * If system call is called with TM active, set _TIF_RESTOREALL to
++	 * prevent RFSCV being used to return to userspace, because POWER9
++	 * TM implementation has problems with this instruction returning to
++	 * transactional state. Final register values are not relevant because
++	 * the transaction will be aborted upon return anyway. Or in the case
++	 * of an unsupported_scv SIGILL fault, the return state does not
++	 * matter much because it's an edge case.
++	 */
++	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
++			unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
++		current_thread_info()->flags |= _TIF_RESTOREALL;
++
++	/*
++	 * If the system call was made with a transaction active, doom it and
++	 * return without performing the system call. Unless it was an
++	 * unsupported scv vector, in which case it's treated like an illegal
++	 * instruction.
++	 */
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
++	    !trap_is_unsupported_scv(regs)) {
++		/* Enable TM in the kernel, and disable EE (for scv) */
++		hard_irq_disable();
++		mtmsr(mfmsr() | MSR_TM);
++
++		/* tabort, this dooms the transaction, nothing else */
++		asm volatile(".long 0x7c00071d | ((%0) << 16)"
++				:: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
++
++		/*
++		 * Userspace will never see the return value. Execution will
++		 * resume after the tbegin. of the aborted transaction with the
++		 * checkpointed register state. A context switch could occur
++		 * or signal delivered to the process before resuming the
++		 * doomed transaction context, but that should all be handled
++		 * as expected.
++		 */
++		return -ENOSYS;
++	}
++#endif // CONFIG_PPC_TRANSACTIONAL_MEM
++
+ 	local_irq_enable();
+ 
+ 	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
+diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
+index d4212d2ff0b54..ec950b08a8dcc 100644
+--- a/arch/powerpc/kernel/interrupt_64.S
++++ b/arch/powerpc/kernel/interrupt_64.S
+@@ -12,7 +12,6 @@
+ #include <asm/mmu.h>
+ #include <asm/ppc_asm.h>
+ #include <asm/ptrace.h>
+-#include <asm/tm.h>
+ 
+ 	.section	".toc","aw"
+ SYS_CALL_TABLE:
+@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
+ 	.globl system_call_vectored_\name
+ system_call_vectored_\name:
+ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-BEGIN_FTR_SECTION
+-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+-	bne	tabort_syscall
+-END_FTR_SECTION_IFSET(CPU_FTR_TM)
+-#endif
+ 	SCV_INTERRUPT_TO_KERNEL
+ 	mr	r10,r1
+ 	ld	r1,PACAKSAVE(r13)
+@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
+ 	.globl system_call_common
+ system_call_common:
+ _ASM_NOKPROBE_SYMBOL(system_call_common)
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-BEGIN_FTR_SECTION
+-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+-	bne	tabort_syscall
+-END_FTR_SECTION_IFSET(CPU_FTR_TM)
+-#endif
+ 	mr	r10,r1
+ 	ld	r1,PACAKSAVE(r13)
+ 	std	r10,0(r1)
+@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
+ RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
+ #endif
+ 
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-tabort_syscall:
+-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
+-	/* Firstly we need to enable TM in the kernel */
+-	mfmsr	r10
+-	li	r9, 1
+-	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r10, 0
+-
+-	/* tabort, this dooms the transaction, nothing else */
+-	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+-	TABORT(R9)
+-
+-	/*
+-	 * Return directly to userspace. We have corrupted user register state,
+-	 * but userspace will never see that register state. Execution will
+-	 * resume after the tbegin of the aborted transaction with the
+-	 * checkpointed register state.
+-	 */
+-	li	r9, MSR_RI
+-	andc	r10, r10, r9
+-	mtmsrd	r10, 1
+-	mtspr	SPRN_SRR0, r11
+-	mtspr	SPRN_SRR1, r12
+-	RFI_TO_USER
+-	b	.	/* prevent speculative execution */
+-#endif
+-
+ 	/*
+ 	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ 	 * touched, no exit work created, then this can be used.
+diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
+index 47a683cd00d24..fd829f7f25a47 100644
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
+ {
+ 	int index;
+ 	struct machine_check_event evt;
++	unsigned long msr;
+ 
+ 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ 		return;
+@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
+ 	memcpy(&local_paca->mce_info->mce_event_queue[index],
+ 	       &evt, sizeof(evt));
+ 
+-	/* Queue irq work to process this event later. */
+-	irq_work_queue(&mce_event_process_work);
++	/*
++	 * Queue irq work to process this event later. Before
++	 * queuing the work enable translation for non radix LPAR,
++	 * as irq_work_queue may try to access memory outside RMO
++	 * region.
++	 */
++	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
++		msr = mfmsr();
++		mtmsr(msr | MSR_IR | MSR_DR);
++		irq_work_queue(&mce_event_process_work);
++		mtmsr(msr);
++	} else {
++		irq_work_queue(&mce_event_process_work);
++	}
+ }
+ 
+ void mce_common_process_ue(struct pt_regs *regs,
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 8dd437d7a2c63..dd18e1c447512 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2578,7 +2578,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+ 	/* The following code handles the fake_suspend = 1 case */
+ 	mflr	r0
+ 	std	r0, PPC_LR_STKOFF(r1)
+-	stdu	r1, -PPC_MIN_STKFRM(r1)
++	stdu	r1, -TM_FRAME_SIZE(r1)
+ 
+ 	/* Turn on TM. */
+ 	mfmsr	r8
+@@ -2593,10 +2593,42 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ 	nop
+ 
++	/*
++	 * It's possible that treclaim. may modify registers, if we have lost
++	 * track of fake-suspend state in the guest due to it using rfscv.
++	 * Save and restore registers in case this occurs.
++	 */
++	mfspr	r3, SPRN_DSCR
++	mfspr	r4, SPRN_XER
++	mfspr	r5, SPRN_AMR
++	/* SPRN_TAR would need to be saved here if the kernel ever used it */
++	mfcr	r12
++	SAVE_NVGPRS(r1)
++	SAVE_GPR(2, r1)
++	SAVE_GPR(3, r1)
++	SAVE_GPR(4, r1)
++	SAVE_GPR(5, r1)
++	stw	r12, 8(r1)
++	std	r1, HSTATE_HOST_R1(r13)
++
+ 	/* We have to treclaim here because that's the only way to do S->N */
+ 	li	r3, TM_CAUSE_KVM_RESCHED
+ 	TRECLAIM(R3)
+ 
++	GET_PACA(r13)
++	ld	r1, HSTATE_HOST_R1(r13)
++	REST_GPR(2, r1)
++	REST_GPR(3, r1)
++	REST_GPR(4, r1)
++	REST_GPR(5, r1)
++	lwz	r12, 8(r1)
++	REST_NVGPRS(r1)
++	mtspr	SPRN_DSCR, r3
++	mtspr	SPRN_XER, r4
++	mtspr	SPRN_AMR, r5
++	mtcr	r12
++	HMT_MEDIUM
++
+ 	/*
+ 	 * We were in fake suspend, so we are not going to save the
+ 	 * register state as the guest checkpointed state (since
+@@ -2624,7 +2656,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ 	std	r5, VCPU_TFHAR(r9)
+ 	std	r6, VCPU_TFIAR(r9)
+ 
+-	addi	r1, r1, PPC_MIN_STKFRM
++	addi	r1, r1, TM_FRAME_SIZE
+ 	ld	r0, PPC_LR_STKOFF(r1)
+ 	mtlr	r0
+ 	blr
+diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
+index b0ca5058e7ae6..767852ae5e84f 100644
+--- a/arch/riscv/include/asm/page.h
++++ b/arch/riscv/include/asm/page.h
+@@ -79,8 +79,8 @@ typedef struct page *pgtable_t;
+ #endif
+ 
+ #ifdef CONFIG_MMU
+-extern unsigned long pfn_base;
+-#define ARCH_PFN_OFFSET		(pfn_base)
++extern unsigned long riscv_pfn_base;
++#define ARCH_PFN_OFFSET		(riscv_pfn_base)
+ #else
+ #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+ #endif /* CONFIG_MMU */
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 7cb4f391d106f..9786100f3a140 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -234,8 +234,8 @@ static struct pt_alloc_ops _pt_ops __initdata;
+ #define pt_ops _pt_ops
+ #endif
+ 
+-unsigned long pfn_base __ro_after_init;
+-EXPORT_SYMBOL(pfn_base);
++unsigned long riscv_pfn_base __ro_after_init;
++EXPORT_SYMBOL(riscv_pfn_base);
+ 
+ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+@@ -579,7 +579,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
+ #endif
+ 
+-	pfn_base = PFN_DOWN(kernel_map.phys_addr);
++	riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
+ 
+ 	/*
+ 	 * Enforce boot alignment requirements of RV32 and
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 88419263a89a9..840d8594437d5 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ 
+ #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
+ ({								\
+-	/* Branch instruction needs 6 bytes */			\
+-	int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
++	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
+ 	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
+ 	REG_SET_SEEN(b1);					\
+ 	REG_SET_SEEN(b2);					\
+@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4(0xb9080000, dst_reg, src_reg);
+ 		break;
+ 	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
+-		if (!imm)
+-			break;
+-		/* alfi %dst,imm */
+-		EMIT6_IMM(0xc20b0000, dst_reg, imm);
++		if (imm != 0) {
++			/* alfi %dst,imm */
++			EMIT6_IMM(0xc20b0000, dst_reg, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
+@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4(0xb9090000, dst_reg, src_reg);
+ 		break;
+ 	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
+-		if (!imm)
+-			break;
+-		/* alfi %dst,-imm */
+-		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++		if (imm != 0) {
++			/* alfi %dst,-imm */
++			EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+ 		if (!imm)
+ 			break;
+-		/* agfi %dst,-imm */
+-		EMIT6_IMM(0xc2080000, dst_reg, -imm);
++		if (imm == -0x80000000) {
++			/* algfi %dst,0x80000000 */
++			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
++		} else {
++			/* agfi %dst,-imm */
++			EMIT6_IMM(0xc2080000, dst_reg, -imm);
++		}
+ 		break;
+ 	/*
+ 	 * BPF_MUL
+@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4(0xb90c0000, dst_reg, src_reg);
+ 		break;
+ 	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
+-		if (imm == 1)
+-			break;
+-		/* msfi %r5,imm */
+-		EMIT6_IMM(0xc2010000, dst_reg, imm);
++		if (imm != 1) {
++			/* msfi %r5,imm */
++			EMIT6_IMM(0xc2010000, dst_reg, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
+@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 			if (BPF_OP(insn->code) == BPF_MOD)
+ 				/* lhgi %dst,0 */
+ 				EMIT4_IMM(0xa7090000, dst_reg, 0);
++			else
++				EMIT_ZERO(dst_reg);
+ 			break;
+ 		}
+ 		/* lhi %w0,0 */
+@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4(0xb9820000, dst_reg, src_reg);
+ 		break;
+ 	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
+-		if (!imm)
+-			break;
+-		/* xilf %dst,imm */
+-		EMIT6_IMM(0xc0070000, dst_reg, imm);
++		if (imm != 0) {
++			/* xilf %dst,imm */
++			EMIT6_IMM(0xc0070000, dst_reg, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
+@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
+ 		break;
+ 	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
+-		if (imm == 0)
+-			break;
+-		/* sll %dst,imm(%r0) */
+-		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++		if (imm != 0) {
++			/* sll %dst,imm(%r0) */
++			EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
+@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
+ 		break;
+ 	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
+-		if (imm == 0)
+-			break;
+-		/* srl %dst,imm(%r0) */
+-		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++		if (imm != 0) {
++			/* srl %dst,imm(%r0) */
++			EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
+@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
+ 		break;
+ 	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
+-		if (imm == 0)
+-			break;
+-		/* sra %dst,imm(%r0) */
+-		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++		if (imm != 0) {
++			/* sra %dst,imm(%r0) */
++			EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++		}
+ 		EMIT_ZERO(dst_reg);
+ 		break;
+ 	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index ae683aa623ace..c5b35ea129cfa 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ 
+ 	mmap_read_lock(current->mm);
+ 	ret = -EINVAL;
+-	vma = find_vma(current->mm, mmio_addr);
++	vma = vma_lookup(current->mm, mmio_addr);
+ 	if (!vma)
+ 		goto out_unlock_mmap;
+ 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ 
+ 	mmap_read_lock(current->mm);
+ 	ret = -EINVAL;
+-	vma = find_vma(current->mm, mmio_addr);
++	vma = vma_lookup(current->mm, mmio_addr);
+ 	if (!vma)
+ 		goto out_unlock_mmap;
+ 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index c9fa7be3df82d..5c95d242f38d7 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -301,8 +301,8 @@ do {									\
+ 	unsigned int __gu_low, __gu_high;				\
+ 	const unsigned int __user *__gu_ptr;				\
+ 	__gu_ptr = (const void __user *)(ptr);				\
+-	__get_user_asm(__gu_low, ptr, "l", "=r", label);		\
+-	__get_user_asm(__gu_high, ptr+1, "l", "=r", label);		\
++	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
++	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
+ 	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;		\
+ } while (0)
+ #else
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 8cb7816d03b4c..193204aee8801 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
+ 
+ static void kill_me_now(struct callback_head *ch)
+ {
++	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
++
++	p->mce_count = 0;
+ 	force_sig(SIGBUS);
+ }
+ 
+@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
+ 	int flags = MF_ACTION_REQUIRED;
+ 	int ret;
+ 
++	p->mce_count = 0;
+ 	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
+ 
+ 	if (!p->mce_ripv)
+@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
+ 	}
+ }
+ 
+-static void queue_task_work(struct mce *m, int kill_current_task)
++static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
+ {
+-	current->mce_addr = m->addr;
+-	current->mce_kflags = m->kflags;
+-	current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+-	current->mce_whole_page = whole_page(m);
++	int count = ++current->mce_count;
+ 
+-	if (kill_current_task)
+-		current->mce_kill_me.func = kill_me_now;
+-	else
+-		current->mce_kill_me.func = kill_me_maybe;
++	/* First call, save all the details */
++	if (count == 1) {
++		current->mce_addr = m->addr;
++		current->mce_kflags = m->kflags;
++		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
++		current->mce_whole_page = whole_page(m);
++
++		if (kill_current_task)
++			current->mce_kill_me.func = kill_me_now;
++		else
++			current->mce_kill_me.func = kill_me_maybe;
++	}
++
++	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
++	if (count > 10)
++		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
++
++	/* Second or later call, make sure page address matches the one from first call */
++	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
++		mce_panic("Consecutive machine checks to different user pages", m, msg);
++
++	/* Do not call task_work_add() more than once */
++	if (count > 1)
++		return;
+ 
+ 	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
+ }
+@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ 		/* If this triggers there is no way to recover. Die hard. */
+ 		BUG_ON(!on_thread_stack() || !user_mode(regs));
+ 
+-		queue_task_work(&m, kill_current_task);
++		queue_task_work(&m, msg, kill_current_task);
+ 
+ 	} else {
+ 		/*
+@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ 		}
+ 
+ 		if (m.kflags & MCE_IN_KERNEL_COPYIN)
+-			queue_task_work(&m, kill_current_task);
++			queue_task_work(&m, msg, kill_current_task);
+ 	}
+ out:
+ 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index ddeaba947eb3d..879886c6cc537 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1433,18 +1433,18 @@ int kern_addr_valid(unsigned long addr)
+ 		return 0;
+ 
+ 	p4d = p4d_offset(pgd, addr);
+-	if (p4d_none(*p4d))
++	if (!p4d_present(*p4d))
+ 		return 0;
+ 
+ 	pud = pud_offset(p4d, addr);
+-	if (pud_none(*pud))
++	if (!pud_present(*pud))
+ 		return 0;
+ 
+ 	if (pud_large(*pud))
+ 		return pfn_valid(pud_pfn(*pud));
+ 
+ 	pmd = pmd_offset(pud, addr);
+-	if (pmd_none(*pmd))
++	if (!pmd_present(*pmd))
+ 		return 0;
+ 
+ 	if (pmd_large(*pmd))
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 3112ca7786ed1..4ba2a3ee4bce1 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
+ 	int err = 0;
+ 
+ 	start = sanitize_phys(start);
+-	end = sanitize_phys(end);
++
++	/*
++	 * The end address passed into this function is exclusive, but
++	 * sanitize_phys() expects an inclusive address.
++	 */
++	end = sanitize_phys(end - 1) + 1;
+ 	if (start >= end) {
+ 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+ 				start, end - 1, cattr_name(req_type));
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 03149422dce2b..475d9c71b1713 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1215,6 +1215,11 @@ static void __init xen_dom0_set_legacy_features(void)
+ 	x86_platform.legacy.rtc = 1;
+ }
+ 
++static void __init xen_domu_set_legacy_features(void)
++{
++	x86_platform.legacy.rtc = 0;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1367,6 +1372,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ 		add_preferred_console("xenboot", 0, NULL);
+ 		if (pci_xen)
+ 			x86_init.pci.arch_init = pci_xen_init;
++		x86_platform.set_legacy_features =
++				xen_domu_set_legacy_features;
+ 	} else {
+ 		const struct dom0_vga_console_info *info =
+ 			(void *)((char *)xen_start_info +
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index ade789e73ee42..167c4958cdf40 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+ 	if (pinned) {
+ 		struct page *page = pfn_to_page(pfn);
+ 
+-		if (static_branch_likely(&xen_struct_pages_ready))
++		pinned = false;
++		if (static_branch_likely(&xen_struct_pages_ready)) {
++			pinned = PagePinned(page);
+ 			SetPagePinned(page);
++		}
+ 
+ 		xen_mc_batch();
+ 
+ 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+ 
+-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
++		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
+ 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+ 
+ 		xen_mc_issue(PARAVIRT_LAZY_MMU);
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 9360c65169ff4..3a1038b6eeb30 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ 	 * are likely to increase the throughput.
+ 	 */
+ 	bfqq->new_bfqq = new_bfqq;
++	/*
++	 * The above assignment schedules the following redirections:
++	 * each time some I/O for bfqq arrives, the process that
++	 * generated that I/O is disassociated from bfqq and
++	 * associated with new_bfqq. Here we increase new_bfqq->ref
++	 * in advance, adding the number of processes that are
++	 * expected to be associated with new_bfqq as they happen to
++	 * issue I/O.
++	 */
+ 	new_bfqq->ref += process_refs;
+ 	return new_bfqq;
+ }
+@@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ 	struct bfq_queue *in_service_bfqq, *new_bfqq;
+ 
++	/* if a merge has already been set up, then proceed with that first */
++	if (bfqq->new_bfqq)
++		return bfqq->new_bfqq;
++
+ 	/*
+ 	 * Check delayed stable merge for rotational or non-queueing
+ 	 * devs. For this branch to be executed, bfqq must not be
+@@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ 	if (bfq_too_late_for_merging(bfqq))
+ 		return NULL;
+ 
+-	if (bfqq->new_bfqq)
+-		return bfqq->new_bfqq;
+-
+ 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ 		return NULL;
+ 
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 31fe9be179d99..26446f97deee4 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1201,10 +1201,6 @@ int blkcg_init_queue(struct request_queue *q)
+ 	if (preloaded)
+ 		radix_tree_preload_end();
+ 
+-	ret = blk_iolatency_init(q);
+-	if (ret)
+-		goto err_destroy_all;
+-
+ 	ret = blk_ioprio_init(q);
+ 	if (ret)
+ 		goto err_destroy_all;
+@@ -1213,6 +1209,12 @@ int blkcg_init_queue(struct request_queue *q)
+ 	if (ret)
+ 		goto err_destroy_all;
+ 
++	ret = blk_iolatency_init(q);
++	if (ret) {
++		blk_throtl_exit(q);
++		goto err_destroy_all;
++	}
++
+ 	return 0;
+ 
+ err_destroy_all:
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index a97f33d0c59f9..94665037f4a35 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -13,6 +13,7 @@
+ #include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <linux/suspend.h>
++#include <linux/init.h>
+ 
+ #include <linux/mc146818rtc.h>
+ 
+@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ 	const char *file = *(const char **)(tracedata + 2);
+ 	unsigned int user_hash_value, file_hash_value;
+ 
++	if (!x86_platform.legacy.rtc)
++		return;
++
+ 	user_hash_value = user % USERHASH;
+ 	file_hash_value = hash_string(lineno, file, FILEHASH);
+ 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
+ 
+ static int __init early_resume_init(void)
+ {
++	if (!x86_platform.legacy.rtc)
++		return 0;
++
+ 	hash_value_early_read = read_magic_time();
+ 	register_pm_notifier(&pm_trace_nb);
+ 	return 0;
+@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
+ 	unsigned int val = hash_value_early_read;
+ 	unsigned int user, file, dev;
+ 
++	if (!x86_platform.legacy.rtc)
++		return 0;
++
+ 	user = val % USERHASH;
+ 	val = val / USERHASH;
+ 	file = val % FILEHASH;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index f0cdff0c5fbf4..1f91bd41a29b2 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -2113,18 +2113,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
+ 	return 0;
+ }
+ 
+-static int unregister_transfer_cb(int id, void *ptr, void *data)
+-{
+-	struct loop_device *lo = ptr;
+-	struct loop_func_table *xfer = data;
+-
+-	mutex_lock(&lo->lo_mutex);
+-	if (lo->lo_encryption == xfer)
+-		loop_release_xfer(lo);
+-	mutex_unlock(&lo->lo_mutex);
+-	return 0;
+-}
+-
+ int loop_unregister_transfer(int number)
+ {
+ 	unsigned int n = number;
+@@ -2132,9 +2120,20 @@ int loop_unregister_transfer(int number)
+ 
+ 	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
+ 		return -EINVAL;
++	/*
++	 * This function is called only from cleanup_cryptoloop().
++	 * Given that each loop device that has a transfer enabled holds a
++	 * reference to the module implementing it, we should never get here
++	 * with a transfer that is set (unless forced module unloading is
++	 * requested). Thus, check the module's refcount and warn if this is
++	 * not a clean unloading.
++	 */
++#ifdef CONFIG_MODULE_UNLOAD
++	if (xfer->owner && module_refcount(xfer->owner) != -1)
++		pr_err("Danger! Unregistering an in use transfer function.\n");
++#endif
+ 
+ 	xfer_funcs[n] = NULL;
+-	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
+ 	return 0;
+ }
+ 
+@@ -2325,8 +2324,9 @@ static int loop_add(int i)
+ 	} else {
+ 		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
+ 	}
++	mutex_unlock(&loop_ctl_mutex);
+ 	if (err < 0)
+-		goto out_unlock;
++		goto out_free_dev;
+ 	i = err;
+ 
+ 	err = -ENOMEM;
+@@ -2392,15 +2392,19 @@ static int loop_add(int i)
+ 	disk->private_data	= lo;
+ 	disk->queue		= lo->lo_queue;
+ 	sprintf(disk->disk_name, "loop%d", i);
++	/* Make this loop device reachable from pathname. */
+ 	add_disk(disk);
++	/* Show this loop device. */
++	mutex_lock(&loop_ctl_mutex);
++	lo->idr_visible = true;
+ 	mutex_unlock(&loop_ctl_mutex);
+ 	return i;
+ 
+ out_cleanup_tags:
+ 	blk_mq_free_tag_set(&lo->tag_set);
+ out_free_idr:
++	mutex_lock(&loop_ctl_mutex);
+ 	idr_remove(&loop_index_idr, i);
+-out_unlock:
+ 	mutex_unlock(&loop_ctl_mutex);
+ out_free_dev:
+ 	kfree(lo);
+@@ -2410,9 +2414,14 @@ out:
+ 
+ static void loop_remove(struct loop_device *lo)
+ {
++	/* Make this loop device unreachable from pathname. */
+ 	del_gendisk(lo->lo_disk);
+ 	blk_cleanup_disk(lo->lo_disk);
+ 	blk_mq_free_tag_set(&lo->tag_set);
++	mutex_lock(&loop_ctl_mutex);
++	idr_remove(&loop_index_idr, lo->lo_number);
++	mutex_unlock(&loop_ctl_mutex);
++	/* There is no route which can find this loop device. */
+ 	mutex_destroy(&lo->lo_mutex);
+ 	kfree(lo);
+ }
+@@ -2436,31 +2445,40 @@ static int loop_control_remove(int idx)
+ 		return -EINVAL;
+ 	}
+ 		
++	/* Hide this loop device for serialization. */
+ 	ret = mutex_lock_killable(&loop_ctl_mutex);
+ 	if (ret)
+ 		return ret;
+-
+ 	lo = idr_find(&loop_index_idr, idx);
+-	if (!lo) {
++	if (!lo || !lo->idr_visible)
+ 		ret = -ENODEV;
+-		goto out_unlock_ctrl;
+-	}
++	else
++		lo->idr_visible = false;
++	mutex_unlock(&loop_ctl_mutex);
++	if (ret)
++		return ret;
+ 
++	/* Check whether this loop device can be removed. */
+ 	ret = mutex_lock_killable(&lo->lo_mutex);
+ 	if (ret)
+-		goto out_unlock_ctrl;
++		goto mark_visible;
+ 	if (lo->lo_state != Lo_unbound ||
+ 	    atomic_read(&lo->lo_refcnt) > 0) {
+ 		mutex_unlock(&lo->lo_mutex);
+ 		ret = -EBUSY;
+-		goto out_unlock_ctrl;
++		goto mark_visible;
+ 	}
++	/* Mark this loop device no longer open()-able. */
+ 	lo->lo_state = Lo_deleting;
+ 	mutex_unlock(&lo->lo_mutex);
+ 
+-	idr_remove(&loop_index_idr, lo->lo_number);
+ 	loop_remove(lo);
+-out_unlock_ctrl:
++	return 0;
++
++mark_visible:
++	/* Show this loop device again. */
++	mutex_lock(&loop_ctl_mutex);
++	lo->idr_visible = true;
+ 	mutex_unlock(&loop_ctl_mutex);
+ 	return ret;
+ }
+@@ -2474,7 +2492,8 @@ static int loop_control_get_free(int idx)
+ 	if (ret)
+ 		return ret;
+ 	idr_for_each_entry(&loop_index_idr, lo, id) {
+-		if (lo->lo_state == Lo_unbound)
++		/* Hitting a race results in creating a new loop device which is harmless. */
++		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
+ 			goto found;
+ 	}
+ 	mutex_unlock(&loop_ctl_mutex);
+@@ -2590,10 +2609,14 @@ static void __exit loop_exit(void)
+ 	unregister_blkdev(LOOP_MAJOR, "loop");
+ 	misc_deregister(&loop_misc);
+ 
+-	mutex_lock(&loop_ctl_mutex);
++	/*
++	 * There is no need to use loop_ctl_mutex here, for nobody else can
++	 * access loop_index_idr when this module is unloading (unless forced
++	 * module unloading is requested). If this is not a clean unloading,
++	 * we have no means to avoid kernel crash.
++	 */
+ 	idr_for_each_entry(&loop_index_idr, lo, id)
+ 		loop_remove(lo);
+-	mutex_unlock(&loop_ctl_mutex);
+ 
+ 	idr_destroy(&loop_index_idr);
+ }
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index 1988899db63ac..04c88dd6eabd6 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -68,6 +68,7 @@ struct loop_device {
+ 	struct blk_mq_tag_set	tag_set;
+ 	struct gendisk		*lo_disk;
+ 	struct mutex		lo_mutex;
++	bool			idr_visible;
+ };
+ 
+ struct loop_cmd {
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 50b321a1ab1b6..d574e8cb6d7cd 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -332,7 +332,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ 				 mpc8xxx_gc->regs + GPIO_DIR, NULL,
+ 				 BGPIOF_BIG_ENDIAN);
+ 		if (ret)
+-			goto err;
++			return ret;
+ 		dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
+ 	} else {
+ 		ret = bgpio_init(gc, &pdev->dev, 4,
+@@ -342,7 +342,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ 				 BGPIOF_BIG_ENDIAN
+ 				 | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ 		if (ret)
+-			goto err;
++			return ret;
+ 		dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
+ 	}
+ 
+@@ -380,11 +380,11 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ 	    is_acpi_node(fwnode))
+ 		gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ 
+-	ret = gpiochip_add_data(gc, mpc8xxx_gc);
++	ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev,
+ 			"GPIO chip registration failed with status %d\n", ret);
+-		goto err;
++		return ret;
+ 	}
+ 
+ 	mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
+@@ -416,7 +416,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ err:
+-	iounmap(mpc8xxx_gc->regs);
++	irq_domain_remove(mpc8xxx_gc->irq);
+ 	return ret;
+ }
+ 
+@@ -429,9 +429,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
+ 		irq_domain_remove(mpc8xxx_gc->irq);
+ 	}
+ 
+-	gpiochip_remove(&mpc8xxx_gc->gc);
+-	iounmap(mpc8xxx_gc->regs);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 8ac6eb9f1fdb8..177a663a6a691 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -757,7 +757,7 @@ enum amd_hw_ip_block_type {
+ 	MAX_HWIP
+ };
+ 
+-#define HWIP_MAX_INSTANCE	8
++#define HWIP_MAX_INSTANCE	10
+ 
+ struct amd_powerplay {
+ 	void *pp_handle;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index f9c01bdc3d4c7..ec472c244835c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -191,6 +191,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+ 		kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ }
+ 
++int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
++{
++	int r = 0;
++
++	if (adev->kfd.dev)
++		r = kgd2kfd_resume_iommu(adev->kfd.dev);
++
++	return r;
++}
++
+ int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+ {
+ 	int r = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index cf62f43a03da1..293dd0d595c7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
+ void amdgpu_amdkfd_fini(void);
+ 
+ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
++int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
+ int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
+ 			const void *ih_ring_entry);
+@@ -325,6 +326,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ 			 const struct kgd2kfd_shared_resources *gpu_resources);
+ void kgd2kfd_device_exit(struct kfd_dev *kfd);
+ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
++int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
+ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+ int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+ int kgd2kfd_post_reset(struct kfd_dev *kfd);
+@@ -363,6 +365,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+ {
+ }
+ 
++static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
++{
++	return 0;
++}
++
+ static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ {
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 536005bff24ad..83db7d8fa1508 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
+ 	struct dentry *ent;
+ 	int r, i;
+ 
+-
+-
+ 	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
+ 				  &fops_ib_preempt);
+-	if (!ent) {
++	if (IS_ERR(ent)) {
+ 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
+-		return -EIO;
++		return PTR_ERR(ent);
+ 	}
+ 
+ 	ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
+ 				  &fops_sclk_set);
+-	if (!ent) {
++	if (IS_ERR(ent)) {
+ 		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+-		return -EIO;
++		return PTR_ERR(ent);
+ 	}
+ 
+ 	/* Register debugfs entries for amdgpu_ttm */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f944ed858f3e7..7b42636fc7dc6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2342,6 +2342,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ 	if (r)
+ 		goto init_failed;
+ 
++	r = amdgpu_amdkfd_resume_iommu(adev);
++	if (r)
++		goto init_failed;
++
+ 	r = amdgpu_device_ip_hw_init_phase1(adev);
+ 	if (r)
+ 		goto init_failed;
+@@ -3096,6 +3100,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
+ {
+ 	int r;
+ 
++	r = amdgpu_amdkfd_resume_iommu(adev);
++	if (r)
++		return r;
++
+ 	r = amdgpu_device_ip_resume_phase1(adev);
+ 	if (r)
+ 		return r;
+@@ -4534,6 +4542,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ 				dev_warn(tmp_adev->dev, "asic atom init failed!");
+ 			} else {
+ 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
++				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
++				if (r)
++					goto out;
++
+ 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ 				if (r)
+ 					goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 7b634a1517f9c..0554576d36955 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ 	ent = debugfs_create_file(name,
+ 				  S_IFREG | S_IRUGO, root,
+ 				  ring, &amdgpu_debugfs_ring_fops);
+-	if (!ent)
+-		return -ENOMEM;
++	if (IS_ERR(ent))
++		return PTR_ERR(ent);
+ 
+ 	i_size_write(ent->d_inode, ring->ring_size + 12);
+ 	ring->ent = ent;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 3a55f08e00e1d..2335b596d892f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -513,6 +513,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 		goto out;
+ 	}
+ 
++	if (bo->type == ttm_bo_type_device &&
++	    new_mem->mem_type == TTM_PL_VRAM &&
++	    old_mem->mem_type != TTM_PL_VRAM) {
++		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
++		 * accesses the BO after it's moved.
++		 */
++		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++	}
++
+ 	if (adev->mman.buffer_funcs_enabled) {
+ 		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
+ 		      new_mem->mem_type == TTM_PL_VRAM) ||
+@@ -543,15 +552,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ 			return r;
+ 	}
+ 
+-	if (bo->type == ttm_bo_type_device &&
+-	    new_mem->mem_type == TTM_PL_VRAM &&
+-	    old_mem->mem_type != TTM_PL_VRAM) {
+-		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+-		 * accesses the BO after it's moved.
+-		 */
+-		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+-	}
+-
+ out:
+ 	/* update statistics */
+ 	atomic64_add(bo->base.size, &adev->num_bytes_moved);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 6b57dfd2cd2ac..9e52948d49920 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -1008,17 +1008,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ 	return ret;
+ }
+ 
+-static int kfd_resume(struct kfd_dev *kfd)
++int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+ {
+ 	int err = 0;
+ 
+ 	err = kfd_iommu_resume(kfd);
+-	if (err) {
++	if (err)
+ 		dev_err(kfd_device,
+ 			"Failed to resume IOMMU for device %x:%x\n",
+ 			kfd->pdev->vendor, kfd->pdev->device);
+-		return err;
+-	}
++	return err;
++}
++
++static int kfd_resume(struct kfd_dev *kfd)
++{
++	int err = 0;
+ 
+ 	err = kfd->dqm->ops.start(kfd->dqm);
+ 	if (err) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3f913e4abd49e..6a4c6c47dcfaf 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 	uint32_t agp_base, agp_bot, agp_top;
+ 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+ 
++	memset(pa_config, 0, sizeof(*pa_config));
++
+ 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ 
+@@ -6778,14 +6780,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+-					    struct dc_state *dc_state)
++					    struct dc_state *dc_state,
++					    struct dsc_mst_fairness_vars *vars)
+ {
+ 	struct dc_stream_state *stream = NULL;
+ 	struct drm_connector *connector;
+ 	struct drm_connector_state *new_con_state;
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct dm_connector_state *dm_conn_state;
+-	int i, j, clock, bpp;
++	int i, j, clock;
+ 	int vcpi, pbn_div, pbn = 0;
+ 
+ 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
+@@ -6824,9 +6827,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ 		}
+ 
+ 		pbn_div = dm_mst_get_pbn_divider(stream->link);
+-		bpp = stream->timing.dsc_cfg.bits_per_pixel;
+ 		clock = stream->timing.pix_clk_100hz / 10;
+-		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
++		/* pbn is calculated by compute_mst_dsc_configs_for_state */
++		for (j = 0; j < dc_state->stream_count; j++) {
++			if (vars[j].aconnector == aconnector) {
++				pbn = vars[j].pbn;
++				break;
++			}
++		}
++
+ 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
+ 						    aconnector->port,
+ 						    pbn, pbn_div,
+@@ -10208,6 +10217,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	int ret, i;
+ 	bool lock_and_validation_needed = false;
+ 	struct dm_crtc_state *dm_old_crtc_state;
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++	struct dsc_mst_fairness_vars vars[MAX_PIPES];
++#endif
+ 
+ 	trace_amdgpu_dm_atomic_check_begin(state);
+ 
+@@ -10438,10 +10450,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 			goto fail;
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+-		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
++		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+ 			goto fail;
+ 
+-		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
++		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+ 		if (ret)
+ 			goto fail;
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 5568d4e518e6b..a2e5ab0bd1a03 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -495,12 +495,7 @@ struct dsc_mst_fairness_params {
+ 	uint32_t num_slices_h;
+ 	uint32_t num_slices_v;
+ 	uint32_t bpp_overwrite;
+-};
+-
+-struct dsc_mst_fairness_vars {
+-	int pbn;
+-	bool dsc_enabled;
+-	int bpp_x16;
++	struct amdgpu_dm_connector *aconnector;
+ };
+ 
+ static int kbps_to_peak_pbn(int kbps)
+@@ -727,12 +722,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
+ 
+ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 					     struct dc_state *dc_state,
+-					     struct dc_link *dc_link)
++					     struct dc_link *dc_link,
++					     struct dsc_mst_fairness_vars *vars)
+ {
+ 	int i;
+ 	struct dc_stream_state *stream;
+ 	struct dsc_mst_fairness_params params[MAX_PIPES];
+-	struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ 	struct amdgpu_dm_connector *aconnector;
+ 	int count = 0;
+ 	bool debugfs_overwrite = false;
+@@ -753,6 +748,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 		params[count].timing = &stream->timing;
+ 		params[count].sink = stream->sink;
+ 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
++		params[count].aconnector = aconnector;
+ 		params[count].port = aconnector->port;
+ 		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
+ 		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
+@@ -775,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	}
+ 	/* Try no compression */
+ 	for (i = 0; i < count; i++) {
++		vars[i].aconnector = params[i].aconnector;
+ 		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+ 		vars[i].dsc_enabled = false;
+ 		vars[i].bpp_x16 = 0;
+@@ -828,7 +825,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ }
+ 
+ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+-				       struct dc_state *dc_state)
++				       struct dc_state *dc_state,
++				       struct dsc_mst_fairness_vars *vars)
+ {
+ 	int i, j;
+ 	struct dc_stream_state *stream;
+@@ -859,7 +857,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ 			return false;
+ 
+ 		mutex_lock(&aconnector->mst_mgr.lock);
+-		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
++		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
+ 			mutex_unlock(&aconnector->mst_mgr.lock);
+ 			return false;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+index b38bd68121ceb..900d3f7a84989 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+@@ -39,8 +39,17 @@ void
+ dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
++
++struct dsc_mst_fairness_vars {
++	int pbn;
++	bool dsc_enabled;
++	int bpp_x16;
++	struct amdgpu_dm_connector *aconnector;
++};
++
+ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+-				       struct dc_state *dc_state);
++				       struct dc_state *dc_state,
++				       struct dsc_mst_fairness_vars *vars);
+ #endif
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 6132b645bfd19..29c861b54b440 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2578,13 +2578,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+ 
+ int dc_link_get_backlight_level(const struct dc_link *link)
+ {
+-
+ 	struct abm *abm = get_abm_from_stream_res(link);
++	struct panel_cntl *panel_cntl = link->panel_cntl;
++	struct dc  *dc = link->ctx->dc;
++	struct dmcu *dmcu = dc->res_pool->dmcu;
++	bool fw_set_brightness = true;
+ 
+-	if (abm == NULL || abm->funcs->get_current_backlight == NULL)
+-		return DC_ERROR_UNEXPECTED;
++	if (dmcu)
++		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+ 
+-	return (int) abm->funcs->get_current_backlight(abm);
++	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
++		return panel_cntl->funcs->get_current_backlight(panel_cntl);
++	else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
++		return (int) abm->funcs->get_current_backlight(abm);
++	else
++		return DC_ERROR_UNEXPECTED;
+ }
+ 
+ int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+index e923392358631..e8570060d007b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+@@ -49,7 +49,6 @@
+ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
+ {
+ 	uint64_t current_backlight;
+-	uint32_t round_result;
+ 	uint32_t bl_period, bl_int_count;
+ 	uint32_t bl_pwm, fractional_duty_cycle_en;
+ 	uint32_t bl_period_mask, bl_pwm_mask;
+@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
+ 	current_backlight = div_u64(current_backlight, bl_period);
+ 	current_backlight = (current_backlight + 1) >> 1;
+ 
+-	current_backlight = (uint64_t)(current_backlight) * bl_period;
+-
+-	round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+-
+-	round_result = (round_result >> (bl_int_count-1)) & 1;
+-
+-	current_backlight >>= bl_int_count;
+-	current_backlight += round_result;
+-
+ 	return (uint32_t)(current_backlight);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index ebe6721428085..42e72a16a1128 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1381,7 +1381,7 @@ static int smu_disable_dpms(struct smu_context *smu)
+ 	 */
+ 	if (smu->uploading_custom_pp_table &&
+ 	    (adev->asic_type >= CHIP_NAVI10) &&
+-	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
++	    (adev->asic_type <= CHIP_BEIGE_GOBY))
+ 		return smu_disable_all_features_with_exception(smu,
+ 							       true,
+ 							       SMU_FEATURE_COUNT);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+index 1ba42b69ce742..23ada41351ad0 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+@@ -2269,7 +2269,27 @@ static int navi10_baco_enter(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+ 
+-	if (adev->in_runpm)
++	/*
++	 * This targets the case below:
++	 *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
++	 *
++	 * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To
++	 * make that possible, PMFW needs to acknowledge the dstate transition
++	 * for both the gfx (function 0) and audio (function 1) functions of
++	 * the ASIC.
++	 *
++	 * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
++	 * that of the device representing the audio function. That means that
++	 * even if the sound driver (snd_hda_intel) has not been loaded yet, a
++	 * runpm suspend may already have been kicked off on the ASIC. Without
++	 * the dstate transition notification from the audio function, PMFW
++	 * cannot handle BACO entry/exit correctly, and the driver then hangs
++	 * on runpm resume.
++	 *
++	 * To address this, fall back to the legacy message path (the driver
++	 * masters the timing of BACO entry/exit) when the sound driver is missing.
++	 */
++	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ 		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ 	else
+ 		return smu_v11_0_baco_enter(smu);
+@@ -2279,7 +2299,7 @@ static int navi10_baco_exit(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+ 
+-	if (adev->in_runpm) {
++	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ 		/* Wait for PMFW handling for the Dstate change */
+ 		msleep(10);
+ 		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index d92dd2c7448e3..9b170bd12c1b6 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2133,7 +2133,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+ 
+-	if (adev->in_runpm)
++	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ 		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ 	else
+ 		return smu_v11_0_baco_enter(smu);
+@@ -2143,7 +2143,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
+ {
+ 	struct amdgpu_device *adev = smu->adev;
+ 
+-	if (adev->in_runpm) {
++	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ 		/* Wait for PMFW handling for the Dstate change */
+ 		msleep(10);
+ 		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+index 415be74df28c7..54881cce1b06c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
+ 
+ 	return ret;
+ }
++
++bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
++{
++	struct pci_dev *p = NULL;
++	bool snd_driver_loaded;
++
++	/*
++	 * If the ASIC comes with no audio function, we always assume
++	 * it is "enabled".
++	 */
++	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
++			adev->pdev->bus->number, 1);
++	if (!p)
++		return true;
++
++	snd_driver_loaded = pci_is_enabled(p) ? true : false;
++
++	pci_dev_put(p);
++
++	return snd_driver_loaded;
++}
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+index 16993daa2ae04..b1d41360a3897 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+@@ -110,5 +110,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
+ int smu_cmn_set_mp1_state(struct smu_context *smu,
+ 			  enum pp_mp1_state mp1_state);
+ 
++bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
++
+ #endif
+ #endif
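
The smu_cmn_is_audio_func_enabled() helper above keys the BACO decision off whether PCI function 1 of the GPU (the HDMI/DP audio device) has been enabled by its driver. A minimal sketch of that probe pattern follows; sibling_func_enabled() and its slightly generalized devfn handling are illustrative, not part of this patch.

    #include <linux/pci.h>

    /* Return true when the given sibling function of @gfx is enabled,
     * i.e. claimed and activated by its driver. A missing function is
     * treated as "enabled", mirroring the helper above.
     */
    static bool sibling_func_enabled(struct pci_dev *gfx, unsigned int fn)
    {
            struct pci_dev *p;
            bool enabled;

            p = pci_get_domain_bus_and_slot(pci_domain_nr(gfx->bus),
                                            gfx->bus->number,
                                            PCI_DEVFN(PCI_SLOT(gfx->devfn), fn));
            if (!p)
                    return true;

            enabled = pci_is_enabled(p);
            pci_dev_put(p);         /* drop the reference taken by the lookup */

            return enabled;
    }
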
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+index 76d38561c9103..cf741c5c82d25 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ 		if (switch_mmu_context) {
+ 			struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+ 
+-			etnaviv_iommu_context_get(mmu_context);
+-			gpu->mmu_context = mmu_context;
++			gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
+ 			etnaviv_iommu_context_put(old_context);
+ 		}
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index b8fa6ed3dd738..fb7a33b88fc0b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -303,8 +303,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ 		list_del(&mapping->obj_node);
+ 	}
+ 
+-	etnaviv_iommu_context_get(mmu_context);
+-	mapping->context = mmu_context;
++	mapping->context = etnaviv_iommu_context_get(mmu_context);
+ 	mapping->use = 1;
+ 
+ 	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index 4dd7d9d541c09..486259e154aff 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 		goto err_submit_objects;
+ 
+ 	submit->ctx = file->driver_priv;
+-	etnaviv_iommu_context_get(submit->ctx->mmu);
+-	submit->mmu_context = submit->ctx->mmu;
++	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
+ 	submit->exec_state = args->exec_state;
+ 	submit->flags = args->flags;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 4102bcea33413..1fa98ce870f78 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+ 	/* We rely on the GPU running, so program the clock */
+ 	etnaviv_gpu_update_clock(gpu);
+ 
++	gpu->fe_running = false;
++	gpu->exec_state = -1;
++	if (gpu->mmu_context)
++		etnaviv_iommu_context_put(gpu->mmu_context);
++	gpu->mmu_context = NULL;
++
+ 	return 0;
+ }
+ 
+@@ -631,19 +637,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+ 			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+ 			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+ 	}
++
++	gpu->fe_running = true;
+ }
+ 
+-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
++static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
++					  struct etnaviv_iommu_context *context)
+ {
+-	u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+-				&gpu->mmu_context->cmdbuf_mapping);
+ 	u16 prefetch;
++	u32 address;
+ 
+ 	/* setup the MMU */
+-	etnaviv_iommu_restore(gpu, gpu->mmu_context);
++	etnaviv_iommu_restore(gpu, context);
+ 
+ 	/* Start command processor */
+ 	prefetch = etnaviv_buffer_init(gpu);
++	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
++					&gpu->mmu_context->cmdbuf_mapping);
+ 
+ 	etnaviv_gpu_start_fe(gpu, address, prefetch);
+ }
+@@ -826,7 +836,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+ 	/* Now program the hardware */
+ 	mutex_lock(&gpu->lock);
+ 	etnaviv_gpu_hw_init(gpu);
+-	gpu->exec_state = -1;
+ 	mutex_unlock(&gpu->lock);
+ 
+ 	pm_runtime_mark_last_busy(gpu->dev);
+@@ -1051,8 +1060,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+ 	spin_unlock(&gpu->event_spinlock);
+ 
+ 	etnaviv_gpu_hw_init(gpu);
+-	gpu->exec_state = -1;
+-	gpu->mmu_context = NULL;
+ 
+ 	mutex_unlock(&gpu->lock);
+ 	pm_runtime_mark_last_busy(gpu->dev);
+@@ -1364,14 +1371,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+ 		goto out_unlock;
+ 	}
+ 
+-	if (!gpu->mmu_context) {
+-		etnaviv_iommu_context_get(submit->mmu_context);
+-		gpu->mmu_context = submit->mmu_context;
+-		etnaviv_gpu_start_fe_idleloop(gpu);
+-	} else {
+-		etnaviv_iommu_context_get(gpu->mmu_context);
+-		submit->prev_mmu_context = gpu->mmu_context;
+-	}
++	if (!gpu->fe_running)
++		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
++
++	if (submit->prev_mmu_context)
++		etnaviv_iommu_context_put(submit->prev_mmu_context);
++	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
+ 
+ 	if (submit->nr_pmrs) {
+ 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+@@ -1573,7 +1578,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+ 
+ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ {
+-	if (gpu->initialized && gpu->mmu_context) {
++	if (gpu->initialized && gpu->fe_running) {
+ 		/* Replace the last WAIT with END */
+ 		mutex_lock(&gpu->lock);
+ 		etnaviv_buffer_end(gpu);
+@@ -1586,8 +1591,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ 		 */
+ 		etnaviv_gpu_wait_idle(gpu, 100);
+ 
+-		etnaviv_iommu_context_put(gpu->mmu_context);
+-		gpu->mmu_context = NULL;
++		gpu->fe_running = false;
+ 	}
+ 
+ 	gpu->exec_state = -1;
+@@ -1735,6 +1739,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+ 	etnaviv_gpu_hw_suspend(gpu);
+ #endif
+ 
++	if (gpu->mmu_context)
++		etnaviv_iommu_context_put(gpu->mmu_context);
++
+ 	if (gpu->initialized) {
+ 		etnaviv_cmdbuf_free(&gpu->buffer);
+ 		etnaviv_iommu_global_fini(gpu);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+index 8ea48697d1321..1c75c8ed5bcea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+@@ -101,6 +101,7 @@ struct etnaviv_gpu {
+ 	struct workqueue_struct *wq;
+ 	struct drm_gpu_scheduler sched;
+ 	bool initialized;
++	bool fe_running;
+ 
+ 	/* 'ring'-buffer: */
+ 	struct etnaviv_cmdbuf buffer;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+index 1a7c89a67bea3..afe5dd6a9925b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ 	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+ 	u32 pgtable;
+ 
++	if (gpu->mmu_context)
++		etnaviv_iommu_context_put(gpu->mmu_context);
++	gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ 	/* set base addresses */
+ 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+ 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+index f8bf488e9d717..d664ae29ae209 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+ 	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+ 		return;
+ 
++	if (gpu->mmu_context)
++		etnaviv_iommu_context_put(gpu->mmu_context);
++	gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ 	prefetch = etnaviv_buffer_config_mmuv2(gpu,
+ 				(u32)v2_context->mtlb_dma,
+ 				(u32)context->global->bad_page_dma);
+@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+ 	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+ 		return;
+ 
++	if (gpu->mmu_context)
++		etnaviv_iommu_context_put(gpu->mmu_context);
++	gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+ 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+ 		  lower_32_bits(context->global->v2.pta_dma));
+ 	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index dab1b58006d83..9fb1a2aadbcb0 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+ 		 */
+ 		list_for_each_entry_safe(m, n, &list, scan_node) {
+ 			etnaviv_iommu_remove_mapping(context, m);
++			etnaviv_iommu_context_put(m->context);
+ 			m->context = NULL;
+ 			list_del_init(&m->mmu_node);
+ 			list_del_init(&m->scan_node);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+index d1d6902fd13be..e4a0b7d09c2ea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+ struct etnaviv_iommu_context *
+ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ 			   struct etnaviv_cmdbuf_suballoc *suballoc);
+-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
++static inline struct etnaviv_iommu_context *
++etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+ {
+ 	kref_get(&ctx->refcount);
++	return ctx;
+ }
+ void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
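
The signature change above is the common kref idiom of having the "get" helper return its argument, so taking the reference and storing the pointer collapse into one statement that cannot drift apart. A minimal sketch under hypothetical names:

    #include <linux/kref.h>

    struct obj {
            struct kref refcount;
    };

    static inline struct obj *obj_get(struct obj *o)
    {
            kref_get(&o->refcount);   /* take a reference ...            */
            return o;                 /* ... and hand the pointer back   */
    }

    /* caller side: the reference and the assignment are now one step,
     * e.g.   mapping->context = obj_get(shared_ctx);
     */
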
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 862c1df69cc2a..d511e578ba79d 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2453,11 +2453,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
+ 	 */
+ 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+-			     sizeof(intel_dp->edp_dpcd))
++			     sizeof(intel_dp->edp_dpcd)) {
+ 		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ 			    (int)sizeof(intel_dp->edp_dpcd),
+ 			    intel_dp->edp_dpcd);
+ 
++		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
++	}
++
+ 	/*
+ 	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+ 	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 053a3c2f72677..508a514c5e37d 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
+ 	}
+ 
+ 	if (ret)
+-		intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
++		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+ 
+ 	if (intel_dp->set_idle_link_train)
+ 		intel_dp->set_idle_link_train(intel_dp, crtc_state);
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 0473583dcdac2..482fb0ae6cb5d 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ #endif
+ 
+ 	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
+-		rdev->agp = radeon_agp_head_init(rdev->ddev);
++		rdev->agp = radeon_agp_head_init(dev);
+ 	if (rdev->agp) {
+ 		rdev->agp->agp_mtrr = arch_phys_wc_add(
+ 			rdev->agp->agp_info.aper_base,
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 8ab3247dbc4aa..13c6b857158fc 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
+ 	return ret;
+ }
+ 
+-static int cdn_dp_resume(struct device *dev)
++static __maybe_unused int cdn_dp_resume(struct device *dev)
+ {
+ 	struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ 
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 2aee356840a2b..314015d9e912d 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+ 	mutex_unlock(&ring_info->ring_buffer_mutex);
+ 
+ 	kfree(ring_info->pkt_buffer);
++	ring_info->pkt_buffer = NULL;
+ 	ring_info->pkt_buffer_size = 0;
+ }
+ 
+diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
+index 30489670ea528..cca0aac261486 100644
+--- a/drivers/mfd/ab8500-core.c
++++ b/drivers/mfd/ab8500-core.c
+@@ -485,7 +485,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+ 		if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
+ 			line += 1;
+ 
+-		handle_nested_irq(irq_create_mapping(ab8500->domain, line));
++		handle_nested_irq(irq_find_mapping(ab8500->domain, line));
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 4145a38b38904..d0ac019850d17 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
+ 
+ static const struct regmap_range axp288_volatile_ranges[] = {
+ 	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
++	regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
+ 	regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
+ 	regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
+ 	regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
+ 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
+ 	regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
+-	regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
++	regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
+ 	regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
+ 	regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
+ };
+diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
+index 3bde7fda755f1..dea4e4e8bed54 100644
+--- a/drivers/mfd/db8500-prcmu.c
++++ b/drivers/mfd/db8500-prcmu.c
+@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
+ }
+ 
+ static const unsigned long db8500_armss_freqs[] = {
+-	200000000,
+-	400000000,
+-	800000000,
++	199680000,
++	399360000,
++	798720000,
+ 	998400000
+ };
+ 
+ /* The DB8520 has slightly higher ARMSS max frequency */
+ static const unsigned long db8520_armss_freqs[] = {
+-	200000000,
+-	400000000,
+-	800000000,
++	199680000,
++	399360000,
++	798720000,
+ 	1152000000
+ };
+ 
+-
+-
+ static long round_armss_rate(unsigned long rate)
+ {
+ 	unsigned long freq = 0;
+diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
+index 428a526cbe863..9ab9adce06fdd 100644
+--- a/drivers/mfd/lpc_sch.c
++++ b/drivers/mfd/lpc_sch.c
+@@ -22,7 +22,7 @@
+ #define SMBASE		0x40
+ #define SMBUS_IO_SIZE	64
+ 
+-#define GPIOBASE	0x44
++#define GPIO_BASE	0x44
+ #define GPIO_IO_SIZE	64
+ #define GPIO_IO_SIZE_CENTERTON	128
+ 
+@@ -145,7 +145,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (ret == 0)
+ 		cells++;
+ 
+-	ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
++	ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
+ 				    info->io_size_gpio,
+ 				    id->device, &lpc_sch_cells[cells]);
+ 	if (ret < 0)
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 1dd39483e7c14..58d09c615e673 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -1095,7 +1095,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+ 
+ 	if (variant->id_val == STMPE801_ID ||
+ 	    variant->id_val == STMPE1600_ID) {
+-		int base = irq_create_mapping(stmpe->domain, 0);
++		int base = irq_find_mapping(stmpe->domain, 0);
+ 
+ 		handle_nested_irq(base);
+ 		return IRQ_HANDLED;
+@@ -1123,7 +1123,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+ 		while (status) {
+ 			int bit = __ffs(status);
+ 			int line = bank * 8 + bit;
+-			int nestedirq = irq_create_mapping(stmpe->domain, line);
++			int nestedirq = irq_find_mapping(stmpe->domain, line);
+ 
+ 			handle_nested_irq(nestedirq);
+ 			status &= ~(1 << bit);
+diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
+index 7614f8fe0e91c..13583cdb93b6f 100644
+--- a/drivers/mfd/tc3589x.c
++++ b/drivers/mfd/tc3589x.c
+@@ -187,7 +187,7 @@ again:
+ 
+ 	while (status) {
+ 		int bit = __ffs(status);
+-		int virq = irq_create_mapping(tc3589x->domain, bit);
++		int virq = irq_find_mapping(tc3589x->domain, bit);
+ 
+ 		handle_nested_irq(virq);
+ 		status &= ~(1 << bit);
+diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
+index ddddf08b6a4cc..732013f40e4e8 100644
+--- a/drivers/mfd/tqmx86.c
++++ b/drivers/mfd/tqmx86.c
+@@ -209,6 +209,8 @@ static int tqmx86_probe(struct platform_device *pdev)
+ 
+ 		/* Assumes the IRQ resource is first. */
+ 		tqmx_gpio_resources[0].start = gpio_irq;
++	} else {
++		tqmx_gpio_resources[0].flags = 0;
+ 	}
+ 
+ 	ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
+diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
+index 6c3a619e26286..651a028bc519a 100644
+--- a/drivers/mfd/wm8994-irq.c
++++ b/drivers/mfd/wm8994-irq.c
+@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
+ 	struct wm8994 *wm8994 = data;
+ 
+ 	while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
+-		handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
++		handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
+ 
+ 	return IRQ_HANDLED;
+ }
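
The four mfd conversions above (ab8500, stmpe, tc3589x, wm8994) all apply the same rule: virq mappings are created once, when the irq domain is populated at probe time, and the handler merely translates a hardware irq number into the existing virq with a pure lookup. A rough sketch of the split, with hypothetical names:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* probe path: create every mapping up front */
    static void chip_irq_setup(struct irq_domain *domain, unsigned int nr_hwirqs)
    {
            unsigned int hwirq;

            for (hwirq = 0; hwirq < nr_hwirqs; hwirq++)
                    irq_create_mapping(domain, hwirq);
    }

    /* threaded handler: lookup only, never allocate */
    static irqreturn_t chip_irq_thread(int irq, void *data)
    {
            struct irq_domain *domain = data;

            handle_nested_irq(irq_find_mapping(domain, 0));
            return IRQ_HANDLED;
    }
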
+diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
+index 6e4d0017c0bd4..f685a581df481 100644
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
+ 	int i;
+ 	size_t size;
+ 	struct mtd_concat *concat;
++	struct mtd_info *subdev_master = NULL;
+ 	uint32_t max_erasesize, curr_erasesize;
+ 	int num_erase_region;
+ 	int max_writebufsize = 0;
+@@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
+ 	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ 	concat->mtd.oobsize = subdev[0]->oobsize;
+ 	concat->mtd.oobavail = subdev[0]->oobavail;
+-	if (subdev[0]->_writev)
++
++	subdev_master = mtd_get_master(subdev[0]);
++	if (subdev_master->_writev)
+ 		concat->mtd._writev = concat_writev;
+-	if (subdev[0]->_read_oob)
++	if (subdev_master->_read_oob)
+ 		concat->mtd._read_oob = concat_read_oob;
+-	if (subdev[0]->_write_oob)
++	if (subdev_master->_write_oob)
+ 		concat->mtd._write_oob = concat_write_oob;
+-	if (subdev[0]->_block_isbad)
++	if (subdev_master->_block_isbad)
+ 		concat->mtd._block_isbad = concat_block_isbad;
+-	if (subdev[0]->_block_markbad)
++	if (subdev_master->_block_markbad)
+ 		concat->mtd._block_markbad = concat_block_markbad;
+-	if (subdev[0]->_panic_write)
++	if (subdev_master->_panic_write)
+ 		concat->mtd._panic_write = concat_panic_write;
++	if (subdev_master->_read)
++		concat->mtd._read = concat_read;
++	if (subdev_master->_write)
++		concat->mtd._write = concat_write;
+ 
+ 	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+ 
+@@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
+ 				    subdev[i]->flags & MTD_WRITEABLE;
+ 		}
+ 
++		subdev_master = mtd_get_master(subdev[i]);
+ 		concat->mtd.size += subdev[i]->size;
+ 		concat->mtd.ecc_stats.badblocks +=
+ 			subdev[i]->ecc_stats.badblocks;
+ 		if (concat->mtd.writesize   !=  subdev[i]->writesize ||
+ 		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ 		    concat->mtd.oobsize    !=  subdev[i]->oobsize ||
+-		    !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
+-		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
++		    !concat->mtd._read_oob  != !subdev_master->_read_oob ||
++		    !concat->mtd._write_oob != !subdev_master->_write_oob) {
++			/*
++			 * Check data members against subdev[i], because a
++			 * subdev's attributes may differ from those of its
++			 * master mtd device. Check callbacks against the
++			 * subdev's master mtd device, because the master
++			 * decides which callbacks a subdev exposes.
++			 */
+ 			kfree(concat);
+ 			printk("Incompatible OOB or ECC data on \"%s\"\n",
+ 			       subdev[i]->name);
+@@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to c
+ 	concat->mtd.name = name;
+ 
+ 	concat->mtd._erase = concat_erase;
+-	concat->mtd._read = concat_read;
+-	concat->mtd._write = concat_write;
+ 	concat->mtd._sync = concat_sync;
+ 	concat->mtd._lock = concat_lock;
+ 	concat->mtd._unlock = concat_unlock;
+diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
+index d0e8ffd55c224..9dbf031716a61 100644
+--- a/drivers/mtd/nand/raw/cafe_nand.c
++++ b/drivers/mtd/nand/raw/cafe_nand.c
+@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+ 			  "CAFE NAND", mtd);
+ 	if (err) {
+ 		dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+-		goto out_ior;
++		goto out_free_rs;
+ 	}
+ 
+ 	/* Disable master reset, enable NAND clock */
+@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+ 	/* Disable NAND IRQ in global IRQ mask register */
+ 	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+ 	free_irq(pdev->irq, mtd);
++ out_free_rs:
++	free_rs(cafe->rs);
+  out_ior:
+ 	pci_iounmap(pdev, cafe->mmio);
+  out_free_mtd:
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index bd1417a66cbf2..604f541126654 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1144,7 +1144,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
+ 	u8 reg, val, off;
+ 
+ 	/* Override the port settings */
+-	if (port == dev->cpu_port) {
++	if (port == dev->imp_port) {
+ 		off = B53_PORT_OVERRIDE_CTRL;
+ 		val = PORT_OVERRIDE_EN;
+ 	} else {
+@@ -1168,7 +1168,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
+ 	u8 reg, val, off;
+ 
+ 	/* Override the port settings */
+-	if (port == dev->cpu_port) {
++	if (port == dev->imp_port) {
+ 		off = B53_PORT_OVERRIDE_CTRL;
+ 		val = PORT_OVERRIDE_EN;
+ 	} else {
+@@ -1236,7 +1236,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
+ 	b53_force_link(dev, port, phydev->link);
+ 
+ 	if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+-		if (port == 8)
++		if (port == dev->imp_port)
+ 			off = B53_RGMII_CTRL_IMP;
+ 		else
+ 			off = B53_RGMII_CTRL_P(port);
+@@ -2280,6 +2280,7 @@ struct b53_chip_data {
+ 	const char *dev_name;
+ 	u16 vlans;
+ 	u16 enabled_ports;
++	u8 imp_port;
+ 	u8 cpu_port;
+ 	u8 vta_regs[3];
+ 	u8 arl_bins;
+@@ -2304,6 +2305,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 2,
+ 		.arl_buckets = 1024,
++		.imp_port = 5,
+ 		.cpu_port = B53_CPU_PORT_25,
+ 		.duplex_reg = B53_DUPLEX_STAT_FE,
+ 	},
+@@ -2314,6 +2316,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 2,
+ 		.arl_buckets = 1024,
++		.imp_port = 5,
+ 		.cpu_port = B53_CPU_PORT_25,
+ 		.duplex_reg = B53_DUPLEX_STAT_FE,
+ 	},
+@@ -2324,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2337,6 +2341,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2350,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS_9798,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2363,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x7f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS_9798,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2377,6 +2384,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
+ 		.vta_regs = B53_VTA_REGS,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+ 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+@@ -2389,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0xff,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2402,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1ff,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2415,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0, /* pdata must provide them */
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS_63XX,
+ 		.duplex_reg = B53_DUPLEX_STAT_63XX,
+@@ -2428,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2441,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1bf,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2454,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1bf,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2467,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2480,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1f,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2493,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1ff,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2506,6 +2523,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x103,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2520,6 +2538,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1bf,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 256,
++		.imp_port = 8,
+ 		.cpu_port = 8, /* TODO: ports 4, 5, 8 */
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2533,6 +2552,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1ff,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 1024,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2546,6 +2566,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.enabled_ports = 0x1ff,
+ 		.arl_bins = 4,
+ 		.arl_buckets = 256,
++		.imp_port = 8,
+ 		.cpu_port = B53_CPU_PORT,
+ 		.vta_regs = B53_VTA_REGS,
+ 		.duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2571,6 +2592,7 @@ static int b53_switch_init(struct b53_device *dev)
+ 			dev->vta_regs[1] = chip->vta_regs[1];
+ 			dev->vta_regs[2] = chip->vta_regs[2];
+ 			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
++			dev->imp_port = chip->imp_port;
+ 			dev->cpu_port = chip->cpu_port;
+ 			dev->num_vlans = chip->vlans;
+ 			dev->num_arl_bins = chip->arl_bins;
+@@ -2612,9 +2634,10 @@ static int b53_switch_init(struct b53_device *dev)
+ 			dev->cpu_port = 5;
+ 	}
+ 
+-	/* cpu port is always last */
+-	dev->num_ports = dev->cpu_port + 1;
+ 	dev->enabled_ports |= BIT(dev->cpu_port);
++	dev->num_ports = fls(dev->enabled_ports);
++
++	dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
+ 
+ 	/* Include non standard CPU port built-in PHYs to be probed */
+ 	if (is539x(dev) || is531x5(dev)) {
+@@ -2660,7 +2683,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
+ 		return NULL;
+ 
+ 	ds->dev = base;
+-	ds->num_ports = DSA_MAX_PORTS;
+ 
+ 	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 9bf8319342b0b..5d068acf7cf81 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -123,6 +123,7 @@ struct b53_device {
+ 
+ 	/* used ports mask */
+ 	u16 enabled_ports;
++	unsigned int imp_port;
+ 	unsigned int cpu_port;
+ 
+ 	/* connect specific data */
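
A quick worked example of the new port-count derivation above, using values patterned on the chip table for illustration: take a device with enabled_ports = 0x1bf and cpu_port = 5.

    /* illustrative values: enabled_ports = 0x1bf, cpu_port = 5 */
    enabled_ports |= BIT(5);            /* 0x1bf, bit 5 already set       */
    num_ports = fls(enabled_ports);     /* fls(0x1bf) = 9 -> ports 0..8   */
    /* the old "cpu_port + 1" sizing would have yielded only 6 here,
     * truncating the enabled port 8
     */
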
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 6ce9ec1283e05..b6c4b3adb1715 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+ 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ 	unsigned int port, count = 0;
+ 
+-	for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
++	for (port = 0; port < ds->num_ports; port++) {
+ 		if (dsa_is_cpu_port(ds, port))
+ 			continue;
+ 		if (priv->port_sts[port].enabled)
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 64d6dfa831220..267324889dd64 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
+ 
+ 	reset_control_assert(gphy_fw->reset);
+ 
++	/* The vendor BSP uses a 200ms delay after asserting the reset line.
++	 * Without it, some users observe that the PHY does not come up
++	 * on the MDIO bus.
++	 */
++	msleep(200);
++
+ 	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to load firmware: %s, error: %i\n",
+diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
+index 1f63f50f73f17..bda5a9bf4f529 100644
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
+ }
+ 
+ static int
+-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
++qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
+ {
+-	struct qca8k_priv *priv = salve_bus->priv;
+-	struct mii_bus *bus = priv->bus;
+ 	u16 r1, r2, page;
+ 	u32 val;
+ 	int ret;
+@@ -682,10 +680,8 @@ exit:
+ }
+ 
+ static int
+-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
++qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
+ {
+-	struct qca8k_priv *priv = salve_bus->priv;
+-	struct mii_bus *bus = priv->bus;
+ 	u16 r1, r2, page;
+ 	u32 val;
+ 	int ret;
+@@ -726,6 +722,24 @@ exit:
+ 	return ret;
+ }
+ 
++static int
++qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
++{
++	struct qca8k_priv *priv = slave_bus->priv;
++	struct mii_bus *bus = priv->bus;
++
++	return qca8k_mdio_write(bus, phy, regnum, data);
++}
++
++static int
++qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
++{
++	struct qca8k_priv *priv = slave_bus->priv;
++	struct mii_bus *bus = priv->bus;
++
++	return qca8k_mdio_read(bus, phy, regnum);
++}
++
+ static int
+ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+ {
+@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+ 
+ 	bus->priv = (void *)priv;
+ 	bus->name = "qca8k slave mii";
+-	bus->read = qca8k_mdio_read;
+-	bus->write = qca8k_mdio_write;
++	bus->read = qca8k_internal_mdio_read;
++	bus->write = qca8k_internal_mdio_write;
+ 	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
+ 		 ds->index);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+index 27943b0446c28..a207c36246b6a 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ 
+ 	/* SR-IOV capability was enabled but there are no VFs*/
+ 	if (iov->total == 0) {
+-		err = -EINVAL;
++		err = 0;
+ 		goto failed;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 8a97640cdfe76..fdbf47446a997 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2172,25 +2172,33 @@ static int bnxt_async_event_process(struct bnxt *bp,
+ 		if (!fw_health)
+ 			goto async_event_process_exit;
+ 
+-		fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+-		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+-		if (!fw_health->enabled) {
++		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
++			fw_health->enabled = false;
+ 			netif_info(bp, drv, bp->dev,
+ 				   "Error recovery info: error recovery[0]\n");
+ 			break;
+ 		}
++		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ 		fw_health->tmr_multiplier =
+ 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ 				     bp->current_interval * 10);
+ 		fw_health->tmr_counter = fw_health->tmr_multiplier;
+-		fw_health->last_fw_heartbeat =
+-			bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
++		if (!fw_health->enabled)
++			fw_health->last_fw_heartbeat =
++				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ 		fw_health->last_fw_reset_cnt =
+ 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ 		netif_info(bp, drv, bp->dev,
+ 			   "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
+ 			   fw_health->master, fw_health->last_fw_reset_cnt,
+ 			   bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
++		if (!fw_health->enabled) {
++			/* Make sure tmr_counter is set and visible to
++			/* Make sure tmr_counter is set and visible to
++			 * bnxt_fw_health_check() before setting enabled to true.
++			 */
++			smp_wmb();
++			fw_health->enabled = true;
++		}
+ 		goto async_event_process_exit;
+ 	}
+ 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+@@ -2680,6 +2688,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
+ 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ 		int j;
+ 
++		if (!txr->tx_buf_ring)
++			continue;
++
+ 		for (j = 0; j < max_idx;) {
+ 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ 			struct sk_buff *skb;
+@@ -2764,6 +2775,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+ 	}
+ 
+ skip_rx_tpa_free:
++	if (!rxr->rx_buf_ring)
++		goto skip_rx_buf_free;
++
+ 	for (i = 0; i < max_idx; i++) {
+ 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ 		dma_addr_t mapping = rx_buf->mapping;
+@@ -2786,6 +2800,11 @@ skip_rx_tpa_free:
+ 			kfree(data);
+ 		}
+ 	}
++
++skip_rx_buf_free:
++	if (!rxr->rx_agg_ring)
++		goto skip_rx_agg_free;
++
+ 	for (i = 0; i < max_agg_idx; i++) {
+ 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+ 		struct page *page = rx_agg_buf->page;
+@@ -2802,6 +2821,8 @@ skip_rx_tpa_free:
+ 
+ 		__free_page(page);
+ 	}
++
++skip_rx_agg_free:
+ 	if (rxr->rx_page) {
+ 		__free_page(rxr->rx_page);
+ 		rxr->rx_page = NULL;
+@@ -11237,6 +11258,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+ 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ 		return;
+ 
++	/* Make sure it is enabled before checking the tmr_counter. */
++	smp_rmb();
+ 	if (fw_health->tmr_counter) {
+ 		fw_health->tmr_counter--;
+ 		return;
+@@ -12169,6 +12192,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
+ 			return;
+ 		}
+ 
++		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
++		    bp->fw_health->enabled) {
++			bp->fw_health->last_fw_reset_cnt =
++				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
++		}
+ 		bp->fw_reset_state = 0;
+ 		/* Make sure fw_reset_state is 0 before clearing the flag */
+ 		smp_mb__before_atomic();
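
The smp_wmb()/smp_rmb() pair added above is the standard publish/consume ordering: the writer makes the payload (tmr_counter) globally visible before setting the flag (enabled), and the reader checks the flag before trusting the payload. A generic sketch of the pairing, with illustrative names (production code would typically also wrap the flag accesses in WRITE_ONCE()/READ_ONCE()):

    #include <asm/barrier.h>

    static int payload;         /* must be visible before 'ready' is seen */
    static bool ready;

    static void publish(int value)      /* writer */
    {
            payload = value;
            smp_wmb();      /* order the payload store before the flag store */
            ready = true;
    }

    static int consume(void)            /* reader */
    {
            if (!ready)
                    return -EAGAIN;
            smp_rmb();      /* pairs with smp_wmb() in publish() */
            return payload;
    }
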
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 64381be935a8c..bb228619ec641 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -449,7 +449,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 		return rc;
+ 
+ 	ver_resp = &bp->ver_resp;
+-	sprintf(buf, "%X", ver_resp->chip_rev);
++	sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ 	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ 			      DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ 	if (rc)
+@@ -471,8 +471,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 	if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ 		u32 ver = nvm_cfg_ver.vu32;
+ 
+-		sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+-			ver & 0xf);
++		sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
++			ver & 0xff);
+ 		rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ 				      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ 				      buf);
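
The two format changes above widen the NVM config version fields to a full byte each and render the ASIC revision in conventional stepping notation. For example (values invented for illustration), chip_rev = 1 and chip_metal = 0 now report as:

    char buf[8];

    sprintf(buf, "%c%d", 'A' + 1, 0);   /* -> "B0", instead of "1" from "%X" */
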
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 5e4429b14b8ca..2186706cf9130 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -1870,9 +1870,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+ {
+ 	struct bnxt_flower_indr_block_cb_priv *cb_priv;
+ 
+-	/* All callback list access should be protected by RTNL. */
+-	ASSERT_RTNL();
+-
+ 	list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+ 		if (cb_priv->tunnel_netdev == netdev)
+ 			return cb_priv;
+diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+index 512da98019c66..2a28a38da036c 100644
+--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+@@ -1107,6 +1107,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (!adapter->registered_device_map) {
+ 		pr_err("%s: could not register any net devices\n",
+ 		       pci_name(pdev));
++		err = -EINVAL;
+ 		goto out_release_adapter_res;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+index cb5c79c43bc9c..7bb81e08f9532 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -3306,6 +3306,9 @@ void t3_sge_stop(struct adapter *adap)
+ 
+ 	t3_sge_stop_dma(adap);
+ 
++	/* workqueues aren't initialized otherwise */
++	if (!(adap->flags & FULL_INIT_DONE))
++		return;
+ 	for (i = 0; i < SGE_QSETS; ++i) {
+ 		struct sge_qset *qs = &adap->sge.qs[i];
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index cdb5f14fb6bc5..9faa3712ea5b8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IO path");
+ #define HNS3_OUTER_VLAN_TAG	2
+ 
+ #define HNS3_MIN_TX_LEN		33U
++#define HNS3_MIN_TUN_PKT_LEN	65U
+ 
+ /* hns3_pci_tbl - PCI Device ID Table
+  *
+@@ -1425,8 +1426,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ 			       l4.tcp->doff);
+ 		break;
+ 	case IPPROTO_UDP:
+-		if (hns3_tunnel_csum_bug(skb))
+-			return skb_checksum_help(skb);
++		if (hns3_tunnel_csum_bug(skb)) {
++			int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
++
++			return ret ? ret : skb_checksum_help(skb);
++		}
+ 
+ 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ 		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
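
The padding fix above leans on a subtlety of skb_put_padto(): on failure it frees the skb as well as returning an error, so the caller must bail out immediately rather than fall through to skb_checksum_help(). Condensed to its essence, with a hypothetical wrapper name:

    #include <linux/skbuff.h>

    static int tx_pad_then_csum(struct sk_buff *skb, unsigned int min_len)
    {
            /* skb_put_padto() frees skb on error -- do not touch the
             * skb after a non-zero return
             */
            int ret = skb_put_padto(skb, min_len);

            return ret ? ret : skb_checksum_help(skb);
    }
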
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+index 288788186eccd..e6e617aba2a4c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -1710,6 +1710,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+ 	}
+ 
+ 	bd_num = le32_to_cpu(req->bd_num);
++	if (!bd_num) {
++		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
++		return -EINVAL;
++	}
+ 
+ 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ 	if (!desc_src)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 03ae122f1c9ac..72d55c028ac4b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+ static int hclge_configure(struct hclge_dev *hdev)
+ {
+ 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
++	const struct cpumask *cpumask = cpu_online_mask;
+ 	struct hclge_cfg cfg;
+ 	unsigned int i;
+-	int ret;
++	int node, ret;
+ 
+ 	ret = hclge_get_cfg(hdev, &cfg);
+ 	if (ret)
+@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
+ 
+ 	hclge_init_kdump_kernel_config(hdev);
+ 
+-	/* Set the init affinity based on pci func number */
+-	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+-	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+-	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+-			&hdev->affinity_mask);
++	/* Set the affinity based on numa node */
++	node = dev_to_node(&hdev->pdev->dev);
++	if (node != NUMA_NO_NODE)
++		cpumask = cpumask_of_node(node);
++
++	cpumask_copy(&hdev->affinity_mask, cpumask);
+ 
+ 	return ret;
+ }
+@@ -8118,11 +8120,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+ 	hclge_clear_arfs_rules(hdev);
+ 	spin_unlock_bh(&hdev->fd_rule_lock);
+ 
+-	/* If it is not PF reset, the firmware will disable the MAC,
++	/* If it is not PF reset or FLR, the firmware will disable the MAC,
+ 	 * so it only need to stop phy here.
+ 	 */
+ 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+-	    hdev->reset_type != HNAE3_FUNC_RESET) {
++	    hdev->reset_type != HNAE3_FUNC_RESET &&
++	    hdev->reset_type != HNAE3_FLR_RESET) {
+ 		hclge_mac_stop_phy(hdev);
+ 		hclge_update_link_status(hdev);
+ 		return;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 938654778979a..be3ea7023ed8c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2463,6 +2463,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ 
+ 	hclgevf_enable_vector(&hdev->misc_vector, false);
+ 	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
++	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
++		hclgevf_clear_event_cause(hdev, clearval);
+ 
+ 	switch (event_cause) {
+ 	case HCLGEVF_VECTOR0_EVENT_RST:
+@@ -2475,10 +2477,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ 		break;
+ 	}
+ 
+-	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+-		hclgevf_clear_event_cause(hdev, clearval);
++	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ 		hclgevf_enable_vector(&hdev->misc_vector, true);
+-	}
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index a775c69e4fd7f..6aa6ff89a7651 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 		return 0;
+ 	}
+ 
++	if (adapter->failover_pending) {
++		adapter->init_done_rc = -EAGAIN;
++		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
++		complete(&adapter->init_done);
++		/* login response buffer will be released on reset */
++		return 0;
++	}
++
+ 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ 
+ 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index eadcb99583464..3c4f08d20414e 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
+ {
+ 	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
+ 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
++		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ 		ice_plug_aux_dev(pf);
+ 	}
+ }
+@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+ {
+ 	ice_unplug_aux_dev(pf);
+ 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
++	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ }
+ #endif /* _ICE_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
+index 1f2afdf6cd483..adcc9a251595a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_idc.c
++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
+@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
+ 	struct auxiliary_device *adev;
+ 	int ret;
+ 
++	/* if this PF doesn't support a technology that requires auxiliary
++	 * devices, then gracefully exit
++	 */
++	if (!ice_is_aux_ena(pf))
++		return 0;
++
+ 	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ 	if (!iadev)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index f62982c4d933d..78114e625ffdc 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -5962,7 +5962,9 @@ static int igc_probe(struct pci_dev *pdev,
+ 	if (pci_using_dac)
+ 		netdev->features |= NETIF_F_HIGHDMA;
+ 
+-	netdev->vlan_features |= netdev->features;
++	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
++	netdev->mpls_features |= NETIF_F_HW_CSUM;
++	netdev->hw_enc_features |= netdev->vlan_features;
+ 
+ 	/* MTU range: 68 - 9216 */
+ 	netdev->min_mtu = ETH_MIN_MTU;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 5fe277e354f7a..c10cae78e79f8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -92,7 +92,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
+  */
+ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+ {
+-	unsigned long timeout = jiffies + usecs_to_jiffies(10000);
++	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
++	bool twice = false;
+ 	void __iomem *reg;
+ 	u64 reg_val;
+ 
+@@ -107,6 +108,15 @@ again:
+ 		usleep_range(1, 5);
+ 		goto again;
+ 	}
++	/* If the CPU was scheduled out before the 'time_before' check
++	 * above and scheduled back in only after jiffies passed the
++	 * timeout, check one more time whether the HW completed the
++	 * operation in the meantime.
++	 */
++	if (!twice) {
++		twice = true;
++		goto again;
++	}
+ 	return -EBUSY;
+ }
+ 
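
The extra pass added to rvu_poll_reg() is a general defense for polling loops: a long enough preemption between the register read and the time_before() test can blow the deadline even though the hardware has already finished. The shape of the pattern, reduced to a sketch with illustrative names (64-bit readq assumed, as in the RVU driver):

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>

    static int poll_until_clear(void __iomem *reg, u64 mask, unsigned long us)
    {
            unsigned long timeout = jiffies + usecs_to_jiffies(us);
            bool last_try = false;

    again:
            if (!(readq(reg) & mask))
                    return 0;                       /* hardware completed */
            if (time_before(jiffies, timeout)) {
                    usleep_range(1, 5);
                    goto again;
            }
            if (!last_try) {
                    last_try = true;                /* deadline passed: re-read once */
                    goto again;
            }
            return -EBUSY;
    }
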
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 3f8a98093f8cb..f9cf9fb315479 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+ 	err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+ 	if (err) {
+ 		mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+-		return err;
++		goto err_cancel_work;
+ 	}
+ 
+ 	err = mlx5_fw_tracer_create_mkey(tracer);
+@@ -1031,6 +1031,7 @@ err_notifier_unregister:
+ 	mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ err_dealloc_pd:
+ 	mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
++err_cancel_work:
+ 	cancel_work_sync(&tracer->read_fw_strings_work);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index b1b51bbba0541..3f67efbe12fc5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -940,7 +940,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
+ 
+ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
+ int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
+-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
++int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
+ 
+ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ 			  u16 vid);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 059799e4f483f..ef271b97fe5ef 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ {
+ 	struct mlx5e_rep_indr_block_priv *cb_priv;
+ 
+-	/* All callback list access should be protected by RTNL. */
+-	ASSERT_RTNL();
+-
+ 	list_for_each_entry(cb_priv,
+ 			    &rpriv->uplink_priv.tc_indr_block_priv_list,
+ 			    list)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index bd72572e03d1d..1cc279d389d6f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1882,7 +1882,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+ 	return set_pflag_cqe_based_moder(netdev, enable, true);
+ }
+ 
+-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
++int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
+ {
+ 	bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
+ 	struct mlx5e_params new_params;
+@@ -1894,8 +1894,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
+ 	if (curr_val == new_val)
+ 		return 0;
+ 
+-	if (new_val && !priv->profile->rx_ptp_support &&
+-	    priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
++	if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
+ 		netdev_err(priv->netdev,
+ 			   "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+ 		return -EINVAL;
+@@ -1903,7 +1902,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
+ 
+ 	new_params = priv->channels.params;
+ 	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
+-	if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
++	if (rx_filter)
+ 		new_params.ptp_rx = new_val;
+ 
+ 	if (new_params.ptp_rx == priv->channels.params.ptp_rx)
+@@ -1926,12 +1925,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	bool rx_filter;
+ 	int err;
+ 
+ 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
+ 		return -EOPNOTSUPP;
+ 
+-	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
++	rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
++	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
+ 	if (err)
+ 		return err;
+ 
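
With this change mlx5e_modify_rx_cqe_compression_locked() no longer re-reads priv->tstamp.rx_filter itself; each caller snapshots the state once and passes the result down. A small sketch of that snapshot-and-pass pattern, with illustrative names rather than the mlx5 types:

#include <stdbool.h>

struct dev_state {
	int rx_filter;			/* shared state, written elsewhere */
};

static int modify_locked(bool enable, bool rx_filter)
{
	if (enable && rx_filter)
		return -1;		/* cannot enable while filtering */
	return 0;			/* apply the new setting */
}

static int set_flag(struct dev_state *st, bool enable)
{
	/* sample the decision input once, at a well-defined point */
	bool rx_filter = st->rx_filter != 0;

	return modify_locked(enable, rx_filter);
}

int main(void)
{
	struct dev_state st = { .rx_filter = 1 };

	return set_flag(&st, true) ? 1 : 0;	/* rejected: returns 1 */
}
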
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 2d53eaf3b9241..fa718e71db2d4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4004,14 +4004,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
+ 
+ 	if (!rx_filter)
+ 		/* Reset CQE compression to Admin default */
+-		return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
++		return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
+ 
+ 	if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ 		return 0;
+ 
+ 	/* Disable CQE compression */
+ 	netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+-	err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
++	err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
+ 	if (err)
+ 		netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index c0697e1b71185..938ef5afe5053 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
+ 
+ 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ 		if (!curr_match) {
++			rcu_read_unlock();
+ 			free_match_list(match_head, ft_locked);
+-			err = -ENOMEM;
+-			goto out;
++			return -ENOMEM;
+ 		}
+ 		curr_match->g = g;
+ 		list_add_tail(&curr_match->list, &match_head->list);
+ 	}
+-out:
+ 	rcu_read_unlock();
+ 	return err;
+ }
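
The build_match_list() fix makes the allocation-failure path drop the RCU read lock before calling free_match_list(), instead of jumping to a label that unlocked only after the cleanup. A userspace analog with a pthread rwlock standing in for RCU (hypothetical names, compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static int build_list(int n)
{
	pthread_rwlock_rdlock(&lock);
	for (int i = 0; i < n; i++) {
		void *item = malloc(16);
		if (!item) {
			/* unlock before any cleanup or return */
			pthread_rwlock_unlock(&lock);
			return -ENOMEM;
		}
		free(item);	/* stand-in for list_add_tail() */
	}
	pthread_rwlock_unlock(&lock);
	return 0;
}

int main(void)
{
	return build_list(4) ? 1 : 0;
}
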
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+index a0a059e0154ff..04c7dc224effa 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
+ 	err = mlxbf_gige_clean_port(priv);
+ 	if (err)
+ 		goto free_irqs;
++
++	/* Clear driver's valid_polarity to match hardware,
++	 * since the above call to clean_port() resets the
++	 * receive polarity used by hardware.
++	 */
++	priv->valid_polarity = 0;
++
+ 	err = mlxbf_gige_rx_init(priv);
+ 	if (err)
+ 		goto free_irqs;
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 2406d33356ad2..d87a9eab25a79 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1766,9 +1766,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ 	struct nfp_flower_indr_block_cb_priv *cb_priv;
+ 	struct nfp_flower_priv *priv = app->priv;
+ 
+-	/* All callback list access should be protected by RTNL. */
+-	ASSERT_RTNL();
+-
+ 	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ 		if (cb_priv->netdev == netdev)
+ 			return cb_priv;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index 4387292c37e2f..e8e17bfc41c54 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -3368,6 +3368,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ 			  struct qed_nvm_image_att *p_image_att)
+ {
+ 	enum nvm_image_type type;
++	int rc;
+ 	u32 i;
+ 
+ 	/* Translate image_id into MFW definitions */
+@@ -3396,7 +3397,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ 		return -EINVAL;
+ 	}
+ 
+-	qed_mcp_nvm_info_populate(p_hwfn);
++	rc = qed_mcp_nvm_info_populate(p_hwfn);
++	if (rc)
++		return rc;
++
+ 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ 		if (type == p_hwfn->nvm_info.image_att[i].image_type)
+ 			break;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+index e6784023bce42..aa7ee43f92525 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+@@ -439,7 +439,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+ 	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+ 	msleep(20);
+ 
+-	qlcnic_rom_unlock(adapter);
+ 	/* big hammer don't reset CAM block on reset */
+ 	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ 
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index 47e9998b62f09..6a2416bec7ddc 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -119,6 +119,8 @@
+ #define PHY_ST		0x8A	/* PHY status register */
+ #define MAC_SM		0xAC	/* MAC status machine */
+ #define  MAC_SM_RST	0x0002	/* MAC status machine reset */
++#define MD_CSC		0xb6	/* MDC speed control register */
++#define  MD_CSC_DEFAULT	0x0030
+ #define MAC_ID		0xBE	/* Identifier register */
+ 
+ #define TX_DCNT		0x80	/* TX descriptor count */
+@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
+ {
+ 	void __iomem *ioaddr = lp->base;
+ 	int limit = MAC_DEF_TIMEOUT;
+-	u16 cmd;
++	u16 cmd, md_csc;
+ 
++	md_csc = ioread16(ioaddr + MD_CSC);
+ 	iowrite16(MAC_RST, ioaddr + MCR1);
+ 	while (limit--) {
+ 		cmd = ioread16(ioaddr + MCR1);
+@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
+ 	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
+ 	iowrite16(0, ioaddr + MAC_SM);
+ 	mdelay(5);
++
++	/* Restore MDIO clock frequency */
++	if (md_csc != MD_CSC_DEFAULT)
++		iowrite16(md_csc, ioaddr + MD_CSC);
+ }
+ 
+ static void r6040_init_mac_regs(struct net_device *dev)
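
The r6040 change captures the MDC speed control register before the MAC reset clobbers it and writes it back afterwards. A standalone model of that save/restore pattern; the regs[] array and accessors merely simulate MMIO:

#include <stdint.h>
#include <stdio.h>

#define MD_CSC		0xb6	/* MDC speed control register */
#define MD_CSC_DEFAULT	0x0030

static uint16_t regs[0x100];

static uint16_t mmio_read16(unsigned int off) { return regs[off]; }
static void mmio_write16(uint16_t val, unsigned int off) { regs[off] = val; }

static void do_mac_reset(void)
{
	regs[MD_CSC] = MD_CSC_DEFAULT;	/* reset clobbers the register */
}

static void reset_mac_keep_mdio_clock(void)
{
	uint16_t md_csc = mmio_read16(MD_CSC);	/* save before reset */

	do_mac_reset();

	if (md_csc != MD_CSC_DEFAULT)		/* restore if customized */
		mmio_write16(md_csc, MD_CSC);
}

int main(void)
{
	regs[MD_CSC] = 0x0020;			/* firmware-tuned value */
	reset_mac_keep_mdio_clock();
	printf("MD_CSC = 0x%04x\n", regs[MD_CSC]);	/* 0x0020 again */
	return 0;
}
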
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 840478692a370..dfd439eadd492 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ 	else
+ 		txdesc->status |= cpu_to_le32(TD_TACT);
+ 
++	wmb(); /* cur_tx must be incremented after TACT bit was set */
+ 	mdp->cur_tx++;
+ 
+ 	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
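
The added wmb() guarantees the descriptor's TACT bit is visible before cur_tx advances, so a consumer of cur_tx never observes an incremented index with a stale descriptor. A C11 userspace analog of that release ordering; the ring layout is illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 64

struct desc { uint32_t status; };

static struct desc ring[RING_SIZE];
static _Atomic unsigned int cur_tx;

static void publish(unsigned int idx, uint32_t tact_bit)
{
	ring[idx % RING_SIZE].status |= tact_bit;

	/* order the status write before the index increment (the wmb()) */
	atomic_thread_fence(memory_order_release);

	atomic_fetch_add_explicit(&cur_tx, 1, memory_order_relaxed);
}

int main(void)
{
	publish(0, 1u << 31);
	return 0;
}
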
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index 4c9a37dd0d3ff..ecf759ee1c9f5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -109,8 +109,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 		plat->bus_id = pci_dev_id(pdev);
+ 
+ 	phy_mode = device_get_phy_mode(&pdev->dev);
+-	if (phy_mode < 0)
++	if (phy_mode < 0) {
+ 		dev_err(&pdev->dev, "phy_mode not found\n");
++		return phy_mode;
++	}
+ 
+ 	plat->phy_interface = phy_mode;
+ 	plat->interface = PHY_INTERFACE_MODE_GMII;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 8a150cc462dcf..0dbd189c2721d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7113,13 +7113,10 @@ int stmmac_suspend(struct device *dev)
+ 	struct net_device *ndev = dev_get_drvdata(dev);
+ 	struct stmmac_priv *priv = netdev_priv(ndev);
+ 	u32 chan;
+-	int ret;
+ 
+ 	if (!ndev || !netif_running(ndev))
+ 		return 0;
+ 
+-	phylink_mac_change(priv->phylink, false);
+-
+ 	mutex_lock(&priv->lock);
+ 
+ 	netif_device_detach(ndev);
+@@ -7145,27 +7142,22 @@ int stmmac_suspend(struct device *dev)
+ 		stmmac_pmt(priv, priv->hw, priv->wolopts);
+ 		priv->irq_wake = 1;
+ 	} else {
+-		mutex_unlock(&priv->lock);
+-		rtnl_lock();
+-		if (device_may_wakeup(priv->device))
+-			phylink_speed_down(priv->phylink, false);
+-		phylink_stop(priv->phylink);
+-		rtnl_unlock();
+-		mutex_lock(&priv->lock);
+-
+ 		stmmac_mac_set(priv, priv->ioaddr, false);
+ 		pinctrl_pm_select_sleep_state(priv->device);
+-		/* Disable clock in case of PWM is off */
+-		clk_disable_unprepare(priv->plat->clk_ptp_ref);
+-		ret = pm_runtime_force_suspend(dev);
+-		if (ret) {
+-			mutex_unlock(&priv->lock);
+-			return ret;
+-		}
+ 	}
+ 
+ 	mutex_unlock(&priv->lock);
+ 
++	rtnl_lock();
++	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
++		phylink_suspend(priv->phylink, true);
++	} else {
++		if (device_may_wakeup(priv->device))
++			phylink_speed_down(priv->phylink, false);
++		phylink_suspend(priv->phylink, false);
++	}
++	rtnl_unlock();
++
+ 	if (priv->dma_cap.fpesel) {
+ 		/* Disable FPE */
+ 		stmmac_fpe_configure(priv, priv->ioaddr,
+@@ -7237,12 +7229,6 @@ int stmmac_resume(struct device *dev)
+ 		priv->irq_wake = 0;
+ 	} else {
+ 		pinctrl_pm_select_default_state(priv->device);
+-		/* enable the clk previously disabled */
+-		ret = pm_runtime_force_resume(dev);
+-		if (ret)
+-			return ret;
+-		if (priv->plat->clk_ptp_ref)
+-			clk_prepare_enable(priv->plat->clk_ptp_ref);
+ 		/* reset the phy so that it's ready */
+ 		if (priv->mii)
+ 			stmmac_mdio_reset(priv->mii);
+@@ -7256,13 +7242,15 @@ int stmmac_resume(struct device *dev)
+ 			return ret;
+ 	}
+ 
+-	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+-		rtnl_lock();
+-		phylink_start(priv->phylink);
+-		/* We may have called phylink_speed_down before */
+-		phylink_speed_up(priv->phylink);
+-		rtnl_unlock();
++	rtnl_lock();
++	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
++		phylink_resume(priv->phylink);
++	} else {
++		phylink_resume(priv->phylink);
++		if (device_may_wakeup(priv->device))
++			phylink_speed_up(priv->phylink);
+ 	}
++	rtnl_unlock();
+ 
+ 	rtnl_lock();
+ 	mutex_lock(&priv->lock);
+@@ -7283,8 +7271,6 @@ int stmmac_resume(struct device *dev)
+ 	mutex_unlock(&priv->lock);
+ 	rtnl_unlock();
+ 
+-	phylink_mac_change(priv->phylink, true);
+-
+ 	netif_device_attach(ndev);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 5ca710844cc1e..62cec9bfcd337 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -9,6 +9,7 @@
+ *******************************************************************************/
+ 
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
+ #include <linux/of.h>
+@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
+ 	return stmmac_bus_clks_config(priv, true);
+ }
+ 
++static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++	struct stmmac_priv *priv = netdev_priv(ndev);
++	int ret;
++
++	if (!netif_running(ndev))
++		return 0;
++
++	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
++		/* Disable clock in case of PWM is off */
++		/* Disable clock in case PWM is off */
++
++		ret = pm_runtime_force_suspend(dev);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
++{
++	struct net_device *ndev = dev_get_drvdata(dev);
++	struct stmmac_priv *priv = netdev_priv(ndev);
++	int ret;
++
++	if (!netif_running(ndev))
++		return 0;
++
++	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
++		/* enable the clk previously disabled */
++		ret = pm_runtime_force_resume(dev);
++		if (ret)
++			return ret;
++
++		clk_prepare_enable(priv->plat->clk_ptp_ref);
++	}
++
++	return 0;
++}
++
+ const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ 	SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
++	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
+ };
+ EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+ 
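
The new noirq callbacks move the clock gating and runtime-PM handoff out of stmmac_suspend() into the phase that runs after device interrupts are disabled, keeping them ordered after the phylink work done under RTNL. A toy model of the two-phase split (hypothetical helpers, not the driver code):

#include <stdbool.h>
#include <stdio.h>

static bool may_wakeup;	/* models device_may_wakeup() */
static bool has_pmt;	/* models priv->plat->pmt */

static void suspend(void)
{
	puts("detach netdev, stop DMA, suspend link");
}

static void suspend_noirq(void)
{
	/* clocks go down last, once no interrupt handler can run */
	if (!may_wakeup || !has_pmt)
		puts("disable clk_ptp_ref, pm_runtime_force_suspend()");
}

int main(void)
{
	suspend();
	suspend_noirq();	/* the PM core calls this in the noirq phase */
	return 0;
}
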
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index c607ebec74567..656f6ef31b19e 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ 	 * table region determines the number of entries it has.
+ 	 */
+ 	if (filter) {
+-		count = hweight32(ipa->filter_map);
++		/* Include one extra "slot" to hold the filter map itself */
++		count = 1 + hweight32(ipa->filter_map);
+ 		hash_count = hash_mem->size ? count : 0;
+ 	} else {
+ 		count = mem->size / sizeof(__le64);
+diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
+index 21aa24c741b96..daae7fa58fb82 100644
+--- a/drivers/net/phy/dp83640_reg.h
++++ b/drivers/net/phy/dp83640_reg.h
+@@ -5,7 +5,7 @@
+ #ifndef HAVE_DP83640_REGISTERS
+ #define HAVE_DP83640_REGISTERS
+ 
+-#define PAGE0                     0x0000
++/* #define PAGE0                  0x0000 */
+ #define PHYCR2                    0x001c /* PHY Control Register 2 */
+ 
+ #define PAGE4                     0x0004
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index eb29ef53d971d..42e5a681183f3 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -33,6 +33,7 @@
+ enum {
+ 	PHYLINK_DISABLE_STOPPED,
+ 	PHYLINK_DISABLE_LINK,
++	PHYLINK_DISABLE_MAC_WOL,
+ };
+ 
+ /**
+@@ -1281,6 +1282,9 @@ EXPORT_SYMBOL_GPL(phylink_start);
+  * network device driver's &struct net_device_ops ndo_stop() method.  The
+  * network device's carrier state should not be changed prior to calling this
+  * function.
++ *
++ * This will synchronously bring down the link if the link is not already
++ * down (in other words, it will trigger a mac_link_down() method call).
+  */
+ void phylink_stop(struct phylink *pl)
+ {
+@@ -1300,6 +1304,84 @@ void phylink_stop(struct phylink *pl)
+ }
+ EXPORT_SYMBOL_GPL(phylink_stop);
+ 
++/**
++ * phylink_suspend() - handle a network device suspend event
++ * @pl: a pointer to a &struct phylink returned from phylink_create()
++ * @mac_wol: true if the MAC needs to receive packets for Wake-on-LAN
++ *
++ * Handle a network device suspend event. There are several cases:
++ * - If Wake-on-LAN is not active, we can bring down the link between
++ *   the MAC and PHY by calling phylink_stop().
++ * - If Wake-on-LAN is active and being handled only by the PHY, we
++ *   can also bring down the link between the MAC and PHY.
++ * - If Wake-on-LAN is active but being handled by the MAC, the MAC
++ *   still needs to receive packets, so we cannot bring the link down.
++ */
++void phylink_suspend(struct phylink *pl, bool mac_wol)
++{
++	ASSERT_RTNL();
++
++	if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
++		/* Wake-on-LAN enabled, MAC handling */
++		mutex_lock(&pl->state_mutex);
++
++		/* Stop the resolver bringing the link up */
++		__set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
++
++		/* Disable the carrier, to prevent transmit timeouts,
++		 * but one would hope all packets have been sent. This
++		 * also means phylink_resolve() will do nothing.
++		 */
++		netif_carrier_off(pl->netdev);
++
++		/* We do not call mac_link_down() here as we want the
++		 * link to remain up to receive the WoL packets.
++		 */
++		mutex_unlock(&pl->state_mutex);
++	} else {
++		phylink_stop(pl);
++	}
++}
++EXPORT_SYMBOL_GPL(phylink_suspend);
++
++/**
++ * phylink_resume() - handle a network device resume event
++ * @pl: a pointer to a &struct phylink returned from phylink_create()
++ *
++ * Undo the effects of phylink_suspend(), returning the link to an
++ * operational state.
++ */
++void phylink_resume(struct phylink *pl)
++{
++	ASSERT_RTNL();
++
++	if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
++		/* Wake-on-LAN enabled, MAC handling */
++
++		/* Call mac_link_down() so we keep the overall state balanced.
++		 * Do this under the state_mutex lock for consistency. This
++		 * will cause a "Link Down" message to be printed during
++		 * resume, which is harmless - the true link state will be
++		 * printed when we run a resolve.
++		 */
++		mutex_lock(&pl->state_mutex);
++		phylink_link_down(pl);
++		mutex_unlock(&pl->state_mutex);
++
++		/* Re-apply the link parameters so that all the settings get
++		 * restored to the MAC.
++		 */
++		phylink_mac_initial_config(pl, true);
++
++		/* Re-enable and re-resolve the link parameters */
++		clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
++		phylink_run_resolve(pl);
++	} else {
++		phylink_start(pl);
++	}
++}
++EXPORT_SYMBOL_GPL(phylink_resume);
++
+ /**
+  * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
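
The decision phylink_suspend() encodes reduces to one branch on whether the MAC itself must stay awake for Wake-on-LAN; the stmmac hunks earlier in this patch are the first caller. A toy model of that branch, condensed to two booleans (the real test is mac_wol && (!pl->netdev || pl->netdev->wol_enabled)):

#include <stdbool.h>
#include <stdio.h>

static void phylink_suspend_model(bool mac_wol, bool wol_enabled)
{
	if (mac_wol && wol_enabled)
		puts("keep link up: stop resolver, carrier off only");
	else
		puts("phylink_stop(): take the link fully down");
}

int main(void)
{
	phylink_suspend_model(true, true);	/* MAC-handled WoL */
	phylink_suspend_model(false, true);	/* PHY-handled WoL */
	phylink_suspend_model(false, false);	/* no WoL at all */
	return 0;
}
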
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 4c4ab7b38d78c..82bb5ed94c485 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -654,6 +654,11 @@ static const struct usb_device_id mbim_devs[] = {
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ 	},
+ 
++	/* Telit LN920 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
++	},
++
+ 	/* default entry */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index dec96e8ab5679..18e0ca85f6537 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2536,13 +2536,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+ 	if (!hso_net->mux_bulk_tx_buf)
+ 		goto err_free_tx_urb;
+ 
+-	add_net_device(hso_dev);
++	result = add_net_device(hso_dev);
++	if (result) {
++		dev_err(&interface->dev, "Failed to add net device\n");
++		goto err_free_tx_buf;
++	}
+ 
+ 	/* registering our net device */
+ 	result = register_netdev(net);
+ 	if (result) {
+ 		dev_err(&interface->dev, "Failed to register device\n");
+-		goto err_free_tx_buf;
++		goto err_rmv_ndev;
+ 	}
+ 
+ 	hso_log_port(hso_dev);
+@@ -2551,8 +2555,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+ 
+ 	return hso_dev;
+ 
+-err_free_tx_buf:
++err_rmv_ndev:
+ 	remove_net_device(hso_dev);
++err_free_tx_buf:
+ 	kfree(hso_net->mux_bulk_tx_buf);
+ err_free_tx_urb:
+ 	usb_free_urb(hso_net->mux_bulk_tx_urb);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+index b4b1f75b9c2a8..513f9e5387290 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+@@ -230,19 +230,11 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+ {
+ 	const struct firmware *pnvm;
+-	char pnvm_name[64];
++	char pnvm_name[MAX_PNVM_NAME];
++	size_t new_len;
+ 	int ret;
+ 
+-	/*
+-	 * The prefix unfortunately includes a hyphen at the end, so
+-	 * don't add the dot here...
+-	 */
+-	snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+-		 trans->cfg->fw_name_pre);
+-
+-	/* ...but replace the hyphen with the dot here. */
+-	if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+-		pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
++	iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
+ 
+ 	ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ 	if (ret) {
+@@ -251,11 +243,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+ 		return ret;
+ 	}
+ 
++	new_len = pnvm->size;
+ 	*data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
++	release_firmware(pnvm);
++
+ 	if (!*data)
+ 		return -ENOMEM;
+ 
+-	*len = pnvm->size;
++	*len = new_len;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+index 61d3d4e0b7d94..203c367dd4dee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+@@ -12,7 +12,27 @@
+ 
+ #define MVM_UCODE_PNVM_TIMEOUT	(HZ / 4)
+ 
++#define MAX_PNVM_NAME  64
++
+ int iwl_pnvm_load(struct iwl_trans *trans,
+ 		  struct iwl_notif_wait_data *notif_wait);
+ 
++static inline
++void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
++			  u8 *pnvm_name, size_t max_len)
++{
++	int pre_len;
++
++	/*
++	 * The prefix unfortunately includes a hyphen at the end, so
++	 * don't add the dot here...
++	 */
++	snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre);
++
++	/* ...but replace the hyphen with the dot here. */
++	pre_len = strlen(trans->cfg->fw_name_pre);
++	if (pre_len < max_len && pre_len > 0)
++		pnvm_name[pre_len - 1] = '.';
++}
++
+ #endif /* __IWL_PNVM_H__ */
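
Factoring the name construction into iwl_pnvm_get_fs_name() makes it easy to check in isolation: append "pnvm" to the firmware prefix, then turn the prefix's trailing hyphen into a dot. A standalone copy of that logic; the example prefix is only illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_PNVM_NAME 64

static void get_fs_name(const char *pre, char *out, size_t max_len)
{
	size_t pre_len;

	snprintf(out, max_len, "%spnvm", pre);
	pre_len = strlen(pre);
	if (pre_len < max_len && pre_len > 0)
		out[pre_len - 1] = '.';	/* "-pnvm" becomes ".pnvm" */
}

int main(void)
{
	char name[MAX_PNVM_NAME];

	get_fs_name("iwlwifi-ty-a0-gf-a0-", name, sizeof(name));
	printf("%s\n", name);	/* iwlwifi-ty-a0-gf-a0.pnvm */
	return 0;
}
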
+diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
+index 7095ecd6223a7..4e18e08776c98 100644
+--- a/drivers/ntb/test/ntb_msi_test.c
++++ b/drivers/ntb/test/ntb_msi_test.c
+@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
+ 	if (ret)
+ 		goto remove_dbgfs;
+ 
+-	if (!nm->isr_ctx)
++	if (!nm->isr_ctx) {
++		ret = -ENOMEM;
+ 		goto remove_dbgfs;
++	}
+ 
+ 	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ 
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index 89df1350fefd8..65e1e5cf1b29a 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
+ 		return -ENOMEM;
+ 	}
+ 	if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
++		ret = -EINVAL;
+ 		dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
+ 		goto err_free_inbuf;
+ 	}
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2f0cbaba12ac4..84e7cb9f19681 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3496,7 +3496,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
+ 	lockdep_assert_held(&subsys->lock);
+ 
+ 	list_for_each_entry(h, &subsys->nsheads, entry) {
+-		if (h->ns_id == nsid && nvme_tryget_ns_head(h))
++		if (h->ns_id != nsid)
++			continue;
++		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
+ 			return h;
+ 	}
+ 
+@@ -3821,6 +3823,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ 
+ 	mutex_lock(&ns->ctrl->subsys->lock);
+ 	list_del_rcu(&ns->siblings);
++	if (list_empty(&ns->head->list)) {
++		list_del_init(&ns->head->entry);
++		last_path = true;
++	}
+ 	mutex_unlock(&ns->ctrl->subsys->lock);
+ 
+ 	synchronize_rcu(); /* guarantee not available in head->list */
+@@ -3840,13 +3846,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ 	list_del_init(&ns->list);
+ 	up_write(&ns->ctrl->namespaces_rwsem);
+ 
+-	/* Synchronize with nvme_init_ns_head() */
+-	mutex_lock(&ns->head->subsys->lock);
+-	if (list_empty(&ns->head->list)) {
+-		list_del_init(&ns->head->entry);
+-		last_path = true;
+-	}
+-	mutex_unlock(&ns->head->subsys->lock);
+ 	if (last_path)
+ 		nvme_mpath_shutdown_disk(ns->head);
+ 	nvme_put_ns(ns);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 48b70e5235a39..19a711395cdc3 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+ 	} while (ret > 0);
+ }
+ 
++static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
++{
++	return !list_empty(&queue->send_list) ||
++		!llist_empty(&queue->req_list) || queue->more_requests;
++}
++
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ 		bool sync, bool last)
+ {
+@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ 		nvme_tcp_send_all(queue);
+ 		queue->more_requests = false;
+ 		mutex_unlock(&queue->send_mutex);
+-	} else if (last) {
+-		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ 	}
++
++	if (last && nvme_tcp_queue_more(queue))
++		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
+ 
+ static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+@@ -893,12 +900,6 @@ done:
+ 	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+-{
+-	return !list_empty(&queue->send_list) ||
+-		!llist_empty(&queue->req_list) || queue->more_requests;
+-}
+-
+ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
+ {
+ 	queue->request = NULL;
+@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+ 				pending = true;
+ 			else if (unlikely(result < 0))
+ 				break;
+-		} else
+-			pending = !llist_empty(&queue->req_list);
++		}
+ 
+ 		result = nvme_tcp_try_recv(queue);
+ 		if (result > 0)
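
After this reordering the io_work kick is evaluated once, after the optional inline send, and only when nvme_tcp_queue_more() reports work remaining; that is why the helper moved above nvme_tcp_queue_request(). A compact model of the control flow, with plain booleans standing in for the queue state:

#include <stdbool.h>
#include <stdio.h>

static bool send_list_empty = true;
static bool req_list_empty;		/* a request was just queued */
static bool more_requests;

static bool queue_more(void)
{
	return !send_list_empty || !req_list_empty || more_requests;
}

static void queue_request(bool sync, bool last)
{
	if (sync)
		puts("inline send under send_mutex");

	if (last && queue_more())	/* kick the worker only if needed */
		puts("queue_work_on(io_work)");
}

int main(void)
{
	queue_request(false, true);
	return 0;
}
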
+diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
+index 5e1e3796efa4e..326f7d13024f9 100644
+--- a/drivers/pci/controller/Kconfig
++++ b/drivers/pci/controller/Kconfig
+@@ -40,6 +40,7 @@ config PCI_FTPCI100
+ config PCI_IXP4XX
+ 	bool "Intel IXP4xx PCI controller"
+ 	depends on ARM && OF
++	depends on ARCH_IXP4XX || COMPILE_TEST
+ 	default ARCH_IXP4XX
+ 	help
+ 	  Say Y here if you want support for the PCI host controller found
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index 35e61048e133c..ffb176d288cd9 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -27,6 +27,7 @@
+ #define STATUS_REG_SYS_2	0x508
+ #define STATUS_CLR_REG_SYS_2	0x708
+ #define LINK_DOWN		BIT(1)
++#define J7200_LINK_DOWN		BIT(10)
+ 
+ #define J721E_PCIE_USER_CMD_STATUS	0x4
+ #define LINK_TRAINING_ENABLE		BIT(0)
+@@ -57,6 +58,7 @@ struct j721e_pcie {
+ 	struct cdns_pcie	*cdns_pcie;
+ 	void __iomem		*user_cfg_base;
+ 	void __iomem		*intd_cfg_base;
++	u32			linkdown_irq_regfield;
+ };
+ 
+ enum j721e_pcie_mode {
+@@ -66,7 +68,10 @@ enum j721e_pcie_mode {
+ 
+ struct j721e_pcie_data {
+ 	enum j721e_pcie_mode	mode;
+-	bool quirk_retrain_flag;
++	unsigned int		quirk_retrain_flag:1;
++	unsigned int		quirk_detect_quiet_flag:1;
++	u32			linkdown_irq_regfield;
++	unsigned int		byte_access_allowed:1;
+ };
+ 
+ static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+@@ -98,12 +103,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+ 	u32 reg;
+ 
+ 	reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+-	if (!(reg & LINK_DOWN))
++	if (!(reg & pcie->linkdown_irq_regfield))
+ 		return IRQ_NONE;
+ 
+ 	dev_err(dev, "LINK DOWN!\n");
+ 
+-	j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
++	j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -112,7 +117,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+ 	u32 reg;
+ 
+ 	reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+-	reg |= LINK_DOWN;
++	reg |= pcie->linkdown_irq_regfield;
+ 	j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+ }
+ 
+@@ -284,10 +289,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
+ static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ 	.mode = PCI_MODE_RC,
+ 	.quirk_retrain_flag = true,
++	.byte_access_allowed = false,
++	.linkdown_irq_regfield = LINK_DOWN,
+ };
+ 
+ static const struct j721e_pcie_data j721e_pcie_ep_data = {
+ 	.mode = PCI_MODE_EP,
++	.linkdown_irq_regfield = LINK_DOWN,
++};
++
++static const struct j721e_pcie_data j7200_pcie_rc_data = {
++	.mode = PCI_MODE_RC,
++	.quirk_detect_quiet_flag = true,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
++	.byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data j7200_pcie_ep_data = {
++	.mode = PCI_MODE_EP,
++	.quirk_detect_quiet_flag = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_rc_data = {
++	.mode = PCI_MODE_RC,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
++	.byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_ep_data = {
++	.mode = PCI_MODE_EP,
++	.linkdown_irq_regfield = J7200_LINK_DOWN,
+ };
+ 
+ static const struct of_device_id of_j721e_pcie_match[] = {
+@@ -299,6 +330,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
+ 		.compatible = "ti,j721e-pcie-ep",
+ 		.data = &j721e_pcie_ep_data,
+ 	},
++	{
++		.compatible = "ti,j7200-pcie-host",
++		.data = &j7200_pcie_rc_data,
++	},
++	{
++		.compatible = "ti,j7200-pcie-ep",
++		.data = &j7200_pcie_ep_data,
++	},
++	{
++		.compatible = "ti,am64-pcie-host",
++		.data = &am64_pcie_rc_data,
++	},
++	{
++		.compatible = "ti,am64-pcie-ep",
++		.data = &am64_pcie_ep_data,
++	},
+ 	{},
+ };
+ 
+@@ -332,6 +379,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 
+ 	pcie->dev = dev;
+ 	pcie->mode = mode;
++	pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
+ 
+ 	base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+ 	if (IS_ERR(base))
+@@ -391,9 +439,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 			goto err_get_sync;
+ 		}
+ 
+-		bridge->ops = &cdns_ti_pcie_host_ops;
++		if (!data->byte_access_allowed)
++			bridge->ops = &cdns_ti_pcie_host_ops;
+ 		rc = pci_host_bridge_priv(bridge);
+ 		rc->quirk_retrain_flag = data->quirk_retrain_flag;
++		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ 
+ 		cdns_pcie = &rc->pcie;
+ 		cdns_pcie->dev = dev;
+@@ -459,6 +509,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 			ret = -ENOMEM;
+ 			goto err_get_sync;
+ 		}
++		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ 
+ 		cdns_pcie = &ep->pcie;
+ 		cdns_pcie->dev = dev;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+index 897cdde02bd80..dd7df1ac7fda2 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+@@ -623,6 +623,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+ 	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ 	/* Reserve region 0 for IRQs */
+ 	set_bit(0, &ep->ob_region_map);
++
++	if (ep->quirk_detect_quiet_flag)
++		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
++
+ 	spin_lock_init(&ep->lock);
+ 
+ 	return 0;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index ae1c55503513a..fb96d37a135c1 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -498,6 +498,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ 		return PTR_ERR(rc->cfg_base);
+ 	rc->cfg_res = res;
+ 
++	if (rc->quirk_detect_quiet_flag)
++		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
++
+ 	ret = cdns_pcie_start_link(pcie);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to start link\n");
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
+index 3c3646502d05c..52767f26048fd 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.c
++++ b/drivers/pci/controller/cadence/pcie-cadence.c
+@@ -7,6 +7,22 @@
+ 
+ #include "pcie-cadence.h"
+ 
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
++{
++	u32 delay = 0x3;
++	u32 ltssm_control_cap;
++
++	/*
++	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
++	 */
++	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
++	ltssm_control_cap = ((ltssm_control_cap &
++			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
++			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
++
++	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
++}
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ 				   u32 r, bool is_io,
+ 				   u64 cpu_addr, u64 pci_addr, size_t size)
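
The new CDNS_PCIE_DETECT_QUIET_MIN_DELAY() helper is the usual shift-and-mask field insertion, and cdns_pcie_detect_quiet_min_delay_set() performs a read-modify-write with it. The same arithmetic, runnable standalone with GENMASK(2, 1) expanded by hand:

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK	(0x3u << 1)	/* GENMASK(2, 1) */
#define FIELD_SHIFT	1
#define FIELD_PREP(v)	(((uint32_t)(v) << FIELD_SHIFT) & FIELD_MASK)

int main(void)
{
	uint32_t reg = 0;		/* pretend register contents */

	/* clear the field, then insert delay = 0x3 (2 ms) */
	reg = (reg & ~FIELD_MASK) | FIELD_PREP(0x3);

	printf("0x%08x\n", reg);	/* prints 0x00000006 */
	return 0;
}
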
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index 30db2d68c17a0..4bde99b74135d 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -189,6 +189,14 @@
+ /* AXI link down register */
+ #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+ 
++/* LTSSM Capabilities register */
++#define CDNS_PCIE_LTSSM_CONTROL_CAP             (CDNS_PCIE_LM_BASE + 0x0054)
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK  GENMASK(2, 1)
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
++	 (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
++	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
++
+ enum cdns_pcie_rp_bar {
+ 	RP_BAR_UNDEFINED = -1,
+ 	RP_BAR0,
+@@ -295,6 +303,7 @@ struct cdns_pcie {
+  * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+  *                available
+  * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+  */
+ struct cdns_pcie_rc {
+ 	struct cdns_pcie	pcie;
+@@ -303,7 +312,8 @@ struct cdns_pcie_rc {
+ 	u32			vendor_id;
+ 	u32			device_id;
+ 	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+-	bool                    quirk_retrain_flag;
++	unsigned int		quirk_retrain_flag:1;
++	unsigned int		quirk_detect_quiet_flag:1;
+ };
+ 
+ /**
+@@ -334,6 +344,7 @@ struct cdns_pcie_epf {
+  *        registers fields (RMW) accessible by both remote RC and EP to
+  *        minimize time between read and write
+  * @epf: Structure to hold info about endpoint function
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+  */
+ struct cdns_pcie_ep {
+ 	struct cdns_pcie	pcie;
+@@ -348,6 +359,7 @@ struct cdns_pcie_ep {
+ 	/* protect writing to PCI_STATUS while raising legacy interrupts */
+ 	spinlock_t		lock;
+ 	struct cdns_pcie_epf	*epf;
++	unsigned int		quirk_detect_quiet_flag:1;
+ };
+ 
+ 
+@@ -508,6 +520,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+ 	return 0;
+ }
+ #endif
++
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ 				   u32 r, bool is_io,
+ 				   u64 cpu_addr, u64 pci_addr, size_t size);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 3ec7b29d5dc72..55c8afb9a8996 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -497,19 +497,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ 	struct tegra_pcie_dw *pcie = arg;
+ 	struct dw_pcie_ep *ep = &pcie->pci.ep;
+ 	int spurious = 1;
+-	u32 val, tmp;
++	u32 status_l0, status_l1, link_status;
+ 
+-	val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+-	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+-		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+-		appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
++	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
++	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
++		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
++		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ 
+-		if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
++		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ 			pex_ep_event_hot_rst_done(pcie);
+ 
+-		if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+-			tmp = appl_readl(pcie, APPL_LINK_STATUS);
+-			if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
++		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
++			link_status = appl_readl(pcie, APPL_LINK_STATUS);
++			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ 				dev_dbg(pcie->dev, "Link is up with Host\n");
+ 				dw_pcie_ep_linkup(ep);
+ 			}
+@@ -518,11 +518,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ 		spurious = 0;
+ 	}
+ 
+-	if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+-		val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+-		appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
++	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
++		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
++		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+ 
+-		if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
++		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ 			return IRQ_WAKE_THREAD;
+ 
+ 		spurious = 0;
+@@ -530,8 +530,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ 
+ 	if (spurious) {
+ 		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+-			 val);
+-		appl_writel(pcie, val, APPL_INTR_STATUS_L0);
++			 status_l0);
++		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+@@ -1763,7 +1763,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+ 	val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ 	val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+-	val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
++	val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ 	dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+ 
+ 	ret = dw_pcie_ep_init_complete(ep);
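
The one-line fix swaps lower_32_bits() for upper_32_bits() when programming the HIGH half of the 64-bit MSI-X match address. A standalone demonstration of the split; the kernel macros are re-created here so the snippet compiles on its own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t phys = 0x000000ab12345678ULL;

	printf("LOW  = 0x%08" PRIx32 "\n", lower_32_bits(phys)); /* 0x12345678 */
	printf("HIGH = 0x%08" PRIx32 "\n", upper_32_bits(phys)); /* 0x000000ab */
	return 0;
}
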
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index c979229a6d0df..b358212d71ab7 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2193,13 +2193,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ 		rp->np = port;
+ 
+ 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
+-		if (IS_ERR(rp->base))
+-			return PTR_ERR(rp->base);
++		if (IS_ERR(rp->base)) {
++			err = PTR_ERR(rp->base);
++			goto err_node_put;
++		}
+ 
+ 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+ 		if (!label) {
+-			dev_err(dev, "failed to create reset GPIO label\n");
+-			return -ENOMEM;
++			err = -ENOMEM;
++			goto err_node_put;
+ 		}
+ 
+ 		/*
+@@ -2217,7 +2219,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ 			} else {
+ 				dev_err(dev, "failed to get reset GPIO: %ld\n",
+ 					PTR_ERR(rp->reset_gpio));
+-				return PTR_ERR(rp->reset_gpio);
++				err = PTR_ERR(rp->reset_gpio);
++				goto err_node_put;
+ 			}
+ 		}
+ 
+diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
+index 56b8ee7bf3307..f918c713afb08 100644
+--- a/drivers/pci/controller/pcie-iproc-bcma.c
++++ b/drivers/pci/controller/pcie-iproc-bcma.c
+@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+ {
+ 	struct device *dev = &bdev->dev;
+ 	struct iproc_pcie *pcie;
+-	LIST_HEAD(resources);
+ 	struct pci_host_bridge *bridge;
+ 	int ret;
+ 
+@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+ 	pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+ 	pcie->mem.name = "PCIe MEM space";
+ 	pcie->mem.flags = IORESOURCE_MEM;
+-	pci_add_resource(&resources, &pcie->mem);
++	pci_add_resource(&bridge->windows, &pcie->mem);
++	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
++	if (ret)
++		return ret;
+ 
+ 	pcie->map_irq = iproc_pcie_bcma_map_irq;
+ 
+-	ret = iproc_pcie_setup(pcie, &resources);
+-	if (ret) {
+-		dev_err(dev, "PCIe controller setup failed\n");
+-		pci_free_resource_list(&resources);
+-		return ret;
+-	}
+-
+ 	bcma_set_drvdata(bdev, pcie);
+-	return 0;
++
++	return iproc_pcie_setup(pcie, &bridge->windows);
+ }
+ 
+ static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
+index b4a288e24aafb..c91d85b151290 100644
+--- a/drivers/pci/controller/pcie-rcar-ep.c
++++ b/drivers/pci/controller/pcie-rcar-ep.c
+@@ -492,9 +492,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
+ 	pcie->dev = dev;
+ 
+ 	pm_runtime_enable(dev);
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+-		dev_err(dev, "pm_runtime_get_sync failed\n");
++		dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ 		goto err_pm_disable;
+ 	}
+ 
+diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
+index a32070be5adf9..cc6194aa24c15 100644
+--- a/drivers/pci/hotplug/TODO
++++ b/drivers/pci/hotplug/TODO
+@@ -40,9 +40,6 @@ ibmphp:
+ 
+ * The return value of pci_hp_register() is not checked.
+ 
+-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+-  and once more in the error path of its caller ibmphp_access_ebda().
+-
+ * The various slot data structures are difficult to follow and need to be
+   simplified.  A lot of functions are too large and too complex, they need
+   to be broken up into smaller, manageable pieces.  Negative examples are
+diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
+index 11a2661dc0627..7fb75401ad8a7 100644
+--- a/drivers/pci/hotplug/ibmphp_ebda.c
++++ b/drivers/pci/hotplug/ibmphp_ebda.c
+@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
+ 		/* init hpc structure */
+ 		hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
+ 		if (!hpc_ptr) {
+-			rc = -ENOMEM;
+-			goto error_no_hpc;
++			return -ENOMEM;
+ 		}
+ 		hpc_ptr->ctlr_id = ctlr_id;
+ 		hpc_ptr->ctlr_relative_id = ctlr;
+@@ -910,8 +909,6 @@ error:
+ 	kfree(tmp_slot);
+ error_no_slot:
+ 	free_ebda_hpc(hpc_ptr);
+-error_no_hpc:
+-	iounmap(io_mem);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/pci/of.c b/drivers/pci/of.c
+index a143b02b2dcdf..d84381ce82b52 100644
+--- a/drivers/pci/of.c
++++ b/drivers/pci/of.c
+@@ -310,7 +310,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ 	/* Check for ranges property */
+ 	err = of_pci_range_parser_init(&parser, dev_node);
+ 	if (err)
+-		goto failed;
++		return 0;
+ 
+ 	dev_dbg(dev, "Parsing ranges property...\n");
+ 	for_each_of_pci_range(&parser, &range) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index a5e6759c407b9..a4eb0c042ca3e 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -265,7 +265,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
+ 
+ 	*endptr = strchrnul(path, ';');
+ 
+-	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
++	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
+ 	if (!wpath)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
+index 95d4eef2c9e86..4810faa67f520 100644
+--- a/drivers/pci/pcie/ptm.c
++++ b/drivers/pci/pcie/ptm.c
+@@ -60,10 +60,8 @@ void pci_save_ptm_state(struct pci_dev *dev)
+ 		return;
+ 
+ 	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
+-	if (!save_state) {
+-		pci_err(dev, "no suspend buffer for PTM\n");
++	if (!save_state)
+ 		return;
+-	}
+ 
+ 	cap = (u16 *)&save_state->cap.data[0];
+ 	pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 1905ee0297a4c..8c3c1ef92171f 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4616,6 +4616,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
+ 		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+ 
++/*
++ * Each of these NXP Root Ports is in a Root Complex with a unique segment
++ * number and does provide isolation features to disable peer transactions
++ * and validate bus numbers in requests, but does not provide an ACS
++ * capability.
++ */
++static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
++{
++	return pci_acs_ctrl_enabled(acs_flags,
++		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++}
++
+ static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+ 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+@@ -4842,6 +4854,10 @@ static const struct pci_dev_acs_enabled {
+ 	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+ 	/* Cavium ThunderX */
+ 	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
++	/* Cavium multi-function devices */
++	{ PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
++	{ PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
++	{ PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
+ 	/* APM X-Gene */
+ 	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
+ 	/* Ampere Computing */
+@@ -4862,6 +4878,39 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+ 	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
++	/* NXP root ports, xx=16, 12, or 08 cores */
++	/* LX2xx0A : without security features + CAN-FD */
++	{ PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
++	/* LX2xx0C : security features + CAN-FD */
++	{ PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
++	/* LX2xx0E : security features + CAN */
++	{ PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
++	/* LX2xx0N : without security features + CAN */
++	{ PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
++	/* LX2xx2A : without security features + CAN-FD */
++	{ PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
++	/* LX2xx2C : security features + CAN-FD */
++	{ PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
++	/* LX2xx2E : security features + CAN */
++	{ PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
++	/* LX2xx2N : without security features + CAN */
++	{ PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
++	{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
+ 	/* Zhaoxin Root/Downstream Ports */
+ 	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+ 	{ 0 }
+@@ -5350,7 +5399,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ 			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+ 
+ /*
+- * Create device link for NVIDIA GPU with integrated USB xHCI Host
++ * Create device link for GPUs with integrated USB xHCI Host
+  * controller to VGA.
+  */
+ static void quirk_gpu_usb(struct pci_dev *usb)
+@@ -5359,9 +5408,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ 			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
++			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+ 
+ /*
+- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
++ * Create device link for GPUs with integrated Type-C UCSI controller
+  * to VGA. Currently there is no class code defined for UCSI device over PCI
+  * so using UNKNOWN class for now and it will be updated when UCSI
+  * over PCI gets a class code.
+@@ -5374,6 +5425,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ 			      PCI_CLASS_SERIAL_UNKNOWN, 8,
+ 			      quirk_gpu_usb_typec_ucsi);
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
++			      PCI_CLASS_SERIAL_UNKNOWN, 8,
++			      quirk_gpu_usb_typec_ucsi);
+ 
+ /*
+  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
+diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
+index f1cbc6b2edbb3..ebadc6c08e116 100644
+--- a/drivers/remoteproc/qcom_wcnss.c
++++ b/drivers/remoteproc/qcom_wcnss.c
+@@ -142,18 +142,6 @@ static const struct wcnss_data pronto_v2_data = {
+ 	.num_vregs = 1,
+ };
+ 
+-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
+-			    struct qcom_iris *iris,
+-			    bool use_48mhz_xo)
+-{
+-	mutex_lock(&wcnss->iris_lock);
+-
+-	wcnss->iris = iris;
+-	wcnss->use_48mhz_xo = use_48mhz_xo;
+-
+-	mutex_unlock(&wcnss->iris_lock);
+-}
+-
+ static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
+ {
+ 	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+@@ -639,12 +627,20 @@ static int wcnss_probe(struct platform_device *pdev)
+ 		goto detach_pds;
+ 	}
+ 
++	wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
++	if (IS_ERR(wcnss->iris)) {
++		ret = PTR_ERR(wcnss->iris);
++		goto detach_pds;
++	}
++
+ 	ret = rproc_add(rproc);
+ 	if (ret)
+-		goto detach_pds;
++		goto remove_iris;
+ 
+-	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
++	return 0;
+ 
++remove_iris:
++	qcom_iris_remove(wcnss->iris);
+ detach_pds:
+ 	wcnss_release_pds(wcnss);
+ free_rproc:
+@@ -657,7 +653,7 @@ static int wcnss_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
+ 
+-	of_platform_depopulate(&pdev->dev);
++	qcom_iris_remove(wcnss->iris);
+ 
+ 	rproc_del(wcnss->rproc);
+ 
+@@ -686,28 +682,7 @@ static struct platform_driver wcnss_driver = {
+ 	},
+ };
+ 
+-static int __init wcnss_init(void)
+-{
+-	int ret;
+-
+-	ret = platform_driver_register(&wcnss_driver);
+-	if (ret)
+-		return ret;
+-
+-	ret = platform_driver_register(&qcom_iris_driver);
+-	if (ret)
+-		platform_driver_unregister(&wcnss_driver);
+-
+-	return ret;
+-}
+-module_init(wcnss_init);
+-
+-static void __exit wcnss_exit(void)
+-{
+-	platform_driver_unregister(&qcom_iris_driver);
+-	platform_driver_unregister(&wcnss_driver);
+-}
+-module_exit(wcnss_exit);
++module_platform_driver(wcnss_driver);
+ 
+ MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
+index 62c8682d0a92d..6d01ee6afa7f8 100644
+--- a/drivers/remoteproc/qcom_wcnss.h
++++ b/drivers/remoteproc/qcom_wcnss.h
+@@ -17,9 +17,9 @@ struct wcnss_vreg_info {
+ 	bool super_turbo;
+ };
+ 
++struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo);
++void qcom_iris_remove(struct qcom_iris *iris);
+ int qcom_iris_enable(struct qcom_iris *iris);
+ void qcom_iris_disable(struct qcom_iris *iris);
+ 
+-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
+-
+ #endif
+diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
+index 169acd305ae39..09720ddddc857 100644
+--- a/drivers/remoteproc/qcom_wcnss_iris.c
++++ b/drivers/remoteproc/qcom_wcnss_iris.c
+@@ -17,7 +17,7 @@
+ #include "qcom_wcnss.h"
+ 
+ struct qcom_iris {
+-	struct device *dev;
++	struct device dev;
+ 
+ 	struct clk *xo_clk;
+ 
+@@ -75,7 +75,7 @@ int qcom_iris_enable(struct qcom_iris *iris)
+ 
+ 	ret = clk_prepare_enable(iris->xo_clk);
+ 	if (ret) {
+-		dev_err(iris->dev, "failed to enable xo clk\n");
++		dev_err(&iris->dev, "failed to enable xo clk\n");
+ 		goto disable_regulators;
+ 	}
+ 
+@@ -93,43 +93,90 @@ void qcom_iris_disable(struct qcom_iris *iris)
+ 	regulator_bulk_disable(iris->num_vregs, iris->vregs);
+ }
+ 
+-static int qcom_iris_probe(struct platform_device *pdev)
++static const struct of_device_id iris_of_match[] = {
++	{ .compatible = "qcom,wcn3620", .data = &wcn3620_data },
++	{ .compatible = "qcom,wcn3660", .data = &wcn3660_data },
++	{ .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
++	{ .compatible = "qcom,wcn3680", .data = &wcn3680_data },
++	{}
++};
++
++static void qcom_iris_release(struct device *dev)
++{
++	struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
++
++	of_node_put(iris->dev.of_node);
++	kfree(iris);
++}
++
++struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
+ {
++	const struct of_device_id *match;
+ 	const struct iris_data *data;
+-	struct qcom_wcnss *wcnss;
++	struct device_node *of_node;
+ 	struct qcom_iris *iris;
+ 	int ret;
+ 	int i;
+ 
+-	iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
+-	if (!iris)
+-		return -ENOMEM;
++	of_node = of_get_child_by_name(parent->of_node, "iris");
++	if (!of_node) {
++		dev_err(parent, "No child node \"iris\" found\n");
++		return ERR_PTR(-EINVAL);
++	}
++
++	iris = kzalloc(sizeof(*iris), GFP_KERNEL);
++	if (!iris) {
++		of_node_put(of_node);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	device_initialize(&iris->dev);
++	iris->dev.parent = parent;
++	iris->dev.release = qcom_iris_release;
++	iris->dev.of_node = of_node;
++
++	dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
++
++	ret = device_add(&iris->dev);
++	if (ret) {
++		put_device(&iris->dev);
++		return ERR_PTR(ret);
++	}
++
++	match = of_match_device(iris_of_match, &iris->dev);
++	if (!match) {
++		dev_err(&iris->dev, "no matching compatible for iris\n");
++		ret = -EINVAL;
++		goto err_device_del;
++	}
+ 
+-	data = of_device_get_match_data(&pdev->dev);
+-	wcnss = dev_get_drvdata(pdev->dev.parent);
++	data = match->data;
+ 
+-	iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
++	iris->xo_clk = devm_clk_get(&iris->dev, "xo");
+ 	if (IS_ERR(iris->xo_clk)) {
+-		if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
+-			dev_err(&pdev->dev, "failed to acquire xo clk\n");
+-		return PTR_ERR(iris->xo_clk);
++		ret = PTR_ERR(iris->xo_clk);
++		if (ret != -EPROBE_DEFER)
++			dev_err(&iris->dev, "failed to acquire xo clk\n");
++		goto err_device_del;
+ 	}
+ 
+ 	iris->num_vregs = data->num_vregs;
+-	iris->vregs = devm_kcalloc(&pdev->dev,
++	iris->vregs = devm_kcalloc(&iris->dev,
+ 				   iris->num_vregs,
+ 				   sizeof(struct regulator_bulk_data),
+ 				   GFP_KERNEL);
+-	if (!iris->vregs)
+-		return -ENOMEM;
++	if (!iris->vregs) {
++		ret = -ENOMEM;
++		goto err_device_del;
++	}
+ 
+ 	for (i = 0; i < iris->num_vregs; i++)
+ 		iris->vregs[i].supply = data->vregs[i].name;
+ 
+-	ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
++	ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "failed to get regulators\n");
+-		return ret;
++		dev_err(&iris->dev, "failed to get regulators\n");
++		goto err_device_del;
+ 	}
+ 
+ 	for (i = 0; i < iris->num_vregs; i++) {
+@@ -143,34 +190,17 @@ static int qcom_iris_probe(struct platform_device *pdev)
+ 					   data->vregs[i].load_uA);
+ 	}
+ 
+-	qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
+-
+-	return 0;
+-}
++	*use_48mhz_xo = data->use_48mhz_xo;
+ 
+-static int qcom_iris_remove(struct platform_device *pdev)
+-{
+-	struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
++	return iris;
+ 
+-	qcom_wcnss_assign_iris(wcnss, NULL, false);
++err_device_del:
++	device_del(&iris->dev);
+ 
+-	return 0;
++	return ERR_PTR(ret);
+ }
+ 
+-static const struct of_device_id iris_of_match[] = {
+-	{ .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+-	{ .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+-	{ .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
+-	{ .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+-	{}
+-};
+-MODULE_DEVICE_TABLE(of, iris_of_match);
+-
+-struct platform_driver qcom_iris_driver = {
+-	.probe = qcom_iris_probe,
+-	.remove = qcom_iris_remove,
+-	.driver = {
+-		.name = "qcom-iris",
+-		.of_match_table = iris_of_match,
+-	},
+-};
++void qcom_iris_remove(struct qcom_iris *iris)
++{
++	device_del(&iris->dev);
++}
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 670fd8a2970e3..6545afb2f20eb 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -1053,7 +1053,9 @@ static void cmos_check_wkalrm(struct device *dev)
+ 	 * ACK the rtc irq here
+ 	 */
+ 	if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
++		local_irq_disable();
+ 		cmos_interrupt(0, (void *)cmos->rtc);
++		local_irq_enable();
+ 		return;
+ 	}
+ 
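
The rtc-cmos fix calls cmos_interrupt() directly from the resume path, and that handler expects to run with local interrupts off. The shape of the pattern, as a kernel-context sketch (assumes kernel headers and a hypothetical wrapper name; not standalone code):

static void kick_handler_from_process_context(irq_handler_t handler,
					      void *dev_id)
{
	local_irq_disable();
	handler(0, dev_id);	/* handler assumes IRQs are masked */
	local_irq_enable();
}
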
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index 2f3515fa242a3..f3d5c7f4c13d2 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
+ 	sclp.has_gisaf = !!(sccb->fac118 & 0x08);
+ 	sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ 	sclp.has_kss = !!(sccb->fac98 & 0x01);
+-	sclp.has_sipl = !!(sccb->cbl & 0x4000);
+ 	if (sccb->fac85 & 0x02)
+ 		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+ 	if (sccb->fac91 & 0x40)
+ 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+ 	if (sccb->cpuoff > 134)
+ 		sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
++	if (sccb->cpuoff > 137)
++		sclp.has_sipl = !!(sccb->cbl & 0x4000);
+ 	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+ 	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+ 	sclp.rzm <<= 20;
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 6414bd5741b87..a38b0c39ea4be 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
+ 		.num = nvq->batched_xdp,
+ 		.ptr = nvq->xdp,
+ 	};
+-	int err;
++	int i, err;
+ 
+ 	if (nvq->batched_xdp == 0)
+ 		goto signal_used;
+@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
+ 	err = sock->ops->sendmsg(sock, msghdr, 0);
+ 	if (unlikely(err < 0)) {
+ 		vq_err(&nvq->vq, "Fail to batch sending packets\n");
++
++		/* free pages owned by XDP; since this is an unlikely error path,
++		 * keep it simple and avoid more complex bulk update for the
++		 * used pages
++		 */
++		for (i = 0; i < nvq->batched_xdp; ++i)
++			put_page(virt_to_head_page(nvq->xdp[i].data));
++		nvq->batched_xdp = 0;
++		nvq->done_idx = 0;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
+index a7df5bcca9da5..37aa5a6695309 100644
+--- a/drivers/video/backlight/ktd253-backlight.c
++++ b/drivers/video/backlight/ktd253-backlight.c
+@@ -25,6 +25,7 @@
+ 
+ #define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
+ #define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
++#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us, now it doesn't look good */
+ #define KTD253_T_OFF_MS 3
+ 
+ struct ktd253_backlight {
+@@ -34,13 +35,50 @@ struct ktd253_backlight {
+ 	u16 ratio;
+ };
+ 
++static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
++{
++	gpiod_set_value_cansleep(ktd253->gpiod, 1);
++	ndelay(KTD253_T_HIGH_NS);
++	/* We always fall back to this when we power on */
++}
++
++static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
++{
++	/*
++	 * These GPIO operations absolutely can NOT sleep so no _cansleep
++	 * suffixes, and no using GPIO expanders on slow buses for this!
++	 *
++	 * The maximum number of cycles of the loop is 32  so the time taken
++	 * should nominally be:
++	 * (T_LOW_NS + T_HIGH_NS + loop_time) * 32
++	 *
++	 * Architectures do not always support ndelay() and we will get a few us
++	 * instead. If we get to a critical time limit an interrupt has likely
++	 * occured in the low part of the loop and we need to restart from the
++	 * top so we have the backlight in a known state.
++	 */
++	u64 ns;
++
++	ns = ktime_get_ns();
++	gpiod_set_value(ktd253->gpiod, 0);
++	ndelay(KTD253_T_LOW_NS);
++	gpiod_set_value(ktd253->gpiod, 1);
++	ns = ktime_get_ns() - ns;
++	if (ns >= KTD253_T_OFF_CRIT_NS) {
++		dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
++		return -EAGAIN;
++	}
++	ndelay(KTD253_T_HIGH_NS);
++	return 0;
++}
++
+ static int ktd253_backlight_update_status(struct backlight_device *bl)
+ {
+ 	struct ktd253_backlight *ktd253 = bl_get_data(bl);
+ 	int brightness = backlight_get_brightness(bl);
+ 	u16 target_ratio;
+ 	u16 current_ratio = ktd253->ratio;
+-	unsigned long flags;
++	int ret;
+ 
+ 	dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
+ 
+@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl)
+ 	}
+ 
+ 	if (current_ratio == 0) {
+-		gpiod_set_value_cansleep(ktd253->gpiod, 1);
+-		ndelay(KTD253_T_HIGH_NS);
+-		/* We always fall back to this when we power on */
++		ktd253_backlight_set_max_ratio(ktd253);
+ 		current_ratio = KTD253_MAX_RATIO;
+ 	}
+ 
+-	/*
+-	 * WARNING:
+-	 * The loop to set the correct current level is performed
+-	 * with interrupts disabled as it is timing critical.
+-	 * The maximum number of cycles of the loop is 32
+-	 * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32,
+-	 */
+-	local_irq_save(flags);
+ 	while (current_ratio != target_ratio) {
+ 		/*
+ 		 * These GPIO operations absolutely can NOT sleep so no
+ 		 * _cansleep suffixes, and no using GPIO expanders on
+ 		 * slow buses for this!
+ 		 */
+-		gpiod_set_value(ktd253->gpiod, 0);
+-		ndelay(KTD253_T_LOW_NS);
+-		gpiod_set_value(ktd253->gpiod, 1);
+-		ndelay(KTD253_T_HIGH_NS);
+-		/* After 1/32 we loop back to 32/32 */
+-		if (current_ratio == KTD253_MIN_RATIO)
++		ret = ktd253_backlight_stepdown(ktd253);
++		if (ret == -EAGAIN) {
++			/*
++			 * Something disturbed the backlight setting code when
++			 * running so we need to bring the PWM back to a known
++			 * state. This shouldn't happen too much.
++			 */
++			gpiod_set_value_cansleep(ktd253->gpiod, 0);
++			msleep(KTD253_T_OFF_MS);
++			ktd253_backlight_set_max_ratio(ktd253);
++			current_ratio = KTD253_MAX_RATIO;
++		} else if (current_ratio == KTD253_MIN_RATIO) {
++			/* After 1/32 we loop back to 32/32 */
+ 			current_ratio = KTD253_MAX_RATIO;
+-		else
++		} else {
+ 			current_ratio--;
++		}
+ 	}
+-	local_irq_restore(flags);
+ 	ktd253->ratio = current_ratio;
+ 
+ 	dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 3bab324852732..0cc07d957b643 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1096,6 +1096,8 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
+ 		watchdog_stop(wdd);
+ 	}
+ 
++	watchdog_hrtimer_pretimeout_stop(wdd);
++
+ 	mutex_lock(&wd_data->lock);
+ 	wd_data->wdd = NULL;
+ 	wdd->wd_data = NULL;
+@@ -1103,7 +1105,6 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
+ 
+ 	hrtimer_cancel(&wd_data->timer);
+ 	kthread_cancel_work_sync(&wd_data->work);
+-	watchdog_hrtimer_pretimeout_stop(wdd);
+ 
+ 	put_device(&wd_data->dev);
+ }
+@@ -1172,7 +1173,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
+ 
+ 	wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
+ 
+-	return __watchdog_ping(wdd);
++	if (watchdog_hw_running(wdd) && handle_boot_enabled)
++		return __watchdog_ping(wdd);
++
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
+ 
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 24d11861ac7d8..dbb18dc956f34 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -211,12 +211,11 @@ error:
+ 	if (repeat--) {
+ 		/* Min is 2MB */
+ 		nslabs = max(1024UL, (nslabs >> 1));
+-		pr_info("Lowering to %luMB\n",
+-			(nslabs << IO_TLB_SHIFT) >> 20);
++		bytes = nslabs << IO_TLB_SHIFT;
++		pr_info("Lowering to %luMB\n", bytes >> 20);
+ 		goto retry;
+ 	}
+ 	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
+-	free_pages((unsigned long)start, order);
+ 	return rc;
+ }
+ 
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 1c8f79b3dd065..dde341a6388a1 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -288,10 +288,10 @@ void fuse_request_end(struct fuse_req *req)
+ 
+ 	/*
+ 	 * test_and_set_bit() implies smp_mb() between bit
+-	 * changing and below intr_entry check. Pairs with
++	 * changing and below FR_INTERRUPTED check. Pairs with
+ 	 * smp_mb() from queue_interrupt().
+ 	 */
+-	if (!list_empty(&req->intr_entry)) {
++	if (test_bit(FR_INTERRUPTED, &req->flags)) {
+ 		spin_lock(&fiq->lock);
+ 		list_del_init(&req->intr_entry);
+ 		spin_unlock(&fiq->lock);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index c5d4638f6d7fd..43aaa35664315 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2683,7 +2683,8 @@ static bool io_file_supports_async(struct io_kiocb *req, int rw)
+ 	return __io_file_supports_async(req->file, rw);
+ }
+ 
+-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
++static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
++		      int rw)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	struct kiocb *kiocb = &req->rw.kiocb;
+@@ -2705,8 +2706,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+-	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
+-	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
++	/*
++	 * If the file is marked O_NONBLOCK, still allow retry for it if it
++	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
++	 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
++	 */
++	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
++	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_async(req, rw)))
+ 		req->flags |= REQ_F_NOWAIT;
+ 
+ 	ioprio = READ_ONCE(sqe->ioprio);
+@@ -3107,12 +3113,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
+ 				ret = nr;
+ 			break;
+ 		}
++		if (!iov_iter_is_bvec(iter)) {
++			iov_iter_advance(iter, nr);
++		} else {
++			req->rw.len -= nr;
++			req->rw.addr += nr;
++		}
+ 		ret += nr;
+ 		if (nr != iovec.iov_len)
+ 			break;
+-		req->rw.len -= nr;
+-		req->rw.addr += nr;
+-		iov_iter_advance(iter, nr);
+ 	}
+ 
+ 	return ret;
+@@ -3190,7 +3199,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	if (unlikely(!(req->file->f_mode & FMODE_READ)))
+ 		return -EBADF;
+-	return io_prep_rw(req, sqe);
++	return io_prep_rw(req, sqe, READ);
+ }
+ 
+ /*
+@@ -3277,6 +3286,12 @@ static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
+ 		return -EINVAL;
+ }
+ 
++static bool need_read_all(struct io_kiocb *req)
++{
++	return req->flags & REQ_F_ISREG ||
++		S_ISBLK(file_inode(req->file)->i_mode);
++}
++
+ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+@@ -3331,7 +3346,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	} else if (ret == -EIOCBQUEUED) {
+ 		goto out_free;
+ 	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
+-		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
++		   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
+ 		/* read all, failed, already did sync or don't want to retry */
+ 		goto done;
+ 	}
+@@ -3379,7 +3394,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
+ 		return -EBADF;
+-	return io_prep_rw(req, sqe);
++	return io_prep_rw(req, sqe, WRITE);
+ }
+ 
+ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 540b377ca8f61..acbed2ecf6e8c 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1740,8 +1740,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
+ static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
+ static inline int pci_assign_resource(struct pci_dev *dev, int i)
+ { return -EBUSY; }
+-static inline int __pci_register_driver(struct pci_driver *drv,
+-					struct module *owner)
++static inline int __must_check __pci_register_driver(struct pci_driver *drv,
++						     struct module *owner,
++						     const char *mod_name)
+ { return 0; }
+ static inline int pci_register_driver(struct pci_driver *drv)
+ { return 0; }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 4bac1831de802..1a9b8589391c0 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2451,7 +2451,8 @@
+ #define PCI_VENDOR_ID_TDI               0x192E
+ #define PCI_DEVICE_ID_TDI_EHCI          0x0101
+ 
+-#define PCI_VENDOR_ID_FREESCALE		0x1957
++#define PCI_VENDOR_ID_FREESCALE		0x1957	/* duplicate: NXP */
++#define PCI_VENDOR_ID_NXP		0x1957	/* duplicate: FREESCALE */
+ #define PCI_DEVICE_ID_MPC8308		0xc006
+ #define PCI_DEVICE_ID_MPC8315E		0x00b4
+ #define PCI_DEVICE_ID_MPC8315		0x00b5
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index afb3ded0b6912..237291196ce28 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -451,6 +451,9 @@ void phylink_mac_change(struct phylink *, bool up);
+ void phylink_start(struct phylink *);
+ void phylink_stop(struct phylink *);
+ 
++void phylink_suspend(struct phylink *pl, bool mac_wol);
++void phylink_resume(struct phylink *pl);
++
+ void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
+ int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
+ 
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ec8d07d88641c..f6935787e7e8b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1394,6 +1394,7 @@ struct task_struct {
+ 					mce_whole_page : 1,
+ 					__mce_reserved : 62;
+ 	struct callback_head		mce_kill_me;
++	int				mce_count;
+ #endif
+ 
+ #ifdef CONFIG_KRETPROBES
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index b2db9cd9a73f3..4f7478c482738 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1935,7 +1935,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
+ 	WRITE_ONCE(newsk->prev, prev);
+ 	WRITE_ONCE(next->prev, newsk);
+ 	WRITE_ONCE(prev->next, newsk);
+-	list->qlen++;
++	WRITE_ONCE(list->qlen, list->qlen + 1);
+ }
+ 
+ static inline void __skb_queue_splice(const struct sk_buff_head *list,
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index 048d297623c9a..d833f717e8022 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -437,6 +437,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
+ 	return dp->type == DSA_PORT_TYPE_USER;
+ }
+ 
++static inline bool dsa_port_is_unused(struct dsa_port *dp)
++{
++	return dp->type == DSA_PORT_TYPE_UNUSED;
++}
++
+ static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
+ {
+ 	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 6f5e702400717..58beb16a49b8d 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -194,7 +194,7 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
+ 
+ static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4)
+ {
+-	return &(flowi4_to_flowi(fl4)->u.__fl_common);
++	return &(fl4->__fl_common);
+ }
+ 
+ static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+@@ -204,7 +204,7 @@ static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6)
+ 
+ static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
+ {
+-	return &(flowi6_to_flowi(fl6)->u.__fl_common);
++	return &(fl6->__fl_common);
+ }
+ 
+ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
+index 79a699f106b14..ec88590b31984 100644
+--- a/include/uapi/linux/pkt_sched.h
++++ b/include/uapi/linux/pkt_sched.h
+@@ -827,6 +827,8 @@ struct tc_codel_xstats {
+ 
+ /* FQ_CODEL */
+ 
++#define FQ_CODEL_QUANTUM_MAX (1 << 20)
++
+ enum {
+ 	TCA_FQ_CODEL_UNSPEC,
+ 	TCA_FQ_CODEL_TARGET,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 1cb1f9b8392e2..e5c4aca620c58 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10192,7 +10192,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
+ 		return;
+ 
+ 	if (ifh->nr_file_filters) {
+-		mm = get_task_mm(event->ctx->task);
++		mm = get_task_mm(task);
+ 		if (!mm)
+ 			goto restart;
+ 
+diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
+index 94ef2d099e322..d713714cba67f 100644
+--- a/kernel/trace/trace_boot.c
++++ b/kernel/trace/trace_boot.c
+@@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
+ 			pr_err("Failed to apply filter: %s\n", buf);
+ 	}
+ 
+-	xbc_node_for_each_array_value(enode, "actions", anode, p) {
+-		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+-			pr_err("action string is too long: %s\n", p);
+-		else if (trigger_process_regex(file, buf) < 0)
+-			pr_err("Failed to apply an action: %s\n", buf);
+-	}
++	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
++		xbc_node_for_each_array_value(enode, "actions", anode, p) {
++			if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
++				pr_err("action string is too long: %s\n", p);
++			else if (trigger_process_regex(file, buf) < 0)
++				pr_err("Failed to apply an action: %s\n", buf);
++		}
++	} else if (xbc_node_find_value(enode, "actions", NULL))
++		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
+ 
+ 	if (xbc_node_find_value(enode, "enable", NULL)) {
+ 		if (trace_event_enable_disable(file, 1, 0) < 0)
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index ea6178cb5e334..032191977e34c 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
+ 	/* Register new event */
+ 	ret = register_kprobe_event(tk);
+ 	if (ret) {
+-		pr_warn("Failed to register probe event(%d)\n", ret);
++		if (ret == -EEXIST) {
++			trace_probe_log_set_index(0);
++			trace_probe_log_err(0, EVENT_EXIST);
++		} else
++			pr_warn("Failed to register probe event(%d)\n", ret);
+ 		goto end;
+ 	}
+ 
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 15413ad7cef2b..0e29bb14fc8be 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -1029,11 +1029,36 @@ error:
+ 	return ret;
+ }
+ 
++static struct trace_event_call *
++find_trace_event_call(const char *system, const char *event_name)
++{
++	struct trace_event_call *tp_event;
++	const char *name;
++
++	list_for_each_entry(tp_event, &ftrace_events, list) {
++		if (!tp_event->class->system ||
++		    strcmp(system, tp_event->class->system))
++			continue;
++		name = trace_event_name(tp_event);
++		if (!name || strcmp(event_name, name))
++			continue;
++		return tp_event;
++	}
++
++	return NULL;
++}
++
+ int trace_probe_register_event_call(struct trace_probe *tp)
+ {
+ 	struct trace_event_call *call = trace_probe_event_call(tp);
+ 	int ret;
+ 
++	lockdep_assert_held(&event_mutex);
++
++	if (find_trace_event_call(trace_probe_group_name(tp),
++				  trace_probe_name(tp)))
++		return -EEXIST;
++
+ 	ret = register_trace_event(&call->event);
+ 	if (!ret)
+ 		return -ENODEV;
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 227d518e5ba52..9f14186d132ed 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -399,6 +399,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ 	C(NO_EVENT_NAME,	"Event name is not specified"),		\
+ 	C(EVENT_TOO_LONG,	"Event name is too long"),		\
+ 	C(BAD_EVENT_NAME,	"Event name must follow the same rules as C identifiers"), \
++	C(EVENT_EXIST,		"Given group/event name is already used by another event"), \
+ 	C(RETVAL_ON_PROBE,	"$retval is not available on probe"),	\
+ 	C(BAD_STACK_NUM,	"Invalid stack number"),		\
+ 	C(BAD_ARG_NUM,		"Invalid argument number"),		\
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 9b50869a5ddb5..957244ee07c8d 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
+ 
+ 	ret = register_uprobe_event(tu);
+ 	if (ret) {
+-		pr_warn("Failed to register probe event(%d)\n", ret);
++		if (ret == -EEXIST) {
++			trace_probe_log_set_index(0);
++			trace_probe_log_err(0, EVENT_EXIST);
++		} else
++			pr_warn("Failed to register probe event(%d)\n", ret);
+ 		goto end;
+ 	}
+ 
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
+index 37b67194c0dfe..414dc5671c45e 100644
+--- a/net/caif/chnl_net.c
++++ b/net/caif/chnl_net.c
+@@ -53,20 +53,6 @@ struct chnl_net {
+ 	enum caif_states state;
+ };
+ 
+-static void robust_list_del(struct list_head *delete_node)
+-{
+-	struct list_head *list_node;
+-	struct list_head *n;
+-	ASSERT_RTNL();
+-	list_for_each_safe(list_node, n, &chnl_net_list) {
+-		if (list_node == delete_node) {
+-			list_del(list_node);
+-			return;
+-		}
+-	}
+-	WARN_ON(1);
+-}
+-
+ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+ {
+ 	struct sk_buff *skb;
+@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
+ 	ASSERT_RTNL();
+ 	priv = netdev_priv(dev);
+ 	strncpy(priv->name, dev->name, sizeof(priv->name));
++	INIT_LIST_HEAD(&priv->list_field);
+ 	return 0;
+ }
+ 
+@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
+ 	struct chnl_net *priv;
+ 	ASSERT_RTNL();
+ 	priv = netdev_priv(dev);
+-	robust_list_del(&priv->list_field);
++	list_del_init(&priv->list_field);
+ }
+ 
+ static const struct net_device_ops netdev_ops = {
+@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
+ 	rtnl_lock();
+ 	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ 		dev = list_entry(list_node, struct chnl_net, list_field);
+-		list_del(list_node);
++		list_del_init(list_node);
+ 		delete_device(dev);
+ 	}
+ 	rtnl_unlock();
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index c5c74a34d139d..91e7a22026971 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
+ 		newdp->dccps_role	    = DCCP_ROLE_SERVER;
+ 		newdp->dccps_hc_rx_ackvec   = NULL;
+ 		newdp->dccps_service_list   = NULL;
++		newdp->dccps_hc_rx_ccid     = NULL;
++		newdp->dccps_hc_tx_ccid     = NULL;
+ 		newdp->dccps_service	    = dreq->dreq_service;
+ 		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
+ 		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index 84cad1be9ce48..e058a2e320e35 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
+ 	return queue_work(dsa_owq, work);
+ }
+ 
++void dsa_flush_workqueue(void)
++{
++	flush_workqueue(dsa_owq);
++}
++
+ int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ 			  struct devlink_param_gset_ctx *ctx)
+ {
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 185629f27f803..79267b00af68f 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -809,6 +809,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
+ 	ds->setup = false;
+ }
+ 
++/* First tear down the non-shared, then the shared ports. This ensures that
++ * all work items scheduled by our switchdev handlers for user ports have
++ * completed before we destroy the refcounting kept on the shared ports.
++ */
++static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
++{
++	struct dsa_port *dp;
++
++	list_for_each_entry(dp, &dst->ports, list)
++		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
++			dsa_port_teardown(dp);
++
++	dsa_flush_workqueue();
++
++	list_for_each_entry(dp, &dst->ports, list)
++		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
++			dsa_port_teardown(dp);
++}
++
++static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
++{
++	struct dsa_port *dp;
++
++	list_for_each_entry(dp, &dst->ports, list)
++		dsa_switch_teardown(dp->ds);
++}
++
+ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+ {
+ 	struct dsa_port *dp;
+@@ -835,26 +862,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+ 	return 0;
+ 
+ teardown:
+-	list_for_each_entry(dp, &dst->ports, list)
+-		dsa_port_teardown(dp);
++	dsa_tree_teardown_ports(dst);
+ 
+-	list_for_each_entry(dp, &dst->ports, list)
+-		dsa_switch_teardown(dp->ds);
++	dsa_tree_teardown_switches(dst);
+ 
+ 	return err;
+ }
+ 
+-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+-{
+-	struct dsa_port *dp;
+-
+-	list_for_each_entry(dp, &dst->ports, list)
+-		dsa_port_teardown(dp);
+-
+-	list_for_each_entry(dp, &dst->ports, list)
+-		dsa_switch_teardown(dp->ds);
+-}
+-
+ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
+ {
+ 	struct dsa_port *dp;
+@@ -964,6 +978,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+ 
+ 	dsa_tree_teardown_master(dst);
+ 
++	dsa_tree_teardown_ports(dst);
++
+ 	dsa_tree_teardown_switches(dst);
+ 
+ 	dsa_tree_teardown_default_cpu(dst);
+diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
+index cddf7cb0f398f..6c00557ca9bf4 100644
+--- a/net/dsa/dsa_priv.h
++++ b/net/dsa/dsa_priv.h
+@@ -158,6 +158,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
+ const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
+ 
+ bool dsa_schedule_work(struct work_struct *work);
++void dsa_flush_workqueue(void);
+ const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
+ 
+ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index b34116b15d436..527fc20d47adf 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1784,13 +1784,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
+ 		 * use the switch internal MDIO bus instead
+ 		 */
+ 		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
+-		if (ret) {
+-			netdev_err(slave_dev,
+-				   "failed to connect to port %d: %d\n",
+-				   dp->index, ret);
+-			phylink_destroy(dp->pl);
+-			return ret;
+-		}
++	}
++	if (ret) {
++		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
++			   ERR_PTR(ret));
++		phylink_destroy(dp->pl);
+ 	}
+ 
+ 	return ret;
+diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
+index 57c46b4ab2b3f..e34b80fa52e1d 100644
+--- a/net/dsa/tag_rtl4_a.c
++++ b/net/dsa/tag_rtl4_a.c
+@@ -54,9 +54,10 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+ 	p = (__be16 *)tag;
+ 	*p = htons(RTL4_A_ETHERTYPE);
+ 
+-	out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8);
+-	/* The lower bits is the port number */
+-	out |= (u8)dp->index;
++	out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
++	/* The lower bits indicate the port number */
++	out |= BIT(dp->index);
++
+ 	p = (__be16 *)(tag + 2);
+ 	*p = htons(out);
+ 
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 6134b180f59f8..af011534bcb24 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -906,7 +906,7 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
+ 						   rule_buf);
+ 		useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
+ 	} else {
+-		ret = copy_to_user(useraddr, &rxnfc, size);
++		ret = copy_to_user(useraddr, rxnfc, size);
+ 		useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
+ 	}
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 7fbd0b532f529..099259fc826aa 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -465,16 +465,14 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
+ 	if (!doi_def)
+ 		return;
+ 
+-	if (doi_def->map.std) {
+-		switch (doi_def->type) {
+-		case CIPSO_V4_MAP_TRANS:
+-			kfree(doi_def->map.std->lvl.cipso);
+-			kfree(doi_def->map.std->lvl.local);
+-			kfree(doi_def->map.std->cat.cipso);
+-			kfree(doi_def->map.std->cat.local);
+-			kfree(doi_def->map.std);
+-			break;
+-		}
++	switch (doi_def->type) {
++	case CIPSO_V4_MAP_TRANS:
++		kfree(doi_def->map.std->lvl.cipso);
++		kfree(doi_def->map.std->lvl.local);
++		kfree(doi_def->map.std->cat.cipso);
++		kfree(doi_def->map.std->cat.local);
++		kfree(doi_def->map.std);
++		break;
+ 	}
+ 	kfree(doi_def);
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 95419b7adf5ce..6480c6dfe1bf9 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -473,8 +473,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+ {
+-	if (csum && skb_checksum_start(skb) < skb->data)
+-		return -EINVAL;
+ 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+ }
+ 
+@@ -632,15 +630,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ 	}
+ 
+ 	if (dev->header_ops) {
++		const int pull_len = tunnel->hlen + sizeof(struct iphdr);
++
+ 		if (skb_cow_head(skb, 0))
+ 			goto free_skb;
+ 
+ 		tnl_params = (const struct iphdr *)skb->data;
+ 
++		if (pull_len > skb_transport_offset(skb))
++			goto free_skb;
++
+ 		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+ 		 * to gre header.
+ 		 */
+-		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++		skb_pull(skb, pull_len);
+ 		skb_reset_mac_header(skb);
+ 	} else {
+ 		if (skb_cow_head(skb, dev->needed_headroom))
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 4075230b14c63..75ca4b6e484f4 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -2490,6 +2490,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
+ 		.fc_gw4   = cfg->gw.ipv4,
+ 		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
+ 		.fc_flags = cfg->nh_flags,
++		.fc_nlinfo = cfg->nlinfo,
+ 		.fc_encap = cfg->nh_encap,
+ 		.fc_encap_type = cfg->nh_encap_type,
+ 	};
+@@ -2528,6 +2529,7 @@ static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
+ 		.fc_ifindex = cfg->nh_ifindex,
+ 		.fc_gateway = cfg->gw.ipv6,
+ 		.fc_flags = cfg->nh_flags,
++		.fc_nlinfo = cfg->nlinfo,
+ 		.fc_encap = cfg->nh_encap,
+ 		.fc_encap_type = cfg->nh_encap_type,
+ 		.fc_is_fdb = cfg->nh_fdb,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 149ceb5c94ffc..66d9085da87ed 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1314,7 +1314,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
+ 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
+ 		if (tp->undo_marker && tp->undo_retrans > 0 &&
+ 		    after(end_seq, tp->undo_marker))
+-			tp->undo_retrans--;
++			tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
+ 		if ((sacked & TCPCB_SACKED_ACKED) &&
+ 		    before(start_seq, state->reord))
+ 				state->reord = start_seq;
+diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
+index 0d122edc368dd..b91003538d87a 100644
+--- a/net/ipv4/udp_tunnel_nic.c
++++ b/net/ipv4/udp_tunnel_nic.c
+@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
+ {
+ 	int err;
+ 
+-	udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
++	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
+ 	if (!udp_tunnel_nic_workqueue)
+ 		return -ENOMEM;
+ 
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 7a5e90e093630..bc224f917bbd5 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -629,8 +629,6 @@ drop:
+ 
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+ {
+-	if (csum && skb_checksum_start(skb) < skb->data)
+-		return -EINVAL;
+ 	return iptunnel_handle_offloads(skb,
+ 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+ }
+diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
+index 6fd54744cbc38..aa5bb8789ba0b 100644
+--- a/net/ipv6/netfilter/nf_socket_ipv6.c
++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
+@@ -99,7 +99,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ {
+ 	__be16 dport, sport;
+ 	const struct in6_addr *daddr = NULL, *saddr = NULL;
+-	struct ipv6hdr *iph = ipv6_hdr(skb);
++	struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
+ 	struct sk_buff *data_skb = NULL;
+ 	int doff = 0;
+ 	int thoff = 0, tproto;
+@@ -129,8 +129,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ 			thoff + sizeof(*hp);
+ 
+ 	} else if (tproto == IPPROTO_ICMPV6) {
+-		struct ipv6hdr ipv6_var;
+-
+ 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
+ 					 &sport, &dport, &ipv6_var))
+ 			return NULL;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 53486b162f01c..93271a2632b8e 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 	}
+ 
+ 	if (tunnel->version == L2TP_HDR_VER_3 &&
+-	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
++		l2tp_session_dec_refcount(session);
+ 		goto invalid;
++	}
+ 
+ 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
+ 	l2tp_session_dec_refcount(session);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 7b37944597833..89251cbe9f1a7 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -540,7 +540,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ 	subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+ 	if (subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-		bool slow;
+ 
+ 		spin_unlock_bh(&msk->pm.lock);
+ 		pr_debug("send ack for %s%s%s",
+@@ -548,9 +547,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ 			 mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
+ 			 mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
+ 
+-		slow = lock_sock_fast(ssk);
+-		tcp_send_ack(ssk);
+-		unlock_sock_fast(ssk, slow);
++		mptcp_subflow_send_ack(ssk);
+ 		spin_lock_bh(&msk->pm.lock);
+ 	}
+ }
+@@ -567,7 +564,6 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		struct sock *sk = (struct sock *)msk;
+ 		struct mptcp_addr_info local;
+-		bool slow;
+ 
+ 		local_address((struct sock_common *)ssk, &local);
+ 		if (!addresses_equal(&local, addr, addr->port))
+@@ -580,9 +576,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
+ 
+ 		spin_unlock_bh(&msk->pm.lock);
+ 		pr_debug("send ack for mp_prio");
+-		slow = lock_sock_fast(ssk);
+-		tcp_send_ack(ssk);
+-		unlock_sock_fast(ssk, slow);
++		mptcp_subflow_send_ack(ssk);
+ 		spin_lock_bh(&msk->pm.lock);
+ 
+ 		return 0;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index a889249478152..acbead7cf50f0 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -427,19 +427,22 @@ static bool tcp_can_send_ack(const struct sock *ssk)
+ 	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
+ }
+ 
++void mptcp_subflow_send_ack(struct sock *ssk)
++{
++	bool slow;
++
++	slow = lock_sock_fast(ssk);
++	if (tcp_can_send_ack(ssk))
++		tcp_send_ack(ssk);
++	unlock_sock_fast(ssk, slow);
++}
++
+ static void mptcp_send_ack(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	mptcp_for_each_subflow(msk, subflow) {
+-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-		bool slow;
+-
+-		slow = lock_sock_fast(ssk);
+-		if (tcp_can_send_ack(ssk))
+-			tcp_send_ack(ssk);
+-		unlock_sock_fast(ssk, slow);
+-	}
++	mptcp_for_each_subflow(msk, subflow)
++		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
+ }
+ 
+ static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+@@ -994,6 +997,13 @@ static void mptcp_wmem_uncharge(struct sock *sk, int size)
+ 	msk->wmem_reserved += size;
+ }
+ 
++static void __mptcp_mem_reclaim_partial(struct sock *sk)
++{
++	lockdep_assert_held_once(&sk->sk_lock.slock);
++	__mptcp_update_wmem(sk);
++	sk_mem_reclaim_partial(sk);
++}
++
+ static void mptcp_mem_reclaim_partial(struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -1069,12 +1079,8 @@ static void __mptcp_clean_una(struct sock *sk)
+ 	}
+ 
+ out:
+-	if (cleaned) {
+-		if (tcp_under_memory_pressure(sk)) {
+-			__mptcp_update_wmem(sk);
+-			sk_mem_reclaim_partial(sk);
+-		}
+-	}
++	if (cleaned && tcp_under_memory_pressure(sk))
++		__mptcp_mem_reclaim_partial(sk);
+ 
+ 	if (snd_una == READ_ONCE(msk->snd_nxt)) {
+ 		if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
+@@ -1154,6 +1160,7 @@ struct mptcp_sendmsg_info {
+ 	u16 limit;
+ 	u16 sent;
+ 	unsigned int flags;
++	bool data_lock_held;
+ };
+ 
+ static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
+@@ -1225,17 +1232,17 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
+ 	return false;
+ }
+ 
+-static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
++static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
+ {
+-	return !ssk->sk_tx_skb_cache &&
+-	       tcp_under_memory_pressure(sk);
+-}
++	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
+ 
+-static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
+-{
+-	if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
+-		mptcp_mem_reclaim_partial(sk);
+-	return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
++	if (unlikely(tcp_under_memory_pressure(sk))) {
++		if (data_lock_held)
++			__mptcp_mem_reclaim_partial(sk);
++		else
++			mptcp_mem_reclaim_partial(sk);
++	}
++	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
+ }
+ 
+ /* note: this always recompute the csum on the whole skb, even
+@@ -1259,7 +1266,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	bool zero_window_probe = false;
+ 	struct mptcp_ext *mpext = NULL;
+ 	struct sk_buff *skb, *tail;
+-	bool can_collapse = false;
++	bool must_collapse = false;
+ 	int size_bias = 0;
+ 	int avail_size;
+ 	size_t ret = 0;
+@@ -1279,16 +1286,24 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		 * SSN association set here
+ 		 */
+ 		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+-		can_collapse = (info->size_goal - skb->len > 0) &&
+-			 mptcp_skb_can_collapse_to(data_seq, skb, mpext);
+-		if (!can_collapse) {
++		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
+ 			TCP_SKB_CB(skb)->eor = 1;
+-		} else {
++			goto alloc_skb;
++		}
++
++		must_collapse = (info->size_goal - skb->len > 0) &&
++				(skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
++		if (must_collapse) {
+ 			size_bias = skb->len;
+ 			avail_size = info->size_goal - skb->len;
+ 		}
+ 	}
+ 
++alloc_skb:
++	if (!must_collapse && !ssk->sk_tx_skb_cache &&
++	    !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
++		return 0;
++
+ 	/* Zero window and all data acked? Probe. */
+ 	avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
+ 	if (avail_size == 0) {
+@@ -1318,7 +1333,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	if (skb == tail) {
+ 		TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
+ 		mpext->data_len += ret;
+-		WARN_ON_ONCE(!can_collapse);
+ 		WARN_ON_ONCE(zero_window_probe);
+ 		goto out;
+ 	}
+@@ -1470,15 +1484,6 @@ static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+ 			if (ssk != prev_ssk || !prev_ssk)
+ 				lock_sock(ssk);
+ 
+-			/* keep it simple and always provide a new skb for the
+-			 * subflow, even if we will not use it when collapsing
+-			 * on the pending one
+-			 */
+-			if (!mptcp_alloc_tx_skb(sk, ssk)) {
+-				mptcp_push_release(sk, ssk, &info);
+-				goto out;
+-			}
+-
+ 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ 			if (ret <= 0) {
+ 				mptcp_push_release(sk, ssk, &info);
+@@ -1512,7 +1517,9 @@ out:
+ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+-	struct mptcp_sendmsg_info info;
++	struct mptcp_sendmsg_info info = {
++		.data_lock_held = true,
++	};
+ 	struct mptcp_data_frag *dfrag;
+ 	struct sock *xmit_ssk;
+ 	int len, copied = 0;
+@@ -1538,13 +1545,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
+ 				goto out;
+ 			}
+ 
+-			if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
+-				__mptcp_update_wmem(sk);
+-				sk_mem_reclaim_partial(sk);
+-			}
+-			if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
+-				goto out;
+-
+ 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ 			if (ret <= 0)
+ 				goto out;
+@@ -2296,9 +2296,6 @@ static void __mptcp_retrans(struct sock *sk)
+ 	info.sent = 0;
+ 	info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
+ 	while (info.sent < info.limit) {
+-		if (!mptcp_alloc_tx_skb(sk, ssk))
+-			break;
+-
+ 		ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+ 		if (ret <= 0)
+ 			break;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 0f0c026c5f8bb..6ac564d584c19 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -560,6 +560,7 @@ void __init mptcp_subflow_init(void);
+ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
++void mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 337e22d8b40b1..99b1de14ff7ee 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -41,6 +41,7 @@ struct nft_ct_helper_obj  {
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+ static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
+ static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
++static DEFINE_MUTEX(nft_ct_pcpu_mutex);
+ #endif
+ 
+ static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c,
+@@ -525,8 +526,10 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+ 	case NFT_CT_ZONE:
++		mutex_lock(&nft_ct_pcpu_mutex);
+ 		if (--nft_ct_pcpu_template_refcnt == 0)
+ 			nft_ct_tmpl_put_pcpu();
++		mutex_unlock(&nft_ct_pcpu_mutex);
+ 		break;
+ #endif
+ 	default:
+@@ -564,9 +567,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+ 	case NFT_CT_ZONE:
+-		if (!nft_ct_tmpl_alloc_pcpu())
++		mutex_lock(&nft_ct_pcpu_mutex);
++		if (!nft_ct_tmpl_alloc_pcpu()) {
++			mutex_unlock(&nft_ct_pcpu_mutex);
+ 			return -ENOMEM;
++		}
+ 		nft_ct_pcpu_template_refcnt++;
++		mutex_unlock(&nft_ct_pcpu_mutex);
+ 		len = sizeof(u16);
+ 		break;
+ #endif
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index bdbda61db8b96..d3c0cae813c65 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 		goto err;
+ 	}
+ 
+-	if (!size || size & 3 || len != size + hdrlen)
++	if (!size || len != ALIGN(size, 4) + hdrlen)
+ 		goto err;
+ 
+ 	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index bbd5f87536006..99e8db2621984 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -369,6 +369,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ {
+ 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
++	u32 quantum = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -386,6 +387,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ 		    q->flows_cnt > 65536)
+ 			return -EINVAL;
+ 	}
++	if (tb[TCA_FQ_CODEL_QUANTUM]) {
++		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
++		if (quantum > FQ_CODEL_QUANTUM_MAX) {
++			NL_SET_ERR_MSG(extack, "Invalid quantum");
++			return -EINVAL;
++		}
++	}
+ 	sch_tree_lock(sch);
+ 
+ 	if (tb[TCA_FQ_CODEL_TARGET]) {
+@@ -412,8 +420,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ 	if (tb[TCA_FQ_CODEL_ECN])
+ 		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+ 
+-	if (tb[TCA_FQ_CODEL_QUANTUM])
+-		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
++	if (quantum)
++		q->quantum = quantum;
+ 
+ 	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
+ 		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index a155cfaf01f2e..50762be9c115e 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1979,10 +1979,12 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
+ 		tipc_node_distr_xmit(sock_net(sk), &xmitq);
+ 	}
+ 
+-	if (!skb_cb->bytes_read)
+-		tsk_advance_rx_queue(sk);
++	if (skb_cb->bytes_read)
++		goto exit;
++
++	tsk_advance_rx_queue(sk);
+ 
+-	if (likely(!connected) || skb_cb->bytes_read)
++	if (likely(!connected))
+ 		goto exit;
+ 
+ 	/* Send connection flow control advertisement when applicable */
+@@ -2421,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+ 			    u32 dport, struct sk_buff_head *xmitq)
+ {
+-	unsigned long time_limit = jiffies + 2;
++	unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
+ 	struct sk_buff *skb;
+ 	unsigned int lim;
+ 	atomic_t *dcnt;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index ba7ced947e51c..91ff09d833e8f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2774,7 +2774,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ 
+ 		other = unix_peer(sk);
+ 		if (other && unix_peer(other) != sk &&
+-		    unix_recvq_full(other) &&
++		    unix_recvq_full_lockless(other) &&
+ 		    unix_dgram_peer_wake_me(sk, other))
+ 			writable = 0;
+ 
+diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
+index b7e9ecf16e569..a70cd064bfc4b 100755
+--- a/scripts/clang-tools/gen_compile_commands.py
++++ b/scripts/clang-tools/gen_compile_commands.py
+@@ -13,6 +13,7 @@ import logging
+ import os
+ import re
+ import subprocess
++import sys
+ 
+ _DEFAULT_OUTPUT = 'compile_commands.json'
+ _DEFAULT_LOG_LEVEL = 'WARNING'
+diff --git a/tools/build/Makefile b/tools/build/Makefile
+index 5ed41b96fcded..6f11e6fc9ffe3 100644
+--- a/tools/build/Makefile
++++ b/tools/build/Makefile
+@@ -32,7 +32,7 @@ all: $(OUTPUT)fixdep
+ 
+ # Make sure there's anything to clean,
+ # feature contains check for existing OUTPUT
+-TMP_O := $(if $(OUTPUT),$(OUTPUT)/feature,./)
++TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
+ 
+ clean:
+ 	$(call QUIET_CLEAN, fixdep)
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index eb8e487ef90b0..29ffd57f5cd8d 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -133,10 +133,10 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ 
+-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
++FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
++FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
++FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
++FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
+ 
+ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+ 
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
+index 55d373b75791b..17672790f1231 100644
+--- a/tools/perf/bench/inject-buildid.c
++++ b/tools/perf/bench/inject-buildid.c
+@@ -133,7 +133,7 @@ static u64 dso_map_addr(struct bench_dso *dso)
+ 	return 0x400000ULL + dso->ino * 8192ULL;
+ }
+ 
+-static u32 synthesize_attr(struct bench_data *data)
++static ssize_t synthesize_attr(struct bench_data *data)
+ {
+ 	union perf_event event;
+ 
+@@ -151,7 +151,7 @@ static u32 synthesize_attr(struct bench_data *data)
+ 	return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_fork(struct bench_data *data)
++static ssize_t synthesize_fork(struct bench_data *data)
+ {
+ 	union perf_event event;
+ 
+@@ -169,8 +169,7 @@ static u32 synthesize_fork(struct bench_data *data)
+ 	return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
+-			   u64 timestamp)
++static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+ {
+ 	union perf_event event;
+ 	size_t len = offsetof(struct perf_record_mmap2, filename);
+@@ -198,23 +197,25 @@ static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
+ 
+ 	if (len > sizeof(event.mmap2)) {
+ 		/* write mmap2 event first */
+-		writen(data->input_pipe[1], &event, len - bench_id_hdr_size);
++		if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0)
++			return -1;
+ 		/* zero-fill sample id header */
+ 		memset(id_hdr_ptr, 0, bench_id_hdr_size);
+ 		/* put timestamp in the right position */
+ 		ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2;
+ 		id_hdr_ptr[ts_idx] = timestamp;
+-		writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size);
+-	} else {
+-		ts_idx = (len / sizeof(u64)) - 2;
+-		id_hdr_ptr[ts_idx] = timestamp;
+-		writen(data->input_pipe[1], &event, len);
++		if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0)
++			return -1;
++
++		return len;
+ 	}
+-	return len;
++
++	ts_idx = (len / sizeof(u64)) - 2;
++	id_hdr_ptr[ts_idx] = timestamp;
++	return writen(data->input_pipe[1], &event, len);
+ }
+ 
+-static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
+-			     u64 timestamp)
++static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+ {
+ 	union perf_event event;
+ 	struct perf_sample sample = {
+@@ -233,7 +234,7 @@ static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
+ 	return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_flush(struct bench_data *data)
++static ssize_t synthesize_flush(struct bench_data *data)
+ {
+ 	struct perf_event_header header = {
+ 		.size = sizeof(header),
+@@ -348,14 +349,16 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+ 	int status;
+ 	unsigned int i, k;
+ 	struct rusage rusage;
+-	u64 len = 0;
+ 
+ 	/* this makes the child to run */
+ 	if (perf_header__write_pipe(data->input_pipe[1]) < 0)
+ 		return -1;
+ 
+-	len += synthesize_attr(data);
+-	len += synthesize_fork(data);
++	if (synthesize_attr(data) < 0)
++		return -1;
++
++	if (synthesize_fork(data) < 0)
++		return -1;
+ 
+ 	for (i = 0; i < nr_mmaps; i++) {
+ 		int idx = rand() % (nr_dsos - 1);
+@@ -363,13 +366,18 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+ 		u64 timestamp = rand() % 1000000;
+ 
+ 		pr_debug2("   [%d] injecting: %s\n", i+1, dso->name);
+-		len += synthesize_mmap(data, dso, timestamp);
++		if (synthesize_mmap(data, dso, timestamp) < 0)
++			return -1;
+ 
+-		for (k = 0; k < nr_samples; k++)
+-			len += synthesize_sample(data, dso, timestamp + k * 1000);
++		for (k = 0; k < nr_samples; k++) {
++			if (synthesize_sample(data, dso, timestamp + k * 1000) < 0)
++				return -1;
++		}
+ 
+-		if ((i + 1) % 10 == 0)
+-			len += synthesize_flush(data);
++		if ((i + 1) % 10 == 0) {
++			if (synthesize_flush(data) < 0)
++				return -1;
++		}
+ 	}
+ 
+ 	/* this makes the child to finish */
+diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
+index 63d472b336de2..4fb5e90d7a57a 100644
+--- a/tools/perf/util/config.c
++++ b/tools/perf/util/config.c
+@@ -581,7 +581,10 @@ const char *perf_home_perfconfig(void)
+ 	static const char *config;
+ 	static bool failed;
+ 
+-	config = failed ? NULL : home_perfconfig();
++	if (failed || config)
++		return config;
++
++	config = home_perfconfig();
+ 	if (!config)
+ 		failed = true;
+ 
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index da19be7da284c..44e40bad0e336 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
+ 
+ 	al.filtered = 0;
+ 	al.sym = NULL;
++	al.srcline = NULL;
+ 	if (!cpumode) {
+ 		thread__find_cpumode_addr_location(thread, ip, &al);
+ 	} else {
+diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
+index 4254ddc3f70b5..1ef9e4159bba8 100755
+--- a/tools/testing/selftests/net/altnames.sh
++++ b/tools/testing/selftests/net/altnames.sh
+@@ -45,7 +45,7 @@ altnames_test()
+ 	check_err $? "Got unexpected long alternative name from link show JSON"
+ 
+ 	ip link property del $DUMMY_DEV altname $SHORT_NAME
+-	check_err $? "Failed to add short alternative name"
++	check_err $? "Failed to delete short alternative name"
+ 
+ 	ip -j -p link show $SHORT_NAME &>/dev/null
+ 	check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index fd63ebfe9a2b7..910d8126af8f2 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -22,8 +22,8 @@ usage() {
+ 
+ cleanup()
+ {
+-	rm -f "$cin" "$cout"
+-	rm -f "$sin" "$sout"
++	rm -f "$cout" "$sout"
++	rm -f "$large" "$small"
+ 	rm -f "$capout"
+ 
+ 	local netns

