From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Wed, 20 Apr 2022 12:06:25 +0000 (UTC)
Message-ID: <1650456368.3bb7b32b7676d65c17bd93e52c8ba7b78cea0447.mpagano@gentoo>

commit:     3bb7b32b7676d65c17bd93e52c8ba7b78cea0447
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 20 12:06:08 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 20 12:06:08 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3bb7b32b

Linux patch 5.17.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1003_linux-5.17.4.patch | 8556 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8560 insertions(+)

diff --git a/0000_README b/0000_README
index a57e4c32..a12bf7e5 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-5.17.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.17.3
 
+Patch:  1003_linux-5.17.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.17.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-5.17.4.patch b/1003_linux-5.17.4.patch
new file mode 100644
index 00000000..411d2496
--- /dev/null
+++ b/1003_linux-5.17.4.patch
@@ -0,0 +1,8556 @@
+diff --git a/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml b/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
+index fb7ae38a9c866..e3bc6ebce0904 100644
+--- a/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
++++ b/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
+@@ -24,9 +24,9 @@ description: |
+ properties:
+   compatible:
+     enum:
++      - snps,ddrc-3.80a
+       - xlnx,zynq-ddrc-a05
+       - xlnx,zynqmp-ddrc-2.40a
+-      - snps,ddrc-3.80a
+ 
+   interrupts:
+     maxItems: 1
+@@ -43,7 +43,9 @@ allOf:
+       properties:
+         compatible:
+           contains:
+-            const: xlnx,zynqmp-ddrc-2.40a
++            enum:
++              - snps,ddrc-3.80a
++              - xlnx,zynqmp-ddrc-2.40a
+     then:
+       required:
+         - interrupts
+diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+index 7eb43707e601d..c421e4e306a1b 100644
+--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
++++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+@@ -53,20 +53,18 @@ properties:
+         - allwinner,sun8i-r40-gmac
+         - allwinner,sun8i-v3s-emac
+         - allwinner,sun50i-a64-emac
+-        - loongson,ls2k-dwmac
+-        - loongson,ls7a-dwmac
+         - amlogic,meson6-dwmac
+         - amlogic,meson8b-dwmac
+         - amlogic,meson8m2-dwmac
+         - amlogic,meson-gxbb-dwmac
+         - amlogic,meson-axg-dwmac
+-        - loongson,ls2k-dwmac
+-        - loongson,ls7a-dwmac
+         - ingenic,jz4775-mac
+         - ingenic,x1000-mac
+         - ingenic,x1600-mac
+         - ingenic,x1830-mac
+         - ingenic,x2000-mac
++        - loongson,ls2k-dwmac
++        - loongson,ls7a-dwmac
+         - rockchip,px30-gmac
+         - rockchip,rk3128-gmac
+         - rockchip,rk3228-gmac
+diff --git a/Makefile b/Makefile
+index 02fbef1a0213b..d7747e4c216e4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index 428012687a802..7f7f6bae21c2d 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
+ 	int ret;
+ 	u32 val;
+ 	struct davinci_soc_info *soc_info = &davinci_soc_info;
+-	u8 rmii_en = soc_info->emac_pdata->rmii_en;
++	u8 rmii_en;
+ 
+ 	if (!machine_is_davinci_da850_evm())
+ 		return 0;
+ 
++	rmii_en = soc_info->emac_pdata->rmii_en;
++
+ 	cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
+ 
+ 	val = __raw_readl(cfg_chip3_base);
+diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
+index cc75087134d38..28e0ae6e890e5 100644
+--- a/arch/arm/mach-ep93xx/clock.c
++++ b/arch/arm/mach-ep93xx/clock.c
+@@ -148,8 +148,10 @@ static struct clk_hw *ep93xx_clk_register_gate(const char *name,
+ 	psc->lock = &clk_lock;
+ 
+ 	clk = clk_register(NULL, &psc->hw);
+-	if (IS_ERR(clk))
++	if (IS_ERR(clk)) {
+ 		kfree(psc);
++		return ERR_CAST(clk);
++	}
+ 
+ 	return &psc->hw;
+ }
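The bug this hunk fixes: on clk_register() failure the old code freed psc but still returned &psc->hw, handing the caller a dangling pointer instead of an error. A minimal sketch of the error-pointer convention being applied (struct name hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

static struct clk_hw *register_gate(struct demo_psc *psc)	/* hypothetical type */
{
	struct clk *clk = clk_register(NULL, &psc->hw);

	if (IS_ERR(clk)) {
		kfree(psc);		/* drop our allocation...           */
		return ERR_CAST(clk);	/* ...and hand back the errno, not
					 * the now-freed &psc->hw           */
	}
	return &psc->hw;
}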
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index d62405ce3e6de..7496deab025ad 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ 
+ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+ 
++#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
+ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+ {
+ 	return !(vcpu->arch.hcr_el2 & HCR_RW);
+ }
++#else
++static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
++{
++	struct kvm *kvm = vcpu->kvm;
++
++	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
++			       &kvm->arch.flags));
++
++	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
++}
++#endif
+ 
+ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+ {
+@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.hcr_el2 |= HCR_TVM;
+ 	}
+ 
+-	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
++	if (vcpu_el1_is_32bit(vcpu))
+ 		vcpu->arch.hcr_el2 &= ~HCR_RW;
+-
+-	/*
+-	 * TID3: trap feature register accesses that we virtualise.
+-	 * For now this is conditional, since no AArch32 feature regs
+-	 * are currently virtualised.
+-	 */
+-	if (!vcpu_el1_is_32bit(vcpu))
++	else
++		/*
++		 * TID3: trap feature register accesses that we virtualise.
++		 * For now this is conditional, since no AArch32 feature regs
++		 * are currently virtualised.
++		 */
+ 		vcpu->arch.hcr_el2 |= HCR_TID3;
+ 
+ 	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 8234626a945a7..b5ae92f77c616 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -122,7 +122,22 @@ struct kvm_arch {
+ 	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
+ 	 * supported.
+ 	 */
+-	bool return_nisv_io_abort_to_user;
++#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
++	/* Memory Tagging Extension enabled for the guest */
++#define KVM_ARCH_FLAG_MTE_ENABLED			1
++	/* At least one vCPU has run in the VM */
++#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
++	/*
++	 * The following two bits are used to indicate the guest's EL1
++	 * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT
++	 * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
++	 * Otherwise, the guest's EL1 register width has not been
++	 * determined yet.
++	 */
++#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
++#define KVM_ARCH_FLAG_EL1_32BIT				4
++
++	unsigned long flags;
+ 
+ 	/*
+ 	 * VM-wide PMU filter, implemented as a bitmap and big enough for
+@@ -133,10 +148,6 @@ struct kvm_arch {
+ 
+ 	u8 pfr0_csv2;
+ 	u8 pfr0_csv3;
+-
+-	/* Memory Tagging Extension enabled for the guest */
+-	bool mte_enabled;
+-	bool ran_once;
+ };
+ 
+ struct kvm_vcpu_fault_info {
+@@ -792,7 +803,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+ #define kvm_arm_vcpu_sve_finalized(vcpu) \
+ 	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+ 
+-#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
++#define kvm_has_mte(kvm)					\
++	(system_supports_mte() &&				\
++	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
+ #define kvm_vcpu_has_pmu(vcpu)					\
+ 	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+ 
+diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
+index 3fb79b76e9d96..7bbf5104b7b7b 100644
+--- a/arch/arm64/kernel/alternative.c
++++ b/arch/arm64/kernel/alternative.c
+@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
+ /*
+  * Check if the target PC is within an alternative block.
+  */
+-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
++static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+ {
+ 	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+ 	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
+@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+ 
+ #define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
+ 
+-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
++static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+ {
+ 	u32 insn;
+ 
+@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
+ 	return insn;
+ }
+ 
+-static void patch_alternative(struct alt_instr *alt,
++static noinstr void patch_alternative(struct alt_instr *alt,
+ 			      __le32 *origptr, __le32 *updptr, int nr_inst)
+ {
+ 	__le32 *replptr;
+diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
+index 03991eeff6430..3006f43248084 100644
+--- a/arch/arm64/kernel/cpuidle.c
++++ b/arch/arm64/kernel/cpuidle.c
+@@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
+ 	struct acpi_lpi_state *lpi;
+ 	struct acpi_processor *pr = per_cpu(processors, cpu);
+ 
++	if (unlikely(!pr || !pr->flags.has_lpi))
++		return -EINVAL;
++
+ 	/*
+ 	 * If the PSCI cpu_suspend function hook has not been initialized
+ 	 * idle states must not be enabled, so bail out
+@@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
+ 	if (!psci_ops.cpu_suspend)
+ 		return -EOPNOTSUPP;
+ 
+-	if (unlikely(!pr || !pr->flags.has_lpi))
+-		return -EINVAL;
+-
+ 	count = pr->power.count - 1;
+ 	if (count <= 0)
+ 		return -ENODEV;
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 85a2a75f44982..25d8aff273a10 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -89,7 +89,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ 	switch (cap->cap) {
+ 	case KVM_CAP_ARM_NISV_TO_USER:
+ 		r = 0;
+-		kvm->arch.return_nisv_io_abort_to_user = true;
++		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
++			&kvm->arch.flags);
+ 		break;
+ 	case KVM_CAP_ARM_MTE:
+ 		mutex_lock(&kvm->lock);
+@@ -97,7 +98,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ 			r = -EINVAL;
+ 		} else {
+ 			r = 0;
+-			kvm->arch.mte_enabled = true;
++			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
+ 		}
+ 		mutex_unlock(&kvm->lock);
+ 		break;
+@@ -635,7 +636,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+ 
+ 	mutex_lock(&kvm->lock);
+-	kvm->arch.ran_once = true;
++	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
+ 	mutex_unlock(&kvm->lock);
+ 
+ 	return ret;
+diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
+index 3e2d8ba11a027..3dd38a151d2a6 100644
+--- a/arch/arm64/kvm/mmio.c
++++ b/arch/arm64/kvm/mmio.c
+@@ -135,7 +135,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+ 	 * volunteered to do so, and bail out otherwise.
+ 	 */
+ 	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
+-		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
++		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
++			     &vcpu->kvm->arch.flags)) {
+ 			run->exit_reason = KVM_EXIT_ARM_NISV;
+ 			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
+ 			run->arm_nisv.fault_ipa = fault_ipa;
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index bc771bc1a0413..fc6ee6f02fec4 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -982,7 +982,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 
+ 		mutex_lock(&kvm->lock);
+ 
+-		if (kvm->arch.ran_once) {
++		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
+ 			mutex_unlock(&kvm->lock);
+ 			return -EBUSY;
+ 		}
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index ecc40c8cd6f64..6c70c6f61c703 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
+-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
++/**
++ * kvm_set_vm_width() - set the register width for the guest
++ * @vcpu: Pointer to the vcpu being configured
++ *
++ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
++ * in the VM flags based on the vcpu's requested register width, the HW
++ * capabilities and other options (such as MTE).
++ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
++ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
+ {
+-	struct kvm_vcpu *tmp;
++	struct kvm *kvm = vcpu->kvm;
+ 	bool is32bit;
+-	unsigned long i;
+ 
+ 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
++
++	lockdep_assert_held(&kvm->lock);
++
++	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
++		/*
++		 * The guest's register width is already configured.
++		 * Make sure that the vcpu is consistent with it.
++		 */
++		if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
++			return 0;
++
++		return -EINVAL;
++	}
++
+ 	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+-		return false;
++		return -EINVAL;
+ 
+ 	/* MTE is incompatible with AArch32 */
+-	if (kvm_has_mte(vcpu->kvm) && is32bit)
+-		return false;
++	if (kvm_has_mte(kvm) && is32bit)
++		return -EINVAL;
+ 
+-	/* Check that the vcpus are either all 32bit or all 64bit */
+-	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+-		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+-			return false;
+-	}
++	if (is32bit)
++		set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+ 
+-	return true;
++	set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
++
++	return 0;
+ }
+ 
+ /**
+@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 	u32 pstate;
+ 
+ 	mutex_lock(&vcpu->kvm->lock);
+-	reset_state = vcpu->arch.reset_state;
+-	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
++	ret = kvm_set_vm_width(vcpu);
++	if (!ret) {
++		reset_state = vcpu->arch.reset_state;
++		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
++	}
+ 	mutex_unlock(&vcpu->kvm->lock);
+ 
++	if (ret)
++		return ret;
++
+ 	/* Reset PMU outside of the non-preemptible section */
+ 	kvm_pmu_vcpu_reset(vcpu);
+ 
+@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 		}
+ 	}
+ 
+-	if (!vcpu_allowed_register_width(vcpu)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
+-
+ 	switch (vcpu->arch.target) {
+ 	default:
+-		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
++		if (vcpu_el1_is_32bit(vcpu)) {
+ 			pstate = VCPU_RESET_PSTATE_SVC;
+ 		} else {
+ 			pstate = VCPU_RESET_PSTATE_EL1;
+diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
+index 0a0bc79bd1fa9..de1018cc522b3 100644
+--- a/arch/powerpc/include/asm/static_call.h
++++ b/arch/powerpc/include/asm/static_call.h
+@@ -24,5 +24,6 @@
+ 
+ #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)	__PPC_SCT(name, "b " #func)
+ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)	__PPC_SCT(name, "blr")
++#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)	__PPC_SCT(name, "b .+20")
+ 
+ #endif /* _ASM_POWERPC_STATIC_CALL_H */
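For reference, __PPC_SCT emits a small out-of-line trampoline whose body appears to be the given instruction, an indirect-branch sequence, and then an embedded "li r3,0; blr" stub; "b .+20" branches 20 bytes (five 4-byte instructions) forward to that stub, so a RET0 static call returns 0 with no indirection. A user-space model of what a static call buys (illustrative only; the kernel rewrites a direct branch rather than loading a pointer):

typedef long (*target_fn)(long);

static long ret0_target(long arg)
{
	return 0;			/* the generic RET0 behaviour */
}

/* Stand-in for the trampoline: one mutable slot per call site family. */
static target_fn demo_tramp = ret0_target;

static long demo_static_call(long arg)
{
	/* Kernel version: a direct branch patched in the instruction
	 * stream, so there is no memory load on the fast path. */
	return demo_tramp(arg);
}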
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 791db769080d2..316f61a4cb599 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -225,6 +225,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
+ 	int cpu;
+ 	struct rcuwait *waitp;
+ 
++	/*
++	 * rcuwait_wake_up contains smp_mb() which orders prior stores that
++	 * create pending work vs below loads of cpu fields. The other side
++	 * is the barrier in vcpu run that orders setting the cpu fields vs
++	 * testing for pending work.
++	 */
++
+ 	waitp = kvm_arch_vcpu_get_wait(vcpu);
+ 	if (rcuwait_wake_up(waitp))
+ 		++vcpu->stat.generic.halt_wakeup;
+@@ -1089,7 +1096,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+ 			break;
+ 		}
+ 		tvcpu->arch.prodded = 1;
+-		smp_mb();
++		smp_mb(); /* This orders prodded store vs ceded load */
+ 		if (tvcpu->arch.ceded)
+ 			kvmppc_fast_vcpu_kick_hv(tvcpu);
+ 		break;
+@@ -3771,6 +3778,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+ 		pvc = core_info.vc[sub];
+ 		pvc->pcpu = pcpu + thr;
+ 		for_each_runnable_thread(i, vcpu, pvc) {
++			/*
++			 * XXX: is kvmppc_start_thread called too late here?
++			 * It updates vcpu->cpu and vcpu->arch.thread_cpu
++			 * which are used by kvmppc_fast_vcpu_kick_hv(), but
++			 * kick is called after new exceptions become available
++			 * and exceptions are checked earlier than here, by
++			 * kvmppc_core_prepare_to_enter.
++			 */
+ 			kvmppc_start_thread(vcpu, pvc);
+ 			kvmppc_create_dtl_entry(vcpu, pvc);
+ 			trace_kvm_guest_enter(vcpu);
+@@ -4492,6 +4507,21 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	if (need_resched() || !kvm->arch.mmu_ready)
+ 		goto out;
+ 
++	vcpu->cpu = pcpu;
++	vcpu->arch.thread_cpu = pcpu;
++	vc->pcpu = pcpu;
++	local_paca->kvm_hstate.kvm_vcpu = vcpu;
++	local_paca->kvm_hstate.ptid = 0;
++	local_paca->kvm_hstate.fake_suspend = 0;
++
++	/*
++	 * Orders set cpu/thread_cpu vs testing for pending interrupts and
++	 * doorbells below. The other side is when these fields are set vs
++	 * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to
++	 * kick a vCPU to notice the pending interrupt.
++	 */
++	smp_mb();
++
+ 	if (!nested) {
+ 		kvmppc_core_prepare_to_enter(vcpu);
+ 		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+@@ -4511,13 +4541,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+ 
+ 	tb = mftb();
+ 
+-	vcpu->cpu = pcpu;
+-	vcpu->arch.thread_cpu = pcpu;
+-	vc->pcpu = pcpu;
+-	local_paca->kvm_hstate.kvm_vcpu = vcpu;
+-	local_paca->kvm_hstate.ptid = 0;
+-	local_paca->kvm_hstate.fake_suspend = 0;
+-
+ 	__kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
+ 
+ 	trace_kvm_guest_enter(vcpu);
+@@ -4619,6 +4642,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	run->exit_reason = KVM_EXIT_INTR;
+ 	vcpu->arch.ret = -EINTR;
+  out:
++	vcpu->cpu = -1;
++	vcpu->arch.thread_cpu = -1;
+ 	powerpc_local_irq_pmu_restore(flags);
+ 	preempt_enable();
+ 	goto done;
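The two smp_mb() comments added above describe a classic store-barrier-load pairing: each side publishes its own state, issues a full barrier, then reads the other side's state, which guarantees at least one side observes the other and no wakeup is lost. Schematically (field names simplified):

/*
 * Kicker (kvmppc_fast_vcpu_kick_hv)    Runner (kvmhv_run_single_vcpu)
 *
 *   store: pending work exists           store: vcpu->cpu = pcpu
 *   smp_mb()  [inside rcuwait_wake_up]   smp_mb()
 *   load:  vcpu->cpu / thread_cpu        load:  pending interrupts/doorbells
 *
 * If the kicker reads a stale cpu (-1), the runner must already see the
 * pending work before entering the guest, and vice versa.
 */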
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 624166004e36c..6785aef4cbd46 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -653,8 +653,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ 				     vcpu->arch.isa);
+ 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
+ 
+-	csr_write(CSR_HGATP, 0);
+-
+ 	csr->vsstatus = csr_read(CSR_VSSTATUS);
+ 	csr->vsie = csr_read(CSR_VSIE);
+ 	csr->vstvec = csr_read(CSR_VSTVEC);
+diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
+index 4449a976e5a6b..d4308c5120078 100644
+--- a/arch/riscv/kvm/vcpu_fp.c
++++ b/arch/riscv/kvm/vcpu_fp.c
+@@ -11,6 +11,7 @@
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
+ #include <linux/uaccess.h>
++#include <asm/hwcap.h>
+ 
+ #ifdef CONFIG_FPU
+ void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 17b4e1808b8e8..85ee96abba806 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1574,8 +1574,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+ #define kvm_arch_pmi_in_guest(vcpu) \
+ 	((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+ 
+-int kvm_mmu_module_init(void);
+-void kvm_mmu_module_exit(void);
++void kvm_mmu_x86_module_init(void);
++int kvm_mmu_vendor_module_init(void);
++void kvm_mmu_vendor_module_exit(void);
+ 
+ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+ int kvm_mmu_create(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index a4a39c3e0f196..0c2610cde6ea2 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -128,9 +128,9 @@
+ #define TSX_CTRL_RTM_DISABLE		BIT(0)	/* Disable RTM feature */
+ #define TSX_CTRL_CPUID_CLEAR		BIT(1)	/* Disable TSX enumeration */
+ 
+-/* SRBDS support */
+ #define MSR_IA32_MCU_OPT_CTRL		0x00000123
+-#define RNGDS_MITG_DIS			BIT(0)
++#define RNGDS_MITG_DIS			BIT(0)	/* SRBDS support */
++#define RTM_ALLOW			BIT(1)	/* TSX development mode */
+ 
+ #define MSR_IA32_SYSENTER_CS		0x00000174
+ #define MSR_IA32_SYSENTER_ESP		0x00000175
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index ed4f8bb6c2d9c..2455d721503ec 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -38,6 +38,8 @@
+ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
+ 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
+ 
++#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)			\
++	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+ 
+ #define ARCH_ADD_TRAMP_KEY(name)					\
+ 	asm(".pushsection .static_call_tramp_key, \"a\"		\n"	\
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 7b8382c117889..bd6c690a9fb98 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1719,6 +1719,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
+ 	validate_apic_and_package_id(c);
+ 	x86_spec_ctrl_setup_ap();
+ 	update_srbds_msr();
++
++	tsx_ap_init();
+ }
+ 
+ static __init int setup_noclflush(char *arg)
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index ee6f23f7587d4..2a8e584fc9913 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -55,11 +55,10 @@ enum tsx_ctrl_states {
+ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+ 
+ extern void __init tsx_init(void);
+-extern void tsx_enable(void);
+-extern void tsx_disable(void);
+-extern void tsx_clear_cpuid(void);
++void tsx_ap_init(void);
+ #else
+ static inline void tsx_init(void) { }
++static inline void tsx_ap_init(void) { }
+ #endif /* CONFIG_CPU_SUP_INTEL */
+ 
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 8321c43554a1d..f7a5370a9b3b8 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
+ 
+ 	init_intel_misc_features(c);
+ 
+-	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+-		tsx_enable();
+-	else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+-		tsx_disable();
+-	else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+-		tsx_clear_cpuid();
+-
+ 	split_lock_init();
+ 	bus_lock_init();
+ 
+diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
+index 9c7a5f0492929..ec7bbac3a9f29 100644
+--- a/arch/x86/kernel/cpu/tsx.c
++++ b/arch/x86/kernel/cpu/tsx.c
+@@ -19,7 +19,7 @@
+ 
+ enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+ 
+-void tsx_disable(void)
++static void tsx_disable(void)
+ {
+ 	u64 tsx;
+ 
+@@ -39,7 +39,7 @@ void tsx_disable(void)
+ 	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ }
+ 
+-void tsx_enable(void)
++static void tsx_enable(void)
+ {
+ 	u64 tsx;
+ 
+@@ -58,7 +58,7 @@ void tsx_enable(void)
+ 	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ }
+ 
+-static bool __init tsx_ctrl_is_supported(void)
++static bool tsx_ctrl_is_supported(void)
+ {
+ 	u64 ia32_cap = x86_read_arch_cap_msr();
+ 
+@@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+ 	return TSX_CTRL_ENABLE;
+ }
+ 
+-void tsx_clear_cpuid(void)
++/*
++ * Disabling TSX is not a trivial business.
++ *
++ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT
++ * which says that TSX is practically disabled (all transactions are
++ * aborted by default). When that bit is set, the kernel unconditionally
++ * disables TSX.
++ *
++ * In order to do that, however, it needs to dance a bit:
++ *
++ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
++ * the MSR is present only when *two* CPUID bits are set:
++ *
++ * - X86_FEATURE_RTM_ALWAYS_ABORT
++ * - X86_FEATURE_TSX_FORCE_ABORT
++ *
++ * 2. The second method is for CPUs which do not have the above-mentioned
++ * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX
++ * through that one. Those CPUs can also have the initially mentioned
++ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy
++ * applies: TSX gets disabled unconditionally.
++ *
++ * When either of the two methods are present, the kernel disables TSX and
++ * clears the respective RTM and HLE feature flags.
++ *
++ * An additional twist in the whole thing is late microcode loading,
++ * which, when done, may cause the X86_FEATURE_RTM_ALWAYS_ABORT CPUID
++ * bit to be set after the update.
++ *
++ * A subsequent hotplug operation on any logical CPU except the BSP will
++ * cause the supported CPUID feature bits to be re-detected and, if RTM
++ * and HLE get cleared all of a sudden after userspace has already
++ * consulted them, funny explosions will happen. Long story short: the
++ * kernel doesn't modify CPUID feature bits after booting.
++ *
++ * That's why this function's call in init_intel() doesn't clear the
++ * feature flags.
++ */
++static void tsx_clear_cpuid(void)
+ {
+ 	u64 msr;
+ 
+@@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
+ 		rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ 		msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ 		wrmsrl(MSR_TSX_FORCE_ABORT, msr);
++	} else if (tsx_ctrl_is_supported()) {
++		rdmsrl(MSR_IA32_TSX_CTRL, msr);
++		msr |= TSX_CTRL_CPUID_CLEAR;
++		wrmsrl(MSR_IA32_TSX_CTRL, msr);
++	}
++}
++
++/*
++ * Disable TSX development mode
++ *
++ * When the microcode released in Feb 2022 is applied, TSX will be disabled by
++ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
++ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is
++ * not recommended for production deployments. In particular, applying MD_CLEAR
++ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
++ * execution attack may not be effective on these processors when Intel TSX is
++ * enabled with updated microcode.
++ */
++static void tsx_dev_mode_disable(void)
++{
++	u64 mcu_opt_ctrl;
++
++	/* Check if RTM_ALLOW exists */
++	if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
++	    !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
++		return;
++
++	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
++
++	if (mcu_opt_ctrl & RTM_ALLOW) {
++		mcu_opt_ctrl &= ~RTM_ALLOW;
++		wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
++		setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
+ 	}
+ }
+ 
+@@ -105,14 +176,14 @@ void __init tsx_init(void)
+ 	char arg[5] = {};
+ 	int ret;
+ 
++	tsx_dev_mode_disable();
++
+ 	/*
+-	 * Hardware will always abort a TSX transaction if both CPUID bits
+-	 * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
+-	 * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
+-	 * here.
++	 * Hardware will always abort a TSX transaction when the CPUID bit
++	 * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
++	 * CPUID.RTM and CPUID.HLE bits. Clear them here.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+-	    boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
++	if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
+ 		tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
+ 		tsx_clear_cpuid();
+ 		setup_clear_cpu_cap(X86_FEATURE_RTM);
+@@ -175,3 +246,16 @@ void __init tsx_init(void)
+ 		setup_force_cpu_cap(X86_FEATURE_HLE);
+ 	}
+ }
++
++void tsx_ap_init(void)
++{
++	tsx_dev_mode_disable();
++
++	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
++		tsx_enable();
++	else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
++		tsx_disable();
++	else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
++		/* See comment over that function for more details. */
++		tsx_clear_cpuid();
++}
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 5628d0ba637ec..7f009ebb319ab 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6144,12 +6144,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+ 	return 0;
+ }
+ 
+-int kvm_mmu_module_init(void)
++/*
++ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
++ * its default value of -1 is technically undefined behavior for a boolean.
++ */
++void kvm_mmu_x86_module_init(void)
+ {
+-	int ret = -ENOMEM;
+-
+ 	if (nx_huge_pages == -1)
+ 		__set_nx_huge_pages(get_nx_auto_mode());
++}
++
++/*
++ * The bulk of the MMU initialization is deferred until the vendor module is
++ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
++ * to be reset when a potentially different vendor module is loaded.
++ */
++int kvm_mmu_vendor_module_init(void)
++{
++	int ret = -ENOMEM;
+ 
+ 	/*
+ 	 * MMU roles use union aliasing which is, generally speaking, an
+@@ -6197,7 +6209,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
+ 	mmu_free_memory_caches(vcpu);
+ }
+ 
+-void kvm_mmu_module_exit(void)
++void kvm_mmu_vendor_module_exit(void)
+ {
+ 	mmu_destroy_caches();
+ 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c81ec70197fb5..05128162ebd58 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8846,7 +8846,7 @@ int kvm_arch_init(void *opaque)
+ 	}
+ 	kvm_nr_uret_msrs = 0;
+ 
+-	r = kvm_mmu_module_init();
++	r = kvm_mmu_vendor_module_init();
+ 	if (r)
+ 		goto out_free_percpu;
+ 
+@@ -8894,7 +8894,7 @@ void kvm_arch_exit(void)
+ 	cancel_work_sync(&pvclock_gtod_work);
+ #endif
+ 	kvm_x86_ops.hardware_enable = NULL;
+-	kvm_mmu_module_exit();
++	kvm_mmu_vendor_module_exit();
+ 	free_percpu(user_return_msrs);
+ 	kmem_cache_destroy(x86_emulator_cache);
+ #ifdef CONFIG_KVM_XEN
+@@ -12887,3 +12887,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
++
++static int __init kvm_x86_init(void)
++{
++	kvm_mmu_x86_module_init();
++	return 0;
++}
++module_init(kvm_x86_init);
++
++static void __exit kvm_x86_exit(void)
++{
++	/*
++	 * If module_init() is implemented, module_exit() must also be
++	 * implemented to allow module unload.
++	 */
++}
++module_exit(kvm_x86_exit);
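The deliberately empty kvm_x86_exit() is load-bearing: the module core treats a module that registered an init function but no exit function as non-unloadable, so providing module_exit() is precisely what keeps "rmmod kvm" working. Minimal skeleton of the rule:

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;			/* runs on module load */
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	/* Empty on purpose; without module_exit() the module could
	 * never be unloaded once demo_init() had run. */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");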
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 0ecb140864b21..b272e963388cb 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -398,6 +398,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
+ 		EMIT_LFENCE();
+ 		EMIT2(0xFF, 0xE0 + reg);
+ 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
++		OPTIMIZER_HIDE_VAR(reg);
+ 		emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
+ 	} else
+ #endif
+diff --git a/block/bio.c b/block/bio.c
+index 1be1e360967d0..342b1cf5d713c 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1570,7 +1570,7 @@ EXPORT_SYMBOL(bio_split);
+ void bio_trim(struct bio *bio, sector_t offset, sector_t size)
+ {
+ 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
+-			 offset + size > bio->bi_iter.bi_size))
++			 offset + size > bio_sectors(bio)))
+ 		return;
+ 
+ 	size <<= 9;
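The one-liner fixes a unit mismatch: offset and size arrive in 512-byte sectors while bi_iter.bi_size counts bytes, so the old check compared a sector sum against a byte count and effectively never fired. bio_sectors() does the conversion; it is, roughly, this pair of macros from include/linux/bio.h:

/* Convert the byte-based iterator size to 512-byte sectors before
 * comparing against sector-based arguments. */
#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)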
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 05b3985a1984b..4556c86c34659 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1079,6 +1079,11 @@ static int flatten_lpi_states(struct acpi_processor *pr,
+ 	return 0;
+ }
+ 
++int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
++{
++	return -EOPNOTSUPP;
++}
++
+ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+ {
+ 	int ret, i;
+@@ -1087,6 +1092,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+ 	struct acpi_device *d = NULL;
+ 	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ 
++	/* make sure our architecture has support */
++	ret = acpi_processor_ffh_lpi_probe(pr->id);
++	if (ret == -EOPNOTSUPP)
++		return ret;
++
+ 	if (!osc_pc_lpi_support_confirmed)
+ 		return -EOPNOTSUPP;
+ 
+@@ -1138,11 +1148,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+ 	return 0;
+ }
+ 
+-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+-{
+-	return -ENODEV;
+-}
+-
+ int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+ {
+ 	return -ENODEV;
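Two related changes here: the weak stub now returns -EOPNOTSUPP, and acpi_processor_get_lpi_info() probes it up front, so architectures without FFH LPI support bail out before any _LPI parsing. The weak-symbol pattern in miniature (hypothetical name; the two definitions live in different object files):

/* Generic code: the fallback, emitted as a weak symbol. */
int __weak arch_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;	/* "no architecture glue available" */
}

/* Arch code (separate object file): a strong definition silently
 * replaces the weak one at link time. */
int arch_lpi_probe(unsigned int cpu)
{
	return 0;		/* arch-specific setup succeeded */
}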
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0c854aebfe0bd..760c0d81d1482 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_NO_DMA_LOG |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 752a11d16e262..7e079fa3795b1 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
+ 
+ 	return -EPROBE_DEFER;
+ }
++EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
+ 
+ static void deferred_probe_timeout_work_func(struct work_struct *work)
+ {
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 5d5beeba3ed4f..478ba959362ce 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2739,6 +2739,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ 	sprintf(disk->disk_name, "drbd%d", minor);
+ 	disk->private_data = device;
+ 
++	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+ 	blk_queue_write_cache(disk->queue, true, true);
+ 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
+ 	   This triggers a max_bio_size message upon first attach or connect */
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 13004beb48cab..233577b141412 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1606,7 +1606,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+ 	 * Only fake timeouts need to execute blk_mq_complete_request() here.
+ 	 */
+ 	cmd->error = BLK_STS_TIMEOUT;
+-	if (cmd->fake_timeout)
++	if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
+ 		blk_mq_complete_request(rq);
+ 	return BLK_EH_DONE;
+ }
+diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
+index 35b56c8ba0c0e..492f3a9197ec2 100644
+--- a/drivers/firmware/arm_scmi/clock.c
++++ b/drivers/firmware/arm_scmi/clock.c
+@@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+ 
+ 	if (rate_discrete && rate) {
+ 		clk->list.num_rates = tot_rate_cnt;
+-		sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
++		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
++		     rate_cmp_func, NULL);
+ 	}
+ 
+ 	clk->rate_discrete = rate_discrete;
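The bug being fixed: rate is a cursor the rate-reading loop advances through the array, so sort() was handed a pointer into (or past) the data and only a fragment ever got sorted; clk->list.rates is the stable base. In miniature (producer names hypothetical):

u64 rates[16], *cursor = rates;
size_t n = 0;

while (have_more_rates()) {	/* hypothetical producer loop */
	*cursor++ = read_next_rate();
	n++;
}

/* WRONG: cursor no longer points at the first element.       */
/* sort(cursor, n, sizeof(*cursor), rate_cmp_func, NULL);     */

/* RIGHT: always pass sort() the base of the region to order. */
sort(rates, n, sizeof(*rates), rate_cmp_func, NULL);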
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index d76bab3aaac45..e815b8f987393 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -652,7 +652,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ 
+ 	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
+ 	if (IS_ERR(xfer)) {
+-		scmi_clear_channel(info, cinfo);
++		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
++			scmi_clear_channel(info, cinfo);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 8e5d87984a489..41c31b10ae848 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
+ 	struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+ 
+ 	mutex_lock(&chip->lock);
+-	bitmap_copy(bits, chip->value_map, gc->ngpio);
++	bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
+ 	mutex_unlock(&chip->lock);
+ 
+ 	return 0;
+@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
+ 	struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+ 
+ 	mutex_lock(&chip->lock);
+-	bitmap_copy(chip->value_map, bits, gc->ngpio);
++	bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
+ 	mutex_unlock(&chip->lock);
+ }
+ 
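bitmap_copy() ignored the mask argument, so a consumer reading or driving a subset of lines clobbered state it never asked about. bitmap_replace(dst, old, new, mask, nbits) computes dst = (old & ~mask) | (new & mask), touching only the requested bits:

DECLARE_BITMAP(vals, 8);	/* chip state for lines 0..7        */
DECLARE_BITMAP(bits, 8);	/* caller's value buffer            */
DECLARE_BITMAP(mask, 8);	/* caller requested lines 1 and 3   */

/* get_multiple: copy only masked positions into the caller's buffer. */
bitmap_replace(bits, bits, vals, mask, 8);

/* set_multiple: update only masked positions of the chip state. */
bitmap_replace(vals, vals, bits, mask, 8);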
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index a5495ad31c9ce..b7c2f2af1dee5 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -387,8 +387,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+ 	pin = agpio->pin_table[0];
+ 
+ 	if (pin <= 255) {
+-		char ev_name[5];
+-		sprintf(ev_name, "_%c%02hhX",
++		char ev_name[8];
++		sprintf(ev_name, "_%c%02X",
+ 			agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
+ 			pin);
+ 		if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
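With pin capped at 255 the generated name ("_E"/"_L", two hex digits, NUL) needs five bytes, but format-size analysis of %02X on an int-promoted argument assumes up to eight hex digits, hence the wider buffer; the hh length modifier is also dropped since pin is not passed as an unsigned char. A bounds-explicit variant (sketch; the trigger flag name is hypothetical):

char ev_name[8];

/* snprintf() encodes the bound in the call itself rather than
 * relying solely on the pin <= 255 invariant. */
snprintf(ev_name, sizeof(ev_name), "_%c%02X",
	 edge_triggered ? 'E' : 'L', pin);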
+diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+index 5b393622f5920..a0f0a17e224fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
++++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+@@ -119,6 +119,7 @@
+ #define CONNECTOR_OBJECT_ID_eDP                   0x14
+ #define CONNECTOR_OBJECT_ID_MXM                   0x15
+ #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
++#define CONNECTOR_OBJECT_ID_USBC                  0x17
+ 
+ /* deleted */
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b87dca6d09fa6..052816f0efed4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5678,7 +5678,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+ 		struct amdgpu_ring *ring)
+ {
+ #ifdef CONFIG_X86_64
+-	if (adev->flags & AMD_IS_APU)
++	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
+ 		return;
+ #endif
+ 	if (adev->gmc.xgmi.connected_to_cpu)
+@@ -5694,7 +5694,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+ 		struct amdgpu_ring *ring)
+ {
+ #ifdef CONFIG_X86_64
+-	if (adev->flags & AMD_IS_APU)
++	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
+ 		return;
+ #endif
+ 	if (adev->gmc.xgmi.connected_to_cpu)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0ead08ba58c2a..c853266957ce1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -686,7 +686,7 @@ MODULE_PARM_DESC(sched_policy,
+  * Maximum number of processes that HWS can schedule concurrently. The maximum is the
+  * number of VMIDs assigned to the HWS, which is also the default.
+  */
+-int hws_max_conc_proc = 8;
++int hws_max_conc_proc = -1;
+ module_param(hws_max_conc_proc, int, 0444);
+ MODULE_PARM_DESC(hws_max_conc_proc,
+ 	"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+@@ -2276,18 +2276,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+ 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+-	int r;
+ 
+ 	if (amdgpu_acpi_is_s0ix_active(adev))
+ 		adev->in_s0ix = true;
+ 	else
+ 		adev->in_s3 = true;
+-	r = amdgpu_device_suspend(drm_dev, true);
+-	if (r)
+-		return r;
++	return amdgpu_device_suspend(drm_dev, true);
++}
++
++static int amdgpu_pmops_suspend_noirq(struct device *dev)
++{
++	struct drm_device *drm_dev = dev_get_drvdata(dev);
++	struct amdgpu_device *adev = drm_to_adev(drm_dev);
++
+ 	if (!adev->in_s0ix)
+-		r = amdgpu_asic_reset(adev);
+-	return r;
++		return amdgpu_asic_reset(adev);
++
++	return 0;
+ }
+ 
+ static int amdgpu_pmops_resume(struct device *dev)
+@@ -2528,6 +2533,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
+ 	.prepare = amdgpu_pmops_prepare,
+ 	.complete = amdgpu_pmops_complete,
+ 	.suspend = amdgpu_pmops_suspend,
++	.suspend_noirq = amdgpu_pmops_suspend_noirq,
+ 	.resume = amdgpu_pmops_resume,
+ 	.freeze = amdgpu_pmops_freeze,
+ 	.thaw = amdgpu_pmops_thaw,
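Moving the ASIC reset from .suspend into a new .suspend_noirq callback shifts it to a later suspend phase: .suspend runs while device interrupts are still enabled, whereas the PM core invokes .suspend_noirq only after disabling them, so the reset can no longer race in-flight IRQ handlers. Shape of the callback table (sketch):

static const struct dev_pm_ops demo_pm_ops = {
	.suspend	= demo_suspend,		/* phase 1: IRQs still on */
	.suspend_noirq	= demo_suspend_noirq,	/* phase 2: IRQs disabled */
	.resume		= demo_resume,
};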
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 9189fb85a4dd4..5831aa40b1e81 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1334,6 +1334,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
+ 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+ 	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+ 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
++	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
++	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
+ 	{ 0, 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index a2f8ed0e6a644..f1b794d5d87d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -788,7 +788,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
+ 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ 
+ #ifdef CONFIG_X86_64
+-	if (adev->flags & AMD_IS_APU) {
++	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+ 		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ 		adev->gmc.aper_size = adev->gmc.real_vram_size;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index ab8adbff9e2d0..5206e2da334a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
+ 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ 
+ #ifdef CONFIG_X86_64
+-	if (adev->flags & AMD_IS_APU &&
+-	    adev->gmc.real_vram_size > adev->gmc.aper_size) {
++	if ((adev->flags & AMD_IS_APU) &&
++	    adev->gmc.real_vram_size > adev->gmc.aper_size &&
++	    !amdgpu_passthrough(adev)) {
+ 		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ 		adev->gmc.aper_size = adev->gmc.real_vram_size;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 054733838292c..d07d36786836e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
+ 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ 
+ #ifdef CONFIG_X86_64
+-	if (adev->flags & AMD_IS_APU) {
++	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+ 		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+ 		adev->gmc.aper_size = adev->gmc.real_vram_size;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 88c1eb9ad0684..2fb24178eaef9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1420,7 +1420,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
+ 	 */
+ 
+ 	/* check whether both host-gpu and gpu-gpu xgmi links exist */
+-	if ((adev->flags & AMD_IS_APU) ||
++	if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
+ 	    (adev->gmc.xgmi.supported &&
+ 	     adev->gmc.xgmi.connected_to_cpu)) {
+ 		adev->gmc.aper_base =
+@@ -1684,7 +1684,7 @@ static int gmc_v9_0_sw_fini(void *handle)
+ 	amdgpu_gem_force_release(adev);
+ 	amdgpu_vm_manager_fini(adev);
+ 	amdgpu_gart_table_vram_free(adev);
+-	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
++	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
+ 	amdgpu_bo_fini(adev);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 0ce2a7aa400b1..ad9bfc772bdff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1474,8 +1474,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
+ 
+ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+ {
++	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
+ 	uint32_t tmp;
+ 
++	vcn_v3_0_pause_dpg_mode(adev, 0, &state);
++
+ 	/* Wait for power status to be 1 */
+ 	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
+ 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 2b65d0acae2ce..2fdbe2f475e4f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -480,15 +480,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ 	}
+ 
+ 	/* Verify module parameters regarding mapped process number*/
+-	if ((hws_max_conc_proc < 0)
+-			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
+-		dev_err(kfd_device,
+-			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
+-			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
+-			kfd->vm_info.vmid_num_kfd);
++	if (hws_max_conc_proc >= 0)
++		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
++	else
+ 		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
+-	} else
+-		kfd->max_proc_per_quantum = hws_max_conc_proc;
+ 
+ 	/* calculate max size of mqds needed for queues */
+ 	size = max_num_of_queues_per_device *
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index afe72dd11325d..6ca7e12bdab84 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
+ 	event_waiters = kmalloc_array(num_events,
+ 					sizeof(struct kfd_event_waiter),
+ 					GFP_KERNEL);
++	if (!event_waiters)
++		return NULL;
+ 
+ 	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
+ 		init_wait(&event_waiters[i].wait);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 90c017859ad42..24db2297857b4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2693,7 +2693,8 @@ static int dm_resume(void *handle)
+ 		 * this is the case when traversing through already created
+ 		 * MST connectors, should be skipped
+ 		 */
+-		if (aconnector->mst_port)
++		if (aconnector->dc_link &&
++		    aconnector->dc_link->type == dc_connection_mst_branch)
+ 			continue;
+ 
+ 		mutex_lock(&aconnector->hpd_lock);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ac3071e38e4a0..f0a97b82c33a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1667,8 +1667,8 @@ bool dc_is_stream_unchanged(
+ 	if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+ 		return false;
+ 
+-	// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+-	if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
++	/*compare audio info*/
++	if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
+ 		return false;
+ 
+ 	return true;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+index f4f423d0b8c3f..80595d7f060c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+@@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
+ 	.program_watermarks = hubbub1_program_watermarks,
+ 	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ 	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
++	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
+ };
+ 
+ void hubbub1_construct(struct hubbub *hubbub,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 2cefdd96d0cbb..8ca4c06ac5607 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1112,9 +1112,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
+ 
+ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ {
++	struct hubbub *hubbub = dc->res_pool->hubbub;
+ 	static bool should_log_hw_state; /* prevent hw state log by default */
+ 
+-	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
++	if (!hubbub->funcs->verify_allow_pstate_change_high)
++		return;
++
++	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ 		int i = 0;
+ 
+ 		if (should_log_hw_state)
+@@ -1123,8 +1127,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+ 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+ 		BREAK_TO_DEBUGGER();
+ 		if (dcn10_hw_wa_force_recovery(dc)) {
+-		/*check again*/
+-			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
++			/*check again*/
++			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
+ 				BREAK_TO_DEBUGGER();
+ 		}
+ 	}
+@@ -1497,6 +1501,9 @@ void dcn10_init_hw(struct dc *dc)
+ 	if (dc->config.power_down_display_on_boot)
+ 		dc_link_blank_all_dp_displays(dc);
+ 
++	if (hws->funcs.enable_power_gating_plane)
++		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
++
+ 	/* If taking control over from VBIOS, we may want to optimize our first
+ 	 * mode set, so we need to skip powering down pipes until we know which
+ 	 * pipes we want to use.
+@@ -1549,8 +1556,6 @@ void dcn10_init_hw(struct dc *dc)
+ 
+ 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ 	}
+-	if (hws->funcs.enable_power_gating_plane)
+-		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ 
+ 	if (dc->clk_mgr->funcs->notify_wm_ranges)
+ 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+@@ -2515,14 +2520,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ 	struct mpc *mpc = dc->res_pool->mpc;
+ 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+ 
+-	if (per_pixel_alpha)
+-		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+-	else
+-		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+-
+ 	blnd_cfg.overlap_only = false;
+ 	blnd_cfg.global_gain = 0xff;
+ 
++	if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
++		blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
++	} else if (per_pixel_alpha) {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
++	} else {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
++	}
++
+ 	if (pipe_ctx->plane_state->global_alpha)
+ 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ 	else
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 4991e93e5308c..8a72b7007b9d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -2313,14 +2313,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ 	struct mpc *mpc = dc->res_pool->mpc;
+ 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+ 
+-	if (per_pixel_alpha)
+-		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+-	else
+-		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+-
+ 	blnd_cfg.overlap_only = false;
+ 	blnd_cfg.global_gain = 0xff;
+ 
++	if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
++		blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
++	} else if (per_pixel_alpha) {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
++	} else {
++		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
++	}
++
+ 	if (pipe_ctx->plane_state->global_alpha)
+ 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ 	else
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
+index f4414de96acc5..152c9c5733f1c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
+@@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
+ 	.program_watermarks = hubbub3_program_watermarks,
+ 	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ 	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
++	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
+ 	.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
+ 	.force_pstate_change_control = hubbub3_force_pstate_change_control,
+ 	.init_watermarks = hubbub3_init_watermarks,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 1db1ca19411d8..05dc0a3ae2a3b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -548,6 +548,9 @@ void dcn30_init_hw(struct dc *dc)
+ 	if (dc->config.power_down_display_on_boot)
+ 		dc_link_blank_all_dp_displays(dc);
+ 
++	if (hws->funcs.enable_power_gating_plane)
++		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
++
+ 	/* If taking control over from VBIOS, we may want to optimize our first
+ 	 * mode set, so we need to skip powering down pipes until we know which
+ 	 * pipes we want to use.
+@@ -625,8 +628,6 @@ void dcn30_init_hw(struct dc *dc)
+ 
+ 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ 	}
+-	if (hws->funcs.enable_power_gating_plane)
+-		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ 
+ 	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
+ 		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
+index 1e3bd2e9cdcc4..a046664e20316 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
+@@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = {
+ 	.program_watermarks = hubbub3_program_watermarks,
+ 	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ 	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
++	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
+ 	.force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
+ 	.force_pstate_change_control = hubbub3_force_pstate_change_control,
+ 	.hubbub_read_state = hubbub2_read_state,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+index 5e3bcaf12cac4..51c5f3685470a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+@@ -949,6 +949,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
+ 	}
+ }
+ 
++static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
++{
++	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
++
++	/*
++	 * Pstate latency is ~20us, so if we wait over 40us and pstate allow
++	 * is still not asserted, we are probably stuck and going to hang.
++	 */
++	const unsigned int pstate_wait_timeout_us = 100;
++	const unsigned int pstate_wait_expected_timeout_us = 40;
++
++	static unsigned int max_sampled_pstate_wait_us; /* data collection */
++	static bool forced_pstate_allow; /* helps with reverting the workaround */
++
++	unsigned int debug_data = 0;
++	unsigned int i;
++
++	if (forced_pstate_allow) {
++		/* We forced pstate allow the last time
++		 * verify_allow_pstate_change_high ran, to prevent a hang.
++		 * Disable the force here so we can check the real status.
++		 */
++		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
++			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
++			     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
++		forced_pstate_allow = false;
++	}
++
++	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate);
++
++	for (i = 0; i < pstate_wait_timeout_us; i++) {
++		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
++
++		/* Debug bit is specific to ASIC. */
++		if (debug_data & (1 << 26)) {
++			if (i > pstate_wait_expected_timeout_us)
++				DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i);
++			return true;
++		}
++		if (max_sampled_pstate_wait_us < i)
++			max_sampled_pstate_wait_us = i;
++
++		udelay(1);
++	}
++
++	/* force pstate allow to prevent system hang
++	 * and break to debugger to investigate
++	 */
++	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
++		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
++		     DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
++	forced_pstate_allow = true;
++
++	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
++			debug_data);
++
++	return false;
++}
++
+ static const struct hubbub_funcs hubbub31_funcs = {
+ 	.update_dchub = hubbub2_update_dchub,
+ 	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
+@@ -961,6 +1020,7 @@ static const struct hubbub_funcs hubbub31_funcs = {
+ 	.program_watermarks = hubbub31_program_watermarks,
+ 	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ 	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
++	.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
+ 	.program_det_size = dcn31_program_det_size,
+ 	.program_compbuf_size = dcn31_program_compbuf_size,
+ 	.init_crb = dcn31_init_crb,
+@@ -982,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
+ 	hubbub31->detile_buf_size = det_size_kb * 1024;
+ 	hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
+ 	hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
++
++	hubbub31->debug_test_index_pstate = 0x6;
+ }
+ 
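
The hubbub31_verify_allow_pstate_change_high() helper added above follows a poll-with-deadline shape: spin on a debug register until the allow bit asserts, warn if it took longer than expected, and if the hard deadline passes, force the allow signal so the system keeps running instead of hanging. A self-contained sketch of that structure, with a simulated status register standing in for DCHUBBUB_TEST_DEBUG_DATA and a stub for the force write:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Simulated hardware: the allow bit asserts after a few polls. In the
 * real driver this is a read of DCHUBBUB_TEST_DEBUG_DATA. */
#define ALLOW_BIT (1u << 26)

static unsigned int polls;

static unsigned int read_status(void)
{
	return ++polls > 25 ? ALLOW_BIT : 0;
}

static void force_allow(void)
{
	fprintf(stderr, "forcing pstate allow to avoid a hang\n");
}

static bool verify_allow(void)
{
	const unsigned int timeout_us = 100;	/* hard deadline */
	const unsigned int expected_us = 40;	/* ~2x typical latency */
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (read_status() & ALLOW_BIT) {
			if (i > expected_us)
				fprintf(stderr, "allow took ~%uus\n", i);
			return true;
		}
		usleep(1);	/* stand-in for udelay(1) */
	}

	/* Deadline blown: force the signal so the system keeps running,
	 * and report the failure for later debugging. */
	force_allow();
	return false;
}

int main(void)
{
	return verify_allow() ? 0 : 1;
}
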
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index 1e156f3980656..bdc4467b40d79 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -200,6 +200,9 @@ void dcn31_init_hw(struct dc *dc)
+ 	if (dc->config.power_down_display_on_boot)
+ 		dc_link_blank_all_dp_displays(dc);
+ 
++	if (hws->funcs.enable_power_gating_plane)
++		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
++
+ 	/* If taking control over from VBIOS, we may want to optimize our first
+ 	 * mode set, so we need to skip powering down pipes until we know which
+ 	 * pipes we want to use.
+@@ -249,8 +252,6 @@ void dcn31_init_hw(struct dc *dc)
+ 
+ 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ 	}
+-	if (hws->funcs.enable_power_gating_plane)
+-		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ 
+ 	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
+ 		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 0e11285035b63..f3933c9f57468 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -1011,7 +1011,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.max_downscale_src_width = 4096,/*upto true 4K*/
+ 	.disable_pplib_wm_range = false,
+ 	.scl_reset_length10 = true,
+-	.sanity_checks = false,
++	.sanity_checks = true,
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+ 	.dwb_fi_phase = -1, // -1 = disable,
+ 	.dmub_command_table = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index 9c74564cbd8de..8973d3a38f9c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -864,11 +864,11 @@ static bool setup_dsc_config(
+ 		min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ 	}
+ 
++	is_dsc_possible = (min_slices_h <= max_slices_h);
++
+ 	if (pic_width % min_slices_h != 0)
+ 		min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
+ 
+-	is_dsc_possible = (min_slices_h <= max_slices_h);
+-
+ 	if (min_slices_h == 0 && max_slices_h == 0)
+ 		is_dsc_possible = false;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+index 713f5558f5e17..9195dec294c2d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+@@ -154,6 +154,8 @@ struct hubbub_funcs {
+ 	bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
+ 	void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+ 
++	bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub);
++
+ 	void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+ 
+ 	void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
+diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+index 57f198de5e2cb..4e075b01d48bb 100644
+--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+@@ -100,7 +100,8 @@ enum vsc_packet_revision {
+ //PB7 = MD0
+ #define MASK_VTEM_MD0__VRR_EN         0x01
+ #define MASK_VTEM_MD0__M_CONST        0x02
+-#define MASK_VTEM_MD0__RESERVED2      0x0C
++#define MASK_VTEM_MD0__QMS_EN         0x04
++#define MASK_VTEM_MD0__RESERVED2      0x08
+ #define MASK_VTEM_MD0__FVA_FACTOR_M1  0xF0
+ 
+ //MD1
+@@ -109,7 +110,7 @@ enum vsc_packet_revision {
+ //MD2
+ #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98  0x03
+ #define MASK_VTEM_MD2__RB                    0x04
+-#define MASK_VTEM_MD2__RESERVED3             0xF8
++#define MASK_VTEM_MD2__NEXT_TFR              0xF8
+ 
+ //MD3
+ #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07  0xFF
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index 936a257b511c8..d270d9a918e80 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -67,7 +67,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 	 * mmap ioctl is disallowed for all discrete platforms,
+ 	 * and for all platforms with GRAPHICS_VER > 12.
+ 	 */
+-	if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
++	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (args->flags & ~(I915_MMAP_WC))
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 616be7265da4d..19622fb1fa35b 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1714,7 +1714,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
+ 		return ERR_CAST(mmu);
+ 
+ 	return msm_gem_address_space_create(mmu,
+-		"gpu", 0x100000000ULL, 0x1ffffffffULL);
++		"gpu", 0x100000000ULL, SZ_4G);
+ }
+ 
+ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 1d7f82e6eafea..af9c09c308601 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -551,6 +551,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ 
+ 	mutex_unlock(&dp->event_mutex);
+ 
++	/*
++	 * Add the fail-safe mode outside the event_mutex scope
++	 * to avoid a potential circular lock with the drm thread.
++	 */
++	dp_panel_add_fail_safe_mode(dp->dp_display.connector);
++
+ 	/* uevent will complete connection part */
+ 	return 0;
+ };
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
+index f1418722c5492..26c3653c99ec9 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.c
++++ b/drivers/gpu/drm/msm/dp/dp_panel.c
+@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
+ 	return rc;
+ }
+ 
++void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
++{
++	/* fail safe edid */
++	mutex_lock(&connector->dev->mode_config.mutex);
++	if (drm_add_modes_noedid(connector, 640, 480))
++		drm_set_preferred_mode(connector, 640, 480);
++	mutex_unlock(&connector->dev->mode_config.mutex);
++}
++
+ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ 	struct drm_connector *connector)
+ {
+@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ 			goto end;
+ 		}
+ 
+-		/* fail safe edid */
+-		mutex_lock(&connector->dev->mode_config.mutex);
+-		if (drm_add_modes_noedid(connector, 640, 480))
+-			drm_set_preferred_mode(connector, 640, 480);
+-		mutex_unlock(&connector->dev->mode_config.mutex);
+-	} else {
+-		/* always add fail-safe mode as backup mode */
+-		mutex_lock(&connector->dev->mode_config.mutex);
+-		drm_add_modes_noedid(connector, 640, 480);
+-		mutex_unlock(&connector->dev->mode_config.mutex);
++		dp_panel_add_fail_safe_mode(connector);
+ 	}
+ 
+ 	if (panel->aux_cfg_update_done) {
+diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
+index 9023e5bb4b8b2..99739ea679a77 100644
+--- a/drivers/gpu/drm/msm/dp/dp_panel.h
++++ b/drivers/gpu/drm/msm/dp/dp_panel.h
+@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+ int dp_panel_deinit(struct dp_panel *dp_panel);
+ int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+ void dp_panel_dump_regs(struct dp_panel *dp_panel);
++void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
+ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ 		struct drm_connector *connector);
+ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index f19bae475c966..cd7b41b7d5180 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -641,7 +641,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+ 	return connector;
+ 
+ fail:
+-	connector->funcs->destroy(msm_dsi->connector);
++	connector->funcs->destroy(connector);
+ 	return ERR_PTR(ret);
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 02b9ae65a96a8..a4f61972667b5 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ 					get_pid_task(aspace->pid, PIDTYPE_PID);
+ 				if (task) {
+ 					comm = kstrdup(task->comm, GFP_KERNEL);
++					put_task_struct(task);
+ 				} else {
+ 					comm = NULL;
+ 				}
+diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
+index 666223c6bec4d..0a34e0ab4fe60 100644
+--- a/drivers/gpu/ipu-v3/ipu-di.c
++++ b/drivers/gpu/ipu-v3/ipu-di.c
+@@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
+ 
+ 		error = rate / (sig->mode.pixelclock / 1000);
+ 
+-		dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %d.%u%%\n",
+-			rate, div, (signed)(error - 1000) / 10, error % 10);
++		dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
++			rate, div, error < 1000 ? '-' : '+',
++			abs(error - 1000) / 10, abs(error - 1000) % 10);
+ 
+ 		/* Allow a 1% error */
+ 		if (error < 1010 && error >= 990) {
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 439f99b8b5de2..3cf334c46c312 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -1653,6 +1653,38 @@ static void disable_page_reporting(void)
+ 	}
+ }
+ 
++static int ballooning_enabled(void)
++{
++	/*
++	 * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
++	 * since currently it's unclear to us whether an unballoon request can
++	 * make sure all page ranges are guest page size aligned.
++	 */
++	if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
++		pr_info("Ballooning disabled because page size is not 4096 bytes\n");
++		return 0;
++	}
++
++	return 1;
++}
++
++static int hot_add_enabled(void)
++{
++	/*
++	 * Disable hot add on ARM64, because we currently rely on
++	 * memory_add_physaddr_to_nid() to get a node id of a hot add range,
++	 * however ARM64's memory_add_physaddr_to_nid() always returns 0 and
++	 * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
++	 * add_memory().
++	 */
++	if (IS_ENABLED(CONFIG_ARM64)) {
++		pr_info("Memory hot add disabled on ARM64\n");
++		return 0;
++	}
++
++	return 1;
++}
++
+ static int balloon_connect_vsp(struct hv_device *dev)
+ {
+ 	struct dm_version_request version_req;
+@@ -1724,8 +1756,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
+ 	 * currently still requires the bits to be set, so we have to add code
+ 	 * to fail the host's hot-add and balloon up/down requests, if any.
+ 	 */
+-	cap_msg.caps.cap_bits.balloon = 1;
+-	cap_msg.caps.cap_bits.hot_add = 1;
++	cap_msg.caps.cap_bits.balloon = ballooning_enabled();
++	cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
+ 
+ 	/*
+ 	 * Specify our alignment requirements as it relates
+diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
+index 181d16bbf49d7..820e814062519 100644
+--- a/drivers/hv/hv_common.c
++++ b/drivers/hv/hv_common.c
+@@ -20,6 +20,7 @@
+ #include <linux/panic_notifier.h>
+ #include <linux/ptrace.h>
+ #include <linux/slab.h>
++#include <linux/dma-map-ops.h>
+ #include <asm/hyperv-tlfs.h>
+ #include <asm/mshyperv.h>
+ 
+@@ -216,6 +217,16 @@ bool hv_query_ext_cap(u64 cap_query)
+ }
+ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
+ 
++void hv_setup_dma_ops(struct device *dev, bool coherent)
++{
++	/*
++	 * Hyper-V does not offer a vIOMMU in the guest
++	 * VM, so pass 0/NULL for the IOMMU settings
++	 */
++	arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
++}
++EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
++
+ bool hv_is_hibernation_supported(void)
+ {
+ 	return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 71efacb909659..3d215d9dec433 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
+ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
+ {
+ 	u32 priv_read_loc = rbi->priv_read_index;
+-	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
++	u32 write_loc;
++
++	/*
++	 * The Hyper-V host writes the packet data, then uses
++	 * store_release() to update the write_index.  Use load_acquire()
++	 * here to prevent loads of the packet data from being re-ordered
++	 * before the read of the write_index and potentially getting
++	 * stale data.
++	 */
++	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
+ 
+ 	if (write_loc >= priv_read_loc)
+ 		return write_loc - priv_read_loc;
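
The hv_pkt_iter_avail() change above is the textbook producer/consumer pairing: the host publishes packet data and then updates write_index with a store-release, so the guest must read write_index with a load-acquire (virt_load_acquire() being the guest-visible flavor) before touching the data. A minimal sketch of the same pairing in portable C11 atomics; this illustrates the ordering rule, not the Hyper-V ring code itself:

#include <stdatomic.h>

#define RING_SIZE 256

static int ring[RING_SIZE];
static atomic_uint write_index;

/* Producer: fill the slot first, then publish it with release
 * ordering so the data is visible before the index moves. */
static void produce(unsigned int slot, int value)
{
	ring[slot % RING_SIZE] = value;
	atomic_store_explicit(&write_index, slot + 1, memory_order_release);
}

/* Consumer: read the index with acquire ordering so the data loads
 * below cannot be reordered before it (no stale reads). */
static int consume(unsigned int slot)
{
	while (atomic_load_explicit(&write_index, memory_order_acquire)
	       <= slot)
		;	/* spin until the producer publishes the slot */
	return ring[slot % RING_SIZE];
}

int main(void)
{
	produce(0, 42);
	return consume(0) == 42 ? 0 : 1;
}
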
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 4bea1dfa41cdc..3cd0d3a44fa2e 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
+ 
+ 	/*
+ 	 * Hyper-V should be notified only once about a panic.  If we will be
+-	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
+-	 * the notification here.
++	 * doing hv_kmsg_dump() with kmsg data later, don't do the notification
++	 * here.
+ 	 */
+ 	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
+ 	    && hyperv_report_reg()) {
+@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
+ 
+ 	/*
+ 	 * Hyper-V should be notified only once about a panic.  If we will be
+-	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
+-	 * the notification here.
++	 * doing hv_kmsg_dump() with kmsg data later, don't do the notification
++	 * here.
+ 	 */
+ 	if (hyperv_report_reg())
+ 		hyperv_report_panic(regs, val, true);
+@@ -920,6 +920,21 @@ static int vmbus_probe(struct device *child_device)
+ 	return ret;
+ }
+ 
++/*
++ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
++ */
++static int vmbus_dma_configure(struct device *child_device)
++{
++	/*
++	 * On ARM64, propagate the DMA coherence setting from the top level
++	 * VMbus ACPI device to the child VMbus device being added here.
++	 * On x86/x64 coherence is assumed and these calls have no effect.
++	 */
++	hv_setup_dma_ops(child_device,
++		device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
++	return 0;
++}
++
+ /*
+  * vmbus_remove - Remove a vmbus device
+  */
+@@ -1040,6 +1055,7 @@ static struct bus_type  hv_bus = {
+ 	.remove =		vmbus_remove,
+ 	.probe =		vmbus_probe,
+ 	.uevent =		vmbus_uevent,
++	.dma_configure =	vmbus_dma_configure,
+ 	.dev_groups =		vmbus_dev_groups,
+ 	.drv_groups =		vmbus_drv_groups,
+ 	.bus_groups =		vmbus_bus_groups,
+@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
+ 	if (ret)
+ 		goto err_connect;
+ 
++	if (hv_is_isolation_supported())
++		sysctl_record_panic_msg = 0;
++
+ 	/*
+ 	 * Only register if the crash MSRs are available
+ 	 */
+ 	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ 		u64 hyperv_crash_ctl;
+ 		/*
+-		 * Sysctl registration is not fatal, since by default
+-		 * reporting is enabled.
++		 * Panic message recording (sysctl_record_panic_msg)
++		 * is enabled by default in non-isolated guests and
++		 * disabled by default in isolated guests; the panic
++		 * message recording won't be available in isolated
++		 * guests should the following registration fail.
+ 		 */
+ 		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
+ 		if (!hv_ctl_table_hdr)
+@@ -2429,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
+ 
+ 	hv_acpi_dev = device;
+ 
++	/*
++	 * Older versions of Hyper-V for ARM64 fail to include the _CCA
++	 * method on the top level VMbus device in the DSDT. But devices
++	 * are hardware coherent in all current Hyper-V use cases, so fix
++	 * up the ACPI device to behave as if _CCA is present and indicates
++	 * hardware coherence.
++	 */
++	ACPI_COMPANION_SET(&device->dev, device);
++	if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
++	    device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
++		pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
++		device->flags.cca_seen = true;
++		device->flags.coherent_dma = true;
++	}
++
+ 	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ 					vmbus_walk_resources, NULL);
+ 
+diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
+index 7728c8460dc0f..9028ffb58cc07 100644
+--- a/drivers/i2c/busses/i2c-pasemi-core.c
++++ b/drivers/i2c/busses/i2c-pasemi-core.c
+@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
+ 
+ 		TXFIFO_WR(smbus, msg->buf[msg->len-1] |
+ 			  (stop ? MTXFIFO_STOP : 0));
++
++		if (stop) {
++			err = pasemi_smb_waitready(smbus);
++			if (err)
++				goto reset_out;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index cf5d049342ead..6fd2b6718b086 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
+ 	i2c_dev->dev.class = i2c_dev_class;
+ 	i2c_dev->dev.parent = &adap->dev;
+ 	i2c_dev->dev.release = i2cdev_dev_release;
+-	dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
++
++	res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
++	if (res)
++		goto err_put_i2c_dev;
+ 
+ 	res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+-	if (res) {
+-		put_i2c_dev(i2c_dev, false);
+-		return res;
+-	}
++	if (res)
++		goto err_put_i2c_dev;
+ 
+ 	pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
+ 	return 0;
++
++err_put_i2c_dev:
++	put_i2c_dev(i2c_dev, false);
++	return res;
+ }
+ 
+ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
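
The i2cdev_attach_adapter() rework above converts ad-hoc cleanup into a single unwind label, so every failure after the device is allocated funnels through one put_i2c_dev() call. A compact user-space sketch of the same goto-unwind shape, with toy setup steps in place of dev_set_name() and cdev_device_add():

#include <stdio.h>
#include <stdlib.h>

struct dev { char name[16]; };

static int set_name(struct dev *d, int nr)
{
	return snprintf(d->name, sizeof(d->name), "i2c-%d", nr) < 0 ? -1 : 0;
}

static int register_dev(struct dev *d)
{
	(void)d;
	return 0;	/* pretend registration succeeded */
}

static int attach(int nr)
{
	struct dev *d = calloc(1, sizeof(*d));
	int res;

	if (!d)
		return -1;

	res = set_name(d, nr);
	if (res)
		goto err_put;	/* every later failure funnels here */

	res = register_dev(d);
	if (res)
		goto err_put;

	printf("registered %s\n", d->name);
	return 0;	/* d stays alive, like a registered device */

err_put:
	free(d);	/* one place that undoes the setup */
	return res;
}

int main(void) { return attach(3) ? 1 : 0; }
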
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 9399006dbc546..ffe50be8b6875 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4400,6 +4400,7 @@ try_smaller_buffer:
+ 	}
+ 
+ 	if (ic->internal_hash) {
++		size_t recalc_tags_size;
+ 		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
+ 		if (!ic->recalc_wq ) {
+ 			ti->error = "Cannot allocate workqueue";
+@@ -4413,8 +4414,10 @@ try_smaller_buffer:
+ 			r = -ENOMEM;
+ 			goto bad;
+ 		}
+-		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+-						 ic->tag_size, GFP_KERNEL);
++		recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
++		if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
++			recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
++		ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
+ 		if (!ic->recalc_tags) {
+ 			ti->error = "Cannot allocate tags for recalculating";
+ 			r = -ENOMEM;
+diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
+index 875bca30a0dd5..82f2a06153dc0 100644
+--- a/drivers/md/dm-ps-historical-service-time.c
++++ b/drivers/md/dm-ps-historical-service-time.c
+@@ -27,7 +27,6 @@
+ #include <linux/blkdev.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+-#include <linux/sched/clock.h>
+ 
+ 
+ #define DM_MSG_PREFIX	"multipath historical-service-time"
+@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi = NULL, *best = NULL;
+-	u64 time_now = sched_clock();
++	u64 time_now = ktime_get_ns();
+ 	struct dm_path *ret = NULL;
+ 	unsigned long flags;
+ 
+@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
+ 
+ static u64 path_service_time(struct path_info *pi, u64 start_time)
+ {
+-	u64 sched_now = ktime_get_ns();
++	u64 now = ktime_get_ns();
+ 
+ 	/* if a previous disk request has finished after this IO was
+ 	 * sent to the hardware, pretend the submission happened
+@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
+ 	if (time_after64(pi->last_finish, start_time))
+ 		start_time = pi->last_finish;
+ 
+-	pi->last_finish = sched_now;
+-	if (time_before64(sched_now, start_time))
++	pi->last_finish = now;
++	if (time_before64(now, start_time))
+ 		return 0;
+ 
+-	return sched_now - start_time;
++	return now - start_time;
+ }
+ 
+ static int hst_end_io(struct path_selector *ps, struct dm_path *path,
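
The hunk above matters because hst_select_path() was stamping "now" with sched_clock() while path_service_time() compared it against ktime_get_ns() timestamps; subtracting timestamps taken from two different clocks yields garbage service times. With both sides on ktime_get_ns() the arithmetic is coherent. A small sketch of the corrected computation, using CLOCK_MONOTONIC as the single time source and the same signed-difference trick as time_after64():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* One clock for every timestamp we compare or subtract. */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Wraparound-safe "a after b", like the kernel's time_after64(). */
static int after64(uint64_t a, uint64_t b)
{
	return (int64_t)(b - a) < 0;
}

static uint64_t service_time(uint64_t *last_finish, uint64_t start)
{
	uint64_t now = now_ns();

	/* If a previous request finished after this one was issued,
	 * pretend the submissions were serialized. */
	if (after64(*last_finish, start))
		start = *last_finish;

	*last_finish = now;
	return after64(start, now) ? 0 : now - start;
}

int main(void)
{
	uint64_t last = 0, t0 = now_ns();

	printf("%llu ns\n", (unsigned long long)service_time(&last, t0));
	return 0;
}
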
+diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
+index 4de5e8d2b261b..3d3d1062e2122 100644
+--- a/drivers/media/platform/rockchip/rga/rga.c
++++ b/drivers/media/platform/rockchip/rga/rga.c
+@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
+ 	}
+ 	rga->dst_mmu_pages =
+ 		(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+-	if (rga->dst_mmu_pages) {
++	if (!rga->dst_mmu_pages) {
+ 		ret = -ENOMEM;
+ 		goto free_src_pages;
+ 	}
+diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
+index 47029746b89ee..0de587b412d4e 100644
+--- a/drivers/media/tuners/si2157.c
++++ b/drivers/media/tuners/si2157.c
+@@ -77,16 +77,16 @@ err_mutex_unlock:
+ }
+ 
+ static const struct si2157_tuner_info si2157_tuners[] = {
+-	{ SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+-	{ SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+-	{ SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
+-	{ SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
+-	{ SI2148, true,  0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+-	{ SI2148, true,  0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+-	{ SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+-	{ SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+-	{ SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+-	{ SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
++	{ SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
++	{ SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
++	{ SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
++	{ SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
++	{ SI2148, 0x32, true,  SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
++	{ SI2148, 0x33, true,  SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
++	{ SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
++	{ SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
++	{ SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
++	{ SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ };
+ 
+ static int si2157_load_firmware(struct dvb_frontend *fe,
+@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
+ 		}
+ 	}
+ 
+-	if (!fw_name && !fw_alt_name) {
++	if (required && !fw_name && !fw_alt_name) {
+ 		dev_err(&client->dev,
+ 			"unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
+ 			part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
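
The si2157_tuners fix above is a pure initializer-ordering bug: the table listed { part, required, rom, ... } while the struct declares the ROM id before the required flag, so every entry had the two fields swapped. Positional initializers invite exactly this; C99 designated initializers rule it out. A sketch of such a table written defensively; the field names are inferred from the diff and the firmware strings are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct tuner_info {
	int		part_id;
	unsigned char	rom_id;
	bool		required;
	const char	*fw_name;
};

/* With designators, the order of struct members (or of each entry)
 * can change without silently corrupting the table. */
static const struct tuner_info tuners[] = {
	{ .part_id = 2141, .rom_id = 0x60, .required = false,
	  .fw_name = "si2141-rom60.fw" },	/* illustrative name */
	{ .part_id = 2157, .rom_id = 0x50, .required = false,
	  .fw_name = "si2157-rom50.fw" },	/* illustrative name */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(tuners) / sizeof(tuners[0]); i++)
		printf("si%d rom 0x%02x -> %s\n", tuners[i].part_id,
		       tuners[i].rom_id, tuners[i].fw_name);
	return 0;
}
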
+diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
+index c267283b01fda..e749dcb3ddea9 100644
+--- a/drivers/memory/atmel-ebi.c
++++ b/drivers/memory/atmel-ebi.c
+@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
+ 	smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
+ 
+ 	ebi->smc.regmap = syscon_node_to_regmap(smc_np);
+-	if (IS_ERR(ebi->smc.regmap))
+-		return PTR_ERR(ebi->smc.regmap);
++	if (IS_ERR(ebi->smc.regmap)) {
++		ret = PTR_ERR(ebi->smc.regmap);
++		goto put_node;
++	}
+ 
+ 	ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
+-	if (IS_ERR(ebi->smc.layout))
+-		return PTR_ERR(ebi->smc.layout);
++	if (IS_ERR(ebi->smc.layout)) {
++		ret = PTR_ERR(ebi->smc.layout);
++		goto put_node;
++	}
+ 
+ 	ebi->smc.clk = of_clk_get(smc_np, 0);
+ 	if (IS_ERR(ebi->smc.clk)) {
+-		if (PTR_ERR(ebi->smc.clk) != -ENOENT)
+-			return PTR_ERR(ebi->smc.clk);
++		if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
++			ret = PTR_ERR(ebi->smc.clk);
++			goto put_node;
++		}
+ 
+ 		ebi->smc.clk = NULL;
+ 	}
++	of_node_put(smc_np);
+ 	ret = clk_prepare_enable(ebi->smc.clk);
+ 	if (ret)
+ 		return ret;
+@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	return of_platform_populate(np, NULL, NULL, dev);
++
++put_node:
++	of_node_put(smc_np);
++	return ret;
+ }
+ 
+ static __maybe_unused int atmel_ebi_resume(struct device *dev)
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index e4cc64f560196..2e545f473cc68 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -651,6 +651,7 @@ static int rpcif_probe(struct platform_device *pdev)
+ 	struct platform_device *vdev;
+ 	struct device_node *flash;
+ 	const char *name;
++	int ret;
+ 
+ 	flash = of_get_next_child(pdev->dev.of_node, NULL);
+ 	if (!flash) {
+@@ -674,7 +675,14 @@ static int rpcif_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	vdev->dev.parent = &pdev->dev;
+ 	platform_set_drvdata(pdev, vdev);
+-	return platform_device_add(vdev);
++
++	ret = platform_device_add(vdev);
++	if (ret) {
++		platform_device_put(vdev);
++		return ret;
++	}
++
++	return 0;
+ }
+ 
+ static int rpcif_remove(struct platform_device *pdev)
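
The rpcif_probe() fix above applies the standard rule for two-phase device registration: once a platform device has been allocated (earlier in the function, outside this hunk), a failed platform_device_add() must be answered with platform_device_put() so the refcounting release path frees it, never a bare kfree(). The same alloc/add/put ownership rule, sketched with a toy refcount:

#include <stdlib.h>

struct pdev { int refs; };

static struct pdev *pdev_alloc(void)
{
	struct pdev *p = calloc(1, sizeof(*p));

	if (p)
		p->refs = 1;	/* caller owns one reference */
	return p;
}

static void pdev_put(struct pdev *p)
{
	if (--p->refs == 0)
		free(p);	/* dropping the last reference frees it */
}

static int pdev_add(struct pdev *p)
{
	(void)p;
	return -1;	/* simulate a registration failure */
}

int main(void)
{
	struct pdev *p = pdev_alloc();
	int ret;

	if (!p)
		return 1;

	ret = pdev_add(p);
	if (ret) {
		/* On add failure the caller still holds its reference
		 * and must drop it -- the counterpart of the diff's
		 * platform_device_put(vdev). */
		pdev_put(p);
		return 1;
	}
	return 0;
}
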
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 9957772201d58..c414d9e9d7c09 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -599,6 +599,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+ 	struct ocelot *ocelot = ds->priv;
+ 	struct felix *felix = ocelot_to_felix(ocelot);
+ 	enum dsa_tag_protocol old_proto = felix->tag_proto;
++	bool cpu_port_active = false;
++	struct dsa_port *dp;
+ 	int err;
+ 
+ 	if (proto != DSA_TAG_PROTO_SEVILLE &&
+@@ -606,6 +608,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+ 	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ 		return -EPROTONOSUPPORT;
+ 
++	/* We don't support multiple CPU ports, yet the DT blob may have
++	 * multiple CPU ports defined. The first CPU port is the active one,
++	 * the others are inactive. In this case, DSA will call
++	 * ->change_tag_protocol() multiple times, once per CPU port.
++	 * Since we implement the tagging protocol change towards "ocelot" or
++	 * "seville" as effectively initializing the NPI port, obeying every
++	 * call would end up making the NPI port whichever @cpu argument was
++	 * passed last, which is an unused DSA CPU port and not the one
++	 * that should actively pass traffic.
++	 * Suppress DSA's calls on CPU ports that are inactive.
++	 */
++	dsa_switch_for_each_user_port(dp, ds) {
++		if (dp->cpu_dp->index == cpu) {
++			cpu_port_active = true;
++			break;
++		}
++	}
++
++	if (!cpu_port_active)
++		return 0;
++
+ 	felix_del_tag_protocol(ds, cpu, old_proto);
+ 
+ 	err = felix_set_tag_protocol(ds, cpu, proto);
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 2875b52508567..443d34ce2853f 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -2328,7 +2328,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
+ 
+ 	err = dsa_register_switch(ds);
+ 	if (err) {
+-		dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
++		dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
+ 		goto err_register_ds;
+ 	}
+ 
+diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
+index 1c62212fb0ecb..1315896ed6e2a 100644
+--- a/drivers/net/dsa/realtek/Kconfig
++++ b/drivers/net/dsa/realtek/Kconfig
+@@ -14,6 +14,7 @@ menuconfig NET_DSA_REALTEK
+ config NET_DSA_REALTEK_SMI
+ 	tristate "Realtek SMI connected switch driver"
+ 	depends on NET_DSA_REALTEK
++	depends on OF
+ 	default y
+ 	help
+ 	  Select to enable support for registering switches connected
+diff --git a/drivers/net/dsa/realtek/realtek-smi-core.c b/drivers/net/dsa/realtek/realtek-smi-core.c
+index aae46ada8d839..a9c21f9e33709 100644
+--- a/drivers/net/dsa/realtek/realtek-smi-core.c
++++ b/drivers/net/dsa/realtek/realtek-smi-core.c
+@@ -315,7 +315,21 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+ 	return realtek_smi_read_reg(smi, reg, val);
+ }
+ 
+-static const struct regmap_config realtek_smi_mdio_regmap_config = {
++static void realtek_smi_lock(void *ctx)
++{
++	struct realtek_smi *smi = ctx;
++
++	mutex_lock(&smi->map_lock);
++}
++
++static void realtek_smi_unlock(void *ctx)
++{
++	struct realtek_smi *smi = ctx;
++
++	mutex_unlock(&smi->map_lock);
++}
++
++static const struct regmap_config realtek_smi_regmap_config = {
+ 	.reg_bits = 10, /* A4..A0 R4..R0 */
+ 	.val_bits = 16,
+ 	.reg_stride = 1,
+@@ -325,6 +339,21 @@ static const struct regmap_config realtek_smi_mdio_regmap_config = {
+ 	.reg_read = realtek_smi_read,
+ 	.reg_write = realtek_smi_write,
+ 	.cache_type = REGCACHE_NONE,
++	.lock = realtek_smi_lock,
++	.unlock = realtek_smi_unlock,
++};
++
++static const struct regmap_config realtek_smi_nolock_regmap_config = {
++	.reg_bits = 10, /* A4..A0 R4..R0 */
++	.val_bits = 16,
++	.reg_stride = 1,
++	/* PHY regs are at 0x8000 */
++	.max_register = 0xffff,
++	.reg_format_endian = REGMAP_ENDIAN_BIG,
++	.reg_read = realtek_smi_read,
++	.reg_write = realtek_smi_write,
++	.cache_type = REGCACHE_NONE,
++	.disable_locking = true,
+ };
+ 
+ static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+@@ -388,6 +417,7 @@ static int realtek_smi_probe(struct platform_device *pdev)
+ 	const struct realtek_smi_variant *var;
+ 	struct device *dev = &pdev->dev;
+ 	struct realtek_smi *smi;
++	struct regmap_config rc;
+ 	struct device_node *np;
+ 	int ret;
+ 
+@@ -398,14 +428,26 @@ static int realtek_smi_probe(struct platform_device *pdev)
+ 	if (!smi)
+ 		return -ENOMEM;
+ 	smi->chip_data = (void *)smi + sizeof(*smi);
+-	smi->map = devm_regmap_init(dev, NULL, smi,
+-				    &realtek_smi_mdio_regmap_config);
++
++	mutex_init(&smi->map_lock);
++
++	rc = realtek_smi_regmap_config;
++	rc.lock_arg = smi;
++	smi->map = devm_regmap_init(dev, NULL, smi, &rc);
+ 	if (IS_ERR(smi->map)) {
+ 		ret = PTR_ERR(smi->map);
+ 		dev_err(dev, "regmap init failed: %d\n", ret);
+ 		return ret;
+ 	}
+ 
++	rc = realtek_smi_nolock_regmap_config;
++	smi->map_nolock = devm_regmap_init(dev, NULL, smi, &rc);
++	if (IS_ERR(smi->map_nolock)) {
++		ret = PTR_ERR(smi->map_nolock);
++		dev_err(dev, "regmap init failed: %d\n", ret);
++		return ret;
++	}
++
+ 	/* Link forward and backward */
+ 	smi->dev = dev;
+ 	smi->clk_delay = var->clk_delay;
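
The two regmap configs added above implement a dual-view locking scheme: the default map locks itself through the .lock/.unlock callbacks (with .lock_arg carrying the driver context), while the second map sets .disable_locking so a multi-register sequence can take the mutex once and issue all of its accesses inside it, as the rtl8365mb PHY OCP accessors further below do. A sketch of the idea with a plain pthread mutex in place of regmap:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int regs[16];

/* Locked accessor: safe for independent single-register access. */
static void reg_write(unsigned int reg, unsigned int val)
{
	pthread_mutex_lock(&map_lock);
	regs[reg] = val;
	pthread_mutex_unlock(&map_lock);
}

/* No-lock accessor: the caller must already hold map_lock. */
static void reg_write_nolock(unsigned int reg, unsigned int val)
{
	regs[reg] = val;
}

/* A multi-register sequence that must be atomic as a whole: taking
 * the lock once and using the no-lock accessors avoids both races
 * and recursive locking. */
static void indirect_phy_write(unsigned int addr, unsigned int data)
{
	pthread_mutex_lock(&map_lock);
	reg_write_nolock(0, addr);	/* address register */
	reg_write_nolock(1, data);	/* data register */
	reg_write_nolock(2, 1);		/* "go" bit */
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	reg_write(3, 0xabcd);
	indirect_phy_write(0x10, 0x1234);
	printf("reg1=0x%x\n", regs[1]);
	return 0;
}
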
+diff --git a/drivers/net/dsa/realtek/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek-smi-core.h
+index faed387d8db38..5fcad51e1984f 100644
+--- a/drivers/net/dsa/realtek/realtek-smi-core.h
++++ b/drivers/net/dsa/realtek/realtek-smi-core.h
+@@ -49,6 +49,8 @@ struct realtek_smi {
+ 	struct gpio_desc	*mdc;
+ 	struct gpio_desc	*mdio;
+ 	struct regmap		*map;
++	struct regmap		*map_nolock;
++	struct mutex		map_lock;
+ 	struct mii_bus		*slave_mii_bus;
+ 
+ 	unsigned int		clk_delay;
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index 3b729544798b1..696c8906c74cb 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -565,7 +565,7 @@ static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+ {
+ 	u32 val;
+ 
+-	return regmap_read_poll_timeout(smi->map,
++	return regmap_read_poll_timeout(smi->map_nolock,
+ 					RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ 					val, !val, 10, 100);
+ }
+@@ -579,7 +579,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ 	/* Set OCP prefix */
+ 	val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ 	ret = regmap_update_bits(
+-		smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
++		smi->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ 		RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ 		FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ 	if (ret)
+@@ -592,8 +592,8 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ 			  ocp_addr >> 1);
+ 	val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ 			  ocp_addr >> 6);
+-	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
+-			   val);
++	ret = regmap_write(smi->map_nolock,
++			   RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -606,36 +606,42 @@ static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+ 	u32 val;
+ 	int ret;
+ 
++	mutex_lock(&smi->map_lock);
++
+ 	ret = rtl8365mb_phy_poll_busy(smi);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/* Execute read operation */
+ 	val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ 			 RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ 	      FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ 			 RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+-	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
++	ret = regmap_write(smi->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
++			   val);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = rtl8365mb_phy_poll_busy(smi);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/* Get PHY register data */
+-	ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
+-			  &val);
++	ret = regmap_read(smi->map_nolock,
++			  RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	*data = val & 0xFFFF;
+ 
+-	return 0;
++out:
++	mutex_unlock(&smi->map_lock);
++
++	return ret;
+ }
+ 
+ static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+@@ -644,32 +650,38 @@ static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+ 	u32 val;
+ 	int ret;
+ 
++	mutex_lock(&smi->map_lock);
++
+ 	ret = rtl8365mb_phy_poll_busy(smi);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/* Set PHY register data */
+-	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
+-			   data);
++	ret = regmap_write(smi->map_nolock,
++			   RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/* Execute write operation */
+ 	val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ 			 RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ 	      FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ 			 RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+-	ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
++	ret = regmap_write(smi->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
++			   val);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	ret = rtl8365mb_phy_poll_busy(smi);
+ 	if (ret)
+-		return ret;
++		goto out;
++
++out:
++	mutex_unlock(&smi->map_lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index bd5998012a876..2da804f84b480 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
+ 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		__raw_writel(value, offset);
+ 	else
+-		writel(value, offset);
++		writel_relaxed(value, offset);
+ }
+ 
+ static inline u32 bcmgenet_readl(void __iomem *offset)
+@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
+ 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		return __raw_readl(offset);
+ 	else
+-		return readl(offset);
++		return readl_relaxed(offset);
+ }
+ 
+ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index d5356db7539a4..caf48023f8ea5 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ 		priv->rxdes0_edorr_mask = BIT(30);
+ 		priv->txdes0_edotr_mask = BIT(30);
+ 		priv->is_aspeed = true;
+-		/* Disable ast2600 problematic HW arbitration */
+-		if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
+-			iowrite32(FTGMAC100_TM_DEFAULT,
+-				  priv->base + FTGMAC100_OFFSET_TM);
+-		}
+ 	} else {
+ 		priv->rxdes0_edorr_mask = BIT(15);
+ 		priv->txdes0_edotr_mask = BIT(15);
+@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ 		err = ftgmac100_setup_clk(priv);
+ 		if (err)
+ 			goto err_phy_connect;
++
++		/* Disable ast2600 problematic HW arbitration */
++		if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
++			iowrite32(FTGMAC100_TM_DEFAULT,
++				  priv->base + FTGMAC100_OFFSET_TM);
+ 	}
+ 
+ 	/* Default ring sizes */
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index d10e9a8e8011f..f55ecb6727684 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2817,7 +2817,6 @@ continue_reset:
+ 	running = adapter->state == __IAVF_RUNNING;
+ 
+ 	if (running) {
+-		netdev->flags &= ~IFF_UP;
+ 		netif_carrier_off(netdev);
+ 		netif_tx_stop_all_queues(netdev);
+ 		adapter->link_up = false;
+@@ -2934,7 +2933,7 @@ continue_reset:
+ 		 * to __IAVF_RUNNING
+ 		 */
+ 		iavf_up_complete(adapter);
+-		netdev->flags |= IFF_UP;
++
+ 		iavf_irq_enable(adapter, true);
+ 	} else {
+ 		iavf_change_state(adapter, __IAVF_DOWN);
+@@ -2950,10 +2949,8 @@ continue_reset:
+ reset_err:
+ 	mutex_unlock(&adapter->client_lock);
+ 	mutex_unlock(&adapter->crit_lock);
+-	if (running) {
++	if (running)
+ 		iavf_change_state(adapter, __IAVF_RUNNING);
+-		netdev->flags |= IFF_UP;
+-	}
+ 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+ 	iavf_close(netdev);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
+index 5daade32ea625..fba178e076009 100644
+--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
++++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
+@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
+ {
+ 	struct net_device *netdev;
+ 
+-	if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
++	if (!vsi || vsi->type != ICE_VSI_PF)
+ 		return;
+ 
+ 	netdev = vsi->netdev;
+@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
+ 	int base_idx, i;
+ 
+ 	if (!vsi || vsi->type != ICE_VSI_PF)
+-		return -EINVAL;
++		return 0;
+ 
+ 	pf = vsi->back;
+ 	netdev = vsi->netdev;
+@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
+ 	if (!pf_vsi)
+ 		return;
+ 
+-	ice_free_cpu_rx_rmap(pf_vsi);
+ 	ice_clear_arfs(pf_vsi);
+ }
+ 
+@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
+ 		return;
+ 
+ 	ice_remove_arfs(pf);
+-	if (ice_set_cpu_rx_rmap(pf_vsi)) {
+-		dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
+-		return;
+-	}
+ 	ice_init_arfs(pf_vsi);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 5fd2bbeab2d15..15bb6f001a04f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2869,6 +2869,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
+ 		return;
+ 
+ 	vsi->irqs_ready = false;
++	ice_free_cpu_rx_rmap(vsi);
++
+ 	ice_for_each_q_vector(vsi, i) {
+ 		u16 vector = i + base;
+ 		int irq_num;
+@@ -2882,7 +2884,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
+ 			continue;
+ 
+ 		/* clear the affinity notifier in the IRQ descriptor */
+-		irq_set_affinity_notifier(irq_num, NULL);
++		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
++			irq_set_affinity_notifier(irq_num, NULL);
+ 
+ 		/* clear the affinity_mask in the IRQ descriptor */
+ 		irq_set_affinity_hint(irq_num, NULL);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index db2e02e673a77..2de2bbbca1e97 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2494,6 +2494,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
+ 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ 	}
+ 
++	err = ice_set_cpu_rx_rmap(vsi);
++	if (err) {
++		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
++			   vsi->vsi_num, ERR_PTR(err));
++		goto free_q_irqs;
++	}
++
+ 	vsi->irqs_ready = true;
+ 	return 0;
+ 
+@@ -3605,20 +3612,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
+ 	 */
+ 	ice_napi_add(vsi);
+ 
+-	status = ice_set_cpu_rx_rmap(vsi);
+-	if (status) {
+-		dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
+-			vsi->vsi_num, status);
+-		goto unroll_napi_add;
+-	}
+ 	status = ice_init_mac_fltr(pf);
+ 	if (status)
+-		goto free_cpu_rx_map;
++		goto unroll_napi_add;
+ 
+ 	return 0;
+ 
+-free_cpu_rx_map:
+-	ice_free_cpu_rx_rmap(vsi);
+ unroll_napi_add:
+ 	ice_tc_indir_block_unregister(vsi);
+ unroll_cfg_netdev:
+@@ -5076,7 +5075,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
+ 			continue;
+ 		ice_vsi_free_q_vectors(pf->vsi[v]);
+ 	}
+-	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
+ 	ice_clear_interrupt_scheme(pf);
+ 
+ 	pci_save_state(pdev);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+index 939b692ffc335..ce843ea914646 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
+ 	return 0;
+ 
+ errout:
++	mutex_destroy(&mlxsw_i2c->cmd.lock);
+ 	i2c_set_clientdata(client, NULL);
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
+index 93df3049cdc05..1b632cdd76309 100644
+--- a/drivers/net/ethernet/micrel/Kconfig
++++ b/drivers/net/ethernet/micrel/Kconfig
+@@ -39,6 +39,7 @@ config KS8851
+ config KS8851_MLL
+ 	tristate "Micrel KS8851 MLL"
+ 	depends on HAS_IOMEM
++	depends on PTP_1588_CLOCK_OPTIONAL
+ 	select MII
+ 	select CRC32
+ 	select EEPROM_93CX6
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+index ce5970bdcc6a0..2679111ef6696 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ 
+ 			lan966x_mac_process_raw_entry(&raw_entries[column],
+ 						      mac, &vid, &dest_idx);
+-			WARN_ON(dest_idx > lan966x->num_phys_ports);
++			if (WARN_ON(dest_idx > lan966x->num_phys_ports))
++				continue;
+ 
+ 			/* If the entry in SW is found, then there is nothing
+ 			 * to do
+@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ 
+ 		lan966x_mac_process_raw_entry(&raw_entries[column],
+ 					      mac, &vid, &dest_idx);
+-		WARN_ON(dest_idx > lan966x->num_phys_ports);
++		if (WARN_ON(dest_idx > lan966x->num_phys_ports))
++			continue;
+ 
+ 		mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ 		if (!mac_entry)
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+index 7de55f6a4da80..3c987fd6b9e23 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+@@ -261,8 +261,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
+ 
+ 	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ 		switchdev_bridge_port_unoffload(port->dev, port,
+-						&lan966x_switchdev_nb,
+-						&lan966x_switchdev_blocking_nb);
++						NULL, NULL);
+ 
+ 	return NOTIFY_DONE;
+ }
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 50ac3ee2577a2..21d2645885cef 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
+ 		status = myri10ge_xmit(curr, dev);
+ 		if (status != 0) {
+ 			dev_kfree_skb_any(curr);
+-			if (segs != NULL) {
+-				curr = segs;
+-				segs = next;
++			skb_list_walk_safe(next, curr, next) {
+ 				curr->next = NULL;
+-				dev_kfree_skb_any(segs);
++				dev_kfree_skb_any(curr);
+ 			}
+ 			goto drop;
+ 		}
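
The myri10ge hunk above fixes the software-TSO error path, which used to free only the first remaining segment; skb_list_walk_safe() walks the entire ->next chain and is safe to use while freeing, because it saves the successor before the loop body runs. The same safe-walk idiom on a plain singly linked list:

#include <stdlib.h>

struct seg { struct seg *next; };

/* Walk a ->next chain while freeing nodes: stash the successor
 * before the body may free the current node. */
#define list_walk_safe(head, cur, tmp) \
	for ((cur) = (head); (cur) && ((tmp) = (cur)->next, 1); (cur) = (tmp))

int main(void)
{
	struct seg *head = NULL, *cur, *tmp;

	for (int i = 0; i < 4; i++) {	/* build a 4-segment chain */
		struct seg *s = malloc(sizeof(*s));

		s->next = head;
		head = s;
	}

	list_walk_safe(head, cur, tmp) {
		cur->next = NULL;	/* detach, as the driver does */
		free(cur);		/* freeing cur is safe: tmp is saved */
	}
	return 0;
}
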
+diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+index cd478d2cd871a..00f6d347eaf75 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+@@ -57,10 +57,6 @@
+ #define TSE_PCS_USE_SGMII_ENA				BIT(0)
+ #define TSE_PCS_IF_USE_SGMII				0x03
+ 
+-#define SGMII_ADAPTER_CTRL_REG				0x00
+-#define SGMII_ADAPTER_DISABLE				0x0001
+-#define SGMII_ADAPTER_ENABLE				0x0000
+-
+ #define AUTONEGO_LINK_TIMER				20
+ 
+ static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
+@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ 			   unsigned int speed)
+ {
+ 	void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+-	void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+ 	u32 val;
+ 
+-	writew(SGMII_ADAPTER_ENABLE,
+-	       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+-
+ 	pcs->autoneg = phy_dev->autoneg;
+ 
+ 	if (phy_dev->autoneg == AUTONEG_ENABLE) {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+index 442812c0a4bdc..694ac25ef426b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+@@ -10,6 +10,10 @@
+ #include <linux/phy.h>
+ #include <linux/timer.h>
+ 
++#define SGMII_ADAPTER_CTRL_REG		0x00
++#define SGMII_ADAPTER_ENABLE		0x0000
++#define SGMII_ADAPTER_DISABLE		0x0001
++
+ struct tse_pcs {
+ 	struct device *dev;
+ 	void __iomem *tse_pcs_base;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+index b7c2579c963b6..ac9e6c7a33b55 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -18,9 +18,6 @@
+ 
+ #include "altr_tse_pcs.h"
+ 
+-#define SGMII_ADAPTER_CTRL_REG                          0x00
+-#define SGMII_ADAPTER_DISABLE                           0x0001
+-
+ #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
+ #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
+ #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
+@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+ {
+ 	struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ 	void __iomem *splitter_base = dwmac->splitter_base;
+-	void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
+ 	void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
+ 	struct device *dev = dwmac->dev;
+ 	struct net_device *ndev = dev_get_drvdata(dev);
+ 	struct phy_device *phy_dev = ndev->phydev;
+ 	u32 val;
+ 
+-	if ((tse_pcs_base) && (sgmii_adapter_base))
+-		writew(SGMII_ADAPTER_DISABLE,
+-		       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
++	writew(SGMII_ADAPTER_DISABLE,
++	       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ 
+ 	if (splitter_base) {
+ 		val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+ 		writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+ 	}
+ 
+-	if (tse_pcs_base && sgmii_adapter_base)
++	writew(SGMII_ADAPTER_ENABLE,
++	       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
++	if (phy_dev)
+ 		tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
+ 
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 90d96eb79984e..a960227f61da4 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2072,15 +2072,14 @@ static int axienet_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto cleanup_clk;
+ 
+-	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+-	if (lp->phy_node) {
+-		ret = axienet_mdio_setup(lp);
+-		if (ret)
+-			dev_warn(&pdev->dev,
+-				 "error registering MDIO bus: %d\n", ret);
+-	}
++	ret = axienet_mdio_setup(lp);
++	if (ret)
++		dev_warn(&pdev->dev,
++			 "error registering MDIO bus: %d\n", ret);
++
+ 	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ 	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
++		lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ 		if (!lp->phy_node) {
+ 			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ 			ret = -EINVAL;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 6ef5f77be4d0a..c83664b28d890 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ 			return RX_HANDLER_CONSUMED;
+ 		*pskb = skb;
+ 		eth = eth_hdr(skb);
+-		if (macvlan_forward_source(skb, port, eth->h_source))
++		if (macvlan_forward_source(skb, port, eth->h_source)) {
++			kfree_skb(skb);
+ 			return RX_HANDLER_CONSUMED;
++		}
+ 		src = macvlan_hash_lookup(port, eth->h_source);
+ 		if (src && src->mode != MACVLAN_MODE_VEPA &&
+ 		    src->mode != MACVLAN_MODE_BRIDGE) {
+@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ 		return RX_HANDLER_PASS;
+ 	}
+ 
+-	if (macvlan_forward_source(skb, port, eth->h_source))
++	if (macvlan_forward_source(skb, port, eth->h_source)) {
++		kfree_skb(skb);
+ 		return RX_HANDLER_CONSUMED;
++	}
+ 	if (macvlan_passthru(port))
+ 		vlan = list_first_or_null_rcu(&port->vlans,
+ 					      struct macvlan_dev, list);
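
The macvlan fix above is about skb ownership in rx handlers: returning RX_HANDLER_CONSUMED tells the core the handler now owns the buffer, so a handler that drops the packet (as after macvlan_forward_source() matches) must free the skb itself or it leaks on every such frame. A tiny sketch of that contract; the result names follow the kernel's, the buffer type is a stand-in:

#include <stdbool.h>
#include <stdlib.h>

enum rx_result { RX_HANDLER_PASS, RX_HANDLER_CONSUMED };

struct buf { char data[64]; };

static bool should_drop(const struct buf *b) { (void)b; return true; }

static enum rx_result handle_frame(struct buf *b)
{
	if (should_drop(b)) {
		/* CONSUMED means "I own it now": the core will not
		 * touch the buffer again, so we must free it here. */
		free(b);
		return RX_HANDLER_CONSUMED;
	}
	/* PASS: the core keeps ownership and continues processing. */
	return RX_HANDLER_PASS;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	return b && handle_frame(b) == RX_HANDLER_CONSUMED ? 0 : 1;
}
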
+diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
+index 1becb1a731f67..1c1584fca6327 100644
+--- a/drivers/net/mdio/fwnode_mdio.c
++++ b/drivers/net/mdio/fwnode_mdio.c
+@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ 	int rc;
+ 
+ 	rc = fwnode_irq_get(child, 0);
++	/* Don't wait forever if the IRQ provider doesn't become available;
++	 * just fall back to poll mode.
++	 */
++	if (rc == -EPROBE_DEFER)
++		rc = driver_deferred_probe_check_state(&phy->mdio.dev);
+ 	if (rc == -EPROBE_DEFER)
+ 		return rc;
+ 
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 98f586f910fb1..8ed4fcf70b9b2 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ 	spin_lock(&sl->lock);
+ 
+ 	if (netif_queue_stopped(dev)) {
+-		if (!netif_running(dev))
++		if (!netif_running(dev) || !sl->tty)
+ 			goto out;
+ 
+ 		/* May be we must check transmitter timeout here ?
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index de999e0fedbca..aa78d7e00289a 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1106,7 +1106,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	/* NETIF_F_LLTX requires to do our own update of trans_start */
+ 	queue = netdev_get_tx_queue(dev, txq);
+-	queue->trans_start = jiffies;
++	txq_trans_cond_update(queue);
+ 
+ 	/* Notify and wake up reader process */
+ 	if (tfile->flags & TUN_FASYNC)
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index ea06d10e1c21a..ca409d450a296 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 	if (start_of_descs != desc_offset)
+ 		goto err;
+ 
+-	/* self check desc_offset from header*/
+-	if (desc_offset >= skb_len)
++	/* Sanity-check desc_offset from the header and make sure that
++	 * the whole metadata array fits inside the SKB
++	 */
++	if (pkt_count * 2 + desc_offset >= skb_len)
+ 		goto err;
+ 
++	/* Packets must not overlap the metadata array */
++	skb_trim(skb, desc_offset);
++
+ 	if (pkt_count == 0)
+ 		goto err;
+ 
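
The aqc111 change above hardens the USB receive path against a corrupt or hostile header: it is not enough for desc_offset to be inside the buffer, the whole metadata array (pkt_count entries of 2 bytes each) must fit, and the packet data is then trimmed so it cannot overlap that array. The core bounds check in plain C, with an explicit guard against the multiplication overflowing; only the 2-byte descriptor size is taken from the diff:

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE 2	/* bytes of metadata per packet, as in the diff */

/* Return 0 if pkt_count descriptors starting at desc_offset fit
 * inside a buffer of skb_len bytes, -1 otherwise. */
static int check_metadata_bounds(size_t skb_len, size_t desc_offset,
				 size_t pkt_count)
{
	/* Guard the multiplication before using it in a comparison. */
	if (pkt_count > (SIZE_MAX - desc_offset) / DESC_SIZE)
		return -1;
	if (pkt_count * DESC_SIZE + desc_offset >= skb_len)
		return -1;
	return 0;
}

int main(void)
{
	/* A header claiming more packets than the buffer can hold
	 * must be rejected, not walked. */
	printf("%d\n", check_metadata_bounds(1024, 1000, 64));	/* -1 */
	printf("%d\n", check_metadata_bounds(1024, 900, 8));	/* 0 */
	return 0;
}
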
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index d29fb9759cc95..6c8f4f4dfc8a9 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	rcu_read_lock();
+ 	rcv = rcu_dereference(priv->peer);
+-	if (unlikely(!rcv)) {
++	if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
+ 		kfree_skb(skb);
+ 		goto drop;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 28de877ad6c47..f54d5819477a4 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -3131,6 +3131,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ 			arvif->do_not_send_tmpl = true;
+ 		else
+ 			arvif->do_not_send_tmpl = false;
++
++		if (vif->bss_conf.he_support) {
++			ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
++							    WMI_VDEV_PARAM_BA_MODE,
++							    WMI_BA_MODE_BUFFER_SIZE_256);
++			if (ret)
++				ath11k_warn(ar->ab,
++					    "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
++					    arvif->vdev_id);
++			else
++				ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
++					   "Set BA BUFFER SIZE 256 for VDEV: %d\n",
++					   arvif->vdev_id);
++		}
+ 	}
+ 
+ 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+@@ -3166,14 +3180,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ 
+ 		if (arvif->is_up && vif->bss_conf.he_support &&
+ 		    vif->bss_conf.he_oper.params) {
+-			ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+-							    WMI_VDEV_PARAM_BA_MODE,
+-							    WMI_BA_MODE_BUFFER_SIZE_256);
+-			if (ret)
+-				ath11k_warn(ar->ab,
+-					    "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+-					    arvif->vdev_id);
+-
+ 			param_id = WMI_VDEV_PARAM_HEOPS_0_31;
+ 			param_value = vif->bss_conf.he_oper.params;
+ 			ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 98090e40e1cf4..e2791d45f5f59 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+ 			continue;
+ 
+ 		txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
+-		fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
++		fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
+ 		if (fi->keyix == keyix)
+ 			return true;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index d0caf1de2bdec..db83cc4ba810a 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
+ {
+ 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ 	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
+-		     sizeof(tx_info->rate_driver_data));
+-	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
++		     sizeof(tx_info->status.status_driver_data));
++	return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
+ }
+ 
+ static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+@@ -2542,6 +2542,16 @@ skip_tx_complete:
+ 	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
+ }
+ 
++static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
++{
++	void *ptr = &tx_info->status;
++
++	memset(ptr + sizeof(tx_info->status.rates), 0,
++	       sizeof(tx_info->status) -
++	       sizeof(tx_info->status.rates) -
++	       sizeof(tx_info->status.status_driver_data));
++}
++
+ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ 			     struct ath_tx_status *ts, int nframes, int nbad,
+ 			     int txok)
+@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ 	struct ath_hw *ah = sc->sc_ah;
+ 	u8 i, tx_rateindex;
+ 
++	ath_clear_tx_status(tx_info);
++
+ 	if (txok)
+ 		tx_info->status.ack_signal = ts->ts_rssi;
+ 
+@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ 	tx_info->status.ampdu_len = nframes;
+ 	tx_info->status.ampdu_ack_len = nframes - nbad;
+ 
++	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
++
++	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
++		tx_info->status.rates[i].count = 0;
++		tx_info->status.rates[i].idx = -1;
++	}
++
+ 	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
+ 	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
+ 		/*
+@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ 			tx_info->status.rates[tx_rateindex].count =
+ 				hw->max_rate_tries;
+ 	}
+-
+-	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+-		tx_info->status.rates[i].count = 0;
+-		tx_info->status.rates[i].idx = -1;
+-	}
+-
+-	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+-
+-	/* we report airtime in ath_tx_count_airtime(), don't report twice */
+-	tx_info->status.tx_time = 0;
+ }
+ 
+ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index ae0bc2fee4ca8..88b3b56d05228 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -3404,6 +3404,15 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 	hbus->bridge->domain_nr = dom;
+ #ifdef CONFIG_X86
+ 	hbus->sysdata.domain = dom;
++#elif defined(CONFIG_ARM64)
++	/*
++	 * Set the PCI bus parent to be the corresponding VMbus
++	 * device. Then the VMbus device will be assigned as the
++	 * ACPI companion in pcibios_root_bridge_prepare() and
++	 * pci_dma_configure() will propagate device coherence
++	 * information to devices created on the bus.
++	 */
++	hbus->sysdata.parent = hdev->device.parent;
+ #endif
+ 
+ 	hbus->hdev = hdev;
+diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
+index 94ebc1ecace7c..b1b2a55de77fc 100644
+--- a/drivers/perf/fsl_imx8_ddr_perf.c
++++ b/drivers/perf/fsl_imx8_ddr_perf.c
+@@ -29,7 +29,7 @@
+ #define CNTL_OVER_MASK		0xFFFFFFFE
+ 
+ #define CNTL_CSV_SHIFT		24
+-#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)
++#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)
+ 
+ #define EVENT_CYCLES_ID		0
+ #define EVENT_CYCLES_COUNTER	0
+diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
+index cadea0344486f..40befdd9dfa92 100644
+--- a/drivers/regulator/wm8994-regulator.c
++++ b/drivers/regulator/wm8994-regulator.c
+@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
+ };
+ 
+ static const struct regulator_desc wm8994_ldo_desc[] = {
++	{
++		.name = "LDO1",
++		.id = 1,
++		.type = REGULATOR_VOLTAGE,
++		.n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
++		.vsel_reg = WM8994_LDO_1,
++		.vsel_mask = WM8994_LDO1_VSEL_MASK,
++		.ops = &wm8994_ldo1_ops,
++		.min_uV = 2400000,
++		.uV_step = 100000,
++		.enable_time = 3000,
++		.off_on_delay = 36000,
++		.owner = THIS_MODULE,
++	},
++	{
++		.name = "LDO2",
++		.id = 2,
++		.type = REGULATOR_VOLTAGE,
++		.n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
++		.vsel_reg = WM8994_LDO_2,
++		.vsel_mask = WM8994_LDO2_VSEL_MASK,
++		.ops = &wm8994_ldo2_ops,
++		.enable_time = 3000,
++		.off_on_delay = 36000,
++		.owner = THIS_MODULE,
++	},
++};
++
++static const struct regulator_desc wm8958_ldo_desc[] = {
+ 	{
+ 		.name = "LDO1",
+ 		.id = 1,
+@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
+ 	 * regulator core and we need not worry about it on the
+ 	 * error path.
+ 	 */
+-	ldo->regulator = devm_regulator_register(&pdev->dev,
+-						 &wm8994_ldo_desc[id],
+-						 &config);
++	if (ldo->wm8994->type == WM8994) {
++		ldo->regulator = devm_regulator_register(&pdev->dev,
++							 &wm8994_ldo_desc[id],
++							 &config);
++	} else {
++		ldo->regulator = devm_regulator_register(&pdev->dev,
++							 &wm8958_ldo_desc[id],
++							 &config);
++	}
++
+ 	if (IS_ERR(ldo->regulator)) {
+ 		ret = PTR_ERR(ldo->regulator);
+ 		dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+index 61f06f6885a56..89b9fbce7488a 100644
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+@@ -36,7 +36,7 @@
+ 
+ #define IBMVSCSIS_VERSION	"v0.2"
+ 
+-#define	INITIAL_SRP_LIMIT	800
++#define	INITIAL_SRP_LIMIT	1024
+ #define	DEFAULT_MAX_SECTORS	256
+ #define MAX_TXU			1024 * 1024
+ 
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index 98cabe09c0404..8748c5996478f 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
+ 	NHT_MODE,
+ };
+ 
++enum lpfc_hba_bit_flags {
++	FABRIC_COMANDS_BLOCKED,
++	HBA_PCI_ERR,
++};
++
+ struct lpfc_hba {
+ 	/* SCSI interface function jump table entries */
+ 	struct lpfc_io_buf * (*lpfc_get_scsi_buf)
+@@ -1025,7 +1030,6 @@ struct lpfc_hba {
+ 					 * Firmware supports Forced Link Speed
+ 					 * capability
+ 					 */
+-#define HBA_PCI_ERR		0x80000 /* The PCI slot is offline */
+ #define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
+ #define HBA_SHORT_CMF		0x200000 /* shorter CMF timer routine */
+ #define HBA_CGN_DAY_WRAP	0x400000 /* HBA Congestion info day wraps */
+@@ -1335,7 +1339,6 @@ struct lpfc_hba {
+ 	atomic_t fabric_iocb_count;
+ 	struct timer_list fabric_block_timer;
+ 	unsigned long bit_flags;
+-#define	FABRIC_COMANDS_BLOCKED	0
+ 	atomic_t num_rsrc_err;
+ 	atomic_t num_cmd_success;
+ 	unsigned long last_rsrc_error_time;
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index 89e36bf14d8f8..d4340e5a3aac2 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -652,3 +652,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ 					      uint32_t hash, uint8_t *buf);
+ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
+ int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
++
++void lpfc_sli_rpi_release(struct lpfc_vport *vport,
++			  struct lpfc_nodelist *ndlp);
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 816fc406135b3..0cba306de0dbf 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
+ 
+ 	ndlp = rdata->pnode;
+ 	if (!rdata->pnode) {
+-		pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
+-		       __func__, rport, rport->scsi_target_id);
++		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
++			__func__, rport, rport->scsi_target_id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 
+ 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ 			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
+-			 "load_flag x%x refcnt %d\n",
++			 "load_flag x%x refcnt %d state %d xpt x%x\n",
+ 			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+-			 vport->load_flag, kref_read(&ndlp->kref));
++			 vport->load_flag, kref_read(&ndlp->kref),
++			 ndlp->nlp_state, ndlp->fc4_xpt_flags);
+ 
+ 	/* Don't schedule a worker thread event if the vport is going down.
+ 	 * The teardown process cleans up the node via lpfc_drop_node.
+@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 		ndlp->rport = NULL;
+ 
+ 		ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
++		/* clear the NLP_XPT_REGD flag if the node is not registered
++		 * with nvme-fc
++		 */
++		if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
++			ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ 
+ 		/* Remove the node reference from remote_port_add now.
+ 		 * The driver will not call remote_port_delete.
+@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+ 	ndlp->rport = NULL;
+ 	spin_unlock_irqrestore(&ndlp->lock, iflags);
+ 
+-	/* We need to hold the node by incrementing the reference
+-	 * count until this queued work is done
+-	 */
+-	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
++	if (phba->worker_thread) {
++		/* We need to hold the node by incrementing the reference
++		 * count until this queued work is done
++		 */
++		evtp->evt_arg1 = lpfc_nlp_get(ndlp);
++
++		spin_lock_irqsave(&phba->hbalock, iflags);
++		if (evtp->evt_arg1) {
++			evtp->evt = LPFC_EVT_DEV_LOSS;
++			list_add_tail(&evtp->evt_listp, &phba->work_list);
++			lpfc_worker_wake_up(phba);
++		}
++		spin_unlock_irqrestore(&phba->hbalock, iflags);
++	} else {
++		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
++				 "3188 worker thread is stopped %s x%06x, "
++				 "rport x%px flg x%x load_flag x%x refcnt "
++				 "%d\n", __func__, ndlp->nlp_DID,
++				 ndlp->rport, ndlp->nlp_flag,
++				 vport->load_flag, kref_read(&ndlp->kref));
++		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
++			spin_lock_irqsave(&ndlp->lock, iflags);
++			/* Node is in dev loss.  No further transaction. */
++			ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
++			spin_unlock_irqrestore(&ndlp->lock, iflags);
++			lpfc_disc_state_machine(vport, ndlp, NULL,
++						NLP_EVT_DEVICE_RM);
++		}
+ 
+-	spin_lock_irqsave(&phba->hbalock, iflags);
+-	if (evtp->evt_arg1) {
+-		evtp->evt = LPFC_EVT_DEV_LOSS;
+-		list_add_tail(&evtp->evt_listp, &phba->work_list);
+-		lpfc_worker_wake_up(phba);
+ 	}
+-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 
+ 	return;
+ }
+@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ 				 "0203 Devloss timeout on "
+ 				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+-				 "NPort x%06x Data: x%x x%x x%x\n",
++				 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
+ 				 *name, *(name+1), *(name+2), *(name+3),
+ 				 *(name+4), *(name+5), *(name+6), *(name+7),
+ 				 ndlp->nlp_DID, ndlp->nlp_flag,
+-				 ndlp->nlp_state, ndlp->nlp_rpi);
++				 ndlp->nlp_state, ndlp->nlp_rpi,
++				 kref_read(&ndlp->kref));
+ 	} else {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
+ 				 "0204 Devloss timeout on "
+@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
+ 	int free_evt;
+ 	int fcf_inuse;
+ 	uint32_t nlp_did;
++	bool hba_pci_err;
+ 
+ 	spin_lock_irq(&phba->hbalock);
+ 	while (!list_empty(&phba->work_list)) {
+ 		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+ 				 evt_listp);
+ 		spin_unlock_irq(&phba->hbalock);
++		hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
+ 		free_evt = 1;
+ 		switch (evtp->evt) {
+ 		case LPFC_EVT_ELS_RETRY:
+ 			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+-			lpfc_els_retry_delay_handler(ndlp);
+-			free_evt = 0; /* evt is part of ndlp */
++			if (!hba_pci_err) {
++				lpfc_els_retry_delay_handler(ndlp);
++				free_evt = 0; /* evt is part of ndlp */
++			}
+ 			/* decrement the node reference count held
+ 			 * for this queued work
+ 			 */
+@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
+ 			break;
+ 		case LPFC_EVT_RECOVER_PORT:
+ 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+-			lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+-			free_evt = 0;
++			if (!hba_pci_err) {
++				lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
++				free_evt = 0;
++			}
+ 			/* decrement the node reference count held for
+ 			 * this queued work
+ 			 */
+@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
+ 	struct lpfc_vport **vports;
+ 	struct lpfc_vport *vport;
+ 	int i;
++	bool hba_pci_err;
+ 
++	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
+ 	spin_lock_irq(&phba->hbalock);
+ 	ha_copy = phba->work_ha;
+ 	phba->work_ha = 0;
+ 	spin_unlock_irq(&phba->hbalock);
++	if (hba_pci_err)
++		ha_copy = 0;
+ 
+ 	/* First, try to post the next mailbox command to SLI4 device */
+-	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
++	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
+ 		lpfc_sli4_post_async_mbox(phba);
+ 
+ 	if (ha_copy & HA_ERATT) {
+@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
+ 		lpfc_handle_latt(phba);
+ 
+ 	/* Handle VMID Events */
+-	if (lpfc_is_vmid_enabled(phba)) {
++	if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
+ 		if (phba->pport->work_port_events &
+ 		    WORKER_CHECK_VMID_ISSUE_QFPA) {
+ 			lpfc_check_vmid_qfpa_issue(phba);
+@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
+ 			work_port_events = vport->work_port_events;
+ 			vport->work_port_events &= ~work_port_events;
+ 			spin_unlock_irq(&vport->work_port_lock);
++			if (hba_pci_err)
++				continue;
+ 			if (work_port_events & WORKER_DISC_TMO)
+ 				lpfc_disc_timeout_handler(vport);
+ 			if (work_port_events & WORKER_ELS_TMO)
+@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
+ 	struct lpfc_vport **vports;
+ 	LPFC_MBOXQ_t          *mb;
+ 	int i;
++	int offline;
+ 
+ 	if (phba->link_state == LPFC_LINK_DOWN)
+ 		return 0;
+ 
+ 	/* Block all SCSI stack I/Os */
+ 	lpfc_scsi_dev_block(phba);
++	offline = pci_channel_offline(phba->pcidev);
+ 
+ 	phba->defer_flogi_acc_flag = false;
+ 
+@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
+ 	lpfc_destroy_vport_work_array(phba, vports);
+ 
+ 	/* Clean up any SLI3 firmware default rpi's */
+-	if (phba->sli_rev > LPFC_SLI_REV3)
++	if (phba->sli_rev > LPFC_SLI_REV3 || offline)
+ 		goto skip_unreg_did;
+ 
+ 	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	spin_lock_irqsave(&ndlp->lock, iflags);
+ 	if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+ 		spin_unlock_irqrestore(&ndlp->lock, iflags);
++		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
++				 "0999 %s Not regd: ndlp x%px rport x%px DID "
++				 "x%x FLG x%x XPT x%x\n",
++				  __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
++				  ndlp->nlp_flag, ndlp->fc4_xpt_flags);
+ 		return;
+ 	}
+ 
+@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	    ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ 		vport->phba->nport_event_cnt++;
+ 		lpfc_unregister_remote_port(ndlp);
++	} else if (!ndlp->rport) {
++		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
++				 "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
++				 " XPT x%x refcnt %d\n",
++				 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
++				 ndlp->fc4_xpt_flags,
++				 kref_read(&ndlp->kref));
+ 	}
+ 
+ 	if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+@@ -5365,6 +5416,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 				ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ 				mempool_free(mbox, phba->mbox_mem_pool);
+ 				acc_plogi = 1;
++				lpfc_nlp_put(ndlp);
+ 			}
+ 		} else {
+ 			lpfc_printf_vlog(vport, KERN_INFO,
+@@ -6089,12 +6141,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
+ 	}
+ }
+ 
++/*
++ * lpfc_notify_xport_npr - notifies xport of node disappearance
++ * @vport: Pointer to Virtual Port object.
++ *
++ * Transitions all ndlps to NPR state.  When lpfc_nlp_set_state
++ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
++ * and the transport is notified that the node is gone.
++ * Return Code:
++ *	none
++ */
++static void
++lpfc_notify_xport_npr(struct lpfc_vport *vport)
++{
++	struct lpfc_nodelist *ndlp, *next_ndlp;
++
++	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
++				 nlp_listp) {
++		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
++	}
++}
+ void
+ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+ {
+ 	lpfc_els_flush_rscn(vport);
+ 	lpfc_els_flush_cmd(vport);
+ 	lpfc_disc_flush_list(vport);
++	if (pci_channel_offline(vport->phba->pcidev))
++		lpfc_notify_xport_npr(vport);
+ }
+ 
+ /*****************************************************************************/
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 558f7d2559c4d..9569a7390f9d5 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+ static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+ static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
+ static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
++static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
+ 
+ static struct scsi_transport_template *lpfc_transport_template = NULL;
+ static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
+@@ -1652,7 +1653,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+ {
+ 	spin_lock_irq(&phba->hbalock);
+ 	if (phba->link_state == LPFC_HBA_ERROR &&
+-	    phba->hba_flag & HBA_PCI_ERR) {
++		test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
+ 		spin_unlock_irq(&phba->hbalock);
+ 		return;
+ 	}
+@@ -1995,6 +1996,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+ 	if (pci_channel_offline(phba->pcidev)) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"3166 pci channel is offline\n");
++		lpfc_sli_flush_io_rings(phba);
+ 		return;
+ 	}
+ 
+@@ -2983,6 +2985,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
+ 					NLP_EVT_DEVICE_RM);
+ 	}
+ 
++	/* This is a special case flush to return all
++	 * IOs before entering this loop. There are
++	 * two points in the code where a flush is
++	 * avoided if the FC_UNLOADING flag is set:
++	 * one is in the multipool destroy
++	 * (this prevents a crash) and the other is
++	 * in the nvme abort handler (this also
++	 * prevents a crash). Both of these exceptions
++	 * are cases where the slot is still accessible.
++	 * The flush here is done only when the pci
++	 * slot is offline.
++	 */
++	if (vport->load_flag & FC_UNLOADING &&
++	    pci_channel_offline(phba->pcidev))
++		lpfc_sli_flush_io_rings(vport->phba);
++
+ 	/* At this point, ALL ndlp's should be gone
+ 	 * because of the previous NLP_EVT_DEVICE_RM.
+ 	 * Lets wait for this to happen, if needed.
+@@ -2995,7 +3013,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
+ 			list_for_each_entry_safe(ndlp, next_ndlp,
+ 						&vport->fc_nodes, nlp_listp) {
+ 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+-						 LOG_TRACE_EVENT,
++						 LOG_DISCOVERY,
+ 						 "0282 did:x%x ndlp:x%px "
+ 						 "refcnt:%d xflags x%x nflag x%x\n",
+ 						 ndlp->nlp_DID, (void *)ndlp,
+@@ -3692,7 +3710,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+ 	struct lpfc_vport **vports;
+ 	struct Scsi_Host *shost;
+ 	int i;
+-	int offline = 0;
++	int offline;
++	bool hba_pci_err;
+ 
+ 	if (vport->fc_flag & FC_OFFLINE_MODE)
+ 		return;
+@@ -3702,6 +3721,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+ 	lpfc_linkdown(phba);
+ 
+ 	offline =  pci_channel_offline(phba->pcidev);
++	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
+ 
+ 	/* Issue an unreg_login to all nodes on all vports */
+ 	vports = lpfc_create_vport_work_array(phba);
+@@ -3725,11 +3745,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+ 				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ 				spin_unlock_irq(&ndlp->lock);
+ 
+-				if (offline) {
++				if (offline || hba_pci_err) {
+ 					spin_lock_irq(&ndlp->lock);
+ 					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
+ 							    NLP_RPI_REGISTERED);
+ 					spin_unlock_irq(&ndlp->lock);
++					if (phba->sli_rev == LPFC_SLI_REV4)
++						lpfc_sli_rpi_release(vports[i],
++								     ndlp);
+ 				} else {
+ 					lpfc_unreg_rpi(vports[i], ndlp);
+ 				}
+@@ -13366,8 +13389,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ 	/* Abort all iocbs associated with the hba */
+ 	lpfc_sli_hba_iocb_abort(phba);
+ 
+-	/* Wait for completion of device XRI exchange busy */
+-	lpfc_sli4_xri_exchange_busy_wait(phba);
++	if (!pci_channel_offline(phba->pcidev))
++		/* Wait for completion of device XRI exchange busy */
++		lpfc_sli4_xri_exchange_busy_wait(phba);
+ 
+ 	/* per-phba callback de-registration for hotplug event */
+ 	if (phba->pport)
+@@ -13386,15 +13410,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ 	/* Disable FW logging to host memory */
+ 	lpfc_ras_stop_fwlog(phba);
+ 
+-	/* Unset the queues shared with the hardware then release all
+-	 * allocated resources.
+-	 */
+-	lpfc_sli4_queue_unset(phba);
+-	lpfc_sli4_queue_destroy(phba);
+-
+ 	/* Reset SLI4 HBA FCoE function */
+ 	lpfc_pci_function_reset(phba);
+ 
++	/* release all queue allocated resources. */
++	lpfc_sli4_queue_destroy(phba);
++
+ 	/* Free RAS DMA memory */
+ 	if (phba->ras_fwlog.ras_enabled)
+ 		lpfc_sli4_ras_dma_free(phba);
+@@ -14274,6 +14295,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+ 			"2711 PCI channel permanent disable for failure\n");
+ 	/* Block all SCSI devices' I/Os on the host */
+ 	lpfc_scsi_dev_block(phba);
++	lpfc_sli4_prep_dev_for_reset(phba);
+ 
+ 	/* stop all timers */
+ 	lpfc_stop_hba_timers(phba);
+@@ -15069,24 +15091,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
+ static void
+ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
+ {
+-	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-			"2826 PCI channel disable preparing for reset\n");
++	int offline =  pci_channel_offline(phba->pcidev);
++
++	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++			"2826 PCI channel disable preparing for reset offline"
++			" %d\n", offline);
+ 
+ 	/* Block any management I/Os to the device */
+ 	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
+ 
+-	/* Block all SCSI devices' I/Os on the host */
+-	lpfc_scsi_dev_block(phba);
+ 
++	/* HBA_PCI_ERR was set in io_error_detect */
++	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ 	/* Flush all driver's outstanding I/Os as we are to reset */
+ 	lpfc_sli_flush_io_rings(phba);
++	lpfc_offline(phba);
+ 
+ 	/* stop all timers */
+ 	lpfc_stop_hba_timers(phba);
+ 
++	lpfc_sli4_queue_destroy(phba);
+ 	/* Disable interrupt and pci device */
+ 	lpfc_sli4_disable_intr(phba);
+-	lpfc_sli4_queue_destroy(phba);
+ 	pci_disable_device(phba->pcidev);
+ }
+ 
+@@ -15135,6 +15161,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+ {
+ 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
++	bool hba_pci_err;
+ 
+ 	switch (state) {
+ 	case pci_channel_io_normal:
+@@ -15142,17 +15169,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+ 		lpfc_sli4_prep_dev_for_recover(phba);
+ 		return PCI_ERS_RESULT_CAN_RECOVER;
+ 	case pci_channel_io_frozen:
+-		phba->hba_flag |= HBA_PCI_ERR;
++		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ 		/* Fatal error, prepare for slot reset */
+-		lpfc_sli4_prep_dev_for_reset(phba);
++		if (!hba_pci_err)
++			lpfc_sli4_prep_dev_for_reset(phba);
++		else
++			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++					"2832 Already handling PCI error "
++					"state: x%x\n", state);
+ 		return PCI_ERS_RESULT_NEED_RESET;
+ 	case pci_channel_io_perm_failure:
+-		phba->hba_flag |= HBA_PCI_ERR;
++		set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ 		/* Permanent failure, prepare for device down */
+ 		lpfc_sli4_prep_dev_for_perm_failure(phba);
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 	default:
+-		phba->hba_flag |= HBA_PCI_ERR;
++		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
++		if (!hba_pci_err)
++			lpfc_sli4_prep_dev_for_reset(phba);
+ 		/* Unknown state, prepare and request slot reset */
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"2825 Unknown PCI error state: x%x\n", state);
+@@ -15186,17 +15220,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+ 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ 	struct lpfc_sli *psli = &phba->sli;
+ 	uint32_t intr_mode;
++	bool hba_pci_err;
+ 
+ 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+ 	if (pci_enable_device_mem(pdev)) {
+ 		printk(KERN_ERR "lpfc: Cannot re-enable "
+-			"PCI device after reset.\n");
++		       "PCI device after reset.\n");
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 	}
+ 
+ 	pci_restore_state(pdev);
+ 
+-	phba->hba_flag &= ~HBA_PCI_ERR;
++	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
++	if (!hba_pci_err)
++		dev_info(&pdev->dev,
++			 "hba_pci_err was not set, recovering slot reset.\n");
+ 	/*
+ 	 * As the new kernel behavior of pci_restore_state() API call clears
+ 	 * device saved_state flag, need to save the restored state again.
+@@ -15210,6 +15248,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+ 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ 	spin_unlock_irq(&phba->hbalock);
+ 
++	/* Init cpu_map array */
++	lpfc_cpu_map_array_init(phba);
+ 	/* Configure and enable interrupt */
+ 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ 	if (intr_mode == LPFC_INTR_ERROR) {
+@@ -15251,8 +15291,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
+ 	 */
+ 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+ 		/* Perform device reset */
+-		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+-		lpfc_offline(phba);
+ 		lpfc_sli_brdrestart(phba);
+ 		/* Bring the device back online */
+ 		lpfc_online(phba);
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 9601edd838e10..df73abb59407e 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
+ 
+ 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ 	vport = lport->vport;
++
++	if (!vport || vport->load_flag & FC_UNLOADING ||
++	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
++		return -ENODEV;
++
+ 	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
+ 	if (qhandle == NULL)
+ 		return -ENOMEM;
+@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+ 		return -EINVAL;
+ 
+ 	remoteport = lpfc_rport->remoteport;
+-	if (!vport->localport)
++	if (!vport->localport ||
++	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ 		return -EINVAL;
+ 
+ 	lport = vport->localport->private;
+@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 				 ndlp->nlp_DID, ntype, nstate);
+ 		return -ENODEV;
+ 	}
++	if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
++		return -ENODEV;
+ 
+ 	if (!vport->phba->sli4_hba.nvmels_wq)
+ 		return -ENOMEM;
+@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ 		return -EINVAL;
+ 
+ 	vport = lport->vport;
+-	if (vport->load_flag & FC_UNLOADING)
++	if (vport->load_flag & FC_UNLOADING ||
++	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ 		return -ENODEV;
+ 
+ 	atomic_inc(&lport->fc4NvmeLsRequests);
+@@ -1515,7 +1524,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
+ 
+ 	phba = vport->phba;
+ 
+-	if (unlikely(vport->load_flag & FC_UNLOADING)) {
++	if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
++	    phba->hba_flag & HBA_IOQ_FLUSH) {
+ 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ 				 "6124 Fail IO, Driver unload\n");
+ 		atomic_inc(&lport->xmt_fcp_err);
+@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ 			abts_nvme = 0;
+ 			for (i = 0; i < phba->cfg_hdw_queue; i++) {
+ 				qp = &phba->sli4_hba.hdwq[i];
+-				if (!vport || !vport->localport ||
+-				    !qp || !qp->io_wq)
++				if (!vport->localport || !qp || !qp->io_wq)
+ 					return;
+ 
+ 				pring = qp->io_wq->pring;
+@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ 				abts_scsi += qp->abts_scsi_io_bufs;
+ 				abts_nvme += qp->abts_nvme_io_bufs;
+ 			}
+-			if (!vport || !vport->localport ||
+-			    vport->phba->hba_flag & HBA_PCI_ERR)
++			if (!vport->localport ||
++			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
++			    vport->load_flag & FC_UNLOADING)
+ 				return;
+ 
+ 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 		 * return values is ignored.  The upcall is a courtesy to the
+ 		 * transport.
+ 		 */
+-		if (vport->load_flag & FC_UNLOADING ||
+-		    unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
++		if (vport->load_flag & FC_UNLOADING)
+ 			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
+ 
+ 		ret = nvme_fc_unregister_remoteport(remoteport);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 430abebf99f15..b64c5f157ce90 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -2833,6 +2833,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ }
+ 
++void
++lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
++{
++	__lpfc_sli_rpi_release(vport, ndlp);
++}
++
+ /**
+  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
+  * @phba: Pointer to HBA context object.
+@@ -4466,42 +4472,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+ void
+ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+ {
+-	LIST_HEAD(completions);
++	LIST_HEAD(tx_completions);
++	LIST_HEAD(txcmplq_completions);
+ 	struct lpfc_iocbq *iocb, *next_iocb;
++	int offline;
+ 
+ 	if (pring->ringno == LPFC_ELS_RING) {
+ 		lpfc_fabric_abort_hba(phba);
+ 	}
++	offline = pci_channel_offline(phba->pcidev);
+ 
+ 	/* Error everything on txq and txcmplq
+ 	 * First do the txq.
+ 	 */
+ 	if (phba->sli_rev >= LPFC_SLI_REV4) {
+ 		spin_lock_irq(&pring->ring_lock);
+-		list_splice_init(&pring->txq, &completions);
++		list_splice_init(&pring->txq, &tx_completions);
+ 		pring->txq_cnt = 0;
+-		spin_unlock_irq(&pring->ring_lock);
+ 
+-		spin_lock_irq(&phba->hbalock);
+-		/* Next issue ABTS for everything on the txcmplq */
+-		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+-			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+-		spin_unlock_irq(&phba->hbalock);
++		if (offline) {
++			list_splice_init(&pring->txcmplq,
++					 &txcmplq_completions);
++		} else {
++			/* Next issue ABTS for everything on the txcmplq */
++			list_for_each_entry_safe(iocb, next_iocb,
++						 &pring->txcmplq, list)
++				lpfc_sli_issue_abort_iotag(phba, pring,
++							   iocb, NULL);
++		}
++		spin_unlock_irq(&pring->ring_lock);
+ 	} else {
+ 		spin_lock_irq(&phba->hbalock);
+-		list_splice_init(&pring->txq, &completions);
++		list_splice_init(&pring->txq, &tx_completions);
+ 		pring->txq_cnt = 0;
+ 
+-		/* Next issue ABTS for everything on the txcmplq */
+-		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+-			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
++		if (offline) {
++			list_splice_init(&pring->txcmplq, &txcmplq_completions);
++		} else {
++			/* Next issue ABTS for everything on the txcmplq */
++			list_for_each_entry_safe(iocb, next_iocb,
++						 &pring->txcmplq, list)
++				lpfc_sli_issue_abort_iotag(phba, pring,
++							   iocb, NULL);
++		}
+ 		spin_unlock_irq(&phba->hbalock);
+ 	}
+-	/* Make sure HBA is alive */
+-	lpfc_issue_hb_tmo(phba);
+ 
++	if (offline) {
++		/* Cancel all the IOCBs from the completions list */
++		lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
++				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
++	} else {
++		/* Make sure HBA is alive */
++		lpfc_issue_hb_tmo(phba);
++	}
+ 	/* Cancel all the IOCBs from the completions list */
+-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
++	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
+ 			      IOERR_SLI_ABORTED);
+ }
+ 
+@@ -4554,11 +4580,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
+ 	struct lpfc_iocbq *piocb, *next_iocb;
+ 
+ 	spin_lock_irq(&phba->hbalock);
+-	if (phba->hba_flag & HBA_IOQ_FLUSH ||
+-	    !phba->sli4_hba.hdwq) {
+-		spin_unlock_irq(&phba->hbalock);
+-		return;
+-	}
+ 	/* Indicate the I/O queues are flushed */
+ 	phba->hba_flag |= HBA_IOQ_FLUSH;
+ 	spin_unlock_irq(&phba->hbalock);
+@@ -11235,6 +11256,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+ 	unsigned long iflags;
+ 	int rc;
+ 
++	/* If the PCI channel is in offline state, do not post iocbs. */
++	if (unlikely(pci_channel_offline(phba->pcidev)))
++		return IOCB_ERROR;
++
+ 	if (phba->sli_rev == LPFC_SLI_REV4) {
+ 		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 2c9d1b7964756..ae2aef9ba8cfe 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2558,6 +2558,9 @@ struct megasas_instance_template {
+ #define MEGASAS_IS_LOGICAL(sdev)					\
+ 	((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+ 
++#define MEGASAS_IS_LUN_VALID(sdev)					\
++	(((sdev)->lun == 0) ? 1 : 0)
++
+ #define MEGASAS_DEV_INDEX(scp)						\
+ 	(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +	\
+ 	scp->device->id)
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 82e1e24257bcd..ca563498dcdb8 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
+ 			goto scan_target;
+ 		}
+ 		return -ENXIO;
++	} else if (!MEGASAS_IS_LUN_VALID(sdev)) {
++		sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
++		return -ENXIO;
+ 	}
+ 
+ scan_target:
+@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
+ 	instance = megasas_lookup_instance(sdev->host->host_no);
+ 
+ 	if (MEGASAS_IS_LOGICAL(sdev)) {
++		if (!MEGASAS_IS_LUN_VALID(sdev)) {
++			sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
++			return;
++		}
+ 		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ 		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
+ 		if (megasas_dbg_lvl & LD_PD_DEBUG)
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
+index 0563078227de6..a8dd14c91efdb 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
+@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ 		retry_count++;
+ 		if (ioc->config_cmds.smid == smid)
+ 			mpt3sas_base_free_smid(ioc, smid);
+-		if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+-		    MPT3_CMD_RESET) || ioc->pci_error_recovery)
++		if (ioc->config_cmds.status & MPT3_CMD_RESET)
+ 			goto retry_config;
+-		issue_host_reset = 1;
++		if (ioc->shost_recovery || ioc->pci_error_recovery) {
++			issue_host_reset = 0;
++			r = -EFAULT;
++		} else
++			issue_host_reset = 1;
+ 		goto free_mem;
+ 	}
+ 
+diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
+index 44df7c03aab8d..605a8eb7344a7 100644
+--- a/drivers/scsi/mvsas/mv_init.c
++++ b/drivers/scsi/mvsas/mv_init.c
+@@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = {
+ 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
+ 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+ 	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
++	{ PCI_VDEVICE(TTI, 0x2640), chip_6440 },
+ 	{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
+ 	{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
+ 	{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 55163469030d3..5853b3c0d76db 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+ 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity	= 0x01;
+ 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt		= 0x01;
+ 
++	/* Enable higher IQs and OQs, 32 to 63, bit 16 */
++	if (pm8001_ha->max_q_num > 32)
++		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
++							1 << 16;
+ 	/* Disable end to end CRC checking */
+ 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+ 
+@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+ 	if (0x0000 != gst_len_mpistate)
+ 		return -EBUSY;
+ 
++	/*
++	 * As per the controller datasheet, a minimum 500 ms delay is
++	 * required after successful MPI initialization before issuing
++	 * commands.
++	 */
++	msleep(500);
++
+ 	return 0;
+ }
+ 
+@@ -1734,10 +1745,11 @@ static void
+ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+ {
+ #ifdef PM8001_USE_MSIX
+-	u32 mask;
+-	mask = (u32)(1 << vec);
+-
+-	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
++	if (vec < 32)
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
++	else
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
++			    1U << (vec - 32));
+ 	return;
+ #endif
+ 	pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+@@ -1753,12 +1765,15 @@ static void
+ pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+ {
+ #ifdef PM8001_USE_MSIX
+-	u32 mask;
+-	if (vec == 0xFF)
+-		mask = 0xFFFFFFFF;
++	if (vec == 0xFF) {
++		/* disable all vectors 0-31, 32-63 */
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
++	} else if (vec < 32)
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
+ 	else
+-		mask = (u32)(1 << vec);
+-	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
++			    1U << (vec - 32));
+ 	return;
+ #endif
+ 	pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 554b6f7842236..c7b1b2e8bb02f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2221,10 +2221,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+ 
+ 	switch (flag) {
+ 	case STOP_CONN_RECOVER:
+-		conn->state = ISCSI_CONN_FAILED;
++		WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+ 		break;
+ 	case STOP_CONN_TERM:
+-		conn->state = ISCSI_CONN_DOWN;
++		WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
+ 		break;
+ 	default:
+ 		iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
+@@ -2236,6 +2236,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+ 	ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
+ }
+ 
++static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
++{
++	struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
++	struct iscsi_endpoint *ep;
++
++	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
++	WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
++
++	if (!conn->ep || !session->transport->ep_disconnect)
++		return;
++
++	ep = conn->ep;
++	conn->ep = NULL;
++
++	session->transport->unbind_conn(conn, is_active);
++	session->transport->ep_disconnect(ep);
++	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
++}
++
++static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
++					 struct iscsi_endpoint *ep,
++					 bool is_active)
++{
++	/* Check if this was a conn error and the kernel took ownership */
++	spin_lock_irq(&conn->lock);
++	if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++		spin_unlock_irq(&conn->lock);
++		iscsi_ep_disconnect(conn, is_active);
++	} else {
++		spin_unlock_irq(&conn->lock);
++		ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
++		mutex_unlock(&conn->ep_mutex);
++
++		flush_work(&conn->cleanup_work);
++		/*
++		 * Userspace is now done with the EP so we can release the ref
++		 * iscsi_cleanup_conn_work_fn took.
++		 */
++		iscsi_put_endpoint(ep);
++		mutex_lock(&conn->ep_mutex);
++	}
++}
++
+ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ 			      struct iscsi_uevent *ev)
+ {
+@@ -2256,12 +2299,25 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ 		cancel_work_sync(&conn->cleanup_work);
+ 		iscsi_stop_conn(conn, flag);
+ 	} else {
++		/*
++		 * For offload, when iscsid is restarted it won't know about
++		 * existing endpoints so it can't do an ep_disconnect. We clean
++		 * it up here for userspace.
++		 */
++		mutex_lock(&conn->ep_mutex);
++		if (conn->ep)
++			iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
++		mutex_unlock(&conn->ep_mutex);
++
+ 		/*
+ 		 * Figure out if it was the kernel or userspace initiating this.
+ 		 */
++		spin_lock_irq(&conn->lock);
+ 		if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++			spin_unlock_irq(&conn->lock);
+ 			iscsi_stop_conn(conn, flag);
+ 		} else {
++			spin_unlock_irq(&conn->lock);
+ 			ISCSI_DBG_TRANS_CONN(conn,
+ 					     "flush kernel conn cleanup.\n");
+ 			flush_work(&conn->cleanup_work);
+@@ -2270,31 +2326,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ 		 * Only clear for recovery to avoid extra cleanup runs during
+ 		 * termination.
+ 		 */
++		spin_lock_irq(&conn->lock);
+ 		clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
++		spin_unlock_irq(&conn->lock);
+ 	}
+ 	ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
+ 	return 0;
+ }
+ 
+-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+-{
+-	struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+-	struct iscsi_endpoint *ep;
+-
+-	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+-	conn->state = ISCSI_CONN_FAILED;
+-
+-	if (!conn->ep || !session->transport->ep_disconnect)
+-		return;
+-
+-	ep = conn->ep;
+-	conn->ep = NULL;
+-
+-	session->transport->unbind_conn(conn, is_active);
+-	session->transport->ep_disconnect(ep);
+-	ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+-}
+-
+ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+ {
+ 	struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
+@@ -2303,18 +2342,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+ 
+ 	mutex_lock(&conn->ep_mutex);
+ 	/*
+-	 * If we are not at least bound there is nothing for us to do. Userspace
+-	 * will do a ep_disconnect call if offload is used, but will not be
+-	 * doing a stop since there is nothing to clean up, so we have to clear
+-	 * the cleanup bit here.
++	 * Get a ref to the ep, so we don't release its ID until after
++	 * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
+ 	 */
+-	if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
+-		ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
+-		clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+-		mutex_unlock(&conn->ep_mutex);
+-		return;
+-	}
+-
++	if (conn->ep)
++		get_device(&conn->ep->dev);
+ 	iscsi_ep_disconnect(conn, false);
+ 
+ 	if (system_state != SYSTEM_RUNNING) {
+@@ -2370,11 +2402,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ 		conn->dd_data = &conn[1];
+ 
+ 	mutex_init(&conn->ep_mutex);
++	spin_lock_init(&conn->lock);
+ 	INIT_LIST_HEAD(&conn->conn_list);
+ 	INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
+ 	conn->transport = transport;
+ 	conn->cid = cid;
+-	conn->state = ISCSI_CONN_DOWN;
++	WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
+ 
+ 	/* this is released in the dev's release function */
+ 	if (!get_device(&session->dev))
+@@ -2561,9 +2594,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
+ 	struct iscsi_uevent *ev;
+ 	struct iscsi_internal *priv;
+ 	int len = nlmsg_total_size(sizeof(*ev));
++	unsigned long flags;
++	int state;
+ 
+-	if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
+-		queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
++	spin_lock_irqsave(&conn->lock, flags);
++	/*
++	 * Userspace will only do a stop call if we are at least bound, and we
++	 * only need to do the in-kernel cleanup if in the UP state so cmds can
++	 * be released to upper layers. If in other states, just wait for
++	 * userspace to avoid races that can leave the cleanup_work queued.
++	 */
++	state = READ_ONCE(conn->state);
++	switch (state) {
++	case ISCSI_CONN_BOUND:
++	case ISCSI_CONN_UP:
++		if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
++				      &conn->flags)) {
++			queue_work(iscsi_conn_cleanup_workq,
++				   &conn->cleanup_work);
++		}
++		break;
++	default:
++		ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
++				     state);
++		break;
++	}
++	spin_unlock_irqrestore(&conn->lock, flags);
+ 
+ 	priv = iscsi_if_transport_lookup(conn->transport);
+ 	if (!priv)
+@@ -2913,7 +2969,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 	char *data = (char*)ev + sizeof(*ev);
+ 	struct iscsi_cls_conn *conn;
+ 	struct iscsi_cls_session *session;
+-	int err = 0, value = 0;
++	int err = 0, value = 0, state;
+ 
+ 	if (ev->u.set_param.len > PAGE_SIZE)
+ 		return -EINVAL;
+@@ -2930,8 +2986,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 			session->recovery_tmo = value;
+ 		break;
+ 	default:
+-		if ((conn->state == ISCSI_CONN_BOUND) ||
+-			(conn->state == ISCSI_CONN_UP)) {
++		state = READ_ONCE(conn->state);
++		if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
+ 			err = transport->set_param(conn, ev->u.set_param.param,
+ 					data, ev->u.set_param.len);
+ 		} else {
+@@ -3003,16 +3059,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ 	}
+ 
+ 	mutex_lock(&conn->ep_mutex);
+-	/* Check if this was a conn error and the kernel took ownership */
+-	if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+-		ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+-		mutex_unlock(&conn->ep_mutex);
+-
+-		flush_work(&conn->cleanup_work);
+-		goto put_ep;
+-	}
+-
+-	iscsi_ep_disconnect(conn, false);
++	iscsi_if_disconnect_bound_ep(conn, ep, false);
+ 	mutex_unlock(&conn->ep_mutex);
+ put_ep:
+ 	iscsi_put_endpoint(ep);
+@@ -3715,24 +3762,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&conn->ep_mutex);
++	spin_lock_irq(&conn->lock);
+ 	if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
++		spin_unlock_irq(&conn->lock);
+ 		mutex_unlock(&conn->ep_mutex);
+ 		ev->r.retcode = -ENOTCONN;
+ 		return 0;
+ 	}
++	spin_unlock_irq(&conn->lock);
+ 
+ 	switch (nlh->nlmsg_type) {
+ 	case ISCSI_UEVENT_BIND_CONN:
+-		if (conn->ep) {
+-			/*
+-			 * For offload boot support where iscsid is restarted
+-			 * during the pivot root stage, the ep will be intact
+-			 * here when the new iscsid instance starts up and
+-			 * reconnects.
+-			 */
+-			iscsi_ep_disconnect(conn, true);
+-		}
+-
+ 		session = iscsi_session_lookup(ev->u.b_conn.sid);
+ 		if (!session) {
+ 			err = -EINVAL;
+@@ -3743,7 +3783,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ 						ev->u.b_conn.transport_eph,
+ 						ev->u.b_conn.is_leading);
+ 		if (!ev->r.retcode)
+-			conn->state = ISCSI_CONN_BOUND;
++			WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
+ 
+ 		if (ev->r.retcode || !transport->ep_connect)
+ 			break;
+@@ -3762,7 +3802,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ 	case ISCSI_UEVENT_START_CONN:
+ 		ev->r.retcode = transport->start_conn(conn);
+ 		if (!ev->r.retcode)
+-			conn->state = ISCSI_CONN_UP;
++			WRITE_ONCE(conn->state, ISCSI_CONN_UP);
++
+ 		break;
+ 	case ISCSI_UEVENT_SEND_PDU:
+ 		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+@@ -4070,10 +4111,11 @@ static ssize_t show_conn_state(struct device *dev,
+ {
+ 	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
+ 	const char *state = "unknown";
++	int conn_state = READ_ONCE(conn->state);
+ 
+-	if (conn->state >= 0 &&
+-	    conn->state < ARRAY_SIZE(connection_state_names))
+-		state = connection_state_names[conn->state];
++	if (conn_state >= 0 &&
++	    conn_state < ARRAY_SIZE(connection_state_names))
++		state = connection_state_names[conn_state];
+ 
+ 	return sysfs_emit(buf, "%s\n", state);
+ }
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index b808c94641fa6..75f3560411386 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -19,6 +19,7 @@
+ #include <linux/iopoll.h>
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
++#include <linux/log2.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/of.h>
+@@ -102,12 +103,6 @@ struct cqspi_driver_platdata {
+ #define CQSPI_TIMEOUT_MS			500
+ #define CQSPI_READ_TIMEOUT_MS			10
+ 
+-/* Instruction type */
+-#define CQSPI_INST_TYPE_SINGLE			0
+-#define CQSPI_INST_TYPE_DUAL			1
+-#define CQSPI_INST_TYPE_QUAD			2
+-#define CQSPI_INST_TYPE_OCTAL			3
+-
+ #define CQSPI_DUMMY_CLKS_PER_BYTE		8
+ #define CQSPI_DUMMY_BYTES_MAX			4
+ #define CQSPI_DUMMY_CLKS_MAX			31
+@@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
+ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ 			      const struct spi_mem_op *op)
+ {
+-	f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+-	f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+-	f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+-
+ 	/*
+ 	 * For an op to be DTR, cmd phase along with every other non-empty
+ 	 * phase should have dtr field set to 1. If an op phase has zero
+@@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ 		       (!op->addr.nbytes || op->addr.dtr) &&
+ 		       (!op->data.nbytes || op->data.dtr);
+ 
+-	switch (op->data.buswidth) {
+-	case 0:
+-		break;
+-	case 1:
+-		f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+-		break;
+-	case 2:
+-		f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+-		break;
+-	case 4:
+-		f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+-		break;
+-	case 8:
+-		f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
++	f_pdata->inst_width = 0;
++	if (op->cmd.buswidth)
++		f_pdata->inst_width = ilog2(op->cmd.buswidth);
++
++	f_pdata->addr_width = 0;
++	if (op->addr.buswidth)
++		f_pdata->addr_width = ilog2(op->addr.buswidth);
++
++	f_pdata->data_width = 0;
++	if (op->data.buswidth)
++		f_pdata->data_width = ilog2(op->data.buswidth);
+ 
+ 	/* Right now we only support 8-8-8 DTR mode. */
+ 	if (f_pdata->dtr) {
+ 		switch (op->cmd.buswidth) {
+ 		case 0:
+-			break;
+ 		case 8:
+-			f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+@@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ 
+ 		switch (op->addr.buswidth) {
+ 		case 0:
+-			break;
+ 		case 8:
+-			f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+@@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
+ 
+ 		switch (op->data.buswidth) {
+ 		case 0:
+-			break;
+ 		case 8:
+-			f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 7b2a89a67cdba..06a5c40865513 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -1820,6 +1820,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
+ 	mutex_lock(&udev->cmdr_lock);
+ 	page = xa_load(&udev->data_pages, dpi);
+ 	if (likely(page)) {
++		get_page(page);
+ 		mutex_unlock(&udev->cmdr_lock);
+ 		return page;
+ 	}
+@@ -1876,6 +1877,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
+ 		/* For the vmalloc()ed cmd area pages */
+ 		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
+ 		page = vmalloc_to_page(addr);
++		get_page(page);
+ 	} else {
+ 		uint32_t dpi;
+ 
+@@ -1886,7 +1888,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
+ 			return VM_FAULT_SIGBUS;
+ 	}
+ 
+-	get_page(page);
+ 	vmf->page = page;
+ 	return 0;
+ }
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 2e6409cc11ada..ef54ef11af552 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -36,6 +36,10 @@ static bool nointxmask;
+ static bool disable_vga;
+ static bool disable_idle_d3;
+ 
++/* List of PF's that vfio_pci_core_sriov_configure() has been called on */
++static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
++static LIST_HEAD(vfio_pci_sriov_pfs);
++
+ static inline bool vfio_vga_disabled(void)
+ {
+ #ifdef CONFIG_VFIO_PCI_VGA
+@@ -434,47 +438,17 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
+ 
+-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
+-{
+-	struct pci_dev *physfn = pci_physfn(vdev->pdev);
+-	struct vfio_device *pf_dev;
+-
+-	if (!vdev->pdev->is_virtfn)
+-		return NULL;
+-
+-	pf_dev = vfio_device_get_from_dev(&physfn->dev);
+-	if (!pf_dev)
+-		return NULL;
+-
+-	if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
+-		vfio_device_put(pf_dev);
+-		return NULL;
+-	}
+-
+-	return container_of(pf_dev, struct vfio_pci_core_device, vdev);
+-}
+-
+-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
+-{
+-	struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+-
+-	if (!pf_vdev)
+-		return;
+-
+-	mutex_lock(&pf_vdev->vf_token->lock);
+-	pf_vdev->vf_token->users += val;
+-	WARN_ON(pf_vdev->vf_token->users < 0);
+-	mutex_unlock(&pf_vdev->vf_token->lock);
+-
+-	vfio_device_put(&pf_vdev->vdev);
+-}
+-
+ void vfio_pci_core_close_device(struct vfio_device *core_vdev)
+ {
+ 	struct vfio_pci_core_device *vdev =
+ 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
+ 
+-	vfio_pci_vf_token_user_add(vdev, -1);
++	if (vdev->sriov_pf_core_dev) {
++		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
++		WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
++		vdev->sriov_pf_core_dev->vf_token->users--;
++		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
++	}
+ 	vfio_spapr_pci_eeh_release(vdev->pdev);
+ 	vfio_pci_core_disable(vdev);
+ 
+@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
+ {
+ 	vfio_pci_probe_mmaps(vdev);
+ 	vfio_spapr_pci_eeh_open(vdev->pdev);
+-	vfio_pci_vf_token_user_add(vdev, 1);
++
++	if (vdev->sriov_pf_core_dev) {
++		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
++		vdev->sriov_pf_core_dev->vf_token->users++;
++		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
++	}
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
+ 
+@@ -1603,11 +1582,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
+ 	 *
+ 	 * If the VF token is provided but unused, an error is generated.
+ 	 */
+-	if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
+-		return 0; /* No VF token provided or required */
+-
+ 	if (vdev->pdev->is_virtfn) {
+-		struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
++		struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
+ 		bool match;
+ 
+ 		if (!pf_vdev) {
+@@ -1620,7 +1596,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
+ 		}
+ 
+ 		if (!vf_token) {
+-			vfio_device_put(&pf_vdev->vdev);
+ 			pci_info_ratelimited(vdev->pdev,
+ 				"VF token required to access device\n");
+ 			return -EACCES;
+@@ -1630,8 +1605,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
+ 		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
+ 		mutex_unlock(&pf_vdev->vf_token->lock);
+ 
+-		vfio_device_put(&pf_vdev->vdev);
+-
+ 		if (!match) {
+ 			pci_info_ratelimited(vdev->pdev,
+ 				"Incorrect VF token provided for device\n");
+@@ -1752,8 +1725,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
++	struct vfio_pci_core_device *cur;
++	struct pci_dev *physfn;
+ 	int ret;
+ 
++	if (pdev->is_virtfn) {
++		/*
++		 * If this VF was created by our vfio_pci_core_sriov_configure()
++		 * then we can find the PF vfio_pci_core_device now, and due to
++		 * the locking in pci_disable_sriov() it cannot change until
++		 * this VF device driver is removed.
++		 */
++		physfn = pci_physfn(vdev->pdev);
++		mutex_lock(&vfio_pci_sriov_pfs_mutex);
++		list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
++			if (cur->pdev == physfn) {
++				vdev->sriov_pf_core_dev = cur;
++				break;
++			}
++		}
++		mutex_unlock(&vfio_pci_sriov_pfs_mutex);
++		return 0;
++	}
++
++	/* Not a SRIOV PF */
+ 	if (!pdev->is_physfn)
+ 		return 0;
+ 
+@@ -1825,6 +1820,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
+ 	INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ 	mutex_init(&vdev->vma_lock);
+ 	INIT_LIST_HEAD(&vdev->vma_list);
++	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
+ 	init_rwsem(&vdev->memory_lock);
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
+@@ -1916,7 +1912,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
+ {
+ 	struct pci_dev *pdev = vdev->pdev;
+ 
+-	pci_disable_sriov(pdev);
++	vfio_pci_core_sriov_configure(pdev, 0);
+ 
+ 	vfio_unregister_group_dev(&vdev->vdev);
+ 
+@@ -1954,21 +1950,49 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+ 
+ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
+ {
++	struct vfio_pci_core_device *vdev;
+ 	struct vfio_device *device;
+ 	int ret = 0;
+ 
++	device_lock_assert(&pdev->dev);
++
+ 	device = vfio_device_get_from_dev(&pdev->dev);
+ 	if (!device)
+ 		return -ENODEV;
+ 
+-	if (nr_virtfn == 0)
+-		pci_disable_sriov(pdev);
+-	else
++	vdev = container_of(device, struct vfio_pci_core_device, vdev);
++
++	if (nr_virtfn) {
++		mutex_lock(&vfio_pci_sriov_pfs_mutex);
++		/*
++		 * The thread that adds the vdev to the list is the only thread
++		 * that gets to call pci_enable_sriov() and we will only allow
++		 * it to be called once without going through
++		 * pci_disable_sriov()
++		 */
++		if (!list_empty(&vdev->sriov_pfs_item)) {
++			ret = -EINVAL;
++			goto out_unlock;
++		}
++		list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
++		mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ 		ret = pci_enable_sriov(pdev, nr_virtfn);
++		if (ret)
++			goto out_del;
++		ret = nr_virtfn;
++		goto out_put;
++	}
+ 
+-	vfio_device_put(device);
++	pci_disable_sriov(pdev);
+ 
+-	return ret < 0 ? ret : nr_virtfn;
++out_del:
++	mutex_lock(&vfio_pci_sriov_pfs_mutex);
++	list_del_init(&vdev->sriov_pfs_item);
++out_unlock:
++	mutex_unlock(&vfio_pci_sriov_pfs_mutex);
++out_put:
++	vfio_device_put(device);
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
+ 
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 11273b70271d1..15b3fa6390818 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1116,11 +1116,11 @@ out_free_interp:
+ 			 * independently randomized mmap region (0 load_bias
+ 			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
+ 			 */
+-			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+-			if (interpreter || alignment > ELF_MIN_ALIGN) {
++			if (interpreter) {
+ 				load_bias = ELF_ET_DYN_BASE;
+ 				if (current->flags & PF_RANDOMIZE)
+ 					load_bias += arch_mmap_rnd();
++				alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+ 				if (alignment)
+ 					load_bias &= ~(alignment - 1);
+ 				elf_flags |= MAP_FIXED_NOREPLACE;
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a0aa6c7e23351..18e5ad5decdeb 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2479,12 +2479,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	/*
+-	 * New block group is likely to be used soon. Try to activate it now.
+-	 * Failure is OK for now.
+-	 */
+-	btrfs_zone_activate(cache);
+-
+ 	ret = exclude_super_stripes(cache);
+ 	if (ret) {
+ 		/* We may have excluded something, so call this just in case */
+@@ -2922,7 +2916,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
+ 	struct btrfs_path *path = NULL;
+ 	LIST_HEAD(dirty);
+ 	struct list_head *io = &cur_trans->io_bgs;
+-	int num_started = 0;
+ 	int loops = 0;
+ 
+ 	spin_lock(&cur_trans->dirty_bgs_lock);
+@@ -2988,7 +2981,6 @@ again:
+ 			cache->io_ctl.inode = NULL;
+ 			ret = btrfs_write_out_cache(trans, cache, path);
+ 			if (ret == 0 && cache->io_ctl.inode) {
+-				num_started++;
+ 				should_put = 0;
+ 
+ 				/*
+@@ -3089,7 +3081,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
+ 	int should_put;
+ 	struct btrfs_path *path;
+ 	struct list_head *io = &cur_trans->io_bgs;
+-	int num_started = 0;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -3147,7 +3138,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
+ 			cache->io_ctl.inode = NULL;
+ 			ret = btrfs_write_out_cache(trans, cache, path);
+ 			if (ret == 0 && cache->io_ctl.inode) {
+-				num_started++;
+ 				should_put = 0;
+ 				list_add_tail(&cache->io_list, io);
+ 			} else {
+@@ -3431,7 +3421,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
+ 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
+ }
+ 
+-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
++static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+ {
+ 	struct btrfs_block_group *bg;
+ 	int ret;
+@@ -3518,7 +3508,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+ out:
+ 	btrfs_trans_release_chunk_metadata(trans);
+ 
+-	return ret;
++	if (ret)
++		return ERR_PTR(ret);
++
++	btrfs_get_block_group(bg);
++	return bg;
+ }
+ 
+ /*
+@@ -3633,10 +3627,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_space_info *space_info;
++	struct btrfs_block_group *ret_bg;
+ 	bool wait_for_alloc = false;
+ 	bool should_alloc = false;
++	bool from_extent_allocation = false;
+ 	int ret = 0;
+ 
++	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
++		from_extent_allocation = true;
++		force = CHUNK_ALLOC_FORCE;
++	}
++
+ 	/* Don't re-enter if we're already allocating a chunk */
+ 	if (trans->allocating_chunk)
+ 		return -ENOSPC;
+@@ -3726,9 +3727,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ 			force_metadata_allocation(fs_info);
+ 	}
+ 
+-	ret = do_chunk_alloc(trans, flags);
++	ret_bg = do_chunk_alloc(trans, flags);
+ 	trans->allocating_chunk = false;
+ 
++	if (IS_ERR(ret_bg)) {
++		ret = PTR_ERR(ret_bg);
++	} else if (from_extent_allocation) {
++		/*
++		 * New block group is likely to be used soon. Try to activate
++		 * it now. Failure is OK for now.
++		 */
++		btrfs_zone_activate(ret_bg);
++	}
++
++	if (!ret)
++		btrfs_put_block_group(ret_bg);
++
+ 	spin_lock(&space_info->lock);
+ 	if (ret < 0) {
+ 		if (ret == -ENOSPC)
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 5878b7ce3b78e..faa7f1d6782a0 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -35,11 +35,15 @@ enum btrfs_discard_state {
+  * the FS with empty chunks
+  *
+  * CHUNK_ALLOC_FORCE means it must try to allocate one
++ *
++ * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
++ * find_free_extent() that also activates the zone
+  */
+ enum btrfs_chunk_alloc_enum {
+ 	CHUNK_ALLOC_NO_FORCE,
+ 	CHUNK_ALLOC_LIMITED,
+ 	CHUNK_ALLOC_FORCE,
++	CHUNK_ALLOC_FORCE_FOR_EXTENT,
+ };
+ 
+ struct btrfs_caching_control {
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 6158b870a269d..93f704ba877e2 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -534,6 +534,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
+ 	cb->orig_bio = NULL;
+ 	cb->nr_pages = nr_pages;
+ 
++	if (blkcg_css)
++		kthread_associate_blkcg(blkcg_css);
++
+ 	while (cur_disk_bytenr < disk_start + compressed_len) {
+ 		u64 offset = cur_disk_bytenr - disk_start;
+ 		unsigned int index = offset >> PAGE_SHIFT;
+@@ -552,6 +555,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
+ 				bio = NULL;
+ 				goto finish_cb;
+ 			}
++			if (blkcg_css)
++				bio->bi_opf |= REQ_CGROUP_PUNT;
+ 		}
+ 		/*
+ 		 * We should never reach next_stripe_start as we will
+@@ -609,6 +614,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
+ 	return 0;
+ 
+ finish_cb:
++	if (blkcg_css)
++		kthread_associate_blkcg(NULL);
++
+ 	if (bio) {
+ 		bio->bi_status = ret;
+ 		bio_endio(bio);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 117afcda5affb..b43f80c3bffd9 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1826,9 +1826,10 @@ again:
+ 
+ 	ret = btrfs_insert_fs_root(fs_info, root);
+ 	if (ret) {
+-		btrfs_put_root(root);
+-		if (ret == -EEXIST)
++		if (ret == -EEXIST) {
++			btrfs_put_root(root);
+ 			goto again;
++		}
+ 		goto fail;
+ 	}
+ 	return root;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 96427b1ecac3e..e5b832d77df96 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4087,7 +4087,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
+ 			}
+ 
+ 			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
+-						CHUNK_ALLOC_FORCE);
++						CHUNK_ALLOC_FORCE_FOR_EXTENT);
+ 
+ 			/* Do not bail out on ENOSPC since we can do more. */
+ 			if (ret == -ENOSPC)
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 99028984340aa..e93526d86a922 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3562,7 +3562,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+ 	u64 cur_end;
+ 	struct extent_map *em;
+ 	int ret = 0;
+-	int nr = 0;
+ 	size_t pg_offset = 0;
+ 	size_t iosize;
+ 	size_t blocksize = inode->i_sb->s_blocksize;
+@@ -3720,9 +3719,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+ 					 end_bio_extent_readpage, 0,
+ 					 this_bio_flag,
+ 					 force_bio_submit);
+-		if (!ret) {
+-			nr++;
+-		} else {
++		if (ret) {
+ 			unlock_extent(tree, cur, cur + iosize - 1);
+ 			end_page_read(page, false, cur, iosize);
+ 			goto out;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index a0179cc62913b..28ddd9cf20692 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2918,8 +2918,9 @@ out:
+ 	return ret;
+ }
+ 
+-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
++static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
+ {
++	struct inode *inode = file_inode(file);
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct extent_state *cached_state = NULL;
+@@ -2951,6 +2952,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 		goto out_only_mutex;
+ 	}
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out_only_mutex;
++
+ 	lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
+ 	lockend = round_down(offset + len,
+ 			     btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
+@@ -3391,7 +3396,7 @@ static long btrfs_fallocate(struct file *file, int mode,
+ 		return -EOPNOTSUPP;
+ 
+ 	if (mode & FALLOC_FL_PUNCH_HOLE)
+-		return btrfs_punch_hole(inode, offset, len);
++		return btrfs_punch_hole(file, offset, len);
+ 
+ 	/*
+ 	 * Only trigger disk allocation, don't trigger qgroup reserve
+@@ -3413,6 +3418,10 @@ static long btrfs_fallocate(struct file *file, int mode,
+ 			goto out;
+ 	}
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out;
++
+ 	/*
+ 	 * TODO: Move these two operations after we have checked
+ 	 * accurate reserved space, or fallocate can still fail but
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ec4950cfe1ea9..9547088a93066 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1130,7 +1130,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 	int ret = 0;
+ 
+ 	if (btrfs_is_free_space_inode(inode)) {
+-		WARN_ON_ONCE(1);
+ 		ret = -EINVAL;
+ 		goto out_unlock;
+ 	}
+@@ -7423,6 +7422,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 	u64 block_start, orig_start, orig_block_len, ram_bytes;
+ 	bool can_nocow = false;
+ 	bool space_reserved = false;
++	u64 prev_len;
+ 	int ret = 0;
+ 
+ 	/*
+@@ -7450,6 +7450,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 			can_nocow = true;
+ 	}
+ 
++	prev_len = len;
+ 	if (can_nocow) {
+ 		struct extent_map *em2;
+ 
+@@ -7479,8 +7480,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 			goto out;
+ 		}
+ 	} else {
+-		const u64 prev_len = len;
+-
+ 		/* Our caller expects us to free the input extent map. */
+ 		free_extent_map(em);
+ 		*map = NULL;
+@@ -7511,7 +7510,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ 	 * We have created our ordered extent, so we can now release our reservation
+ 	 * for an outstanding extent.
+ 	 */
+-	btrfs_delalloc_release_extents(BTRFS_I(inode), len);
++	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
+ 
+ 	/*
+ 	 * Need to update the i_size under the extent lock so buffered
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f5688a25fca35..b0dfcc7a4225c 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4467,10 +4467,12 @@ static int balance_kthread(void *data)
+ 	struct btrfs_fs_info *fs_info = data;
+ 	int ret = 0;
+ 
++	sb_start_write(fs_info->sb);
+ 	mutex_lock(&fs_info->balance_mutex);
+ 	if (fs_info->balance_ctl)
+ 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
+ 	mutex_unlock(&fs_info->balance_mutex);
++	sb_end_write(fs_info->sb);
+ 
+ 	return ret;
+ }
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index f256c8aff7bb5..ca9f3e4ec4b3f 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ 	trace_cachefiles_mark_inactive(object, inode);
+ }
+ 
++static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
++					      struct dentry *dentry)
++{
++	struct inode *inode = d_backing_inode(dentry);
++
++	inode_lock(inode);
++	__cachefiles_unmark_inode_in_use(object, dentry);
++	inode_unlock(inode);
++}
++
+ /*
+  * Unmark a backing inode and tell cachefilesd that there's something that can
+  * be culled.
+@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ 	struct inode *inode = file_inode(file);
+ 
+ 	if (inode) {
+-		inode_lock(inode);
+-		__cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
+-		inode_unlock(inode);
++		cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
+ 
+ 		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ 			atomic_long_add(inode->i_blocks, &cache->b_released);
+@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
+ 				object, d_backing_inode(path.dentry), ret,
+ 				cachefiles_trace_trunc_error);
+ 			file = ERR_PTR(ret);
+-			goto out_dput;
++			goto out_unuse;
+ 		}
+ 	}
+ 
+@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
+ 		trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
+ 					   PTR_ERR(file),
+ 					   cachefiles_trace_open_error);
+-		goto out_dput;
++		goto out_unuse;
+ 	}
+ 	if (unlikely(!file->f_op->read_iter) ||
+ 	    unlikely(!file->f_op->write_iter)) {
+ 		fput(file);
+ 		pr_notice("Cache does not support read_iter and write_iter\n");
+ 		file = ERR_PTR(-EINVAL);
++		goto out_unuse;
+ 	}
+ 
++	goto out_dput;
++
++out_unuse:
++	cachefiles_do_unmark_inode_in_use(object, path.dentry);
+ out_dput:
+ 	dput(path.dentry);
+ out:
+@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
+ check_failed:
+ 	fscache_cookie_lookup_negative(object->cookie);
+ 	cachefiles_unmark_inode_in_use(object, file);
+-	if (ret == -ESTALE) {
+-		fput(file);
+-		dput(dentry);
++	fput(file);
++	dput(dentry);
++	if (ret == -ESTALE)
+ 		return cachefiles_create_file(object);
+-	}
++	return false;
++
+ error_fput:
+ 	fput(file);
+ error:
++	cachefiles_do_unmark_inode_in_use(object, dentry);
+ 	dput(dentry);
+ 	return false;
+ }
+diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
+index 35465109d9c4e..00b087c14995a 100644
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
+ 	if (!buf)
+ 		return false;
+ 	buf->reserved = cpu_to_be32(0);
+-	memcpy(buf->data, p, len);
++	memcpy(buf->data, p, volume->vcookie->coherency_len);
+ 
+ 	ret = cachefiles_inject_write_error();
+ 	if (ret == 0)
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 6e5246122ee2c..792fdcfdc6add 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -266,22 +266,24 @@ static void cifs_kill_sb(struct super_block *sb)
+ 	 * before we kill the sb.
+ 	 */
+ 	if (cifs_sb->root) {
++		for (node = rb_first(root); node; node = rb_next(node)) {
++			tlink = rb_entry(node, struct tcon_link, tl_rbnode);
++			tcon = tlink_tcon(tlink);
++			if (IS_ERR(tcon))
++				continue;
++			cfid = &tcon->crfid;
++			mutex_lock(&cfid->fid_mutex);
++			if (cfid->dentry) {
++				dput(cfid->dentry);
++				cfid->dentry = NULL;
++			}
++			mutex_unlock(&cfid->fid_mutex);
++		}
++
++		/* finally release root dentry */
+ 		dput(cifs_sb->root);
+ 		cifs_sb->root = NULL;
+ 	}
+-	node = rb_first(root);
+-	while (node != NULL) {
+-		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+-		tcon = tlink_tcon(tlink);
+-		cfid = &tcon->crfid;
+-		mutex_lock(&cfid->fid_mutex);
+-		if (cfid->dentry) {
+-			dput(cfid->dentry);
+-			cfid->dentry = NULL;
+-		}
+-		mutex_unlock(&cfid->fid_mutex);
+-		node = rb_next(node);
+-	}
+ 
+ 	kill_anon_super(sb);
+ 	cifs_umount(cifs_sb);
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 852e54ee82c28..bbdf3281559c8 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -85,6 +85,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
+ 	if (rc != 1)
+ 		return -EINVAL;
+ 
++	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
++		return -EINVAL;
++
+ 	rc = symlink_hash(link_len, link_str, md5_hash);
+ 	if (rc) {
+ 		cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index 04d374e65e546..dbecd27656c7c 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -155,7 +155,6 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
+ struct io_wq_work {
+ 	struct io_wq_work_node list;
+ 	unsigned flags;
+-	int fd;
+ };
+ 
+ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 1a9630dc5361b..619c67fd456dd 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -864,7 +864,11 @@ struct io_kiocb {
+ 
+ 	u64				user_data;
+ 	u32				result;
+-	u32				cflags;
++	/* fd initially, then cflags for completion */
++	union {
++		u32			cflags;
++		int			fd;
++	};
+ 
+ 	struct io_ring_ctx		*ctx;
+ 	struct task_struct		*task;
+@@ -4136,7 +4140,7 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
+ 		return -EAGAIN;
+ 
+ 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
+-		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
++		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+ 	else
+ 		in = io_file_get_normal(req, sp->splice_fd_in);
+ 	if (!in) {
+@@ -4178,7 +4182,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
+ 		return -EAGAIN;
+ 
+ 	if (sp->flags & SPLICE_F_FD_IN_FIXED)
+-		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
++		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+ 	else
+ 		in = io_file_get_normal(req, sp->splice_fd_in);
+ 	if (!in) {
+@@ -5506,11 +5510,11 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
+ 
+ 		if (!req->result) {
+ 			struct poll_table_struct pt = { ._key = poll->events };
++			unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
+ 
+-			if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
+-				req->result = -EBADF;
+-			else
+-				req->result = vfs_poll(req->file, &pt) & poll->events;
++			if (unlikely(!io_assign_file(req, flags)))
++				return -EBADF;
++			req->result = vfs_poll(req->file, &pt) & poll->events;
+ 		}
+ 
+ 		/* multishot, just fill a CQE and proceed */
+@@ -6462,6 +6466,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+ 	up.nr = 0;
+ 	up.tags = 0;
+ 	up.resv = 0;
++	up.resv2 = 0;
+ 
+ 	io_ring_submit_lock(ctx, needs_lock);
+ 	ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
+@@ -6708,9 +6713,9 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+ 		return true;
+ 
+ 	if (req->flags & REQ_F_FIXED_FILE)
+-		req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
++		req->file = io_file_get_fixed(req, req->fd, issue_flags);
+ 	else
+-		req->file = io_file_get_normal(req, req->work.fd);
++		req->file = io_file_get_normal(req, req->fd);
+ 	if (req->file)
+ 		return true;
+ 
+@@ -6724,13 +6729,14 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ 	const struct cred *creds = NULL;
+ 	int ret;
+ 
++	if (unlikely(!io_assign_file(req, issue_flags)))
++		return -EBADF;
++
+ 	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
+ 		creds = override_creds(req->creds);
+ 
+ 	if (!io_op_defs[req->opcode].audit_skip)
+ 		audit_uring_entry(req->opcode);
+-	if (unlikely(!io_assign_file(req, issue_flags)))
+-		return -EBADF;
+ 
+ 	switch (req->opcode) {
+ 	case IORING_OP_NOP:
+@@ -6888,16 +6894,18 @@ static void io_wq_submit_work(struct io_wq_work *work)
+ 	if (timeout)
+ 		io_queue_linked_timeout(timeout);
+ 
+-	if (!io_assign_file(req, issue_flags)) {
+-		err = -EBADF;
+-		work->flags |= IO_WQ_WORK_CANCEL;
+-	}
+ 
+ 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+ 	if (work->flags & IO_WQ_WORK_CANCEL) {
++fail:
+ 		io_req_task_queue_fail(req, err);
+ 		return;
+ 	}
++	if (!io_assign_file(req, issue_flags)) {
++		err = -EBADF;
++		work->flags |= IO_WQ_WORK_CANCEL;
++		goto fail;
++	}
+ 
+ 	if (req->flags & REQ_F_FORCE_ASYNC) {
+ 		bool opcode_poll = def->pollin || def->pollout;
+@@ -7243,7 +7251,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	if (io_op_defs[opcode].needs_file) {
+ 		struct io_submit_state *state = &ctx->submit_state;
+ 
+-		req->work.fd = READ_ONCE(sqe->fd);
++		req->fd = READ_ONCE(sqe->fd);
+ 
+ 		/*
+ 		 * Plug now if we have more than 2 IO left after this, and the
+@@ -8528,13 +8536,15 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
+ static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+ 				 struct io_rsrc_node *node, void *rsrc)
+ {
++	u64 *tag_slot = io_get_tag_slot(data, idx);
+ 	struct io_rsrc_put *prsrc;
+ 
+ 	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ 	if (!prsrc)
+ 		return -ENOMEM;
+ 
+-	prsrc->tag = *io_get_tag_slot(data, idx);
++	prsrc->tag = *tag_slot;
++	*tag_slot = 0;
+ 	prsrc->rsrc = rsrc;
+ 	list_add(&prsrc->list, &node->rsrc_list);
+ 	return 0;
+@@ -8603,7 +8613,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+ 	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
+ 	struct io_fixed_file *file_slot;
+ 	struct file *file;
+-	int ret, i;
++	int ret;
+ 
+ 	io_ring_submit_lock(ctx, needs_lock);
+ 	ret = -ENXIO;
+@@ -8616,8 +8626,8 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (ret)
+ 		goto out;
+ 
+-	i = array_index_nospec(offset, ctx->nr_user_files);
+-	file_slot = io_fixed_file_slot(&ctx->file_table, i);
++	offset = array_index_nospec(offset, ctx->nr_user_files);
++	file_slot = io_fixed_file_slot(&ctx->file_table, offset);
+ 	ret = -EBADF;
+ 	if (!file_slot->file_ptr)
+ 		goto out;
+@@ -8673,8 +8683,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ 
+ 		if (file_slot->file_ptr) {
+ 			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+-			err = io_queue_rsrc_removal(data, up->offset + done,
+-						    ctx->rsrc_node, file);
++			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
+ 			if (err)
+ 				break;
+ 			file_slot->file_ptr = 0;
+@@ -9347,7 +9356,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
+ 
+ 		i = array_index_nospec(offset, ctx->nr_user_bufs);
+ 		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
+-			err = io_queue_rsrc_removal(ctx->buf_data, offset,
++			err = io_queue_rsrc_removal(ctx->buf_data, i,
+ 						    ctx->rsrc_node, ctx->user_bufs[i]);
+ 			if (unlikely(err)) {
+ 				io_buffer_unmap(ctx, &imu);
+@@ -10102,6 +10111,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
+ 		return -EINVAL;
+ 	if (copy_from_user(&arg, argp, sizeof(arg)))
+ 		return -EFAULT;
++	if (arg.pad)
++		return -EINVAL;
+ 	*sig = u64_to_user_ptr(arg.sigmask);
+ 	*argsz = arg.sigmask_sz;
+ 	*ts = u64_to_user_ptr(arg.ts);
+@@ -10566,7 +10577,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
+ 			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
+ 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+-			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
++			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
++			IORING_FEAT_LINKED_FILE;
+ 
+ 	if (copy_to_user(params, p, sizeof(*p))) {
+ 		ret = -EFAULT;
+@@ -10777,8 +10789,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
+ 	__u32 tmp;
+ 	int err;
+ 
+-	if (up->resv)
+-		return -EINVAL;
+ 	if (check_add_overflow(up->offset, nr_args, &tmp))
+ 		return -EOVERFLOW;
+ 	err = io_rsrc_node_switch_start(ctx);
+@@ -10804,6 +10814,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ 	memset(&up, 0, sizeof(up));
+ 	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
+ 		return -EFAULT;
++	if (up.resv || up.resv2)
++		return -EINVAL;
+ 	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
+ }
+ 
+@@ -10816,7 +10828,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
+ 		return -EINVAL;
+ 	if (copy_from_user(&up, arg, sizeof(up)))
+ 		return -EFAULT;
+-	if (!up.nr || up.resv)
++	if (!up.nr || up.resv || up.resv2)
+ 		return -EINVAL;
+ 	return __io_register_rsrc_update(ctx, type, &up, up.nr);
+ }
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index cc2831cec6695..496f7b3f75237 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -235,6 +235,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
+ 	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
+ }
+ 
++static void
++nfsd_file_flush(struct nfsd_file *nf)
++{
++	if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
++		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
++}
++
+ static void
+ nfsd_file_do_unhash(struct nfsd_file *nf)
+ {
+@@ -302,11 +309,14 @@ nfsd_file_put(struct nfsd_file *nf)
+ 		return;
+ 	}
+ 
+-	filemap_flush(nf->nf_file->f_mapping);
+ 	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
+-	nfsd_file_put_noref(nf);
+-	if (is_hashed)
++	if (!is_hashed) {
++		nfsd_file_flush(nf);
++		nfsd_file_put_noref(nf);
++	} else {
++		nfsd_file_put_noref(nf);
+ 		nfsd_file_schedule_laundrette();
++	}
+ 	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
+ 		nfsd_file_gc();
+ }
+@@ -327,6 +337,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
+ 	while(!list_empty(dispose)) {
+ 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ 		list_del(&nf->nf_lru);
++		nfsd_file_flush(nf);
+ 		nfsd_file_put_noref(nf);
+ 	}
+ }
+@@ -340,6 +351,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
+ 	while(!list_empty(dispose)) {
+ 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ 		list_del(&nf->nf_lru);
++		nfsd_file_flush(nf);
+ 		if (!refcount_dec_and_test(&nf->nf_ref))
+ 			continue;
+ 		if (nfsd_file_free(nf))
+diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
+index c08758b6b3642..c05d2ce9b6cd8 100644
+--- a/include/asm-generic/mshyperv.h
++++ b/include/asm-generic/mshyperv.h
+@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
+ u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+ void hyperv_cleanup(void);
+ bool hv_query_ext_cap(u64 cap_query);
++void hv_setup_dma_ops(struct device *dev, bool coherent);
+ void *hv_map_memory(void *addr, unsigned long size);
+ void hv_unmap_memory(void *addr);
+ #else /* CONFIG_HYPERV */
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index 2c68a545ffa7d..71942a1c642d4 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -565,10 +565,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+ 	do {							\
+ 		unsigned long _sz = huge_page_size(h);		\
+-		if (_sz == PMD_SIZE)				\
+-			tlb_flush_pmd_range(tlb, address, _sz);	\
+-		else if (_sz == PUD_SIZE)			\
++		if (_sz >= P4D_SIZE)				\
++			tlb_flush_p4d_range(tlb, address, _sz);	\
++		else if (_sz >= PUD_SIZE)			\
+ 			tlb_flush_pud_range(tlb, address, _sz);	\
++		else if (_sz >= PMD_SIZE)			\
++			tlb_flush_pmd_range(tlb, address, _sz);	\
++		else						\
++			tlb_flush_pte_range(tlb, address, _sz);	\
+ 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
+ 	} while (0)
+ 
+diff --git a/include/linux/kfence.h b/include/linux/kfence.h
+index f49e64222628a..726857a4b6805 100644
+--- a/include/linux/kfence.h
++++ b/include/linux/kfence.h
+@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
+  */
+ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+ 
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++/**
++ * __kfence_obj_info() - fill kmem_obj_info struct
++ * @kpp: kmem_obj_info to be filled
++ * @object: the object
++ *
++ * Return:
++ * * false - not a KFENCE object
++ * * true - a KFENCE object, filled @kpp
++ *
++ * Copies information to @kpp for KFENCE objects.
++ */
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
++#endif
++
+ #else /* CONFIG_KFENCE */
+ 
+ static inline bool is_kfence_address(const void *addr) { return false; }
+@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
+ 	return false;
+ }
+ 
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++	return false;
++}
++#endif
++
+ #endif
+ 
+ #endif /* _LINUX_KFENCE_H */
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index fcc5b48989b3c..3c50b0fdda163 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -196,6 +196,14 @@ extern long __static_call_return0(void);
+ 	};								\
+ 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+ 
++#define DEFINE_STATIC_CALL_RET0(name, _func)				\
++	DECLARE_STATIC_CALL(name, _func);				\
++	struct static_call_key STATIC_CALL_KEY(name) = {		\
++		.func = __static_call_return0,				\
++		.type = 1,						\
++	};								\
++	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
++
+ #define static_call_cond(name)	(void)__static_call(name)
+ 
+ #define EXPORT_STATIC_CALL(name)					\
+@@ -231,6 +239,12 @@ static inline int static_call_init(void) { return 0; }
+ 	};								\
+ 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+ 
++#define DEFINE_STATIC_CALL_RET0(name, _func)				\
++	DECLARE_STATIC_CALL(name, _func);				\
++	struct static_call_key STATIC_CALL_KEY(name) = {		\
++		.func = __static_call_return0,				\
++	};								\
++	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+ 
+ #define static_call_cond(name)	(void)__static_call(name)
+ 
+@@ -284,6 +298,9 @@ static inline long __static_call_return0(void)
+ 		.func = NULL,						\
+ 	}
+ 
++#define DEFINE_STATIC_CALL_RET0(name, _func)				\
++	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)
++
+ static inline void __static_call_nop(void) { }
+ 
+ /*
+@@ -327,7 +344,4 @@ static inline int static_call_text_reserved(void *start, void *end)
+ #define DEFINE_STATIC_CALL(name, _func)					\
+ 	__DEFINE_STATIC_CALL(name, _func, _func)
+ 
+-#define DEFINE_STATIC_CALL_RET0(name, _func)				\
+-	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)
+-
+ #endif /* _LINUX_STATIC_CALL_H */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index f35c22b3355ff..66b49afb9e693 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -412,6 +412,7 @@ struct svc_deferred_req {
+ 	size_t			addrlen;
+ 	struct sockaddr_storage	daddr;	/* where reply must come from */
+ 	size_t			daddrlen;
++	void			*xprt_ctxt;
+ 	struct cache_deferred_req handle;
+ 	size_t			xprt_hlen;
+ 	int			argslen;
+diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
+index ae6f4838ab755..6e5db4edc3359 100644
+--- a/include/linux/vfio_pci_core.h
++++ b/include/linux/vfio_pci_core.h
+@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
+ 	struct mutex		ioeventfds_lock;
+ 	struct list_head	ioeventfds_list;
+ 	struct vfio_pci_vf_token	*vf_token;
++	struct list_head		sriov_pfs_item;
++	struct vfio_pci_core_device	*sriov_pf_core_dev;
+ 	struct notifier_block	nb;
+ 	struct mutex		vma_lock;
+ 	struct list_head	vma_list;
+diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
+index aa33e1092e2c4..9f65f1bfbd246 100644
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
+ 		__be16	vlan_tci;
+ 	};
+ 	__be16	vlan_tpid;
++	__be16	vlan_eth_type;
++	u16	padding;
+ };
+ 
+ struct flow_dissector_mpls_lse {
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index c5d7810fd7926..037c77fb5dc55 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -211,6 +211,8 @@ struct iscsi_cls_conn {
+ 	struct mutex ep_mutex;
+ 	struct iscsi_endpoint *ep;
+ 
++	/* Used when accessing flags and queueing work. */
++	spinlock_t lock;
+ 	unsigned long flags;
+ 	struct work_struct cleanup_work;
+ 
+diff --git a/include/sound/core.h b/include/sound/core.h
+index b7e9b58d3c788..6d4cc49584c63 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -284,6 +284,7 @@ int snd_card_disconnect(struct snd_card *card);
+ void snd_card_disconnect_sync(struct snd_card *card);
+ int snd_card_free(struct snd_card *card);
+ int snd_card_free_when_closed(struct snd_card *card);
++int snd_card_free_on_error(struct device *dev, int ret);
+ void snd_card_set_id(struct snd_card *card, const char *id);
+ int snd_card_register(struct snd_card *card);
+ int snd_card_info_init(void);
+diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
+index 653dfffb3ac84..8d79cebf95f32 100644
+--- a/include/sound/memalloc.h
++++ b/include/sound/memalloc.h
+@@ -51,6 +51,11 @@ struct snd_dma_device {
+ #define SNDRV_DMA_TYPE_DEV_SG	SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+ #define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
+ #endif
++/* fallback types, don't use those directly */
++#ifdef CONFIG_SND_DMA_SGBUF
++#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK		10
++#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK	11
++#endif
+ 
+ /*
+  * info for buffer allocation
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 5be3faf88c1a1..06fe47fb3686a 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -1956,17 +1956,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
+ 	TP_STRUCT__entry(
+ 		__field(const void *, dr)
+ 		__field(u32, xid)
+-		__string(addr, dr->xprt->xpt_remotebuf)
++		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->dr = dr;
+ 		__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
+ 						       (dr->xprt_hlen>>2)));
+-		__assign_str(addr, dr->xprt->xpt_remotebuf);
++		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
++			 "%pISpc", (struct sockaddr *)&dr->addr);
+ 	),
+ 
+-	TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
++	TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
+ 		__entry->xid)
+ );
+ 
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 787f491f0d2ae..1e45368ad33fd 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -293,6 +293,7 @@ struct io_uring_params {
+ #define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
+ #define IORING_FEAT_RSRC_TAGS		(1U << 10)
+ #define IORING_FEAT_CQE_SKIP		(1U << 11)
++#define IORING_FEAT_LINKED_FILE		(1U << 12)
+ 
+ /*
+  * io_uring_register(2) opcodes and arguments
+diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
+index 3021ea25a2849..7837ba4fe7289 100644
+--- a/include/uapi/linux/stddef.h
++++ b/include/uapi/linux/stddef.h
+@@ -1,4 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef _UAPI_LINUX_STDDEF_H
++#define _UAPI_LINUX_STDDEF_H
++
+ #include <linux/compiler_types.h>
+ 
+ #ifndef __always_inline
+@@ -41,3 +44,4 @@
+ 		struct { } __empty_ ## NAME; \
+ 		TYPE NAME[]; \
+ 	}
++#endif
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 407a2568f35eb..5601216eb51bd 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -70,7 +70,6 @@ struct cpuhp_cpu_state {
+ 	bool			rollback;
+ 	bool			single;
+ 	bool			bringup;
+-	int			cpu;
+ 	struct hlist_node	*node;
+ 	struct hlist_node	*last;
+ 	enum cpuhp_state	cb_state;
+@@ -474,7 +473,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+ #endif
+ 
+ static inline enum cpuhp_state
+-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
++cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
+ {
+ 	enum cpuhp_state prev_state = st->state;
+ 	bool bringup = st->state < target;
+@@ -485,14 +484,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+ 	st->target = target;
+ 	st->single = false;
+ 	st->bringup = bringup;
+-	if (cpu_dying(st->cpu) != !bringup)
+-		set_cpu_dying(st->cpu, !bringup);
++	if (cpu_dying(cpu) != !bringup)
++		set_cpu_dying(cpu, !bringup);
+ 
+ 	return prev_state;
+ }
+ 
+ static inline void
+-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
++cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
++		  enum cpuhp_state prev_state)
+ {
+ 	bool bringup = !st->bringup;
+ 
+@@ -519,8 +519,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+ 	}
+ 
+ 	st->bringup = bringup;
+-	if (cpu_dying(st->cpu) != !bringup)
+-		set_cpu_dying(st->cpu, !bringup);
++	if (cpu_dying(cpu) != !bringup)
++		set_cpu_dying(cpu, !bringup);
+ }
+ 
+ /* Regular hotplug invocation of the AP hotplug thread */
+@@ -540,15 +540,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
+ 	wait_for_ap_thread(st, st->bringup);
+ }
+ 
+-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
++static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
++			 enum cpuhp_state target)
+ {
+ 	enum cpuhp_state prev_state;
+ 	int ret;
+ 
+-	prev_state = cpuhp_set_state(st, target);
++	prev_state = cpuhp_set_state(cpu, st, target);
+ 	__cpuhp_kick_ap(st);
+ 	if ((ret = st->result)) {
+-		cpuhp_reset_state(st, prev_state);
++		cpuhp_reset_state(cpu, st, prev_state);
+ 		__cpuhp_kick_ap(st);
+ 	}
+ 
+@@ -580,7 +581,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
+ 	if (st->target <= CPUHP_AP_ONLINE_IDLE)
+ 		return 0;
+ 
+-	return cpuhp_kick_ap(st, st->target);
++	return cpuhp_kick_ap(cpu, st, st->target);
+ }
+ 
+ static int bringup_cpu(unsigned int cpu)
+@@ -703,7 +704,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+ 			 ret, cpu, cpuhp_get_step(st->state)->name,
+ 			 st->state);
+ 
+-		cpuhp_reset_state(st, prev_state);
++		cpuhp_reset_state(cpu, st, prev_state);
+ 		if (can_rollback_cpu(st))
+ 			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
+ 							    prev_state));
+@@ -720,7 +721,6 @@ static void cpuhp_create(unsigned int cpu)
+ 
+ 	init_completion(&st->done_up);
+ 	init_completion(&st->done_down);
+-	st->cpu = cpu;
+ }
+ 
+ static int cpuhp_should_run(unsigned int cpu)
+@@ -874,7 +874,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
+ 	cpuhp_lock_release(true);
+ 
+ 	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
+-	ret = cpuhp_kick_ap(st, st->target);
++	ret = cpuhp_kick_ap(cpu, st, st->target);
+ 	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
+ 
+ 	return ret;
+@@ -1106,7 +1106,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+ 			 ret, cpu, cpuhp_get_step(st->state)->name,
+ 			 st->state);
+ 
+-		cpuhp_reset_state(st, prev_state);
++		cpuhp_reset_state(cpu, st, prev_state);
+ 
+ 		if (st->state < prev_state)
+ 			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
+@@ -1133,7 +1133,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ 
+ 	cpuhp_tasks_frozen = tasks_frozen;
+ 
+-	prev_state = cpuhp_set_state(st, target);
++	prev_state = cpuhp_set_state(cpu, st, target);
+ 	/*
+ 	 * If the current CPU state is in the range of the AP hotplug thread,
+ 	 * then we need to kick the thread.
+@@ -1164,7 +1164,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ 	ret = cpuhp_down_callbacks(cpu, st, target);
+ 	if (ret && st->state < prev_state) {
+ 		if (st->state == CPUHP_TEARDOWN_CPU) {
+-			cpuhp_reset_state(st, prev_state);
++			cpuhp_reset_state(cpu, st, prev_state);
+ 			__cpuhp_kick_ap(st);
+ 		} else {
+ 			WARN(1, "DEAD callback error for CPU%d", cpu);
+@@ -1351,7 +1351,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ 
+ 	cpuhp_tasks_frozen = tasks_frozen;
+ 
+-	cpuhp_set_state(st, target);
++	cpuhp_set_state(cpu, st, target);
+ 	/*
+ 	 * If the current CPU state is in the range of the AP hotplug thread,
+ 	 * then we need to kick the thread once more.
+diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
+index 4632b0f4f72eb..8a6cd53dbe8ce 100644
+--- a/kernel/dma/direct.h
++++ b/kernel/dma/direct.h
+@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ 
+ 	if (unlikely(is_swiotlb_buffer(dev, phys)))
+-		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
++		swiotlb_tbl_unmap_single(dev, phys, size, dir,
++					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ #endif /* _KERNEL_DMA_DIRECT_H */
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index f7ff8919dc9bb..fdf170404650f 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
+ 	 */
+ 	if (numvecs <= nodes) {
+ 		for_each_node_mask(n, nodemsk) {
+-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
+-				   node_to_cpumask[n]);
++			/* Ensure that only CPUs which are in both masks are set */
++			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
++			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
+ 			if (++curvec == last_affv)
+ 				curvec = firstvec;
+ 		}
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 01a7c1706a58b..65a630f62363c 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
+ 
+ 	/* There shouldn't be any pending callbacks on an offline CPU. */
+ 	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+-		     !warned && !llist_empty(head))) {
++		     !warned && entry != NULL)) {
+ 		warned = true;
+ 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
+ 
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 17a283ce2b20f..5e80ee44c32a2 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -186,7 +186,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
+ 	 */
+ 	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+ #ifdef CONFIG_NO_HZ_FULL
+-		WARN_ON(tick_nohz_full_running);
++		WARN_ON_ONCE(tick_nohz_full_running);
+ #endif
+ 		tick_do_timer_cpu = cpu;
+ 	}
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 85f1021ad4595..9dd2a39cb3b00 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct timer_base *base)
+ 	       time_after_eq(jiffies, base->next_expiry)) {
+ 		levels = collect_expired_timers(base, heads);
+ 		/*
+-		 * The only possible reason for not finding any expired
+-		 * timer at this clk is that all matching timers have been
+-		 * dequeued.
++		 * The two possible reasons for not finding any expired
++		 * timer at this clk are that all matching timers have been
++		 * dequeued or no timer has been queued since
++		 * base::next_expiry was set to base::clk +
++		 * NEXT_TIMER_MAX_DELTA.
+ 		 */
+-		WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
++		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
++			     && base->timers_pending);
+ 		base->clk++;
+ 		base->next_expiry = __next_timer_interrupt(base);
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f294db835f4bc..a1da8757cc9cc 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3469,7 +3469,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+ {
+ 	int nr_nodes, node;
+ 	struct page *page;
+-	int rc = 0;
+ 
+ 	lockdep_assert_held(&hugetlb_lock);
+ 
+@@ -3480,15 +3479,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+ 	}
+ 
+ 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+-		if (!list_empty(&h->hugepage_freelists[node])) {
+-			page = list_entry(h->hugepage_freelists[node].next,
+-					struct page, lru);
+-			rc = demote_free_huge_page(h, page);
+-			break;
++		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
++			if (PageHWPoison(page))
++				continue;
++
++			return demote_free_huge_page(h, page);
+ 		}
+ 	}
+ 
+-	return rc;
++	/*
++	 * Only way to get here is if all pages on free lists are poisoned.
++	 * Return -EBUSY so that caller will not retry.
++	 */
++	return -EBUSY;
+ }
+ 
+ #define HSTATE_ATTR_RO(_name) \
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index d4c7978cd75e2..af82c6f7d7239 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -222,27 +222,6 @@ static bool kfence_unprotect(unsigned long addr)
+ 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
+ }
+ 
+-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+-{
+-	long index;
+-
+-	/* The checks do not affect performance; only called from slow-paths. */
+-
+-	if (!is_kfence_address((void *)addr))
+-		return NULL;
+-
+-	/*
+-	 * May be an invalid index if called with an address at the edge of
+-	 * __kfence_pool, in which case we would report an "invalid access"
+-	 * error.
+-	 */
+-	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+-		return NULL;
+-
+-	return &kfence_metadata[index];
+-}
+-
+ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+ {
+ 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
+diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
+index 9a6c4b1b12a88..600f2e2431d6d 100644
+--- a/mm/kfence/kfence.h
++++ b/mm/kfence/kfence.h
+@@ -96,6 +96,27 @@ struct kfence_metadata {
+ 
+ extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+ 
++static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
++{
++	long index;
++
++	/* The checks do not affect performance; only called from slow-paths. */
++
++	if (!is_kfence_address((void *)addr))
++		return NULL;
++
++	/*
++	 * May be an invalid index if called with an address at the edge of
++	 * __kfence_pool, in which case we would report an "invalid access"
++	 * error.
++	 */
++	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
++	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
++		return NULL;
++
++	return &kfence_metadata[index];
++}
++
+ /* KFENCE error types for report generation. */
+ enum kfence_error_type {
+	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
+diff --git a/mm/kfence/report.c b/mm/kfence/report.c
+index f93a7b2a338be..f5a6d8ba3e21f 100644
+--- a/mm/kfence/report.c
++++ b/mm/kfence/report.c
+@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
+ 	/* We encountered a memory safety error, taint the kernel! */
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
+ }
++
++#ifdef CONFIG_PRINTK
++static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
++{
++	int i, j;
++
++	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
++	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
++		kp_stack[j] = (void *)track->stack_entries[i];
++	if (j < KS_ADDRS_COUNT)
++		kp_stack[j] = NULL;
++}
++
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
++	unsigned long flags;
++
++	if (!meta)
++		return false;
++
++	/*
++	 * If state is UNUSED at least show the pointer requested; the rest
++	 * would be garbage data.
++	 */
++	kpp->kp_ptr = object;
++
++	/* Requesting info on a never-used object is almost certainly a bug. */
++	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
++		return true;
++
++	raw_spin_lock_irqsave(&meta->lock, flags);
++
++	kpp->kp_slab = slab;
++	kpp->kp_slab_cache = meta->cache;
++	kpp->kp_objp = (void *)meta->addr;
++	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
++	if (meta->state == KFENCE_OBJECT_FREED)
++		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
++	/* get_stack_skipnr() ensures the first entry is outside allocator. */
++	kpp->kp_ret = kpp->kp_stack[0];
++
++	raw_spin_unlock_irqrestore(&meta->lock, flags);
++
++	return true;
++}
++#endif
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index acd7cbb82e160..a182f5ddaf68b 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
+ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ 			       gfp_t gfp)
+ {
+-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ 		kmemleak_alloc(__va(phys), size, min_count, gfp);
+ }
+ EXPORT_SYMBOL(kmemleak_alloc_phys);
+@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
+  */
+ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+ {
+-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ 		kmemleak_free_part(__va(phys), size);
+ }
+ EXPORT_SYMBOL(kmemleak_free_part_phys);
+@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
+  */
+ void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+ {
+-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ 		kmemleak_not_leak(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+  */
+ void __ref kmemleak_ignore_phys(phys_addr_t phys)
+ {
+-	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++	if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ 		kmemleak_ignore(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_ignore_phys);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a1fbf656e7dbd..e6f211dcf82e7 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6112,7 +6112,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
+ 	do {
+ 		zone_type--;
+ 		zone = pgdat->node_zones + zone_type;
+-		if (managed_zone(zone)) {
++		if (populated_zone(zone)) {
+ 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
+ 			check_highest_zone(zone_type);
+ 		}
+diff --git a/mm/page_io.c b/mm/page_io.c
+index 0bf8e40f4e573..d3eea0a3f1afa 100644
+--- a/mm/page_io.c
++++ b/mm/page_io.c
+@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
+ 	bio_put(bio);
+ }
+ 
+-static void swap_slot_free_notify(struct page *page)
+-{
+-	struct swap_info_struct *sis;
+-	struct gendisk *disk;
+-	swp_entry_t entry;
+-
+-	/*
+-	 * There is no guarantee that the page is in swap cache - the software
+-	 * suspend code (at least) uses end_swap_bio_read() against a non-
+-	 * swapcache page.  So we must check PG_swapcache before proceeding with
+-	 * this optimization.
+-	 */
+-	if (unlikely(!PageSwapCache(page)))
+-		return;
+-
+-	sis = page_swap_info(page);
+-	if (data_race(!(sis->flags & SWP_BLKDEV)))
+-		return;
+-
+-	/*
+-	 * The swap subsystem performs lazy swap slot freeing,
+-	 * expecting that the page will be swapped out again.
+-	 * So we can avoid an unnecessary write if the page
+-	 * isn't redirtied.
+-	 * This is good for real swap storage because we can
+-	 * reduce unnecessary I/O and enhance wear-leveling
+-	 * if an SSD is used as the as swap device.
+-	 * But if in-memory swap device (eg zram) is used,
+-	 * this causes a duplicated copy between uncompressed
+-	 * data in VM-owned memory and compressed data in
+-	 * zram-owned memory.  So let's free zram-owned memory
+-	 * and make the VM-owned decompressed page *dirty*,
+-	 * so the page should be swapped out somewhere again if
+-	 * we again wish to reclaim it.
+-	 */
+-	disk = sis->bdev->bd_disk;
+-	entry.val = page_private(page);
+-	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
+-		unsigned long offset;
+-
+-		offset = swp_offset(entry);
+-
+-		SetPageDirty(page);
+-		disk->fops->swap_slot_free_notify(sis->bdev,
+-				offset);
+-	}
+-}
+-
+ static void end_swap_bio_read(struct bio *bio)
+ {
+ 	struct page *page = bio_first_page_all(bio);
+@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
+ 	}
+ 
+ 	SetPageUptodate(page);
+-	swap_slot_free_notify(page);
+ out:
+ 	unlock_page(page);
+ 	WRITE_ONCE(bio->bi_private, NULL);
+@@ -392,11 +343,6 @@ int swap_readpage(struct page *page, bool synchronous)
+ 	if (sis->flags & SWP_SYNCHRONOUS_IO) {
+ 		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
+ 		if (!ret) {
+-			if (trylock_page(page)) {
+-				swap_slot_free_notify(page);
+-				unlock_page(page);
+-			}
+-
+ 			count_vm_event(PSWPIN);
+ 			goto out;
+ 		}
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 22b310adb53d9..5a62ef3bcfcff 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
+ 	.isolate_page	= secretmem_isolate_page,
+ };
+ 
++static int secretmem_setattr(struct user_namespace *mnt_userns,
++			     struct dentry *dentry, struct iattr *iattr)
++{
++	struct inode *inode = d_inode(dentry);
++	unsigned int ia_valid = iattr->ia_valid;
++
++	if ((ia_valid & ATTR_SIZE) && inode->i_size)
++		return -EINVAL;
++
++	return simple_setattr(mnt_userns, dentry, iattr);
++}
++
++static const struct inode_operations secretmem_iops = {
++	.setattr = secretmem_setattr,
++};
++
+ static struct vfsmount *secretmem_mnt;
+ 
+ static struct file *secretmem_file_create(unsigned long flags)
+@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
+ 	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+ 	mapping_set_unevictable(inode->i_mapping);
+ 
++	inode->i_op = &secretmem_iops;
+ 	inode->i_mapping->a_ops = &secretmem_aops;
+ 
+ 	/* pretend we are a normal file with zero size */
+diff --git a/mm/slab.c b/mm/slab.c
+index a36af26e15216..f4b3eebc1e2ca 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3650,7 +3650,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
+ #endif /* CONFIG_NUMA */
+ 
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ 	struct kmem_cache *cachep;
+ 	unsigned int objnr;
+diff --git a/mm/slab.h b/mm/slab.h
+index c7f2abc2b154c..506dab2a6735e 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -851,7 +851,7 @@ struct kmem_obj_info {
+ 	void *kp_stack[KS_ADDRS_COUNT];
+ 	void *kp_free_stack[KS_ADDRS_COUNT];
+ };
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+ #endif
+ 
+ #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 23f2ab0713b77..a9a7d79daa102 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
+ }
+ EXPORT_SYMBOL_GPL(kmem_valid_obj);
+ 
++static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++	if (__kfence_obj_info(kpp, object, slab))
++		return;
++	__kmem_obj_info(kpp, object, slab);
++}
++
+ /**
+  * kmem_dump_obj - Print available slab provenance information
+  * @object: slab object for which to find provenance information.
+@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
+ 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+ 	else
+ 		pr_cont(" slab%s", cp);
++	if (is_kfence_address(object))
++		pr_cont(" (kfence)");
+ 	if (kp.kp_objp)
+ 		pr_cont(" start %px", kp.kp_objp);
+ 	if (kp.kp_data_offset)
+diff --git a/mm/slob.c b/mm/slob.c
+index 60c5842215f1b..fd9c643facbc4 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -463,7 +463,7 @@ out:
+ }
+ 
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ 	kpp->kp_ptr = object;
+ 	kpp->kp_slab = slab;
+diff --git a/mm/slub.c b/mm/slub.c
+index 261474092e43e..e3277e33ea6e4 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4322,7 +4322,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
+ }
+ 
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ 	void *base;
+ 	int __maybe_unused i;
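
The renames above turn the per-allocator reporters into back ends; the new wrapper in slab_common.c asks KFENCE first, because kmem_dump_obj() could previously print misleading provenance for objects that actually live in the KFENCE pool. __kfence_obj_info() is supplied by the KFENCE side of the same upstream fix (added under mm/kfence/ earlier in this patch). The dispatch, restated as a sketch:

        static void kmem_obj_info(struct kmem_obj_info *kpp, void *object,
                                  struct slab *slab)
        {
                /* Returns true (and fills *kpp) only for KFENCE objects. */
                if (__kfence_obj_info(kpp, object, slab))
                        return;
                /* Otherwise fall back to the slab/slob/slub reporter. */
                __kmem_obj_info(kpp, object, slab);
        }
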
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index f5686c463bc0d..363d47f945324 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1053,6 +1053,11 @@ static int ax25_release(struct socket *sock)
+ 		ax25_destroy_socket(ax25);
+ 	}
+ 	if (ax25_dev) {
++		del_timer_sync(&ax25->timer);
++		del_timer_sync(&ax25->t1timer);
++		del_timer_sync(&ax25->t2timer);
++		del_timer_sync(&ax25->t3timer);
++		del_timer_sync(&ax25->idletimer);
+ 		dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+ 		ax25_dev_put(ax25_dev);
+ 	}
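
The added del_timer_sync() calls stop all five AX.25 timers before the last device reference goes away; del_timer_sync() also waits out a handler that is already mid-flight, so no timer callback can dereference ax25_dev after the put. The general shape, with hypothetical names:

        del_timer_sync(&obj->retry_timer);  /* waits for a running handler  */
        dev_put(obj->dev);                  /* safe: nothing left can fire  */
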
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 15833e1d6ea11..544d2028ccf51 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1182,6 +1182,7 @@ proto_again:
+ 					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ 			}
+ 			key_vlan->vlan_tpid = saved_vlan_tpid;
++			key_vlan->vlan_eth_type = proto;
+ 		}
+ 
+ 		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index a39bbed77f87d..d3ce6113e6c36 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -561,7 +561,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
+ 	struct devlink_port *dlp = &dp->devlink_port;
+ 	struct dsa_switch *ds = dp->ds;
+ 	struct dsa_mac_addr *a, *tmp;
+-	struct net_device *slave;
+ 
+ 	if (!dp->setup)
+ 		return;
+@@ -583,11 +582,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
+ 		dsa_port_link_unregister_of(dp);
+ 		break;
+ 	case DSA_PORT_TYPE_USER:
+-		slave = dp->slave;
+-
+-		if (slave) {
++		if (dp->slave) {
++			dsa_slave_destroy(dp->slave);
+ 			dp->slave = NULL;
+-			dsa_slave_destroy(slave);
+ 		}
+ 		break;
+ 	}
+@@ -1137,17 +1134,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ 	if (err)
+ 		goto teardown_cpu_ports;
+ 
+-	err = dsa_tree_setup_master(dst);
++	err = dsa_tree_setup_ports(dst);
+ 	if (err)
+ 		goto teardown_switches;
+ 
+-	err = dsa_tree_setup_ports(dst);
++	err = dsa_tree_setup_master(dst);
+ 	if (err)
+-		goto teardown_master;
++		goto teardown_ports;
+ 
+ 	err = dsa_tree_setup_lags(dst);
+ 	if (err)
+-		goto teardown_ports;
++		goto teardown_master;
+ 
+ 	dst->setup = true;
+ 
+@@ -1155,10 +1152,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
+ 
+ 	return 0;
+ 
+-teardown_ports:
+-	dsa_tree_teardown_ports(dst);
+ teardown_master:
+ 	dsa_tree_teardown_master(dst);
++teardown_ports:
++	dsa_tree_teardown_ports(dst);
+ teardown_switches:
+ 	dsa_tree_teardown_switches(dst);
+ teardown_cpu_ports:
+@@ -1176,10 +1173,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+ 
+ 	dsa_tree_teardown_lags(dst);
+ 
+-	dsa_tree_teardown_ports(dst);
+-
+ 	dsa_tree_teardown_master(dst);
+ 
++	dsa_tree_teardown_ports(dst);
++
+ 	dsa_tree_teardown_switches(dst);
+ 
+ 	dsa_tree_teardown_cpu_ports(dst);
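
Swapping dsa_tree_setup_ports() ahead of dsa_tree_setup_master() also forces the error labels and dsa_tree_teardown() to flip, since unwinding has to run in reverse completion order to be correct at every failure point. The invariant in miniature, with hypothetical step names:

        err = step_a();
        if (err)
                return err;
        err = step_b();
        if (err)
                goto undo_a;        /* only completed steps get undone */
        err = step_c();
        if (err)
                goto undo_b;
        return 0;

        undo_b:
                undo_step_b();      /* strictly the reverse of setup */
        undo_a:
                undo_step_a();
                return err;
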
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 194832663d856..9d83c11ba1e74 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
+ 		goto drop;
+ 
+ 	if (!net->ipv6.devconf_all->disable_policy &&
+-	    !idev->cnf.disable_policy &&
++	    (!idev || !idev->cnf.disable_policy) &&
+ 	    !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
+ 		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
+ 		goto drop;
+diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
+index 9479f2787ea79..88d9cc945a216 100644
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
+ #define PRINT_HT_CAP(_cond, _str) \
+ 	do { \
+ 	if (_cond) \
+-			p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
++			p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
+ 	} while (0)
+ 	char *buf, *p;
+ 	int i;
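
buf in this function is a kmalloc'ed char pointer sized by bufsz, so the old sizeof(buf) evaluated to the pointer width rather than the buffer length, truncating the HT-capability dump after a handful of bytes. A minimal illustration of the distinction (inside any function):

        char  on_stack[512];
        char *on_heap = kmalloc(512, GFP_KERNEL);

        BUILD_BUG_ON(sizeof(on_stack) != 512);           /* array: full size   */
        BUILD_BUG_ON(sizeof(on_heap) != sizeof(void *)); /* pointer: 8 on 64-bit */
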
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1f5a0eece0d14..30d29d038d095 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -9275,7 +9275,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
+ }
+ EXPORT_SYMBOL_GPL(nft_parse_u32_check);
+ 
+-static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
++static int nft_parse_register(const struct nlattr *attr, u32 *preg)
+ {
+ 	unsigned int reg;
+ 
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index d601974c9d2e0..b8f0111457650 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -36,12 +36,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
+ 
+ #ifdef CONFIG_SOCK_CGROUP_DATA
+ static noinline bool
+-nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
++nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
+ {
+-	struct sock *sk = skb_to_full_sk(pkt->skb);
+ 	struct cgroup *cgrp;
+ 
+-	if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
++	if (!sk_fullsock(sk))
+ 		return false;
+ 
+ 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+@@ -108,7 +107,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ 		break;
+ #ifdef CONFIG_SOCK_CGROUP_DATA
+ 	case NFT_SOCKET_CGROUPV2:
+-		if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
++		if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
+ 			regs->verdict.code = NFT_BREAK;
+ 			return;
+ 		}
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index d2537383a3e89..6a193cce2a754 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
+ 	mutex_lock(&ndev->req_lock);
+ 
+ 	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
++		/* Need to flush the cmd wq in case
++		 * there is a queued/running cmd_work
++		 */
++		flush_workqueue(ndev->cmd_wq);
+ 		del_timer_sync(&ndev->cmd_timer);
+ 		del_timer_sync(&ndev->data_timer);
+ 		mutex_unlock(&ndev->req_lock);
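
The flush closes a shutdown race: a cmd_work item already queued (or still running) could re-arm cmd_timer right after the del_timer_sync() calls, leaving a live timer behind on a closing device. The ordering rule, reduced to a sketch:

        flush_workqueue(ndev->cmd_wq);     /* no cmd_work queued or running */
        del_timer_sync(&ndev->cmd_timer);  /* nothing left to re-arm it     */
        del_timer_sync(&ndev->data_timer);
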
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 5ce1208a6ea36..130b5fda9c518 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1653,10 +1653,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
+ 	if (chain->flushing)
+ 		return -EAGAIN;
+ 
++	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
+ 	if (*chain_info->pprev == chain->filter_chain)
+ 		tcf_chain0_head_change(chain, tp);
+ 	tcf_proto_get(tp);
+-	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
+ 	rcu_assign_pointer(*chain_info->pprev, tp);
+ 
+ 	return 0;
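
Hoisting RCU_INIT_POINTER() above the publication matters because rcu_assign_pointer(*chain_info->pprev, tp) makes tp reachable to lockless readers; before this reorder there was a window in which a reader could walk onto tp while tp->next still held stale contents. The standard init-before-publish pattern, as a sketch (assume head is an __rcu pointer and the update-side lock is held):

        struct node *n = kzalloc(sizeof(*n), GFP_KERNEL);

        n->val = 42;                        /* 1: fully initialize ...      */
        RCU_INIT_POINTER(n->next, head);    /*    ... including RCU fields  */
        rcu_assign_pointer(head, n);        /* 2: publish; the implied
                                             *    barrier orders the stores
                                             *    above before visibility   */
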
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 1a9b1f140f9e9..ef5b3452254aa 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1005,6 +1005,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
+ static void fl_set_key_vlan(struct nlattr **tb,
+ 			    __be16 ethertype,
+ 			    int vlan_id_key, int vlan_prio_key,
++			    int vlan_next_eth_type_key,
+ 			    struct flow_dissector_key_vlan *key_val,
+ 			    struct flow_dissector_key_vlan *key_mask)
+ {
+@@ -1023,6 +1024,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
+ 	}
+ 	key_val->vlan_tpid = ethertype;
+ 	key_mask->vlan_tpid = cpu_to_be16(~0);
++	if (tb[vlan_next_eth_type_key]) {
++		key_val->vlan_eth_type =
++			nla_get_be16(tb[vlan_next_eth_type_key]);
++		key_mask->vlan_eth_type = cpu_to_be16(~0);
++	}
+ }
+ 
+ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
+@@ -1519,8 +1525,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
+ 
+ 		if (eth_type_vlan(ethertype)) {
+ 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
+-					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
+-					&mask->vlan);
++					TCA_FLOWER_KEY_VLAN_PRIO,
++					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
++					&key->vlan, &mask->vlan);
+ 
+ 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
+ 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
+@@ -1528,6 +1535,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
+ 					fl_set_key_vlan(tb, ethertype,
+ 							TCA_FLOWER_KEY_CVLAN_ID,
+ 							TCA_FLOWER_KEY_CVLAN_PRIO,
++							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ 							&key->cvlan, &mask->cvlan);
+ 					fl_set_key_val(tb, &key->basic.n_proto,
+ 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+@@ -2886,13 +2894,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
+ 		goto nla_put_failure;
+ 
+ 	if (mask->basic.n_proto) {
+-		if (mask->cvlan.vlan_tpid) {
++		if (mask->cvlan.vlan_eth_type) {
+ 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ 					 key->basic.n_proto))
+ 				goto nla_put_failure;
+-		} else if (mask->vlan.vlan_tpid) {
++		} else if (mask->vlan.vlan_eth_type) {
+ 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+-					 key->basic.n_proto))
++					 key->vlan.vlan_eth_type))
+ 				goto nla_put_failure;
+ 		}
+ 	}
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 377f896bdedc4..b9c71a304d399 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
+ {
+ 	struct taprio_sched *q = qdisc_priv(sch);
+ 
+-	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
++	/* sk_flags are only safe to use on full sockets. */
++	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
+ 		if (!is_valid_interval(skb, sch))
+ 			return qdisc_drop(skb, sch, to_free);
+ 	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 7f342bc127358..52edee1322fc3 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ 		}
+ 	}
+ 
+-	if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
++	if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+ 		sctp_association_free(new_asoc);
+ 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 	}
+@@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
+ 
+ 	/* Set peer label for connection. */
+ 	if (security_sctp_assoc_established((struct sctp_association *)asoc,
+-					    chunk->skb))
++					    chunk->head_skb ?: chunk->skb))
+ 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+ 	/* Verify that the chunk length for the COOKIE-ACK is OK.
+@@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ 	}
+ 
+ 	/* Update socket peer label if first association. */
+-	if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
++	if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+ 		sctp_association_free(new_asoc);
+ 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 	}
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 3e1a9600be5e1..7b0427658056d 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ 	 * Set the daddr and initialize id to something more random and also
+ 	 * copy over any ip options.
+ 	 */
+-	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
++	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
+ 	sp->pf->copy_ip_options(sk, sock->sk);
+ 
+ 	/* Populate the fields of the newsk from the oldsk and migrate the
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index ce27399b38b1e..f9f3f59c79de2 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
+ 			  flags, SMC_NETLINK_DUMP_UEID);
+ 	if (!hdr)
+ 		return -ENOMEM;
+-	snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
++	memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
++	ueid_str[SMC_MAX_EID_LEN] = 0;
+ 	if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
+ 		genlmsg_cancel(skb, hdr);
+ 		return -EMSGSIZE;
+@@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
+ 		goto end;
+ 
+ 	smc_ism_get_system_eid(&seid);
+-	snprintf(seid_str, sizeof(seid_str), "%s", seid);
++	memcpy(seid_str, seid, SMC_MAX_EID_LEN);
++	seid_str[SMC_MAX_EID_LEN] = 0;
+ 	if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
+ 		goto err;
+ 	read_lock(&smc_clc_eid_table.lock);
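
The EID strings handled here are fixed-width and not guaranteed to be NUL-terminated, so formatting them with snprintf("%s", ...) could read past the SMC_MAX_EID_LEN source buffer; a bounded memcpy() plus an explicit terminator avoids any strlen()-style scan. The pattern in isolation:

        char out[SMC_MAX_EID_LEN + 1];

        memcpy(out, eid, SMC_MAX_EID_LEN);  /* bounded: never scans the source */
        out[SMC_MAX_EID_LEN] = '\0';        /* terminate for nla_put_string()  */
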
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 29f0a559d8847..4769f76505afc 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
+ 	list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+ 		if (!strncmp(ibdev->ibdev->name, ib_name,
+ 			     sizeof(ibdev->ibdev->name)) ||
+-		    !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+-			     IB_DEVICE_NAME_MAX - 1)) {
++		    (ibdev->ibdev->dev.parent &&
++		     !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
++			     IB_DEVICE_NAME_MAX - 1))) {
+ 			goto out;
+ 		}
+ 	}
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index b21ad79941474..4a423e481a281 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1213,6 +1213,8 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
+ 		dr->daddr = rqstp->rq_daddr;
+ 		dr->argslen = rqstp->rq_arg.len >> 2;
+ 		dr->xprt_hlen = rqstp->rq_xprt_hlen;
++		dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
++		rqstp->rq_xprt_ctxt = NULL;
+ 
+ 		/* back up head to the start of the buffer and copy */
+ 		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
+@@ -1251,6 +1253,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
+ 	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
+ 	rqstp->rq_daddr       = dr->daddr;
+ 	rqstp->rq_respages    = rqstp->rq_pages;
++	rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
+ 	svc_xprt_received(rqstp->rq_xprt);
+ 	return (dr->argslen<<2) - dr->xprt_hlen;
+ }
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index cf76a6ad127b2..864131a9fc6e3 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -831,7 +831,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ 		goto out_err;
+ 	if (ret == 0)
+ 		goto out_drop;
+-	rqstp->rq_xprt_hlen = ret;
++	rqstp->rq_xprt_hlen = 0;
+ 
+ 	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
+ 		goto out_backchannel;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c01fbcc848e86..dc171ca0d1b12 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -519,7 +519,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 				   .len = IEEE80211_MAX_MESH_ID_LEN },
+ 	[NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
+ 
+-	[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
++	/* allow 3 for NUL-termination, we used to declare this NLA_STRING */
++	[NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
+ 	[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
+ 
+ 	[NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
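
An NLA_STRING policy with .len = 2 only caps the maximum length, so a one-byte attribute could reach code that reads both alpha2 characters; the binary range policy pins the payload to exactly two characters plus an optional trailing NUL from older userspace. An equivalent open-coded check, as a sketch:

        static bool alpha2_len_ok(const struct nlattr *attr)
        {
                int len = nla_len(attr);

                return len == 2 ||  /* "US"                              */
                       len == 3;    /* "US\0" from legacy string senders */
        }
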
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index b2fdac96bab07..4a6d864329106 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ 		/* this is a nontransmitting bss, we need to add it to
+ 		 * transmitting bss' list if it is not there
+ 		 */
++		spin_lock_bh(&rdev->bss_lock);
+ 		if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
+ 					       &res->pub)) {
+ 			if (__cfg80211_unlink_bss(rdev, res))
+ 				rdev->bss_generation++;
+ 		}
++		spin_unlock_bh(&rdev->bss_lock);
+ 	}
+ 
+ 	trace_cfg80211_return_bss(&res->pub);
+diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
+index 589454bce9301..8425da41de0da 100644
+--- a/scripts/gcc-plugins/latent_entropy_plugin.c
++++ b/scripts/gcc-plugins/latent_entropy_plugin.c
+@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
+ 	.help		= "disable\tturn off latent entropy instrumentation\n",
+ };
+ 
+-static unsigned HOST_WIDE_INT seed;
+-/*
+- * get_random_seed() (this is a GCC function) generates the seed.
+- * This is a simple random generator without any cryptographic security because
+- * the entropy doesn't come from here.
+- */
++static unsigned HOST_WIDE_INT deterministic_seed;
++static unsigned HOST_WIDE_INT rnd_buf[32];
++static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
++static int urandom_fd = -1;
++
+ static unsigned HOST_WIDE_INT get_random_const(void)
+ {
+-	unsigned int i;
+-	unsigned HOST_WIDE_INT ret = 0;
+-
+-	for (i = 0; i < 8 * sizeof(ret); i++) {
+-		ret = (ret << 1) | (seed & 1);
+-		seed >>= 1;
+-		if (ret & 1)
+-			seed ^= 0xD800000000000000ULL;
++	if (deterministic_seed) {
++		unsigned HOST_WIDE_INT w = deterministic_seed;
++		w ^= w << 13;
++		w ^= w >> 7;
++		w ^= w << 17;
++		deterministic_seed = w;
++		return deterministic_seed;
+ 	}
+ 
+-	return ret;
++	if (urandom_fd < 0) {
++		urandom_fd = open("/dev/urandom", O_RDONLY);
++		gcc_assert(urandom_fd >= 0);
++	}
++	if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
++		gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
++		rnd_idx = 0;
++	}
++	return rnd_buf[rnd_idx++];
+ }
+ 
+ static tree tree_get_random_const(tree type)
+@@ -537,8 +543,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
+ 	tree type, id;
+ 	int quals;
+ 
+-	seed = get_random_seed(false);
+-
+ 	if (in_lto_p)
+ 		return;
+ 
+@@ -573,6 +577,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
+ 	const struct plugin_argument * const argv = plugin_info->argv;
+ 	int i;
+ 
++	/*
++	 * Call get_random_seed() with noinit=true, so that this returns
++	 * 0 in the case where no seed has been passed via -frandom-seed.
++	 */
++	deterministic_seed = get_random_seed(true);
++
+ 	static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
+ 		{
+ 			.base = &latent_entropy_decl,
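
The rework splits seeding into two modes: if GCC was invoked with -frandom-seed, get_random_seed(true) returns it and every "random" constant comes from a deterministic xorshift64 stream (same seed, same kernel image, which reproducible builds require); with no seed, the plugin now reads real entropy from /dev/urandom in 32-word batches instead of cycling a fixed polynomial. The deterministic step in standalone form, using the same shift constants as the hunk above:

        #include <stdint.h>

        /* State must be non-zero; the plugin only takes this path when
         * a seed was actually supplied. */
        static uint64_t xorshift64(uint64_t *state)
        {
                uint64_t w = *state;

                w ^= w << 13;
                w ^= w >> 7;
                w ^= w << 17;
                return *state = w;
        }
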
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 31ba7024e3add..726a8353201f8 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -209,6 +209,12 @@ static void __snd_card_release(struct device *dev, void *data)
+  * snd_card_register(), the very first devres action to call snd_card_free()
+  * is added automatically.  In that way, the resource disconnection is assured
+  * at first, then released in the expected order.
++ *
++ * If an error happens at probe time before snd_card_register() is called
++ * and other devres resources have been acquired, you need to free the card
++ * manually via a snd_card_free() call in the error path; otherwise it may
++ * lead to a UAF due to the devres release order.  The
++ * snd_card_free_on_error() helper makes this handling easier.
+  */
+ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
+ 		      struct module *module, size_t extra_size,
+@@ -235,6 +241,28 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
+ }
+ EXPORT_SYMBOL_GPL(snd_devm_card_new);
+ 
++/**
++ * snd_card_free_on_error - a small helper for handling devm probe errors
++ * @dev: the managed device object
++ * @ret: the return code from the probe callback
++ *
++ * This function handles the explicit snd_card_free() call on an error from
++ * the probe callback.  It's just a small helper for simplifying the error
++ * handling with managed devices.
++ */
++int snd_card_free_on_error(struct device *dev, int ret)
++{
++	struct snd_card *card;
++
++	if (!ret)
++		return 0;
++	card = devres_find(dev, __snd_card_release, NULL, NULL);
++	if (card)
++		snd_card_free(card);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(snd_card_free_on_error);
++
+ static int snd_card_init(struct snd_card *card, struct device *parent,
+ 			 int idx, const char *xid, struct module *module,
+ 			 size_t extra_size)
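
This helper underpins nearly every sound/pci and sound/isa hunk that follows: each probe body is renamed with a __ prefix (or gains an error label calling snd_card_free() directly), and a thin wrapper funnels the result through snd_card_free_on_error() so a devres-managed card is released immediately, and in the right order, when probing fails after snd_devm_card_new(). The recurring shape, with a hypothetical driver name:

        static int __snd_foo_probe(struct pci_dev *pci,
                                   const struct pci_device_id *pci_id)
        {
                /* snd_devm_card_new(), chip init, snd_card_register() ... */
                return 0;
        }

        static int snd_foo_probe(struct pci_dev *pci,
                                 const struct pci_device_id *pci_id)
        {
                /* Frees the devres-managed card iff __snd_foo_probe()
                 * failed, then passes the return code through. */
                return snd_card_free_on_error(&pci->dev,
                                              __snd_foo_probe(pci, pci_id));
        }
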
+diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
+index 6fd763d4d15b1..15dc7160ba34e 100644
+--- a/sound/core/memalloc.c
++++ b/sound/core/memalloc.c
+@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
+ };
+ #endif /* CONFIG_X86 */
+ 
++#ifdef CONFIG_SND_DMA_SGBUF
++static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
++#endif
++
+ /*
+  * Non-contiguous pages allocator
+  */
+@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 
+ 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+ 				      DEFAULT_GFP, 0);
+-	if (!sgt)
++	if (!sgt) {
++#ifdef CONFIG_SND_DMA_SGBUF
++		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
++			dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
++		else
++			dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
++		return snd_dma_sg_fallback_alloc(dmab, size);
++#else
+ 		return NULL;
++#endif
++	}
++
+ 	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+ 					    sg_dma_address(sgt->sgl));
+ 	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+ 
+ 	if (!p)
+ 		return NULL;
++	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
++		return p;
+ 	for_each_sgtable_page(sgt, &iter, 0)
+ 		set_memory_wc(sg_wc_address(&iter), 1);
+ 	return p;
+@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+ 	.get_page = snd_dma_noncontig_get_page,
+ 	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
+ };
++
++/* Fallback SG-buffer allocations for x86 */
++struct snd_dma_sg_fallback {
++	size_t count;
++	struct page **pages;
++	dma_addr_t *addrs;
++};
++
++static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
++				       struct snd_dma_sg_fallback *sgbuf)
++{
++	size_t i;
++
++	if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
++		set_pages_array_wb(sgbuf->pages, sgbuf->count);
++	for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
++		dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
++				  page_address(sgbuf->pages[i]),
++				  sgbuf->addrs[i]);
++	kvfree(sgbuf->pages);
++	kvfree(sgbuf->addrs);
++	kfree(sgbuf);
++}
++
++static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
++{
++	struct snd_dma_sg_fallback *sgbuf;
++	struct page **pages;
++	size_t i, count;
++	void *p;
++
++	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
++	if (!sgbuf)
++		return NULL;
++	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
++	if (!pages)
++		goto error;
++	sgbuf->pages = pages;
++	sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
++	if (!sgbuf->addrs)
++		goto error;
++
++	for (i = 0; i < count; sgbuf->count++, i++) {
++		p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
++				       &sgbuf->addrs[i], DEFAULT_GFP);
++		if (!p)
++			goto error;
++		sgbuf->pages[i] = virt_to_page(p);
++	}
++
++	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
++		set_pages_array_wc(pages, count);
++	p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
++	if (!p)
++		goto error;
++	dmab->private_data = sgbuf;
++	return p;
++
++ error:
++	__snd_dma_sg_fallback_free(dmab, sgbuf);
++	return NULL;
++}
++
++static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
++{
++	vunmap(dmab->area);
++	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
++}
++
++static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
++				    struct vm_area_struct *area)
++{
++	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
++
++	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
++		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
++	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
++}
++
++static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
++	.alloc = snd_dma_sg_fallback_alloc,
++	.free = snd_dma_sg_fallback_free,
++	.mmap = snd_dma_sg_fallback_mmap,
++	/* reuse vmalloc helpers */
++	.get_addr = snd_dma_vmalloc_get_addr,
++	.get_page = snd_dma_vmalloc_get_page,
++	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
++};
+ #endif /* CONFIG_SND_DMA_SGBUF */
+ 
+ /*
+@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
+ #ifdef CONFIG_GENERIC_ALLOCATOR
+ 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+ #endif /* CONFIG_GENERIC_ALLOCATOR */
++#ifdef CONFIG_SND_DMA_SGBUF
++	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
++	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
++#endif
+ #endif /* CONFIG_HAS_DMA */
+ };
+ 
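
The new *_FALLBACK buffer types cover the case where dma_alloc_noncontiguous() fails (without an IOMMU it typically falls back to one physically contiguous allocation, which large buffers may not get): the buffer is then built one PAGE_SIZE chunk at a time and the chunks are vmap()ed into a single kernel mapping. The core loop reduced to a sketch; error unwinding (freeing partial allocations, as __snd_dma_sg_fallback_free() does) is trimmed:

        static void *sg_fallback_vmap(struct device *dev, size_t size,
                                      dma_addr_t *addrs, struct page **pages)
        {
                size_t i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                for (i = 0; i < count; i++) {
                        void *p = dma_alloc_coherent(dev, PAGE_SIZE,
                                                     &addrs[i], GFP_KERNEL);
                        if (!p)
                                return NULL;    /* real code unwinds here */
                        pages[i] = virt_to_page(p);
                }
                return vmap(pages, count, VM_MAP, PAGE_KERNEL);
        }
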
+diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
+index 4866aed97aacc..5588b6a1ee8bd 100644
+--- a/sound/core/pcm_misc.c
++++ b/sound/core/pcm_misc.c
+@@ -433,7 +433,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
+ 		return 0;
+ 	width = pcm_formats[(INT)format].phys; /* physical width */
+ 	pat = pcm_formats[(INT)format].silence;
+-	if (! width)
++	if (!width || !pat)
+ 		return -EINVAL;
+ 	/* signed or 1 byte data */
+ 	if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
+diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
+index 11235baaf6fa5..f212f233ea618 100644
+--- a/sound/drivers/mtpav.c
++++ b/sound/drivers/mtpav.c
+@@ -693,8 +693,6 @@ static int snd_mtpav_probe(struct platform_device *dev)
+ 	mtp_card->outmidihwport = 0xffffffff;
+ 	timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
+ 
+-	card->private_free = snd_mtpav_free;
+-
+ 	err = snd_mtpav_get_RAWMIDI(mtp_card);
+ 	if (err < 0)
+ 		return err;
+@@ -716,6 +714,8 @@ static int snd_mtpav_probe(struct platform_device *dev)
+ 	if (err < 0)
+ 		return err;
+ 
++	card->private_free = snd_mtpav_free;
++
+ 	platform_set_drvdata(dev, card);
+ 	printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port);
+ 	return 0;
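
Deferring the private_free assignment until every failure point has passed means an error-path release of the card no longer invokes snd_mtpav_free() on state the probe never finished setting up (nm256 further down gets the same treatment for snd_nm256_free()). The rule in miniature, with hypothetical names:

        err = snd_foo_setup(card);          /* any of the fallible steps  */
        if (err < 0)
                return err;                 /* card released, destructor
                                             * not yet installed: no UAF  */
        card->private_free = snd_foo_free;  /* install only once complete */
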
+diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
+index ea001c80149dd..3164eb8510fa4 100644
+--- a/sound/isa/galaxy/galaxy.c
++++ b/sound/isa/galaxy/galaxy.c
+@@ -478,7 +478,7 @@ static void snd_galaxy_free(struct snd_card *card)
+ 		galaxy_set_config(galaxy, galaxy->config);
+ }
+ 
+-static int snd_galaxy_probe(struct device *dev, unsigned int n)
++static int __snd_galaxy_probe(struct device *dev, unsigned int n)
+ {
+ 	struct snd_galaxy *galaxy;
+ 	struct snd_wss *chip;
+@@ -598,6 +598,11 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
+ 	return 0;
+ }
+ 
++static int snd_galaxy_probe(struct device *dev, unsigned int n)
++{
++	return snd_card_free_on_error(dev, __snd_galaxy_probe(dev, n));
++}
++
+ static struct isa_driver snd_galaxy_driver = {
+ 	.match		= snd_galaxy_match,
+ 	.probe		= snd_galaxy_probe,
+diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
+index 26ab7ff807684..60398fced046b 100644
+--- a/sound/isa/sc6000.c
++++ b/sound/isa/sc6000.c
+@@ -537,7 +537,7 @@ static void snd_sc6000_free(struct snd_card *card)
+ 		sc6000_setup_board(vport, 0);
+ }
+ 
+-static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
++static int __snd_sc6000_probe(struct device *devptr, unsigned int dev)
+ {
+ 	static const int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
+ 	static const int possible_dmas[] = { 1, 3, 0, -1 };
+@@ -662,6 +662,11 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+ 	return 0;
+ }
+ 
++static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
++{
++	return snd_card_free_on_error(devptr, __snd_sc6000_probe(devptr, dev));
++}
++
+ static struct isa_driver snd_sc6000_driver = {
+ 	.match		= snd_sc6000_match,
+ 	.probe		= snd_sc6000_probe,
+diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
+index bba4dae8dcc70..50e30704bf6f9 100644
+--- a/sound/pci/ad1889.c
++++ b/sound/pci/ad1889.c
+@@ -844,8 +844,8 @@ snd_ad1889_create(struct snd_card *card, struct pci_dev *pci)
+ }
+ 
+ static int
+-snd_ad1889_probe(struct pci_dev *pci,
+-		 const struct pci_device_id *pci_id)
++__snd_ad1889_probe(struct pci_dev *pci,
++		   const struct pci_device_id *pci_id)
+ {
+ 	int err;
+ 	static int devno;
+@@ -904,6 +904,12 @@ snd_ad1889_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_ad1889_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_ad1889_probe(pci, pci_id));
++}
++
+ static const struct pci_device_id snd_ad1889_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
+ 	{ 0, },
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index 92eb59db106de..2378a39abaebe 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -2124,8 +2124,8 @@ static int snd_ali_create(struct snd_card *card,
+ 	return 0;
+ }
+ 
+-static int snd_ali_probe(struct pci_dev *pci,
+-			 const struct pci_device_id *pci_id)
++static int __snd_ali_probe(struct pci_dev *pci,
++			   const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct snd_ali *codec;
+@@ -2170,6 +2170,12 @@ static int snd_ali_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_ali_probe(struct pci_dev *pci,
++			 const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_ali_probe(pci, pci_id));
++}
++
+ static struct pci_driver ali5451_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_ali_ids,
+diff --git a/sound/pci/als300.c b/sound/pci/als300.c
+index b86565dcdbe41..c70aff0601205 100644
+--- a/sound/pci/als300.c
++++ b/sound/pci/als300.c
+@@ -708,7 +708,7 @@ static int snd_als300_probe(struct pci_dev *pci,
+ 
+ 	err = snd_als300_create(card, pci, chip_type);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	strcpy(card->driver, "ALS300");
+ 	if (chip->chip_type == DEVICE_ALS300_PLUS)
+@@ -723,11 +723,15 @@ static int snd_als300_probe(struct pci_dev *pci,
+ 
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver als300_driver = {
+diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
+index 535eccd124bee..f33aeb692a112 100644
+--- a/sound/pci/als4000.c
++++ b/sound/pci/als4000.c
+@@ -806,8 +806,8 @@ static void snd_card_als4000_free( struct snd_card *card )
+ 	snd_als4000_free_gameport(acard);
+ }
+ 
+-static int snd_card_als4000_probe(struct pci_dev *pci,
+-				  const struct pci_device_id *pci_id)
++static int __snd_card_als4000_probe(struct pci_dev *pci,
++				    const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -930,6 +930,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_card_als4000_probe(struct pci_dev *pci,
++				  const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_card_als4000_probe(pci, pci_id));
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int snd_als4000_suspend(struct device *dev)
+ {
+diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
+index b8e035d5930d2..43d01f1847ed7 100644
+--- a/sound/pci/atiixp.c
++++ b/sound/pci/atiixp.c
+@@ -1572,8 +1572,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
+ }
+ 
+ 
+-static int snd_atiixp_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_atiixp_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct atiixp *chip;
+@@ -1623,6 +1623,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_atiixp_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
++}
++
+ static struct pci_driver atiixp_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_atiixp_ids,
+diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
+index 178dce8ef1e99..8864c4c3c7e13 100644
+--- a/sound/pci/atiixp_modem.c
++++ b/sound/pci/atiixp_modem.c
+@@ -1201,8 +1201,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
+ }
+ 
+ 
+-static int snd_atiixp_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_atiixp_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct atiixp_modem *chip;
+@@ -1247,6 +1247,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_atiixp_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
++}
++
+ static struct pci_driver atiixp_modem_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_atiixp_ids,
+diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
+index 342ef2a6655e3..eb234153691bc 100644
+--- a/sound/pci/au88x0/au88x0.c
++++ b/sound/pci/au88x0/au88x0.c
+@@ -193,7 +193,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci)
+ 
+ // constructor -- see "Constructor" sub-section
+ static int
+-snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++__snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -310,6 +310,12 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 	return 0;
+ }
+ 
++static int
++snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_vortex_probe(pci, pci_id));
++}
++
+ // pci_driver definition
+ static struct pci_driver vortex_driver = {
+ 	.name = KBUILD_MODNAME,
+diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
+index d56f126d6fdd9..29a4bcdec237a 100644
+--- a/sound/pci/aw2/aw2-alsa.c
++++ b/sound/pci/aw2/aw2-alsa.c
+@@ -275,7 +275,7 @@ static int snd_aw2_probe(struct pci_dev *pci,
+ 	/* (3) Create main component */
+ 	err = snd_aw2_create(card, pci);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	/* initialize mutex */
+ 	mutex_init(&chip->mtx);
+@@ -294,13 +294,17 @@ static int snd_aw2_probe(struct pci_dev *pci,
+ 	/* (6) Register card instance */
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	/* (7) Set PCI driver data */
+ 	pci_set_drvdata(pci, card);
+ 
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ /* open callback */
+diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
+index 089050470ff27..7f329dfc5404a 100644
+--- a/sound/pci/azt3328.c
++++ b/sound/pci/azt3328.c
+@@ -2427,7 +2427,7 @@ snd_azf3328_create(struct snd_card *card,
+ }
+ 
+ static int
+-snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++__snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2520,6 +2520,12 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 	return 0;
+ }
+ 
++static int
++snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_azf3328_probe(pci, pci_id));
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static inline void
+ snd_azf3328_suspend_regs(const struct snd_azf3328 *chip,
+diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
+index d23f931638410..621985bfee5d7 100644
+--- a/sound/pci/bt87x.c
++++ b/sound/pci/bt87x.c
+@@ -805,8 +805,8 @@ static int snd_bt87x_detect_card(struct pci_dev *pci)
+ 	return SND_BT87X_BOARD_UNKNOWN;
+ }
+ 
+-static int snd_bt87x_probe(struct pci_dev *pci,
+-			   const struct pci_device_id *pci_id)
++static int __snd_bt87x_probe(struct pci_dev *pci,
++			     const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -889,6 +889,12 @@ static int snd_bt87x_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_bt87x_probe(struct pci_dev *pci,
++			   const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
++}
++
+ /* default entries for all Bt87x cards - it's not exported */
+ /* driver_data is set to 0 to call detection */
+ static const struct pci_device_id snd_bt87x_default_ids[] = {
+diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
+index 36fb150b72fb5..f4cc112bddf3e 100644
+--- a/sound/pci/ca0106/ca0106_main.c
++++ b/sound/pci/ca0106/ca0106_main.c
+@@ -1725,8 +1725,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
+ }
+ 
+ 
+-static int snd_ca0106_probe(struct pci_dev *pci,
+-					const struct pci_device_id *pci_id)
++static int __snd_ca0106_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1786,6 +1786,12 @@ static int snd_ca0106_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_ca0106_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_ca0106_probe(pci, pci_id));
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int snd_ca0106_suspend(struct device *dev)
+ {
+diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
+index dab801d9d3b48..727db6d433916 100644
+--- a/sound/pci/cmipci.c
++++ b/sound/pci/cmipci.c
+@@ -3247,15 +3247,19 @@ static int snd_cmipci_probe(struct pci_dev *pci,
+ 
+ 	err = snd_cmipci_create(card, pci, dev);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
+index e7367402b84a3..0c9cadf7b3b80 100644
+--- a/sound/pci/cs4281.c
++++ b/sound/pci/cs4281.c
+@@ -1827,8 +1827,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
+ 	spin_unlock_irqrestore(&opl3->reg_lock, flags);
+ }
+ 
+-static int snd_cs4281_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_cs4281_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1888,6 +1888,12 @@ static int snd_cs4281_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_cs4281_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_cs4281_probe(pci, pci_id));
++}
++
+ /*
+  * Power Management
+  */
+diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
+index 499fa0148f9a4..440b8f9b40c96 100644
+--- a/sound/pci/cs5535audio/cs5535audio.c
++++ b/sound/pci/cs5535audio/cs5535audio.c
+@@ -281,8 +281,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
+ 	return 0;
+ }
+ 
+-static int snd_cs5535audio_probe(struct pci_dev *pci,
+-				 const struct pci_device_id *pci_id)
++static int __snd_cs5535audio_probe(struct pci_dev *pci,
++				   const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -331,6 +331,12 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_cs5535audio_probe(struct pci_dev *pci,
++				 const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_cs5535audio_probe(pci, pci_id));
++}
++
+ static struct pci_driver cs5535audio_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_cs5535audio_ids,
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index 25b012ef5c3e6..c70c3ac4e99a5 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -1970,8 +1970,8 @@ static int snd_echo_create(struct snd_card *card,
+ }
+ 
+ /* constructor */
+-static int snd_echo_probe(struct pci_dev *pci,
+-			  const struct pci_device_id *pci_id)
++static int __snd_echo_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2139,6 +2139,11 @@ static int snd_echo_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_echo_probe(struct pci_dev *pci,
++			  const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_echo_probe(pci, pci_id));
++}
+ 
+ 
+ #if defined(CONFIG_PM_SLEEP)
+diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
+index c49c44dc10820..89043392f3ec7 100644
+--- a/sound/pci/emu10k1/emu10k1x.c
++++ b/sound/pci/emu10k1/emu10k1x.c
+@@ -1491,8 +1491,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
+ 	return 0;
+ }
+ 
+-static int snd_emu10k1x_probe(struct pci_dev *pci,
+-			      const struct pci_device_id *pci_id)
++static int __snd_emu10k1x_probe(struct pci_dev *pci,
++				const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1554,6 +1554,12 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_emu10k1x_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_emu10k1x_probe(pci, pci_id));
++}
++
+ // PCI IDs
+ static const struct pci_device_id snd_emu10k1x_ids[] = {
+ 	{ PCI_VDEVICE(CREATIVE, 0x0006), 0 },	/* Dell OEM version (EMU10K1) */
+diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
+index 2651f0c64c062..94efe347a97a9 100644
+--- a/sound/pci/ens1370.c
++++ b/sound/pci/ens1370.c
+@@ -2304,8 +2304,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int snd_audiopci_probe(struct pci_dev *pci,
+-			      const struct pci_device_id *pci_id)
++static int __snd_audiopci_probe(struct pci_dev *pci,
++				const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2369,6 +2369,12 @@ static int snd_audiopci_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_audiopci_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
++}
++
+ static struct pci_driver ens137x_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_audiopci_ids,
+diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
+index 00b976f42a3db..e34ec6f89e7e0 100644
+--- a/sound/pci/es1938.c
++++ b/sound/pci/es1938.c
+@@ -1716,8 +1716,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
+ }
+        
+ 
+-static int snd_es1938_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_es1938_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1796,6 +1796,12 @@ static int snd_es1938_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_es1938_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_es1938_probe(pci, pci_id));
++}
++
+ static struct pci_driver es1938_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_es1938_ids,
+diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
+index 6a8a02a9ecf41..4a7e20bb11bca 100644
+--- a/sound/pci/es1968.c
++++ b/sound/pci/es1968.c
+@@ -2741,8 +2741,8 @@ static int snd_es1968_create(struct snd_card *card,
+ 
+ /*
+  */
+-static int snd_es1968_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_es1968_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2848,6 +2848,12 @@ static int snd_es1968_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_es1968_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_es1968_probe(pci, pci_id));
++}
++
+ static struct pci_driver es1968_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_es1968_ids,
+diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
+index 9c22ff19e56d2..62b3cb126c6d0 100644
+--- a/sound/pci/fm801.c
++++ b/sound/pci/fm801.c
+@@ -1268,8 +1268,8 @@ static int snd_fm801_create(struct snd_card *card,
+ 	return 0;
+ }
+ 
+-static int snd_card_fm801_probe(struct pci_dev *pci,
+-				const struct pci_device_id *pci_id)
++static int __snd_card_fm801_probe(struct pci_dev *pci,
++				  const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1333,6 +1333,12 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_card_fm801_probe(struct pci_dev *pci,
++				const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static const unsigned char saved_regs[] = {
+ 	FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 16e90524a4977..ca40c2bd8ba62 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2619,6 +2619,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -9217,6 +9218,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
+index f6275868877a7..6fab2ad85bbec 100644
+--- a/sound/pci/ice1712/ice1724.c
++++ b/sound/pci/ice1712/ice1724.c
+@@ -2519,8 +2519,8 @@ static int snd_vt1724_create(struct snd_card *card,
+  *
+  */
+ 
+-static int snd_vt1724_probe(struct pci_dev *pci,
+-			    const struct pci_device_id *pci_id)
++static int __snd_vt1724_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2662,6 +2662,12 @@ static int snd_vt1724_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_vt1724_probe(struct pci_dev *pci,
++			    const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_vt1724_probe(pci, pci_id));
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int snd_vt1724_suspend(struct device *dev)
+ {
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index a51032b3ac4d8..ae285c0a629c8 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -3109,8 +3109,8 @@ static int check_default_spdif_aclink(struct pci_dev *pci)
+ 	return 0;
+ }
+ 
+-static int snd_intel8x0_probe(struct pci_dev *pci,
+-			      const struct pci_device_id *pci_id)
++static int __snd_intel8x0_probe(struct pci_dev *pci,
++				const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct intel8x0 *chip;
+@@ -3189,6 +3189,12 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_intel8x0_probe(struct pci_dev *pci,
++			      const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
++}
++
+ static struct pci_driver intel8x0_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_intel8x0_ids,
+diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
+index 7de3cb2f17b52..2845cc006d0cf 100644
+--- a/sound/pci/intel8x0m.c
++++ b/sound/pci/intel8x0m.c
+@@ -1178,8 +1178,8 @@ static struct shortname_table {
+ 	{ 0 },
+ };
+ 
+-static int snd_intel8x0m_probe(struct pci_dev *pci,
+-			       const struct pci_device_id *pci_id)
++static int __snd_intel8x0m_probe(struct pci_dev *pci,
++				 const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct intel8x0m *chip;
+@@ -1225,6 +1225,12 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_intel8x0m_probe(struct pci_dev *pci,
++			       const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_intel8x0m_probe(pci, pci_id));
++}
++
+ static struct pci_driver intel8x0m_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_intel8x0m_ids,
+diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
+index 5c9e240ff6a9c..33b4f95d65b3f 100644
+--- a/sound/pci/korg1212/korg1212.c
++++ b/sound/pci/korg1212/korg1212.c
+@@ -2355,7 +2355,7 @@ snd_korg1212_probe(struct pci_dev *pci,
+ 
+ 	err = snd_korg1212_create(card, pci);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	strcpy(card->driver, "korg1212");
+ 	strcpy(card->shortname, "korg1212");
+@@ -2366,10 +2366,14 @@ snd_korg1212_probe(struct pci_dev *pci,
+ 
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver korg1212_driver = {
+diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
+index 5269a1d396a5b..1aa30e90b86a7 100644
+--- a/sound/pci/lola/lola.c
++++ b/sound/pci/lola/lola.c
+@@ -637,8 +637,8 @@ static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev)
+ 	return 0;
+ }
+ 
+-static int lola_probe(struct pci_dev *pci,
+-		      const struct pci_device_id *pci_id)
++static int __lola_probe(struct pci_dev *pci,
++			const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -687,6 +687,12 @@ static int lola_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int lola_probe(struct pci_dev *pci,
++		      const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __lola_probe(pci, pci_id));
++}
++
+ /* PCI IDs */
+ static const struct pci_device_id lola_ids[] = {
+ 	{ PCI_VDEVICE(DIGIGRAM, 0x0001) },
+diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
+index 168a1084f7303..bd9b6148dd6fb 100644
+--- a/sound/pci/lx6464es/lx6464es.c
++++ b/sound/pci/lx6464es/lx6464es.c
+@@ -1019,7 +1019,7 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
+ 	err = snd_lx6464es_create(card, pci);
+ 	if (err < 0) {
+ 		dev_err(card->dev, "error during snd_lx6464es_create\n");
+-		return err;
++		goto error;
+ 	}
+ 
+ 	strcpy(card->driver, "LX6464ES");
+@@ -1036,12 +1036,16 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
+ 
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	dev_dbg(chip->card->dev, "initialization successful\n");
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver lx6464es_driver = {
+diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
+index 056838ead21d6..261850775c807 100644
+--- a/sound/pci/maestro3.c
++++ b/sound/pci/maestro3.c
+@@ -2637,7 +2637,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
+ /*
+  */
+ static int
+-snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++__snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2702,6 +2702,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 	return 0;
+ }
+ 
++static int
++snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_m3_probe(pci, pci_id));
++}
++
+ static struct pci_driver m3_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_m3_ids,
+diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
+index c9c178504959e..f99a1e96e9231 100644
+--- a/sound/pci/nm256/nm256.c
++++ b/sound/pci/nm256/nm256.c
+@@ -1573,7 +1573,6 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci)
+ 	chip->coeffs_current = 0;
+ 
+ 	snd_nm256_init_chip(chip);
+-	card->private_free = snd_nm256_free;
+ 
+ 	// pci_set_master(pci); /* needed? */
+ 	return 0;
+@@ -1680,6 +1679,7 @@ static int snd_nm256_probe(struct pci_dev *pci,
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+ 		return err;
++	card->private_free = snd_nm256_free;
+ 
+ 	pci_set_drvdata(pci, card);
+ 	return 0;
+diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
+index 4fb3f2484fdba..92ffe9dc20c55 100644
+--- a/sound/pci/oxygen/oxygen_lib.c
++++ b/sound/pci/oxygen/oxygen_lib.c
+@@ -576,7 +576,7 @@ static void oxygen_card_free(struct snd_card *card)
+ 	mutex_destroy(&chip->mutex);
+ }
+ 
+-int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
++static int __oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+ 		     struct module *owner,
+ 		     const struct pci_device_id *ids,
+ 		     int (*get_model)(struct oxygen *chip,
+@@ -701,6 +701,16 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+ 	pci_set_drvdata(pci, card);
+ 	return 0;
+ }
++
++int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
++		     struct module *owner,
++		     const struct pci_device_id *ids,
++		     int (*get_model)(struct oxygen *chip,
++				      const struct pci_device_id *id))
++{
++	return snd_card_free_on_error(&pci->dev,
++				      __oxygen_pci_probe(pci, index, id, owner, ids, get_model));
++}
+ EXPORT_SYMBOL(oxygen_pci_probe);
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
+index 5a987c683c41c..b37c877c2c160 100644
+--- a/sound/pci/riptide/riptide.c
++++ b/sound/pci/riptide/riptide.c
+@@ -2023,7 +2023,7 @@ static void snd_riptide_joystick_remove(struct pci_dev *pci)
+ #endif
+ 
+ static int
+-snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++__snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -2124,6 +2124,12 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 	return 0;
+ }
+ 
++static int
++snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_card_riptide_probe(pci, pci_id));
++}
++
+ static struct pci_driver driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_riptide_ids,
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index 5b6bd9f0b2f77..9c0ac025e1432 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1875,7 +1875,7 @@ static void snd_rme32_card_free(struct snd_card *card)
+ }
+ 
+ static int
+-snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++__snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct rme32 *rme32;
+@@ -1927,6 +1927,12 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+ 	return 0;
+ }
+ 
++static int
++snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_rme32_probe(pci, pci_id));
++}
++
+ static struct pci_driver rme32_driver = {
+ 	.name =		KBUILD_MODNAME,
+ 	.id_table =	snd_rme32_ids,
+diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
+index 8fc8115049203..bccb7e0d3d116 100644
+--- a/sound/pci/rme96.c
++++ b/sound/pci/rme96.c
+@@ -2430,8 +2430,8 @@ static void snd_rme96_card_free(struct snd_card *card)
+ }
+ 
+ static int
+-snd_rme96_probe(struct pci_dev *pci,
+-		const struct pci_device_id *pci_id)
++__snd_rme96_probe(struct pci_dev *pci,
++		  const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct rme96 *rme96;
+@@ -2498,6 +2498,12 @@ snd_rme96_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_rme96_probe(struct pci_dev *pci,
++			   const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_rme96_probe(pci, pci_id));
++}
++
+ static struct pci_driver rme96_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_rme96_ids,
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index 96c12dfb24cf9..3db641318d3ae 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -5444,17 +5444,21 @@ static int snd_hdsp_probe(struct pci_dev *pci,
+ 	hdsp->pci = pci;
+ 	err = snd_hdsp_create(card, hdsp);
+ 	if (err)
+-		return err;
++		goto error;
+ 
+ 	strcpy(card->shortname, "Hammerfall DSP");
+ 	sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
+ 		hdsp->port, hdsp->irq);
+ 	err = snd_card_register(card);
+ 	if (err)
+-		return err;
++		goto error;
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver hdsp_driver = {
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index ff06ee82607cf..fa1812e7a49dc 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6895,7 +6895,7 @@ static int snd_hdspm_probe(struct pci_dev *pci,
+ 
+ 	err = snd_hdspm_create(card, hdspm);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	if (hdspm->io_type != MADIface) {
+ 		snprintf(card->shortname, sizeof(card->shortname), "%s_%x",
+@@ -6914,12 +6914,16 @@ static int snd_hdspm_probe(struct pci_dev *pci,
+ 
+ 	err = snd_card_register(card);
+ 	if (err < 0)
+-		return err;
++		goto error;
+ 
+ 	pci_set_drvdata(pci, card);
+ 
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver hdspm_driver = {
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index 7755e19aa7761..1d614fe89a6ae 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -2572,7 +2572,7 @@ static int snd_rme9652_probe(struct pci_dev *pci,
+ 	rme9652->pci = pci;
+ 	err = snd_rme9652_create(card, rme9652, precise_ptr[dev]);
+ 	if (err)
+-		return err;
++		goto error;
+ 
+ 	strcpy(card->shortname, rme9652->card_name);
+ 
+@@ -2580,10 +2580,14 @@ static int snd_rme9652_probe(struct pci_dev *pci,
+ 		card->shortname, rme9652->port, rme9652->irq);
+ 	err = snd_card_register(card);
+ 	if (err)
+-		return err;
++		goto error;
+ 	pci_set_drvdata(pci, card);
+ 	dev++;
+ 	return 0;
++
++ error:
++	snd_card_free(card);
++	return err;
+ }
+ 
+ static struct pci_driver rme9652_driver = {
+diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
+index 0b722b0e0604b..fabe393607f8f 100644
+--- a/sound/pci/sis7019.c
++++ b/sound/pci/sis7019.c
+@@ -1331,8 +1331,8 @@ static int sis_chip_create(struct snd_card *card,
+ 	return 0;
+ }
+ 
+-static int snd_sis7019_probe(struct pci_dev *pci,
+-			     const struct pci_device_id *pci_id)
++static int __snd_sis7019_probe(struct pci_dev *pci,
++			       const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct sis7019 *sis;
+@@ -1352,8 +1352,8 @@ static int snd_sis7019_probe(struct pci_dev *pci,
+ 	if (!codecs)
+ 		codecs = SIS_PRIMARY_CODEC_PRESENT;
+ 
+-	rc = snd_card_new(&pci->dev, index, id, THIS_MODULE,
+-			  sizeof(*sis), &card);
++	rc = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
++			       sizeof(*sis), &card);
+ 	if (rc < 0)
+ 		return rc;
+ 
+@@ -1386,6 +1386,12 @@ static int snd_sis7019_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_sis7019_probe(struct pci_dev *pci,
++			     const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_sis7019_probe(pci, pci_id));
++}
++
+ static struct pci_driver sis7019_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_sis7019_ids,
+diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
+index c8c49881008fd..f91cbf6eeca0f 100644
+--- a/sound/pci/sonicvibes.c
++++ b/sound/pci/sonicvibes.c
+@@ -1387,8 +1387,8 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
+ 	return 0;
+ }
+ 
+-static int snd_sonic_probe(struct pci_dev *pci,
+-			   const struct pci_device_id *pci_id)
++static int __snd_sonic_probe(struct pci_dev *pci,
++			     const struct pci_device_id *pci_id)
+ {
+ 	static int dev;
+ 	struct snd_card *card;
+@@ -1459,6 +1459,12 @@ static int snd_sonic_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_sonic_probe(struct pci_dev *pci,
++			   const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_sonic_probe(pci, pci_id));
++}
++
+ static struct pci_driver sonicvibes_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_sonic_ids,
+diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
+index 65514f7e42d7d..361b83fd721e6 100644
+--- a/sound/pci/via82xx.c
++++ b/sound/pci/via82xx.c
+@@ -2458,8 +2458,8 @@ static int check_dxs_list(struct pci_dev *pci, int revision)
+ 	return VIA_DXS_48K;
+ };
+ 
+-static int snd_via82xx_probe(struct pci_dev *pci,
+-			     const struct pci_device_id *pci_id)
++static int __snd_via82xx_probe(struct pci_dev *pci,
++			       const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct via82xx *chip;
+@@ -2569,6 +2569,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_via82xx_probe(struct pci_dev *pci,
++			     const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
++}
++
+ static struct pci_driver via82xx_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_via82xx_ids,
+diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
+index 234f7fbed2364..ca7f024bf8ec6 100644
+--- a/sound/pci/via82xx_modem.c
++++ b/sound/pci/via82xx_modem.c
+@@ -1103,8 +1103,8 @@ static int snd_via82xx_create(struct snd_card *card,
+ }
+ 
+ 
+-static int snd_via82xx_probe(struct pci_dev *pci,
+-			     const struct pci_device_id *pci_id)
++static int __snd_via82xx_probe(struct pci_dev *pci,
++			       const struct pci_device_id *pci_id)
+ {
+ 	struct snd_card *card;
+ 	struct via82xx_modem *chip;
+@@ -1157,6 +1157,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
+ 	return 0;
+ }
+ 
++static int snd_via82xx_probe(struct pci_dev *pci,
++			     const struct pci_device_id *pci_id)
++{
++	return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
++}
++
+ static struct pci_driver via82xx_modem_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_via82xx_modem_ids,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index cec6e91afea24..6d699065e81a2 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -669,9 +669,9 @@ static const struct snd_pcm_hardware snd_usb_hardware =
+ 				SNDRV_PCM_INFO_PAUSE,
+ 	.channels_min =		1,
+ 	.channels_max =		256,
+-	.buffer_bytes_max =	1024 * 1024,
++	.buffer_bytes_max =	INT_MAX, /* limited by BUFFER_TIME later */
+ 	.period_bytes_min =	64,
+-	.period_bytes_max =	512 * 1024,
++	.period_bytes_max =	INT_MAX, /* limited by PERIOD_TIME later */
+ 	.periods_min =		2,
+ 	.periods_max =		1024,
+ };
+@@ -1064,6 +1064,18 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
+ 			return err;
+ 	}
+ 
++	/* set max period and buffer sizes for 1 and 2 seconds, respectively */
++	err = snd_pcm_hw_constraint_minmax(runtime,
++					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
++					   0, 1000000);
++	if (err < 0)
++		return err;
++	err = snd_pcm_hw_constraint_minmax(runtime,
++					   SNDRV_PCM_HW_PARAM_BUFFER_TIME,
++					   0, 2000000);
++	if (err < 0)
++		return err;
++
+ 	/* additional hw constraints for implicit fb */
+ 	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+ 				  hw_rule_format_implicit_fb, subs,
+diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
+index 4a3ff6468aa75..fa664cf03c326 100644
+--- a/sound/x86/intel_hdmi_audio.c
++++ b/sound/x86/intel_hdmi_audio.c
+@@ -1665,7 +1665,7 @@ static void hdmi_lpe_audio_free(struct snd_card *card)
+  * This function is called when the i915 driver creates the
+  * hdmi-lpe-audio platform device.
+  */
+-static int hdmi_lpe_audio_probe(struct platform_device *pdev)
++static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
+ {
+ 	struct snd_card *card;
+ 	struct snd_intelhad_card *card_ctx;
+@@ -1828,6 +1828,11 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int hdmi_lpe_audio_probe(struct platform_device *pdev)
++{
++	return snd_card_free_on_error(&pdev->dev, __hdmi_lpe_audio_probe(pdev));
++}
++
+ static const struct dev_pm_ops hdmi_lpe_audio_pm = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
+ };
+diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
+index a4a39c3e0f196..0c2610cde6ea2 100644
+--- a/tools/arch/x86/include/asm/msr-index.h
++++ b/tools/arch/x86/include/asm/msr-index.h
+@@ -128,9 +128,9 @@
+ #define TSX_CTRL_RTM_DISABLE		BIT(0)	/* Disable RTM feature */
+ #define TSX_CTRL_CPUID_CLEAR		BIT(1)	/* Disable TSX enumeration */
+ 
+-/* SRBDS support */
+ #define MSR_IA32_MCU_OPT_CTRL		0x00000123
+-#define RNGDS_MITG_DIS			BIT(0)
++#define RNGDS_MITG_DIS			BIT(0)	/* SRBDS support */
++#define RTM_ALLOW			BIT(1)	/* TSX development mode */
+ 
+ #define MSR_IA32_SYSENTER_CS		0x00000174
+ #define MSR_IA32_SYSENTER_ESP		0x00000175
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 24997925ae00d..dd84fed698a3b 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1523,7 +1523,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ 	bool use_uncore_alias;
+ 	LIST_HEAD(config_terms);
+ 
+-	if (verbose > 1) {
++	pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
++
++	if (verbose > 1 && !(pmu && pmu->selectable)) {
+ 		fprintf(stderr, "Attempting to add event pmu '%s' with '",
+ 			name);
+ 		if (head_config) {
+@@ -1536,7 +1538,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ 		fprintf(stderr, "' that may result in non-fatal errors\n");
+ 	}
+ 
+-	pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
+ 	if (!pmu) {
+ 		char *err_str;
+ 
+diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
+index dc284c6bdbc37..eca5c622efd25 100644
+--- a/tools/testing/selftests/kvm/include/riscv/processor.h
++++ b/tools/testing/selftests/kvm/include/riscv/processor.h
+@@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
+ #define PGTBL_PTE_WRITE_SHIFT			2
+ #define PGTBL_PTE_READ_MASK			0x0000000000000002ULL
+ #define PGTBL_PTE_READ_SHIFT			1
+-#define PGTBL_PTE_PERM_MASK			(PGTBL_PTE_EXECUTE_MASK | \
++#define PGTBL_PTE_PERM_MASK			(PGTBL_PTE_ACCESSED_MASK | \
++						 PGTBL_PTE_DIRTY_MASK | \
++						 PGTBL_PTE_EXECUTE_MASK | \
+ 						 PGTBL_PTE_WRITE_MASK | \
+ 						 PGTBL_PTE_READ_MASK)
+ #define PGTBL_PTE_VALID_MASK			0x0000000000000001ULL
+diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
+index d377f2603d98a..3961487a4870d 100644
+--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
++++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
+@@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+ 		core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
+ }
+ 
+-static void guest_hang(void)
++static void __aligned(16) guest_hang(void)
+ {
+ 	while (1)
+ 		;
+diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
+index b019e0b8221c7..84fda3b490735 100644
+--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
++++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
+@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
+ 	if (in_shutdown++)
+ 		return;
+ 
++	/* Free the cpu_set allocated using CPU_ALLOC in main function */
++	CPU_FREE(cpu_set);
++
+ 	for (i = 0; i < num_cpus_to_pin; i++)
+ 		if (cpu_threads[i]) {
+ 			pthread_kill(cpu_threads[i], SIGUSR1);
+@@ -551,6 +554,12 @@ int main(int argc, char *argv[])
+ 		perror("sysconf(_SC_NPROCESSORS_ONLN)");
+ 		exit(1);
+ 	}
++
++	if (getuid() != 0)
++		ksft_exit_skip("Not running as root, but almost all tests "
++			"require root in order to modify\nsystem settings.  "
++			"Exiting.\n");
++
+ 	cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
+ 	cpu_set = CPU_ALLOC(cpus_online);
+ 	if (cpu_set == NULL) {
+@@ -589,7 +598,7 @@ int main(int argc, char *argv[])
+ 						cpu_set)) {
+ 					fprintf(stderr, "Any given CPU may "
+ 						"only be given once.\n");
+-					exit(1);
++					goto err_code;
+ 				} else
+ 					CPU_SET_S(cpus_to_pin[cpu],
+ 						  cpu_set_size, cpu_set);
+@@ -607,7 +616,7 @@ int main(int argc, char *argv[])
+ 				queue_path = malloc(strlen(option) + 2);
+ 				if (!queue_path) {
+ 					perror("malloc()");
+-					exit(1);
++					goto err_code;
+ 				}
+ 				queue_path[0] = '/';
+ 				queue_path[1] = 0;
+@@ -622,17 +631,12 @@ int main(int argc, char *argv[])
+ 		fprintf(stderr, "Must pass at least one CPU to continuous "
+ 			"mode.\n");
+ 		poptPrintUsage(popt_context, stderr, 0);
+-		exit(1);
++		goto err_code;
+ 	} else if (!continuous_mode) {
+ 		num_cpus_to_pin = 1;
+ 		cpus_to_pin[0] = cpus_online - 1;
+ 	}
+ 
+-	if (getuid() != 0)
+-		ksft_exit_skip("Not running as root, but almost all tests "
+-			"require root in order to modify\nsystem settings.  "
+-			"Exiting.\n");
+-
+ 	max_msgs = fopen(MAX_MSGS, "r+");
+ 	max_msgsize = fopen(MAX_MSGSIZE, "r+");
+ 	if (!max_msgs)
+@@ -740,4 +744,9 @@ int main(int argc, char *argv[])
+ 			sleep(1);
+ 	}
+ 	shutdown(0, "", 0);
++
++err_code:
++	CPU_FREE(cpu_set);
++	exit(1);
++
+ }
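
Nearly all of the sound/pci hunks above apply one recurring shape of fix: the original probe body is renamed with a "__" prefix, the card is allocated with the device-managed snd_devm_card_new(), and a thin wrapper feeds the inner probe's return value through snd_card_free_on_error() so that a partially initialized card is released on any failure path. Below is a minimal sketch of that shape; the "foo" driver name and the elided setup section are illustrative placeholders, while snd_devm_card_new(), snd_card_register(), and snd_card_free_on_error() are the real ALSA core API the patch uses.

#include <linux/module.h>
#include <linux/pci.h>
#include <sound/core.h>

static int __foo_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct snd_card *card;
	int err;

	/* devres-managed card: freed automatically once the device goes away */
	err = snd_devm_card_new(&pci->dev, -1, NULL, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	/* ... chip init, PCM/mixer creation; an error anywhere in here
	 * would otherwise leave a half-constructed card behind ... */

	err = snd_card_register(card);
	if (err < 0)
		return err;

	pci_set_drvdata(pci, card);
	return 0;
}

static int foo_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	/* on any error, free the card (and every devres resource bound to
	 * it) before propagating the error code to the PCI core */
	return snd_card_free_on_error(&pci->dev, __foo_probe(pci, id));
}

A few drivers in this batch (hdsp, hdspm, rme9652) take the simpler variant instead: each early "return err" becomes "goto error", and the error label calls snd_card_free() explicitly before returning.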
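
The usb-audio hunk is different in kind: it lifts the fixed byte maxima (buffer_bytes_max, period_bytes_max) to INT_MAX and bounds the sizes by time instead, capping periods at 1 second and buffers at 2 seconds of audio, so the effective byte limits scale with the negotiated rate and frame size. A sketch of installing such constraints from a PCM open callback follows; my_pcm_open and my_hardware are illustrative names, while snd_pcm_hw_constraint_minmax() and the PERIOD_TIME/BUFFER_TIME parameters (both in microseconds) are the real API the patch uses.

#include <sound/pcm.h>

static int my_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	runtime->hw = my_hardware;	/* byte maxima left at INT_MAX */

	/* cap each period at 1 s of audio ... */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   0, 1000000);
	if (err < 0)
		return err;

	/* ... and the whole buffer at 2 s; in bytes this works out to
	 * rate * frame_size * seconds, whatever that happens to be */
	return snd_pcm_hw_constraint_minmax(runtime,
					    SNDRV_PCM_HW_PARAM_BUFFER_TIME,
					    0, 2000000);
}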


