From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Wed, 13 Apr 2022 17:53:02 +0000 (UTC)
Message-ID: <1649872335.d8736bd8058ccef545d015dcb5b8d7541d58dd99.mpagano@gentoo>

commit:     d8736bd8058ccef545d015dcb5b8d7541d58dd99
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 13 17:52:15 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 13 17:52:15 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d8736bd8

Linux patch 5.17.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1002_linux-5.17.3.patch | 16307 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 16311 insertions(+)

diff --git a/0000_README b/0000_README
index 18d9a104..a57e4c32 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-5.17.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.17.2
 
+Patch:  1002_linux-5.17.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.17.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-5.17.3.patch b/1002_linux-5.17.3.patch
new file mode 100644
index 00000000..980116ce
--- /dev/null
+++ b/1002_linux-5.17.3.patch
@@ -0,0 +1,16307 @@
+diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
+index 60a29972d3f1b..d063aaee5bb73 100644
+--- a/Documentation/virt/kvm/devices/vcpu.rst
++++ b/Documentation/virt/kvm/devices/vcpu.rst
+@@ -70,7 +70,7 @@ irqchip.
+ 	 -ENODEV  PMUv3 not supported or GIC not initialized
+ 	 -ENXIO   PMUv3 not properly configured or in-kernel irqchip not
+ 	 	  configured as required prior to calling this attribute
+-	 -EBUSY   PMUv3 already initialized
++	 -EBUSY   PMUv3 already initialized or a VCPU has already run
+ 	 -EINVAL  Invalid filter range
+ 	 =======  ======================================================
+ 
+diff --git a/Makefile b/Makefile
+index 06d852cad74ff..02fbef1a0213b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index bfbf0c4c7c5e5..39f5c1672f480 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -75,6 +75,7 @@
+ #define ARM_CPU_PART_CORTEX_A77		0xD0D
+ #define ARM_CPU_PART_NEOVERSE_V1	0xD40
+ #define ARM_CPU_PART_CORTEX_A78		0xD41
++#define ARM_CPU_PART_CORTEX_A78AE	0xD42
+ #define ARM_CPU_PART_CORTEX_X1		0xD44
+ #define ARM_CPU_PART_CORTEX_A510	0xD46
+ #define ARM_CPU_PART_CORTEX_A710	0xD47
+@@ -123,6 +124,7 @@
+ #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+ #define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+ #define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
++#define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+ #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+ #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+ #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 031e3a2537fc8..8234626a945a7 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -136,6 +136,7 @@ struct kvm_arch {
+ 
+ 	/* Memory Tagging Extension enabled for the guest */
+ 	bool mte_enabled;
++	bool ran_once;
+ };
+ 
+ struct kvm_vcpu_fault_info {
+diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
+index 771f543464e06..33e0fabc0b79b 100644
+--- a/arch/arm64/kernel/patching.c
++++ b/arch/arm64/kernel/patching.c
+@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+ 	int i, ret = 0;
+ 	struct aarch64_insn_patch *pp = arg;
+ 
+-	/* The first CPU becomes master */
+-	if (atomic_inc_return(&pp->cpu_count) == 1) {
++	/* The last CPU becomes master */
++	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
+ 		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ 			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ 							     pp->new_insns[i]);
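
The hunk above moves the arm64 instruction-patching work from the first CPU to
reach the rendezvous to the last one: only once every online CPU has entered
the stop_machine() callback is it certain that no CPU is still executing the
instructions about to be rewritten. A minimal standalone C sketch of that
pattern follows; the struct and helper names are hypothetical, and C11 atomics
stand in for the kernel primitives.

#include <stdatomic.h>

struct patch_args {
	atomic_int cpu_count;
	int nr_cpus;             /* stand-in for num_online_cpus() */
	void (*do_patch)(void);  /* hypothetical text-rewriting helper */
};

/* Runs concurrently on every CPU, stop_machine() style. */
static int patch_text_cb(void *arg)
{
	struct patch_args *pp = arg;

	if (atomic_fetch_add(&pp->cpu_count, 1) + 1 == pp->nr_cpus) {
		/* Last CPU in: every other CPU is spinning below, so none
		 * can be executing the code being rewritten. */
		pp->do_patch();
		atomic_fetch_add(&pp->cpu_count, 1); /* release the spinners */
	} else {
		while (atomic_load(&pp->cpu_count) <= pp->nr_cpus)
			; /* wait until the patcher bumps the count past nr_cpus */
	}
	return 0;
}
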
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index 5777929d35bf4..40be3a7c2c531 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -853,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope)
+ 	if (scope == SCOPE_LOCAL_CPU) {
+ 		static const struct midr_range spectre_bhb_k32_list[] = {
+ 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+ 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ 			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ 			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 27df5c1e6baad..3b46041f2b978 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void)
+ 	 * Log the CPU info before it is marked online and might get read.
+ 	 */
+ 	cpuinfo_store_cpu();
++	store_cpu_topology(cpu);
+ 
+ 	/*
+ 	 * Enable GIC and timers.
+@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void)
+ 
+ 	ipi_setup(cpu);
+ 
+-	store_cpu_topology(cpu);
+ 	numa_add_cpu(cpu);
+ 
+ 	/*
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 4dca6ffd03d42..85a2a75f44982 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -634,6 +634,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ 	if (kvm_vm_is_protected(kvm))
+ 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+ 
++	mutex_lock(&kvm->lock);
++	kvm->arch.ran_once = true;
++	mutex_unlock(&kvm->lock);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index fbcfd4ec6f926..bc771bc1a0413 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -924,6 +924,8 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
+ 
+ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ {
++	struct kvm *kvm = vcpu->kvm;
++
+ 	if (!kvm_vcpu_has_pmu(vcpu))
+ 		return -ENODEV;
+ 
+@@ -941,7 +943,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		int __user *uaddr = (int __user *)(long)attr->addr;
+ 		int irq;
+ 
+-		if (!irqchip_in_kernel(vcpu->kvm))
++		if (!irqchip_in_kernel(kvm))
+ 			return -EINVAL;
+ 
+ 		if (get_user(irq, uaddr))
+@@ -951,7 +953,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
+ 			return -EINVAL;
+ 
+-		if (!pmu_irq_is_valid(vcpu->kvm, irq))
++		if (!pmu_irq_is_valid(kvm, irq))
+ 			return -EINVAL;
+ 
+ 		if (kvm_arm_pmu_irq_initialized(vcpu))
+@@ -966,7 +968,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		struct kvm_pmu_event_filter filter;
+ 		int nr_events;
+ 
+-		nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
++		nr_events = kvm_pmu_event_mask(kvm) + 1;
+ 
+ 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
+ 
+@@ -978,12 +980,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		     filter.action != KVM_PMU_EVENT_DENY))
+ 			return -EINVAL;
+ 
+-		mutex_lock(&vcpu->kvm->lock);
++		mutex_lock(&kvm->lock);
++
++		if (kvm->arch.ran_once) {
++			mutex_unlock(&kvm->lock);
++			return -EBUSY;
++		}
+ 
+-		if (!vcpu->kvm->arch.pmu_filter) {
+-			vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
+-			if (!vcpu->kvm->arch.pmu_filter) {
+-				mutex_unlock(&vcpu->kvm->lock);
++		if (!kvm->arch.pmu_filter) {
++			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
++			if (!kvm->arch.pmu_filter) {
++				mutex_unlock(&kvm->lock);
+ 				return -ENOMEM;
+ 			}
+ 
+@@ -994,17 +1001,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 			 * events, the default is to allow.
+ 			 */
+ 			if (filter.action == KVM_PMU_EVENT_ALLOW)
+-				bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
++				bitmap_zero(kvm->arch.pmu_filter, nr_events);
+ 			else
+-				bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
++				bitmap_fill(kvm->arch.pmu_filter, nr_events);
+ 		}
+ 
+ 		if (filter.action == KVM_PMU_EVENT_ALLOW)
+-			bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
++			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ 		else
+-			bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
++			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ 
+-		mutex_unlock(&vcpu->kvm->lock);
++		mutex_unlock(&kvm->lock);
+ 
+ 		return 0;
+ 	}
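
Together with the arm.c hunk just above, this implements a run-once
configuration gate: the first vCPU run latches kvm->arch.ran_once under the VM
lock, after which attempts to reprogram the PMU event filter fail with -EBUSY
(matching the documentation hunk at the top of the patch). A condensed sketch
of the idiom, with a pthread mutex and hypothetical names in place of the KVM
internals:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct vm_state {
	pthread_mutex_t lock;
	bool ran_once;   /* latched at first vCPU run, never cleared */
};

void vcpu_first_run(struct vm_state *vm)
{
	pthread_mutex_lock(&vm->lock);
	vm->ran_once = true;
	pthread_mutex_unlock(&vm->lock);
}

int set_pmu_filter(struct vm_state *vm)
{
	pthread_mutex_lock(&vm->lock);
	if (vm->ran_once) {
		/* Too late: the guest may already depend on the old filter. */
		pthread_mutex_unlock(&vm->lock);
		return -EBUSY;
	}
	/* ... install the new filter while still holding the lock ... */
	pthread_mutex_unlock(&vm->lock);
	return 0;
}
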
+diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
+index 3f9ea47a10cd2..b998301f179ce 100644
+--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
++++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
+@@ -510,7 +510,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 
+-			eth0_addr: eth-mac-addr@0x22 {
++			eth0_addr: eth-mac-addr@22 {
+ 				reg = <0x22 0x6>;
+ 			};
+ 		};
+diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
+index bb36a400203df..8c56b862fd9c2 100644
+--- a/arch/mips/include/asm/setup.h
++++ b/arch/mips/include/asm/setup.h
+@@ -16,7 +16,7 @@ static inline void setup_8250_early_printk_port(unsigned long base,
+ 	unsigned int reg_shift, unsigned int timeout) {}
+ #endif
+ 
+-extern void set_handler(unsigned long offset, void *addr, unsigned long len);
++void set_handler(unsigned long offset, const void *addr, unsigned long len);
+ extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
+ 
+ typedef void (*vi_handler_t)(void);
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index a486486b2355c..246c6a6b02614 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2091,19 +2091,19 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
+ 		 * If no shadow set is selected then use the default handler
+ 		 * that does normal register saving and standard interrupt exit
+ 		 */
+-		extern char except_vec_vi, except_vec_vi_lui;
+-		extern char except_vec_vi_ori, except_vec_vi_end;
+-		extern char rollback_except_vec_vi;
+-		char *vec_start = using_rollback_handler() ?
+-			&rollback_except_vec_vi : &except_vec_vi;
++		extern const u8 except_vec_vi[], except_vec_vi_lui[];
++		extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
++		extern const u8 rollback_except_vec_vi[];
++		const u8 *vec_start = using_rollback_handler() ?
++				      rollback_except_vec_vi : except_vec_vi;
+ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+-		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+-		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
++		const int lui_offset = except_vec_vi_lui - vec_start + 2;
++		const int ori_offset = except_vec_vi_ori - vec_start + 2;
+ #else
+-		const int lui_offset = &except_vec_vi_lui - vec_start;
+-		const int ori_offset = &except_vec_vi_ori - vec_start;
++		const int lui_offset = except_vec_vi_lui - vec_start;
++		const int ori_offset = except_vec_vi_ori - vec_start;
+ #endif
+-		const int handler_len = &except_vec_vi_end - vec_start;
++		const int handler_len = except_vec_vi_end - vec_start;
+ 
+ 		if (handler_len > VECTORSPACING) {
+ 			/*
+@@ -2311,7 +2311,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
+ }
+ 
+ /* Install CPU exception handler */
+-void set_handler(unsigned long offset, void *addr, unsigned long size)
++void set_handler(unsigned long offset, const void *addr, unsigned long size)
+ {
+ #ifdef CONFIG_CPU_MICROMIPS
+ 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
+index 115a69fc20caa..f395ae218470f 100644
+--- a/arch/mips/ralink/ill_acc.c
++++ b/arch/mips/ralink/ill_acc.c
+@@ -61,6 +61,7 @@ static int __init ill_acc_of_setup(void)
+ 	pdev = of_find_device_by_node(np);
+ 	if (!pdev) {
+ 		pr_err("%pOFn: failed to lookup pdev\n", np);
++		of_node_put(np);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
+index 80a0ab372802d..e59574f65e641 100644
+--- a/arch/parisc/kernel/patch.c
++++ b/arch/parisc/kernel/patch.c
+@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
+ 
+ 	*need_unmap = 1;
+ 	set_fixmap(fixmap, page_to_phys(page));
+-	if (flags)
+-		raw_spin_lock_irqsave(&patch_lock, *flags);
+-	else
+-		__acquire(&patch_lock);
++	raw_spin_lock_irqsave(&patch_lock, *flags);
+ 
+ 	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+ }
+@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ {
+ 	clear_fixmap(fixmap);
+ 
+-	if (flags)
+-		raw_spin_unlock_irqrestore(&patch_lock, *flags);
+-	else
+-		__release(&patch_lock);
++	raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ }
+ 
+ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+ 	int mapped;
+ 
+ 	/* Make sure we don't have any aliases in cache */
+-	flush_kernel_vmap_range(addr, len);
+-	flush_icache_range(start, end);
++	flush_kernel_dcache_range_asm(start, end);
++	flush_kernel_icache_range_asm(start, end);
++	flush_tlb_kernel_range(start, end);
+ 
+ 	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
+ 
+@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+ 			 * We're crossing a page boundary, so
+ 			 * need to remap
+ 			 */
+-			flush_kernel_vmap_range((void *)fixmap,
+-						(p-fixmap) * sizeof(*p));
++			flush_kernel_dcache_range_asm((unsigned long)fixmap,
++						      (unsigned long)p);
++			flush_tlb_kernel_range((unsigned long)fixmap,
++					       (unsigned long)p);
+ 			if (mapped)
+ 				patch_unmap(FIX_TEXT_POKE0, &flags);
+ 			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
+@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
+ 		}
+ 	}
+ 
+-	flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
++	flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
++	flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
+ 	if (mapped)
+ 		patch_unmap(FIX_TEXT_POKE0, &flags);
+-	flush_icache_range(start, end);
+ }
+ 
+ void __kprobes __patch_text(void *addr, u32 insn)
+diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
+index 099a598c74c00..bfe1ed5be3374 100644
+--- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
++++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
+@@ -139,12 +139,12 @@
+ 		fman@400000 {
+ 			ethernet@e6000 {
+ 				phy-handle = <&phy_rgmii_0>;
+-				phy-connection-type = "rgmii";
++				phy-connection-type = "rgmii-id";
+ 			};
+ 
+ 			ethernet@e8000 {
+ 				phy-handle = <&phy_rgmii_1>;
+-				phy-connection-type = "rgmii";
++				phy-connection-type = "rgmii-id";
+ 			};
+ 
+ 			mdio0: mdio@fc000 {
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index fc28f46d2f9dc..5404f7abbcf8d 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -612,7 +612,7 @@ DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
+ DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
+ 
+ /* hash_utils.c */
+-DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
++DECLARE_INTERRUPT_HANDLER(do_hash_fault);
+ 
+ /* fault.c */
+ DECLARE_INTERRUPT_HANDLER(do_page_fault);
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 254687258f42b..f2c5c26869f1a 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
+ #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
+ #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+ 
+-#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
++#define virt_addr_valid(vaddr)	({					\
++	unsigned long _addr = (unsigned long)vaddr;			\
++	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
++	pfn_valid(virt_to_pfn(_addr));					\
++})
+ 
+ /*
+  * On Book-E parts we need __va to parse the device tree and we can't
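
The rewritten virt_addr_valid() adds explicit range checks because
virt_to_pfn() applied to an address outside the linear map (a vmalloc or
ioremap pointer, say) can still land on a page frame number that pfn_valid()
accepts. A toy C illustration of the two-step test, with the kernel globals
passed in as parameters and a 4K page size assumed:

#include <stdbool.h>

bool virt_addr_valid_sketch(unsigned long vaddr,
			    unsigned long page_offset, /* start of linear map */
			    unsigned long high_mem,    /* end of mapped RAM */
			    bool (*pfn_valid_fn)(unsigned long))
{
	/* Step 1: the address must sit inside the direct map ... */
	if (vaddr < page_offset || vaddr >= high_mem)
		return false;
	/* Step 2: ... only then is the virt-to-pfn arithmetic meaningful. */
	return pfn_valid_fn((vaddr - page_offset) >> 12 /* PAGE_SHIFT, 4K assumed */);
}
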
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 733e6ef367589..1f42aabbbab3a 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -1313,6 +1313,12 @@ int __init early_init_dt_scan_rtas(unsigned long node,
+ 	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
+ 	sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);
+ 
++#ifdef CONFIG_PPC64
++	/* need this feature to decide the crashkernel offset */
++	if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
++		powerpc_firmware_features |= FW_FEATURE_LPAR;
++#endif
++
+ 	if (basep && entryp && sizep) {
+ 		rtas.base = *basep;
+ 		rtas.entry = *entryp;
+diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
+index a0a78aba2083e..1ee4640a26413 100644
+--- a/arch/powerpc/kernel/secvar-sysfs.c
++++ b/arch/powerpc/kernel/secvar-sysfs.c
+@@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr,
+ 	const char *format;
+ 
+ 	node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
+-	if (!of_device_is_available(node))
+-		return -ENODEV;
++	if (!of_device_is_available(node)) {
++		rc = -ENODEV;
++		goto out;
++	}
+ 
+ 	rc = of_property_read_string(node, "format", &format);
+ 	if (rc)
+-		return rc;
++		goto out;
+ 
+ 	rc = sprintf(buf, "%s\n", format);
+ 
++out:
+ 	of_node_put(node);
+ 
+ 	return rc;
+diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
+index 8b68d9f91a03b..abf5897ae88c8 100644
+--- a/arch/powerpc/kexec/core.c
++++ b/arch/powerpc/kexec/core.c
+@@ -134,11 +134,18 @@ void __init reserve_crashkernel(void)
+ 	if (!crashk_res.start) {
+ #ifdef CONFIG_PPC64
+ 		/*
+-		 * On 64bit we split the RMO in half but cap it at half of
+-		 * a small SLB (128MB) since the crash kernel needs to place
+-		 * itself and some stacks to be in the first segment.
++		 * On the LPAR platform place the crash kernel to mid of
++		 * RMA size (512MB or more) to ensure the crash kernel
++		 * gets enough space to place itself and some stack to be
++		 * in the first segment. At the same time normal kernel
++		 * also get enough space to allocate memory for essential
++		 * system resource in the first segment. Keep the crash
++		 * kernel starts at 128MB offset on other platforms.
+ 		 */
+-		crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
++		if (firmware_has_feature(FW_FEATURE_LPAR))
++			crashk_res.start = ppc64_rma_size / 2;
++		else
++			crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
+ #else
+ 		crashk_res.start = KDUMP_KERNELBASE;
+ #endif
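
In effect the new placement logic picks the middle of the RMA on LPAR (where
the RMA is at least 512MB, so the crash kernel starts at 256MB or above) and
keeps the old 128MB cap everywhere else. A toy computation of the same rule,
for illustration only:

unsigned long long crash_base(int is_lpar, unsigned long long rma_size)
{
	if (is_lpar)
		return rma_size / 2;           /* e.g. 512MB RMA -> 256MB */
	/* elsewhere: half the RMA, capped at 128MB (0x8000000) */
	return rma_size / 2 < 0x8000000ULL ? rma_size / 2 : 0x8000000ULL;
}
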
+diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
+index 05e003eb5d906..e42d1c609e476 100644
+--- a/arch/powerpc/kvm/book3s_64_entry.S
++++ b/arch/powerpc/kvm/book3s_64_entry.S
+@@ -414,10 +414,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+ 	 */
+ 	ld	r10,HSTATE_SCRATCH0(r13)
+ 	cmpwi	r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+-	beq	machine_check_common
++	beq	.Lcall_machine_check_common
+ 
+ 	cmpwi	r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+-	beq	system_reset_common
++	beq	.Lcall_system_reset_common
+ 
+ 	b	.
++
++.Lcall_machine_check_common:
++	b	machine_check_common
++
++.Lcall_system_reset_common:
++	b	system_reset_common
+ #endif
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index 906d434633667..00c68e7fb11e4 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -43,9 +43,14 @@ int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
+ #ifdef CONFIG_STRICT_KERNEL_RWX
+ static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+ 
++static int map_patch_area(void *addr, unsigned long text_poke_addr);
++static void unmap_patch_area(unsigned long addr);
++
+ static int text_area_cpu_up(unsigned int cpu)
+ {
+ 	struct vm_struct *area;
++	unsigned long addr;
++	int err;
+ 
+ 	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ 	if (!area) {
+@@ -53,6 +58,15 @@ static int text_area_cpu_up(unsigned int cpu)
+ 			cpu);
+ 		return -1;
+ 	}
++
++	// Map/unmap the area to ensure all page tables are pre-allocated
++	addr = (unsigned long)area->addr;
++	err = map_patch_area(empty_zero_page, addr);
++	if (err)
++		return err;
++
++	unmap_patch_area(addr);
++
+ 	this_cpu_write(text_poke_area, area);
+ 
+ 	return 0;
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 7abf82a698d32..985cabdd7f679 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -1621,8 +1621,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
+ }
+ EXPORT_SYMBOL_GPL(hash_page);
+ 
+-DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
+-DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
++DEFINE_INTERRUPT_HANDLER(do_hash_fault)
+ {
+ 	unsigned long ea = regs->dar;
+ 	unsigned long dsisr = regs->dsisr;
+@@ -1681,35 +1680,6 @@ DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
+ 	}
+ }
+ 
+-/*
+- * The _RAW interrupt entry checks for the in_nmi() case before
+- * running the full handler.
+- */
+-DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
+-{
+-	/*
+-	 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+-	 * don't call hash_page, just fail the fault. This is required to
+-	 * prevent re-entrancy problems in the hash code, namely perf
+-	 * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+-	 * hash fault. See the comment in hash_preload().
+-	 *
+-	 * We come here as a result of a DSI at a point where we don't want
+-	 * to call hash_page, such as when we are accessing memory (possibly
+-	 * user memory) inside a PMU interrupt that occurred while interrupts
+-	 * were soft-disabled.  We want to invoke the exception handler for
+-	 * the access, or panic if there isn't a handler.
+-	 */
+-	if (unlikely(in_nmi())) {
+-		do_bad_page_fault_segv(regs);
+-		return 0;
+-	}
+-
+-	__do_hash_fault(regs);
+-
+-	return 0;
+-}
+-
+ #ifdef CONFIG_PPC_MM_SLICES
+ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
+ {
+@@ -1776,26 +1746,18 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
+ #endif /* CONFIG_PPC_64K_PAGES */
+ 
+ 	/*
+-	 * __hash_page_* must run with interrupts off, as it sets the
+-	 * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+-	 * time and may take a hash fault reading the user stack, see
+-	 * read_user_stack_slow() in the powerpc/perf code.
+-	 *
+-	 * If that takes a hash fault on the same page as we lock here, it
+-	 * will bail out when seeing H_PAGE_BUSY set, and retry the access
+-	 * leading to an infinite loop.
++	 * __hash_page_* must run with interrupts off, including PMI interrupts
++	 * off, as it sets the H_PAGE_BUSY bit.
+ 	 *
+-	 * Disabling interrupts here does not prevent perf interrupts, but it
+-	 * will prevent them taking hash faults (see the NMI test in
+-	 * do_hash_page), then read_user_stack's copy_from_user_nofault will
+-	 * fail and perf will fall back to read_user_stack_slow(), which
+-	 * walks the Linux page tables.
++	 * It's otherwise possible for perf interrupts to hit at any time and
++	 * may take a hash fault reading the user stack, which could take a
++	 * hash miss and deadlock on the same H_PAGE_BUSY bit.
+ 	 *
+ 	 * Interrupts must also be off for the duration of the
+ 	 * mm_is_thread_local test and update, to prevent preempt running the
+ 	 * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+ 	 */
+-	local_irq_save(flags);
++	powerpc_local_irq_pmu_save(flags);
+ 
+ 	/* Is that local to this CPU ? */
+ 	if (mm_is_thread_local(mm))
+@@ -1820,7 +1782,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
+ 				   mm_ctx_user_psize(&mm->context),
+ 				   pte_val(*ptep));
+ 
+-	local_irq_restore(flags);
++	powerpc_local_irq_pmu_restore(flags);
+ }
+ 
+ /*
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 8e301cd8925b2..4d221d033804e 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -255,7 +255,7 @@ void __init mem_init(void)
+ #endif
+ 
+ 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-	set_max_mapnr(max_low_pfn);
++	set_max_mapnr(max_pfn);
+ 
+ 	kasan_late_init();
+ 
+diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
+index 3bb9d168e3b31..85753e32a4de9 100644
+--- a/arch/powerpc/mm/pageattr.c
++++ b/arch/powerpc/mm/pageattr.c
+@@ -15,12 +15,14 @@
+ #include <asm/pgtable.h>
+ 
+ 
++static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
++				    unsigned long old, unsigned long new)
++{
++	return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
++}
++
+ /*
+- * Updates the attributes of a page in three steps:
+- *
+- * 1. take the page_table_lock
+- * 2. install the new entry with the updated attributes
+- * 3. flush the TLB
++ * Updates the attributes of a page atomically.
+  *
+  * This sequence is safe against concurrent updates, and also allows updating the
+  * attributes of a page currently being executed or accessed.
+@@ -28,25 +30,21 @@
+ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+ {
+ 	long action = (long)data;
+-	pte_t pte;
+ 
+-	spin_lock(&init_mm.page_table_lock);
+-
+-	pte = ptep_get(ptep);
+-
+-	/* modify the PTE bits as desired, then apply */
++	/* modify the PTE bits as desired */
+ 	switch (action) {
+ 	case SET_MEMORY_RO:
+-		pte = pte_wrprotect(pte);
++		/* Don't clear DIRTY bit */
++		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
+ 		break;
+ 	case SET_MEMORY_RW:
+-		pte = pte_mkwrite(pte_mkdirty(pte));
++		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
+ 		break;
+ 	case SET_MEMORY_NX:
+-		pte = pte_exprotect(pte);
++		pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
+ 		break;
+ 	case SET_MEMORY_X:
+-		pte = pte_mkexec(pte);
++		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
+ 		break;
+ 	case SET_MEMORY_NP:
+ 		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
+@@ -59,16 +57,12 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+ 		break;
+ 	}
+ 
+-	pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
+-
+ 	/* See ptesync comment in radix__set_pte_at() */
+ 	if (radix_enabled())
+ 		asm volatile("ptesync": : :"memory");
+ 
+ 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ 
+-	spin_unlock(&init_mm.page_table_lock);
+-
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
+index d6fa6e25234f4..19a8d051ddf10 100644
+--- a/arch/powerpc/perf/callchain.h
++++ b/arch/powerpc/perf/callchain.h
+@@ -2,7 +2,6 @@
+ #ifndef _POWERPC_PERF_CALLCHAIN_H
+ #define _POWERPC_PERF_CALLCHAIN_H
+ 
+-int read_user_stack_slow(const void __user *ptr, void *buf, int nb);
+ void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
+ 			    struct pt_regs *regs);
+ void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
+@@ -26,17 +25,11 @@ static inline int __read_user_stack(const void __user *ptr, void *ret,
+ 				    size_t size)
+ {
+ 	unsigned long addr = (unsigned long)ptr;
+-	int rc;
+ 
+ 	if (addr > TASK_SIZE - size || (addr & (size - 1)))
+ 		return -EFAULT;
+ 
+-	rc = copy_from_user_nofault(ret, ptr, size);
+-
+-	if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc)
+-		return read_user_stack_slow(ptr, ret, size);
+-
+-	return rc;
++	return copy_from_user_nofault(ret, ptr, size);
+ }
+ 
+ #endif /* _POWERPC_PERF_CALLCHAIN_H */
+diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
+index 8d0df4226328d..488e8a21a11ea 100644
+--- a/arch/powerpc/perf/callchain_64.c
++++ b/arch/powerpc/perf/callchain_64.c
+@@ -18,33 +18,6 @@
+ 
+ #include "callchain.h"
+ 
+-/*
+- * On 64-bit we don't want to invoke hash_page on user addresses from
+- * interrupt context, so if the access faults, we read the page tables
+- * to find which page (if any) is mapped and access it directly. Radix
+- * has no need for this so it doesn't use read_user_stack_slow.
+- */
+-int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
+-{
+-
+-	unsigned long addr = (unsigned long) ptr;
+-	unsigned long offset;
+-	struct page *page;
+-	void *kaddr;
+-
+-	if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
+-		kaddr = page_address(page);
+-
+-		/* align address to page boundary */
+-		offset = addr & ~PAGE_MASK;
+-
+-		memcpy(buf, kaddr + offset, nb);
+-		put_page(page);
+-		return 0;
+-	}
+-	return -EFAULT;
+-}
+-
+ static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
+ {
+ 	return __read_user_stack(ptr, ret, sizeof(*ret));
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index 87bc1929ee5a8..e2e1fec91c6ed 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -107,6 +107,7 @@ config PPC_BOOK3S_64
+ 
+ config PPC_BOOK3E_64
+ 	bool "Embedded processors"
++	select PPC_FSL_BOOK3E
+ 	select PPC_FPU # Make it a choice ?
+ 	select PPC_SMP_MUXED_IPI
+ 	select PPC_DOORBELL
+@@ -295,7 +296,7 @@ config FSL_BOOKE
+ config PPC_FSL_BOOK3E
+ 	bool
+ 	select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
+-	select FSL_EMB_PERFMON
++	imply FSL_EMB_PERFMON
+ 	select PPC_SMP_MUXED_IPI
+ 	select PPC_DOORBELL
+ 	select PPC_KUEP
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 89c86f32aff86..bb5bda6b2357b 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -1791,7 +1791,7 @@ static int xive_ipi_debug_show(struct seq_file *m, void *private)
+ 	if (xive_ops->debug_show)
+ 		xive_ops->debug_show(m, private);
+ 
+-	for_each_possible_cpu(cpu)
++	for_each_online_cpu(cpu)
+ 		xive_debug_show_ipi(m, cpu);
+ 	return 0;
+ }
+diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S
+index 07d1d2152ba5c..e0609e1f0864d 100644
+--- a/arch/riscv/lib/memmove.S
++++ b/arch/riscv/lib/memmove.S
+@@ -1,64 +1,316 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2022 Michael T. Kloos <michael@michaelkloos.com>
++ */
+ 
+ #include <linux/linkage.h>
+ #include <asm/asm.h>
+ 
+-ENTRY(__memmove)
+-WEAK(memmove)
+-        move    t0, a0
+-        move    t1, a1
+-
+-        beq     a0, a1, exit_memcpy
+-        beqz    a2, exit_memcpy
+-        srli    t2, a2, 0x2
+-
+-        slt     t3, a0, a1
+-        beqz    t3, do_reverse
+-
+-        andi    a2, a2, 0x3
+-        li      t4, 1
+-        beqz    t2, byte_copy
+-
+-word_copy:
+-        lw      t3, 0(a1)
+-        addi    t2, t2, -1
+-        addi    a1, a1, 4
+-        sw      t3, 0(a0)
+-        addi    a0, a0, 4
+-        bnez    t2, word_copy
+-        beqz    a2, exit_memcpy
+-        j       byte_copy
+-
+-do_reverse:
+-        add     a0, a0, a2
+-        add     a1, a1, a2
+-        andi    a2, a2, 0x3
+-        li      t4, -1
+-        beqz    t2, reverse_byte_copy
+-
+-reverse_word_copy:
+-        addi    a1, a1, -4
+-        addi    t2, t2, -1
+-        lw      t3, 0(a1)
+-        addi    a0, a0, -4
+-        sw      t3, 0(a0)
+-        bnez    t2, reverse_word_copy
+-        beqz    a2, exit_memcpy
+-
+-reverse_byte_copy:
+-        addi    a0, a0, -1
+-        addi    a1, a1, -1
++SYM_FUNC_START(__memmove)
++SYM_FUNC_START_WEAK(memmove)
++	/*
++	 * Returns
++	 *   a0 - dest
++	 *
++	 * Parameters
++	 *   a0 - Inclusive first byte of dest
++	 *   a1 - Inclusive first byte of src
++	 *   a2 - Length of copy n
++	 *
++	 * Because the return matches the parameter register a0,
++	 * we will not clobber or modify that register.
++	 *
++	 * Note: This currently only works on little-endian.
++	 * To port to big-endian, reverse the direction of shifts
++	 * in the 2 misaligned fixup copy loops.
++	 */
+ 
++	/* Return if nothing to do */
++	beq a0, a1, return_from_memmove
++	beqz a2, return_from_memmove
++
++	/*
++	 * Register Uses
++	 *      Forward Copy: a1 - Index counter of src
++	 *      Reverse Copy: a4 - Index counter of src
++	 *      Forward Copy: t3 - Index counter of dest
++	 *      Reverse Copy: t4 - Index counter of dest
++	 *   Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest
++	 *   Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest
++	 *   Both Copy Modes: t0 - Link / Temporary for load-store
++	 *   Both Copy Modes: t1 - Temporary for load-store
++	 *   Both Copy Modes: t2 - Temporary for load-store
++	 *   Both Copy Modes: a5 - dest to src alignment offset
++	 *   Both Copy Modes: a6 - Shift amount
++	 *   Both Copy Modes: a7 - Inverse Shift amount
++	 *   Both Copy Modes: a2 - Alternate breakpoint for unrolled loops
++	 */
++
++	/*
++	 * Solve for some register values now.
++	 * Byte copy does not need t5 or t6.
++	 */
++	mv   t3, a0
++	add  t4, a0, a2
++	add  a4, a1, a2
++
++	/*
++	 * Byte copy if copying less than (2 * SZREG) bytes. This can
++	 * cause problems with the bulk copy implementation and is
++	 * small enough not to bother.
++	 */
++	andi t0, a2, -(2 * SZREG)
++	beqz t0, byte_copy
++
++	/*
++	 * Now solve for t5 and t6.
++	 */
++	andi t5, t3, -SZREG
++	andi t6, t4, -SZREG
++	/*
++	 * If dest(Register t3) rounded down to the nearest naturally
++	 * aligned SZREG address, does not equal dest, then add SZREG
++	 * to find the low-bound of SZREG alignment in the dest memory
++	 * region.  Note that this could overshoot the dest memory
++	 * region if n is less than SZREG.  This is one reason why
++	 * we always byte copy if n is less than SZREG.
++	 * Otherwise, dest is already naturally aligned to SZREG.
++	 */
++	beq  t5, t3, 1f
++		addi t5, t5, SZREG
++	1:
++
++	/*
++	 * If the dest and src are co-aligned to SZREG, then there is
++	 * no need for the full rigmarole of a full misaligned fixup copy.
++	 * Instead, do a simpler co-aligned copy.
++	 */
++	xor  t0, a0, a1
++	andi t1, t0, (SZREG - 1)
++	beqz t1, coaligned_copy
++	/* Fall through to misaligned fixup copy */
++
++misaligned_fixup_copy:
++	bltu a1, a0, misaligned_fixup_copy_reverse
++
++misaligned_fixup_copy_forward:
++	jal  t0, byte_copy_until_aligned_forward
++
++	andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
++	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
++	sub  a5, a1, t3 /* Find the difference between src and dest */
++	andi a1, a1, -SZREG /* Align the src pointer */
++	addi a2, t6, SZREG /* The other breakpoint for the unrolled loop*/
++
++	/*
++	 * Compute The Inverse Shift
++	 * a7 = XLEN - a6 = XLEN + -a6
++	 * 2s complement negation to find the negative: -a6 = ~a6 + 1
++	 * Add that to XLEN.  XLEN = SZREG * 8.
++	 */
++	not  a7, a6
++	addi a7, a7, (SZREG * 8 + 1)
++
++	/*
++	 * Fix Misalignment Copy Loop - Forward
++	 * load_val0 = load_ptr[0];
++	 * do {
++	 * 	load_val1 = load_ptr[1];
++	 * 	store_ptr += 2;
++	 * 	store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7});
++	 *
++	 * 	if (store_ptr == {a2})
++	 * 		break;
++	 *
++	 * 	load_val0 = load_ptr[2];
++	 * 	load_ptr += 2;
++	 * 	store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7});
++	 *
++	 * } while (store_ptr != store_ptr_end);
++	 * store_ptr = store_ptr_end;
++	 */
++
++	REG_L t0, (0 * SZREG)(a1)
++	1:
++	REG_L t1, (1 * SZREG)(a1)
++	addi  t3, t3, (2 * SZREG)
++	srl   t0, t0, a6
++	sll   t2, t1, a7
++	or    t2, t0, t2
++	REG_S t2, ((0 * SZREG) - (2 * SZREG))(t3)
++
++	beq   t3, a2, 2f
++
++	REG_L t0, (2 * SZREG)(a1)
++	addi  a1, a1, (2 * SZREG)
++	srl   t1, t1, a6
++	sll   t2, t0, a7
++	or    t2, t1, t2
++	REG_S t2, ((1 * SZREG) - (2 * SZREG))(t3)
++
++	bne   t3, t6, 1b
++	2:
++	mv    t3, t6 /* Fix the dest pointer in case the loop was broken */
++
++	add  a1, t3, a5 /* Restore the src pointer */
++	j byte_copy_forward /* Copy any remaining bytes */
++
++misaligned_fixup_copy_reverse:
++	jal  t0, byte_copy_until_aligned_reverse
++
++	andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
++	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
++	sub  a5, a4, t4 /* Find the difference between src and dest */
++	andi a4, a4, -SZREG /* Align the src pointer */
++	addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop*/
++
++	/*
++	 * Compute The Inverse Shift
++	 * a7 = XLEN - a6 = XLEN + -a6
++	 * 2s complement negation to find the negative: -a6 = ~a6 + 1
++	 * Add that to XLEN.  XLEN = SZREG * 8.
++	 */
++	not  a7, a6
++	addi a7, a7, (SZREG * 8 + 1)
++
++	/*
++	 * Fix Misalignment Copy Loop - Reverse
++	 * load_val1 = load_ptr[0];
++	 * do {
++	 * 	load_val0 = load_ptr[-1];
++	 * 	store_ptr -= 2;
++	 * 	store_ptr[1] = (load_val0 >> {a6}) | (load_val1 << {a7});
++	 *
++	 * 	if (store_ptr == {a2})
++	 * 		break;
++	 *
++	 * 	load_val1 = load_ptr[-2];
++	 * 	load_ptr -= 2;
++	 * 	store_ptr[0] = (load_val1 >> {a6}) | (load_val0 << {a7});
++	 *
++	 * } while (store_ptr != store_ptr_end);
++	 * store_ptr = store_ptr_end;
++	 */
++
++	REG_L t1, ( 0 * SZREG)(a4)
++	1:
++	REG_L t0, (-1 * SZREG)(a4)
++	addi  t4, t4, (-2 * SZREG)
++	sll   t1, t1, a7
++	srl   t2, t0, a6
++	or    t2, t1, t2
++	REG_S t2, ( 1 * SZREG)(t4)
++
++	beq   t4, a2, 2f
++
++	REG_L t1, (-2 * SZREG)(a4)
++	addi  a4, a4, (-2 * SZREG)
++	sll   t0, t0, a7
++	srl   t2, t1, a6
++	or    t2, t0, t2
++	REG_S t2, ( 0 * SZREG)(t4)
++
++	bne   t4, t5, 1b
++	2:
++	mv    t4, t5 /* Fix the dest pointer in case the loop was broken */
++
++	add  a4, t4, a5 /* Restore the src pointer */
++	j byte_copy_reverse /* Copy any remaining bytes */
++
++/*
++ * Simple copy loops for SZREG co-aligned memory locations.
++ * These also make calls to do byte copies for any unaligned
++ * data at their terminations.
++ */
++coaligned_copy:
++	bltu a1, a0, coaligned_copy_reverse
++
++coaligned_copy_forward:
++	jal t0, byte_copy_until_aligned_forward
++
++	1:
++	REG_L t1, ( 0 * SZREG)(a1)
++	addi  a1, a1, SZREG
++	addi  t3, t3, SZREG
++	REG_S t1, (-1 * SZREG)(t3)
++	bne   t3, t6, 1b
++
++	j byte_copy_forward /* Copy any remaining bytes */
++
++coaligned_copy_reverse:
++	jal t0, byte_copy_until_aligned_reverse
++
++	1:
++	REG_L t1, (-1 * SZREG)(a4)
++	addi  a4, a4, -SZREG
++	addi  t4, t4, -SZREG
++	REG_S t1, ( 0 * SZREG)(t4)
++	bne   t4, t5, 1b
++
++	j byte_copy_reverse /* Copy any remaining bytes */
++
++/*
++ * These are basically sub-functions within the function.  They
++ * are used to byte copy until the dest pointer is in alignment.
++ * At which point, a bulk copy method can be used by the
++ * calling code.  These work on the same registers as the bulk
++ * copy loops.  Therefore, the register values can be picked
++ * up from where they were left and we avoid code duplication
++ * without any overhead except the call in and return jumps.
++ */
++byte_copy_until_aligned_forward:
++	beq  t3, t5, 2f
++	1:
++	lb   t1,  0(a1)
++	addi a1, a1, 1
++	addi t3, t3, 1
++	sb   t1, -1(t3)
++	bne  t3, t5, 1b
++	2:
++	jalr zero, 0x0(t0) /* Return to multibyte copy loop */
++
++byte_copy_until_aligned_reverse:
++	beq  t4, t6, 2f
++	1:
++	lb   t1, -1(a4)
++	addi a4, a4, -1
++	addi t4, t4, -1
++	sb   t1,  0(t4)
++	bne  t4, t6, 1b
++	2:
++	jalr zero, 0x0(t0) /* Return to multibyte copy loop */
++
++/*
++ * Simple byte copy loops.
++ * These will byte copy until they reach the end of data to copy.
++ * At that point, they will call to return from memmove.
++ */
+ byte_copy:
+-        lb      t3, 0(a1)
+-        addi    a2, a2, -1
+-        sb      t3, 0(a0)
+-        add     a1, a1, t4
+-        add     a0, a0, t4
+-        bnez    a2, byte_copy
+-
+-exit_memcpy:
+-        move a0, t0
+-        move a1, t1
+-        ret
+-END(__memmove)
++	bltu a1, a0, byte_copy_reverse
++
++byte_copy_forward:
++	beq  t3, t4, 2f
++	1:
++	lb   t1,  0(a1)
++	addi a1, a1, 1
++	addi t3, t3, 1
++	sb   t1, -1(t3)
++	bne  t3, t4, 1b
++	2:
++	ret
++
++byte_copy_reverse:
++	beq  t4, t3, 2f
++	1:
++	lb   t1, -1(a4)
++	addi a4, a4, -1
++	addi t4, t4, -1
++	sb   t1,  0(t4)
++	bne  t4, t3, 1b
++	2:
++
++return_from_memmove:
++	ret
++
++SYM_FUNC_END(memmove)
++SYM_FUNC_END(__memmove)
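
The misaligned fixup loops above stitch each destination word together from
two naturally aligned source loads, shifting by the byte offset and by its
inverse. A little-endian C rendering of the forward case follows, as a sketch
with a hypothetical helper name; it assumes a non-zero offset (co-aligned
copies take the simpler path) and, like the assembly, reads the whole trailing
aligned word:

#include <stddef.h>
#include <stdint.h>

void misaligned_forward_copy(unsigned long *dst, const unsigned char *src,
			     size_t nwords)
{
	size_t off = (uintptr_t)src % sizeof(unsigned long);       /* 1..SZREG-1 */
	unsigned lo_shift = off * 8;                               /* a6 in the asm */
	unsigned hi_shift = sizeof(unsigned long) * 8 - lo_shift;  /* a7 */
	const unsigned long *s = (const unsigned long *)(src - off); /* aligned down */
	unsigned long lo = s[0], hi;

	for (size_t i = 0; i < nwords; i++) {
		hi = s[i + 1];
		/* Top bytes of the low word plus bottom bytes of the high
		 * word form one naturally aligned store. */
		dst[i] = (lo >> lo_shift) | (hi << hi_shift);
		lo = hi;
	}
}
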
+diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h
+index f512704a9ec7b..22b39de73c246 100644
+--- a/arch/um/include/asm/xor.h
++++ b/arch/um/include/asm/xor.h
+@@ -4,8 +4,10 @@
+ 
+ #ifdef CONFIG_64BIT
+ #undef CONFIG_X86_32
++#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64))
+ #else
+ #define CONFIG_X86_32 1
++#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs))
+ #endif
+ 
+ #include <asm/cpufeature.h>
+@@ -16,7 +18,7 @@
+ #undef XOR_SELECT_TEMPLATE
+ /* pick an arbitrary one - measuring isn't possible with inf-cpu */
+ #define XOR_SELECT_TEMPLATE(x)	\
+-	(time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)
++	(time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x))
+ #endif
+ 
+ #endif
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9f5bd41bf660c..d0ecc4005df33 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2837,6 +2837,11 @@ config IA32_AOUT
+ config X86_X32
+ 	bool "x32 ABI for 64-bit mode"
+ 	depends on X86_64
++	# llvm-objcopy does not convert x86_64 .note.gnu.property or
++	# compressed debug sections to x86_x32 properly:
++	# https://github.com/ClangBuiltLinux/linux/issues/514
++	# https://github.com/ClangBuiltLinux/linux/issues/1141
++	depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm)
+ 	help
+ 	  Include code to run binaries for the x32 native 32-bit ABI
+ 	  for 64-bit processors.  An x32 process gets access to the
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index a3c7ca876aebd..d87c9b246a8fa 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -281,7 +281,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
+ 	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ 	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+-	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
++	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+ 	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
+ 	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+ 	EVENT_EXTRA_END
+@@ -5515,7 +5515,11 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
+ 			/* Disabled fixed counters which are not in CPUID */
+ 			c->idxmsk64 &= intel_ctrl;
+ 
+-			if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
++			/*
++			 * Don't extend the pseudo-encoding to the
++			 * generic counters
++			 */
++			if (!use_fixed_pseudo_encoding(c->code))
+ 				c->idxmsk64 |= (1ULL << num_counters) - 1;
+ 		}
+ 		c->idxmsk64 &=
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index c878fed3056fd..fbcfec4dc4ccd 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -154,24 +154,24 @@
+ 
+ # define DEFINE_EXTABLE_TYPE_REG \
+ 	".macro extable_type_reg type:req reg:req\n"						\
+-	".set found, 0\n"									\
+-	".set regnr, 0\n"									\
++	".set .Lfound, 0\n"									\
++	".set .Lregnr, 0\n"									\
+ 	".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n"		\
+ 	".ifc \\reg, %%\\rs\n"									\
+-	".set found, found+1\n"									\
+-	".long \\type + (regnr << 8)\n"								\
++	".set .Lfound, .Lfound+1\n"								\
++	".long \\type + (.Lregnr << 8)\n"							\
+ 	".endif\n"										\
+-	".set regnr, regnr+1\n"									\
++	".set .Lregnr, .Lregnr+1\n"								\
+ 	".endr\n"										\
+-	".set regnr, 0\n"									\
++	".set .Lregnr, 0\n"									\
+ 	".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n"	\
+ 	".ifc \\reg, %%\\rs\n"									\
+-	".set found, found+1\n"									\
+-	".long \\type + (regnr << 8)\n"								\
++	".set .Lfound, .Lfound+1\n"								\
++	".long \\type + (.Lregnr << 8)\n"							\
+ 	".endif\n"										\
+-	".set regnr, regnr+1\n"									\
++	".set .Lregnr, .Lregnr+1\n"								\
+ 	".endr\n"										\
+-	".if (found != 1)\n"									\
++	".if (.Lfound != 1)\n"									\
+ 	".error \"extable_type_reg: bad register argument\"\n"					\
+ 	".endif\n"										\
+ 	".endm\n"
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index bab883c0b6fee..66570e95af398 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -77,9 +77,9 @@ do {								\
+  */
+ #define __WARN_FLAGS(flags)					\
+ do {								\
+-	__auto_type f = BUGFLAG_WARNING|(flags);		\
++	__auto_type __flags = BUGFLAG_WARNING|(flags);		\
+ 	instrumentation_begin();				\
+-	_BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE);			\
++	_BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE);		\
+ 	instrumentation_end();					\
+ } while (0)
+ 
+diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
+index ae9d40f6c7066..05af249d6bec2 100644
+--- a/arch/x86/include/asm/irq_stack.h
++++ b/arch/x86/include/asm/irq_stack.h
+@@ -99,7 +99,8 @@
+ }
+ 
+ #define ASM_CALL_ARG0							\
+-	"call %P[__func]				\n"
++	"call %P[__func]				\n"		\
++	ASM_REACHABLE
+ 
+ #define ASM_CALL_ARG1							\
+ 	"movq	%[arg1], %%rdi				\n"		\
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ec9830d2aabf8..17b4e1808b8e8 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -509,6 +509,7 @@ struct kvm_pmu {
+ 	u64 global_ctrl_mask;
+ 	u64 global_ovf_ctrl_mask;
+ 	u64 reserved_bits;
++	u64 raw_event_mask;
+ 	u8 version;
+ 	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+ 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
+diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
+index b85147d75626e..d71c7e8b738d2 100644
+--- a/arch/x86/include/asm/msi.h
++++ b/arch/x86/include/asm/msi.h
+@@ -12,14 +12,17 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
+ /* Structs and defines for the X86 specific MSI message format */
+ 
+ typedef struct x86_msi_data {
+-	u32	vector			:  8,
+-		delivery_mode		:  3,
+-		dest_mode_logical	:  1,
+-		reserved		:  2,
+-		active_low		:  1,
+-		is_level		:  1;
+-
+-	u32	dmar_subhandle;
++	union {
++		struct {
++			u32	vector			:  8,
++				delivery_mode		:  3,
++				dest_mode_logical	:  1,
++				reserved		:  2,
++				active_low		:  1,
++				is_level		:  1;
++		};
++		u32	dmar_subhandle;
++	};
+ } __attribute__ ((packed)) arch_msi_msg_data_t;
+ #define arch_msi_msg_data	x86_msi_data
+ 
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 8fc1b5003713f..a2b6626c681f5 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -241,6 +241,11 @@ struct x86_pmu_capability {
+ #define INTEL_PMC_IDX_FIXED_SLOTS	(INTEL_PMC_IDX_FIXED + 3)
+ #define INTEL_PMC_MSK_FIXED_SLOTS	(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
+ 
++static inline bool use_fixed_pseudo_encoding(u64 code)
++{
++	return !(code & 0xff);
++}
++
+ /*
+  * We model BTS tracing as another fixed-mode PMC.
+  *
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 5818b837fd4d4..2d719e0d2e404 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -834,6 +834,59 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+ 	m->cs = regs->cs;
+ }
+ 
++/*
++ * Disable fast string copy and return from the MCE handler upon the first SRAR
++ * MCE on bank 1 due to a CPU erratum on Intel Skylake/Cascade Lake/Cooper Lake
++ * CPUs.
++ * The fast string copy instructions ("REP; MOVS*") could consume an
++ * uncorrectable memory error in the cache line _right after_ the desired region
++ * to copy and raise an MCE with RIP pointing to the instruction _after_ the
++ * "REP; MOVS*".
++ * This mitigation addresses the issue completely with the caveat of performance
++ * degradation on the CPU affected. This is still better than the OS crashing on
++ * MCEs raised on an irrelevant process due to "REP; MOVS*" accesses from a
++ * kernel context (e.g., copy_page).
++ *
++ * Returns true when fast string copy on CPU has been disabled.
++ */
++static noinstr bool quirk_skylake_repmov(void)
++{
++	u64 mcgstatus   = mce_rdmsrl(MSR_IA32_MCG_STATUS);
++	u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE);
++	u64 mc1_status;
++
++	/*
++	 * Apply the quirk only to local machine checks, i.e., no broadcast
++	 * sync is needed.
++	 */
++	if (!(mcgstatus & MCG_STATUS_LMCES) ||
++	    !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
++		return false;
++
++	mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1));
++
++	/* Check for a software-recoverable data fetch error. */
++	if ((mc1_status &
++	     (MCI_STATUS_VAL | MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN |
++	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_PCC |
++	      MCI_STATUS_AR | MCI_STATUS_S)) ==
++	     (MCI_STATUS_VAL |                   MCI_STATUS_UC | MCI_STATUS_EN |
++	      MCI_STATUS_ADDRV | MCI_STATUS_MISCV |
++	      MCI_STATUS_AR | MCI_STATUS_S)) {
++		misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
++		mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++		mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0);
++
++		instrumentation_begin();
++		pr_err_once("Erratum detected, disable fast string copy instructions.\n");
++		instrumentation_end();
++
++		return true;
++	}
++
++	return false;
++}
++
+ /*
+  * Do a quick check if any of the events requires a panic.
+  * This decides if we keep the events around or clear them.
+@@ -1403,6 +1456,9 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ 	else if (unlikely(!mca_cfg.initialized))
+ 		return unexpected_machine_check(regs);
+ 
++	if (mce_flags.skx_repmov_quirk && quirk_skylake_repmov())
++		goto clear;
++
+ 	/*
+ 	 * Establish sequential order between the CPUs entering the machine
+ 	 * check handler.
+@@ -1545,6 +1601,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ out:
+ 	instrumentation_end();
+ 
++clear:
+ 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ }
+ EXPORT_SYMBOL_GPL(do_machine_check);
+@@ -1858,6 +1915,13 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+ 
+ 		if (c->x86 == 6 && c->x86_model == 45)
+ 			mce_flags.snb_ifu_quirk = 1;
++
++		/*
++		 * Skylake, Cascade Lake and Cooper Lake require a quirk on
++		 * rep movs.
++		 */
++		if (c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_X)
++			mce_flags.skx_repmov_quirk = 1;
+ 	}
+ 
+ 	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
+diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
+index 52c633950b38d..24d099e2d2a23 100644
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -170,7 +170,10 @@ struct mce_vendor_flags {
+ 	/* SandyBridge IFU quirk */
+ 	snb_ifu_quirk		: 1,
+ 
+-	__reserved_0		: 57;
++	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
++	skx_repmov_quirk	: 1,
++
++	__reserved_0		: 56;
+ };
+ 
+ extern struct mce_vendor_flags mce_flags;
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 531fb4cbb63fd..aa72cefdd5be6 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -12,10 +12,9 @@ enum insn_type {
+ };
+ 
+ /*
+- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
+- * The REX.W cancels the effect of any data16.
++ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
+  */
+-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
++static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
+ 
+ static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
+ 
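
For reference, the five replacement bytes decode as three CS segment-override
prefixes, which have no architectural effect on a register-only instruction,
followed by xorl %eax,%eax, whose 32-bit write zero-extends and therefore
clears all of %rax; the prefixes pad the sequence to the five bytes of a call
so the sites can be rewritten in place. An annotated copy, as illustration
only, not a further change to the file:

static const unsigned char xor5rax_annotated[] = {
	0x2e,       /* cs prefix: inert here, pure padding */
	0x2e,       /* cs prefix */
	0x2e,       /* cs prefix */
	0x31, 0xc0, /* xorl %eax,%eax -- clears %rax via zero-extension */
};
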
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 02d061a06aa19..de9d8a27387cf 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3523,8 +3523,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
+ {
+ 	u64 tsc_aux = 0;
+ 
+-	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
++	if (!ctxt->ops->guest_has_rdpid(ctxt))
+ 		return emulate_ud(ctxt);
++
++	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
+ 	ctxt->dst.val = tsc_aux;
+ 	return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 39eded2426ffd..a2a7654d8aced 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -226,6 +226,7 @@ struct x86_emulate_ops {
+ 	bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
+ 	bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
+ 	bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
++	bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
+ 
+ 	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+ 
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index b1a02993782b3..eca39f56c2315 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -96,8 +96,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
+ 
+ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+ 				  u64 config, bool exclude_user,
+-				  bool exclude_kernel, bool intr,
+-				  bool in_tx, bool in_tx_cp)
++				  bool exclude_kernel, bool intr)
+ {
+ 	struct perf_event *event;
+ 	struct perf_event_attr attr = {
+@@ -116,16 +115,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+ 
+ 	attr.sample_period = get_sample_period(pmc, pmc->counter);
+ 
+-	if (in_tx)
+-		attr.config |= HSW_IN_TX;
+-	if (in_tx_cp) {
++	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
++	    guest_cpuid_is_intel(pmc->vcpu)) {
+ 		/*
+ 		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
+ 		 * period. Just clear the sample period so at least
+ 		 * allocating the counter doesn't fail.
+ 		 */
+ 		attr.sample_period = 0;
+-		attr.config |= HSW_IN_TX_CHECKPOINTED;
+ 	}
+ 
+ 	event = perf_event_create_kernel_counter(&attr, -1, current,
+@@ -185,6 +182,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ 	u32 type = PERF_TYPE_RAW;
+ 	struct kvm *kvm = pmc->vcpu->kvm;
+ 	struct kvm_pmu_event_filter *filter;
++	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
+ 	bool allow_event = true;
+ 
+ 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
+@@ -221,7 +219,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ 	}
+ 
+ 	if (type == PERF_TYPE_RAW)
+-		config = eventsel & AMD64_RAW_EVENT_MASK;
++		config = eventsel & pmu->raw_event_mask;
+ 
+ 	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+ 		return;
+@@ -232,9 +230,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ 	pmc_reprogram_counter(pmc, type, config,
+ 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+ 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+-			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
+-			      (eventsel & HSW_IN_TX),
+-			      (eventsel & HSW_IN_TX_CHECKPOINTED));
++			      eventsel & ARCH_PERFMON_EVENTSEL_INT);
+ }
+ EXPORT_SYMBOL_GPL(reprogram_gp_counter);
+ 
+@@ -270,7 +266,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
+ 			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
+ 			      !(en_field & 0x2), /* exclude user */
+ 			      !(en_field & 0x1), /* exclude kernel */
+-			      pmi, false, false);
++			      pmi);
+ }
+ EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
+ 
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index 5aa45f13b16dc..ba40b7fced5ae 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -262,12 +262,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	/* MSR_EVNTSELn */
+ 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
+ 	if (pmc) {
+-		if (data == pmc->eventsel)
+-			return 0;
+-		if (!(data & pmu->reserved_bits)) {
++		data &= ~pmu->reserved_bits;
++		if (data != pmc->eventsel)
+ 			reprogram_gp_counter(pmc, data);
+-			return 0;
+-		}
++		return 0;
+ 	}
+ 
+ 	return 1;
+@@ -284,6 +282,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
+ 
+ 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+ 	pmu->reserved_bits = 0xfffffff000280000ull;
++	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
+ 	pmu->version = 1;
+ 	/* not applicable to AMD; but clean them to prevent any fall out */
+ 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index fa98d6844728f..86bcfed6599ea 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -22,6 +22,8 @@
+ #include <asm/svm.h>
+ #include <asm/sev-common.h>
+ 
++#include "kvm_cache_regs.h"
++
+ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+ 
+ #define	IOPM_SIZE PAGE_SIZE * 3
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
+index 98aa981c04ec5..8cdc62c74a964 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.c
++++ b/arch/x86/kvm/svm/svm_onhyperv.c
+@@ -4,7 +4,6 @@
+  */
+ 
+ #include <linux/kvm_host.h>
+-#include "kvm_cache_regs.h"
+ 
+ #include <asm/mshyperv.h>
+ 
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 466d18fc0c5da..5fa3870b89881 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -389,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	struct kvm_pmc *pmc;
+ 	u32 msr = msr_info->index;
+ 	u64 data = msr_info->data;
++	u64 reserved_bits;
+ 
+ 	switch (msr) {
+ 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+@@ -443,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
+ 			if (data == pmc->eventsel)
+ 				return 0;
+-			if (!(data & pmu->reserved_bits)) {
++			reserved_bits = pmu->reserved_bits;
++			if ((pmc->idx == 2) &&
++			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
++				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
++			if (!(data & reserved_bits)) {
+ 				reprogram_gp_counter(pmc, data);
+ 				return 0;
+ 			}
+@@ -485,6 +490,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
+ 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+ 	pmu->version = 0;
+ 	pmu->reserved_bits = 0xffffffff00200000ull;
++	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
+ 
+ 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+ 	if (!entry || !enable_pmu)
+@@ -533,8 +539,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
+ 	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+ 	if (entry &&
+ 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+-	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
+-		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
++	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
++		pmu->reserved_bits ^= HSW_IN_TX;
++		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
++	}
+ 
+ 	bitmap_set(pmu->all_valid_pmc_idx,
+ 		0, pmu->nr_arch_gp_counters);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9b6166348c94f..c81ec70197fb5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7675,6 +7675,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
+ 	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
+ }
+ 
++static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
++{
++	return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
++}
++
+ static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
+ {
+ 	return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
+@@ -7757,6 +7762,7 @@ static const struct x86_emulate_ops emulate_ops = {
+ 	.guest_has_long_mode = emulator_guest_has_long_mode,
+ 	.guest_has_movbe     = emulator_guest_has_movbe,
+ 	.guest_has_fxsr      = emulator_guest_has_fxsr,
++	.guest_has_rdpid     = emulator_guest_has_rdpid,
+ 	.set_nmi_mask        = emulator_set_nmi_mask,
+ 	.get_hflags          = emulator_get_hflags,
+ 	.exiting_smm         = emulator_exiting_smm,
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index a6cf56a149393..b3cb49de0a643 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -854,13 +854,11 @@ done:
+ 			nr_invalidate);
+ }
+ 
+-static bool tlb_is_not_lazy(int cpu)
++static bool tlb_is_not_lazy(int cpu, void *data)
+ {
+ 	return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+ }
+ 
+-static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+-
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+ EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
+ 
+@@ -889,36 +887,11 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
+ 	 * up on the new contents of what used to be page tables, while
+ 	 * doing a speculative memory access.
+ 	 */
+-	if (info->freed_tables) {
++	if (info->freed_tables)
+ 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
+-	} else {
+-		/*
+-		 * Although we could have used on_each_cpu_cond_mask(),
+-		 * open-coding it has performance advantages, as it eliminates
+-		 * the need for indirect calls or retpolines. In addition, it
+-		 * allows to use a designated cpumask for evaluating the
+-		 * condition, instead of allocating one.
+-		 *
+-		 * This code works under the assumption that there are no nested
+-		 * TLB flushes, an assumption that is already made in
+-		 * flush_tlb_mm_range().
+-		 *
+-		 * cond_cpumask is logically a stack-local variable, but it is
+-		 * more efficient to have it off the stack and not to allocate
+-		 * it on demand. Preemption is disabled and this code is
+-		 * non-reentrant.
+-		 */
+-		struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
+-		int cpu;
+-
+-		cpumask_clear(cond_cpumask);
+-
+-		for_each_cpu(cpu, cpumask) {
+-			if (tlb_is_not_lazy(cpu))
+-				__cpumask_set_cpu(cpu, cond_cpumask);
+-		}
+-		on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
+-	}
++	else
++		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
++				(void *)info, 1, cpumask);
+ }
+ 
+ void flush_tlb_multi(const struct cpumask *cpumask,
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 9f2b251e83c56..3822666fb73d5 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
+ 	struct saved_msr *end = msr + ctxt->saved_msrs.num;
+ 
+ 	while (msr < end) {
+-		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
++		if (msr->valid)
++			rdmsrl(msr->info.msr_no, msr->info.reg.q);
+ 		msr++;
+ 	}
+ }
+@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
+ 	}
+ 
+ 	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
++		u64 dummy;
++
+ 		msr_array[i].info.msr_no	= msr_id[j];
+-		msr_array[i].valid		= false;
++		msr_array[i].valid		= !rdmsrl_safe(msr_id[j], &dummy);
+ 		msr_array[i].info.reg.q		= 0;
+ 	}
+ 	saved_msrs->num   = total_num;
+@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
+ 	return ret;
+ }
+ 
++static void pm_save_spec_msr(void)
++{
++	u32 spec_msr_id[] = {
++		MSR_IA32_SPEC_CTRL,
++		MSR_IA32_TSX_CTRL,
++		MSR_TSX_FORCE_ABORT,
++		MSR_IA32_MCU_OPT_CTRL,
++		MSR_AMD64_LS_CFG,
++	};
++
++	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
++}
++
+ static int pm_check_save_msr(void)
+ {
+ 	dmi_check_system(msr_save_dmi_table);
+ 	pm_cpu_check(msr_save_cpu_table);
++	pm_save_spec_msr();
+ 
+ 	return 0;
+ }
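
With the power/cpu.c change above, msr_build_context() probes each MSR once with rdmsrl_safe() at registration time and records the result in ->valid, so msr_save_context() only issues non-faulting rdmsrl() reads for MSRs known to exist. A rough userspace analogue of that probe-once, use-many pattern, with the privileged reads replaced by a stub:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct saved_msr { uint32_t msr_no; uint64_t val; bool valid; };

    /* Stand-in for rdmsrl_safe(): pretend only even-numbered MSRs exist. */
    static int rdmsr_safe_stub(uint32_t msr, uint64_t *val)
    {
            if (msr % 2)
                    return -1;           /* would #GP: not present */
            *val = (uint64_t)msr << 8;
            return 0;
    }

    static void build_context(struct saved_msr *arr, const uint32_t *ids, int n)
    {
            uint64_t dummy;
            for (int i = 0; i < n; i++) {
                    arr[i].msr_no = ids[i];
                    arr[i].val = 0;
                    /* Probe availability exactly once, at registration. */
                    arr[i].valid = !rdmsr_safe_stub(ids[i], &dummy);
            }
    }

    static void save_context(struct saved_msr *arr, int n)
    {
            for (int i = 0; i < n; i++)
                    if (arr[i].valid)    /* only read MSRs that exist */
                            rdmsr_safe_stub(arr[i].msr_no, &arr[i].val);
    }

    int main(void)
    {
            const uint32_t ids[] = { 0x48, 0x49, 0x122 };
            struct saved_msr ctx[3];

            build_context(ctx, ids, 3);
            save_context(ctx, 3);
            for (int i = 0; i < 3; i++)
                    printf("msr %#x valid=%d val=%#llx\n", (unsigned)ctx[i].msr_no,
                           (int)ctx[i].valid, (unsigned long long)ctx[i].val);
            return 0;
    }
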
+diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
+index 6ff3c887e0b99..b70afdff419ca 100644
+--- a/arch/x86/xen/smp_hvm.c
++++ b/arch/x86/xen/smp_hvm.c
+@@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
+ 	 */
+ 	xen_vcpu_setup(0);
+ 
++	/*
++	 * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
++	 * Refer to comments in xen_hvm_init_time_ops().
++	 */
++	xen_hvm_init_time_ops();
++
+ 	/*
+ 	 * The alternative logic (which patches the unlock/lock) runs before
+ 	 * the smp bootup up code is activated. Hence we need to set this up
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index d9c945ee11008..9ef0a5cca96ee 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -558,6 +558,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
+ 
+ void __init xen_hvm_init_time_ops(void)
+ {
++	static bool hvm_time_initialized;
++
++	if (hvm_time_initialized)
++		return;
++
+ 	/*
+ 	 * vector callback is needed otherwise we cannot receive interrupts
+ 	 * on cpu > 0 and at this point we don't know how many cpus are
+@@ -567,7 +572,22 @@ void __init xen_hvm_init_time_ops(void)
+ 		return;
+ 
+ 	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
+-		pr_info("Xen doesn't support pvclock on HVM, disable pv timer");
++		pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer");
++		return;
++	}
++
++	/*
++	 * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'.
++	 * __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest
++	 * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec). Accessing
++	 * __this_cpu_read(xen_vcpu) via xen_clocksource_read() then panics.
++	 *
++	 * xen_hvm_init_time_ops() is therefore called again later, once
++	 * __this_cpu_read(xen_vcpu) is available.
++	 */
++	if (!__this_cpu_read(xen_vcpu)) {
++		pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n",
++			xen_vcpu_nr(0));
+ 		return;
+ 	}
+ 
+@@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void)
+ 	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
+ 
+ 	x86_platform.set_wallclock = xen_set_wallclock;
++
++	hvm_time_initialized = true;
+ }
+ #endif
+ 
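
The two Xen hunks above turn xen_hvm_init_time_ops() into an idempotent init routine: it bails out until its precondition (a usable per-cpu xen_vcpu) holds, records completion in a static flag, and is simply called again from xen_hvm_smp_prepare_boot_cpu(). A generic sketch of that retry-until-ready pattern, with all names illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static bool precondition_ready; /* stands in for __this_cpu_read(xen_vcpu) */
    static bool initialized;

    static void init_time_ops(void)
    {
            if (initialized)
                    return;             /* safe to call any number of times */
            if (!precondition_ready) {
                    printf("delaying init, precondition not met\n");
                    return;             /* caller retries later */
            }
            printf("registering clocksource ops\n");
            initialized = true;
    }

    int main(void)
    {
            init_time_ops();            /* early boot: deferred */
            precondition_ready = true;  /* e.g. after xen_vcpu_setup(0) */
            init_time_ops();            /* SMP prepare: succeeds */
            init_time_ops();            /* further calls are no-ops */
            return 0;
    }
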
+diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
+index 9bf8bad1dd18a..c33932568aa73 100644
+--- a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
++++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
+@@ -8,19 +8,19 @@
+ 			reg = <0x00000000 0x08000000>;
+ 			bank-width = <2>;
+ 			device-width = <2>;
+-			partition@0x0 {
++			partition@0 {
+ 				label = "data";
+ 				reg = <0x00000000 0x06000000>;
+ 			};
+-			partition@0x6000000 {
++			partition@6000000 {
+ 				label = "boot loader area";
+ 				reg = <0x06000000 0x00800000>;
+ 			};
+-			partition@0x6800000 {
++			partition@6800000 {
+ 				label = "kernel image";
+ 				reg = <0x06800000 0x017e0000>;
+ 			};
+-			partition@0x7fe0000 {
++			partition@7fe0000 {
+ 				label = "boot environment";
+ 				reg = <0x07fe0000 0x00020000>;
+ 			};
+diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
+index 40c2f81f7cb66..7bde2ab2d6fb5 100644
+--- a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
++++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
+@@ -8,19 +8,19 @@
+ 			reg = <0x08000000 0x01000000>;
+ 			bank-width = <2>;
+ 			device-width = <2>;
+-			partition@0x0 {
++			partition@0 {
+ 				label = "boot loader area";
+ 				reg = <0x00000000 0x00400000>;
+ 			};
+-			partition@0x400000 {
++			partition@400000 {
+ 				label = "kernel image";
+ 				reg = <0x00400000 0x00600000>;
+ 			};
+-			partition@0xa00000 {
++			partition@a00000 {
+ 				label = "data";
+ 				reg = <0x00a00000 0x005e0000>;
+ 			};
+-			partition@0xfe0000 {
++			partition@fe0000 {
+ 				label = "boot environment";
+ 				reg = <0x00fe0000 0x00020000>;
+ 			};
+diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
+index fb8d3a9f33c23..0655b868749a4 100644
+--- a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
++++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
+@@ -8,11 +8,11 @@
+ 			reg = <0x08000000 0x00400000>;
+ 			bank-width = <2>;
+ 			device-width = <2>;
+-			partition@0x0 {
++			partition@0 {
+ 				label = "boot loader area";
+ 				reg = <0x00000000 0x003f0000>;
+ 			};
+-			partition@0x3f0000 {
++			partition@3f0000 {
+ 				label = "boot environment";
+ 				reg = <0x003f0000 0x00010000>;
+ 			};
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index f8e9fa82cb9b1..05b3985a1984b 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -570,8 +570,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ {
+ 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ 
+-	if (cx->type == ACPI_STATE_C3)
+-		ACPI_FLUSH_CPU_CACHE();
++	ACPI_FLUSH_CPU_CACHE();
+ 
+ 	while (1) {
+ 
+diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
+index bec33d781ae04..e3263e961045a 100644
+--- a/drivers/ata/sata_dwc_460ex.c
++++ b/drivers/ata/sata_dwc_460ex.c
+@@ -137,7 +137,11 @@ struct sata_dwc_device {
+ #endif
+ };
+ 
+-#define SATA_DWC_QCMD_MAX	32
++/*
++ * Allow one extra special slot for commands and DMA management
++ * to account for libata internal commands.
++ */
++#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)
+ 
+ struct sata_dwc_device_port {
+ 	struct sata_dwc_device	*hsdev;
+diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
+index f27d5b0f9a0bb..a98bfcf4a5f02 100644
+--- a/drivers/block/drbd/drbd_int.h
++++ b/drivers/block/drbd/drbd_int.h
+@@ -1642,22 +1642,22 @@ struct sib_info {
+ };
+ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
+ 
+-extern void notify_resource_state(struct sk_buff *,
++extern int notify_resource_state(struct sk_buff *,
+ 				  unsigned int,
+ 				  struct drbd_resource *,
+ 				  struct resource_info *,
+ 				  enum drbd_notification_type);
+-extern void notify_device_state(struct sk_buff *,
++extern int notify_device_state(struct sk_buff *,
+ 				unsigned int,
+ 				struct drbd_device *,
+ 				struct device_info *,
+ 				enum drbd_notification_type);
+-extern void notify_connection_state(struct sk_buff *,
++extern int notify_connection_state(struct sk_buff *,
+ 				    unsigned int,
+ 				    struct drbd_connection *,
+ 				    struct connection_info *,
+ 				    enum drbd_notification_type);
+-extern void notify_peer_device_state(struct sk_buff *,
++extern int notify_peer_device_state(struct sk_buff *,
+ 				     unsigned int,
+ 				     struct drbd_peer_device *,
+ 				     struct peer_device_info *,
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 6f450816c4fa6..5d5beeba3ed4f 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2793,12 +2793,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ 
+ 	if (init_submitter(device)) {
+ 		err = ERR_NOMEM;
+-		goto out_idr_remove_vol;
++		goto out_idr_remove_from_resource;
+ 	}
+ 
+ 	err = add_disk(disk);
+ 	if (err)
+-		goto out_idr_remove_vol;
++		goto out_idr_remove_from_resource;
+ 
+ 	/* inherit the connection state */
+ 	device->state.conn = first_connection(resource)->cstate;
+@@ -2812,8 +2812,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
+ 	drbd_debugfs_device_add(device);
+ 	return NO_ERROR;
+ 
+-out_idr_remove_vol:
+-	idr_remove(&connection->peer_devices, vnr);
+ out_idr_remove_from_resource:
+ 	for_each_connection(connection, resource) {
+ 		peer_device = idr_remove(&connection->peer_devices, vnr);
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index 44ccf8b4f4b29..69184cf17b6ad 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -4617,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
+ 	return drbd_notification_header_to_skb(msg, &nh, true);
+ }
+ 
+-void notify_resource_state(struct sk_buff *skb,
++int notify_resource_state(struct sk_buff *skb,
+ 			   unsigned int seq,
+ 			   struct drbd_resource *resource,
+ 			   struct resource_info *resource_info,
+@@ -4659,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb,
+ 		if (err && err != -ESRCH)
+ 			goto failed;
+ 	}
+-	return;
++	return 0;
+ 
+ nla_put_failure:
+ 	nlmsg_free(skb);
+ failed:
+ 	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
+ 			err, seq);
++	return err;
+ }
+ 
+-void notify_device_state(struct sk_buff *skb,
++int notify_device_state(struct sk_buff *skb,
+ 			 unsigned int seq,
+ 			 struct drbd_device *device,
+ 			 struct device_info *device_info,
+@@ -4708,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb,
+ 		if (err && err != -ESRCH)
+ 			goto failed;
+ 	}
+-	return;
++	return 0;
+ 
+ nla_put_failure:
+ 	nlmsg_free(skb);
+ failed:
+ 	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
+ 		 err, seq);
++	return err;
+ }
+ 
+-void notify_connection_state(struct sk_buff *skb,
++int notify_connection_state(struct sk_buff *skb,
+ 			     unsigned int seq,
+ 			     struct drbd_connection *connection,
+ 			     struct connection_info *connection_info,
+@@ -4757,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb,
+ 		if (err && err != -ESRCH)
+ 			goto failed;
+ 	}
+-	return;
++	return 0;
+ 
+ nla_put_failure:
+ 	nlmsg_free(skb);
+ failed:
+ 	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
+ 		 err, seq);
++	return err;
+ }
+ 
+-void notify_peer_device_state(struct sk_buff *skb,
++int notify_peer_device_state(struct sk_buff *skb,
+ 			      unsigned int seq,
+ 			      struct drbd_peer_device *peer_device,
+ 			      struct peer_device_info *peer_device_info,
+@@ -4807,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb,
+ 		if (err && err != -ESRCH)
+ 			goto failed;
+ 	}
+-	return;
++	return 0;
+ 
+ nla_put_failure:
+ 	nlmsg_free(skb);
+ failed:
+ 	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
+ 		 err, seq);
++	return err;
+ }
+ 
+ void notify_helper(enum drbd_notification_type type,
+@@ -4864,7 +4868,7 @@ fail:
+ 		 err, seq);
+ }
+ 
+-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
++static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+ {
+ 	struct drbd_genlmsghdr *dh;
+ 	int err;
+@@ -4878,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+ 	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
+ 		goto nla_put_failure;
+ 	genlmsg_end(skb, dh);
+-	return;
++	return 0;
+ 
+ nla_put_failure:
+ 	nlmsg_free(skb);
+ 	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
++	return err;
+ }
+ 
+ static void free_state_changes(struct list_head *list)
+@@ -4909,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+ 	unsigned int seq = cb->args[2];
+ 	unsigned int n;
+ 	enum drbd_notification_type flags = 0;
++	int err = 0;
+ 
+ 	/* There is no need for taking notification_mutex here: it doesn't
+ 	   matter if the initial state events mix with later state change
+@@ -4917,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	cb->args[5]--;
+ 	if (cb->args[5] == 1) {
+-		notify_initial_state_done(skb, seq);
++		err = notify_initial_state_done(skb, seq);
+ 		goto out;
+ 	}
+ 	n = cb->args[4]++;
+ 	if (cb->args[4] < cb->args[3])
+ 		flags |= NOTIFY_CONTINUES;
+ 	if (n < 1) {
+-		notify_resource_state_change(skb, seq, state_change->resource,
++		err = notify_resource_state_change(skb, seq, state_change->resource,
+ 					     NOTIFY_EXISTS | flags);
+ 		goto next;
+ 	}
+ 	n--;
+ 	if (n < state_change->n_connections) {
+-		notify_connection_state_change(skb, seq, &state_change->connections[n],
++		err = notify_connection_state_change(skb, seq, &state_change->connections[n],
+ 					       NOTIFY_EXISTS | flags);
+ 		goto next;
+ 	}
+ 	n -= state_change->n_connections;
+ 	if (n < state_change->n_devices) {
+-		notify_device_state_change(skb, seq, &state_change->devices[n],
++		err = notify_device_state_change(skb, seq, &state_change->devices[n],
+ 					   NOTIFY_EXISTS | flags);
+ 		goto next;
+ 	}
+ 	n -= state_change->n_devices;
+ 	if (n < state_change->n_devices * state_change->n_connections) {
+-		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
++		err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+ 						NOTIFY_EXISTS | flags);
+ 		goto next;
+ 	}
+@@ -4957,7 +4963,10 @@ next:
+ 		cb->args[4] = 0;
+ 	}
+ out:
+-	return skb->len;
++	if (err)
++		return err;
++	else
++		return skb->len;
+ }
+ 
+ int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
+diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
+index b8a27818ab3f8..4ee11aef6672b 100644
+--- a/drivers/block/drbd/drbd_state.c
++++ b/drivers/block/drbd/drbd_state.c
+@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
+ 	return rv;
+ }
+ 
+-void notify_resource_state_change(struct sk_buff *skb,
++int notify_resource_state_change(struct sk_buff *skb,
+ 				  unsigned int seq,
+ 				  struct drbd_resource_state_change *resource_state_change,
+ 				  enum drbd_notification_type type)
+@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
+ 		.res_susp_fen = resource_state_change->susp_fen[NEW],
+ 	};
+ 
+-	notify_resource_state(skb, seq, resource, &resource_info, type);
++	return notify_resource_state(skb, seq, resource, &resource_info, type);
+ }
+ 
+-void notify_connection_state_change(struct sk_buff *skb,
++int notify_connection_state_change(struct sk_buff *skb,
+ 				    unsigned int seq,
+ 				    struct drbd_connection_state_change *connection_state_change,
+ 				    enum drbd_notification_type type)
+@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
+ 		.conn_role = connection_state_change->peer_role[NEW],
+ 	};
+ 
+-	notify_connection_state(skb, seq, connection, &connection_info, type);
++	return notify_connection_state(skb, seq, connection, &connection_info, type);
+ }
+ 
+-void notify_device_state_change(struct sk_buff *skb,
++int notify_device_state_change(struct sk_buff *skb,
+ 				unsigned int seq,
+ 				struct drbd_device_state_change *device_state_change,
+ 				enum drbd_notification_type type)
+@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
+ 		.dev_disk_state = device_state_change->disk_state[NEW],
+ 	};
+ 
+-	notify_device_state(skb, seq, device, &device_info, type);
++	return notify_device_state(skb, seq, device, &device_info, type);
+ }
+ 
+-void notify_peer_device_state_change(struct sk_buff *skb,
++int notify_peer_device_state_change(struct sk_buff *skb,
+ 				     unsigned int seq,
+ 				     struct drbd_peer_device_state_change *p,
+ 				     enum drbd_notification_type type)
+@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
+ 		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
+ 	};
+ 
+-	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
++	return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+ }
+ 
+ static void broadcast_state_change(struct drbd_state_change *state_change)
+@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
+ 	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
+ 	bool resource_state_has_changed;
+ 	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
+-	void (*last_func)(struct sk_buff *, unsigned int, void *,
++	int (*last_func)(struct sk_buff *, unsigned int, void *,
+ 			  enum drbd_notification_type) = NULL;
+ 	void *last_arg = NULL;
+ 
+diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
+index ba80f612d6abb..d5b0479bc9a66 100644
+--- a/drivers/block/drbd/drbd_state_change.h
++++ b/drivers/block/drbd/drbd_state_change.h
+@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
+ extern void copy_old_to_new_state_change(struct drbd_state_change *);
+ extern void forget_state_change(struct drbd_state_change *);
+ 
+-extern void notify_resource_state_change(struct sk_buff *,
++extern int notify_resource_state_change(struct sk_buff *,
+ 					 unsigned int,
+ 					 struct drbd_resource_state_change *,
+ 					 enum drbd_notification_type type);
+-extern void notify_connection_state_change(struct sk_buff *,
++extern int notify_connection_state_change(struct sk_buff *,
+ 					   unsigned int,
+ 					   struct drbd_connection_state_change *,
+ 					   enum drbd_notification_type type);
+-extern void notify_device_state_change(struct sk_buff *,
++extern int notify_device_state_change(struct sk_buff *,
+ 				       unsigned int,
+ 				       struct drbd_device_state_change *,
+ 				       enum drbd_notification_type type);
+-extern void notify_peer_device_state_change(struct sk_buff *,
++extern int notify_peer_device_state_change(struct sk_buff *,
+ 					    unsigned int,
+ 					    struct drbd_peer_device_state_change *,
+ 					    enum drbd_notification_type type);
+diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
+index 6e7b0c7567c0f..0defa68bc2cef 100644
+--- a/drivers/bluetooth/btmtk.h
++++ b/drivers/bluetooth/btmtk.h
+@@ -5,6 +5,7 @@
+ #define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"
+ #define FIRMWARE_MT7961		"mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+ 
++#define HCI_EV_WMT 0xe4
+ #define HCI_WMT_MAX_EVENT_SIZE		64
+ 
+ #define BTMTK_WMT_REG_READ 0x2
+diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
+index 9b868f187316d..ecf29cfa7d792 100644
+--- a/drivers/bluetooth/btmtksdio.c
++++ b/drivers/bluetooth/btmtksdio.c
+@@ -370,13 +370,6 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ 	struct hci_event_hdr *hdr = (void *)skb->data;
+ 	int err;
+ 
+-	/* Fix up the vendor event id with 0xff for vendor specific instead
+-	 * of 0xe4 so that event send via monitoring socket can be parsed
+-	 * properly.
+-	 */
+-	if (hdr->evt == 0xe4)
+-		hdr->evt = HCI_EV_VENDOR;
+-
+ 	/* When someone waits for the WMT event, the skb is cloned and
+ 	 * the events are then processed from the clone.
+ 	 */
+@@ -392,7 +385,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+ 	if (err < 0)
+ 		goto err_free_skb;
+ 
+-	if (hdr->evt == HCI_EV_VENDOR) {
++	if (hdr->evt == HCI_EV_WMT) {
+ 		if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
+ 				       &bdev->tx_state)) {
+ 			/* Barrier to sync with other CPUs */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 2afbd87d77c9b..42234d5f602dd 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2254,7 +2254,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ {
+ 	struct hci_dev *hdev = urb->context;
+ 	struct btusb_data *data = hci_get_drvdata(hdev);
+-	struct hci_event_hdr *hdr;
+ 	struct sk_buff *skb;
+ 	int err;
+ 
+@@ -2274,13 +2273,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ 		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ 		skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
+ 
+-		hdr = (void *)skb->data;
+-		/* Fix up the vendor event id with 0xff for vendor specific
+-		 * instead of 0xe4 so that event send via monitoring socket can
+-		 * be parsed properly.
+-		 */
+-		hdr->evt = 0xff;
+-
+ 		/* When someone waits for the WMT event, the skb is cloned and
+ 		 * the events are then processed from the clone.
+ 		 */
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index e3c430539a176..9fa3c76a267f5 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -2245,7 +2245,7 @@ static struct virtio_driver virtio_rproc_serial = {
+ 	.remove =	virtcons_remove,
+ };
+ 
+-static int __init init(void)
++static int __init virtio_console_init(void)
+ {
+ 	int err;
+ 
+@@ -2280,7 +2280,7 @@ free:
+ 	return err;
+ }
+ 
+-static void __exit fini(void)
++static void __exit virtio_console_fini(void)
+ {
+ 	reclaim_dma_bufs();
+ 
+@@ -2290,8 +2290,8 @@ static void __exit fini(void)
+ 	class_destroy(pdrvdata.class);
+ 	debugfs_remove_recursive(pdrvdata.debugfs_dir);
+ }
+-module_init(init);
+-module_exit(fini);
++module_init(virtio_console_init);
++module_exit(virtio_console_fini);
+ 
+ MODULE_DESCRIPTION("Virtio console driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
+index f7b41366666e5..4de098b6b0d4e 100644
+--- a/drivers/clk/clk-si5341.c
++++ b/drivers/clk/clk-si5341.c
+@@ -798,6 +798,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
+ 	u32 r_divider;
+ 	u8 r[3];
+ 
++	err = regmap_read(output->data->regmap,
++			SI5341_OUT_CONFIG(output), &val);
++	if (err < 0)
++		return err;
++
++	/* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */
++	if (val & SI5341_OUT_CFG_RDIV_FORCE2)
++		return parent_rate / 2;
++
+ 	err = regmap_bulk_read(output->data->regmap,
+ 			SI5341_OUT_R_REG(output), r, 3);
+ 	if (err < 0)
+@@ -814,13 +823,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw,
+ 	r_divider += 1;
+ 	r_divider <<= 1;
+ 
+-	err = regmap_read(output->data->regmap,
+-			SI5341_OUT_CONFIG(output), &val);
+-	if (err < 0)
+-		return err;
+-
+-	if (val & SI5341_OUT_CFG_RDIV_FORCE2)
+-		r_divider = 2;
+ 
+ 	return parent_rate / r_divider;
+ }
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 01b64b962e76f..2fdfce116087a 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -632,6 +632,24 @@ static void clk_core_get_boundaries(struct clk_core *core,
+ 		*max_rate = min(*max_rate, clk_user->max_rate);
+ }
+ 
++static bool clk_core_check_boundaries(struct clk_core *core,
++				      unsigned long min_rate,
++				      unsigned long max_rate)
++{
++	struct clk *user;
++
++	lockdep_assert_held(&prepare_lock);
++
++	if (min_rate > core->max_rate || max_rate < core->min_rate)
++		return false;
++
++	hlist_for_each_entry(user, &core->clks, clks_node)
++		if (min_rate > user->max_rate || max_rate < user->min_rate)
++			return false;
++
++	return true;
++}
++
+ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ 			   unsigned long max_rate)
+ {
+@@ -2348,6 +2366,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+ 	clk->min_rate = min;
+ 	clk->max_rate = max;
+ 
++	if (!clk_core_check_boundaries(clk->core, min, max)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	rate = clk_core_get_rate_nolock(clk->core);
+ 	if (rate < min || rate > max) {
+ 		/*
+@@ -2376,6 +2399,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+ 		}
+ 	}
+ 
++out:
+ 	if (clk->exclusive_count)
+ 		clk_core_rate_protect(clk->core);
+ 
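
The clk.c hunk above adds clk_core_check_boundaries(), which rejects a requested range unless it intersects both the hardware limits and every other consumer's range, so clk_set_rate_range() can now fail with -EINVAL instead of silently accepting an unsatisfiable request. A small standalone model of the interval check, with the clk structures simplified away:

    #include <stdbool.h>
    #include <stdio.h>

    struct user { unsigned long min, max; };

    /* Reject [min, max] if it cannot be satisfied together with the
     * hardware limits and every existing consumer's constraint. */
    static bool check_boundaries(unsigned long hw_min, unsigned long hw_max,
                                 const struct user *users, int n,
                                 unsigned long min, unsigned long max)
    {
            if (min > hw_max || max < hw_min)
                    return false;
            for (int i = 0; i < n; i++)
                    if (min > users[i].max || max < users[i].min)
                            return false;
            return true;
    }

    int main(void)
    {
            struct user users[] = { { 100, 300 }, { 150, 400 } };

            printf("%d\n", check_boundaries(50, 500, users, 2, 200, 250)); /* 1 */
            printf("%d\n", check_boundaries(50, 500, users, 2, 350, 450)); /* 0 */
            return 0;
    }
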
+diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
+index cbc7c6dbe0f44..79ddb3cc0b98a 100644
+--- a/drivers/clk/mediatek/clk-mt8192.c
++++ b/drivers/clk/mediatek/clk-mt8192.c
+@@ -1236,9 +1236,17 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
+ 
+ 	r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+ 	if (r)
+-		return r;
++		goto free_clk_data;
++
++	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++	if (r)
++		goto free_clk_data;
++
++	return r;
+ 
+-	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++free_clk_data:
++	mtk_free_clk_data(clk_data);
++	return r;
+ }
+ 
+ static int clk_mt8192_peri_probe(struct platform_device *pdev)
+@@ -1253,9 +1261,17 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev)
+ 
+ 	r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ 	if (r)
+-		return r;
++		goto free_clk_data;
++
++	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++	if (r)
++		goto free_clk_data;
+ 
+-	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++	return r;
++
++free_clk_data:
++	mtk_free_clk_data(clk_data);
++	return r;
+ }
+ 
+ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
+@@ -1271,9 +1287,17 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
+ 	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ 	r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ 	if (r)
+-		return r;
++		goto free_clk_data;
+ 
+-	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++	r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++	if (r)
++		goto free_clk_data;
++
++	return r;
++
++free_clk_data:
++	mtk_free_clk_data(clk_data);
++	return r;
+ }
+ 
+ static const struct of_device_id of_match_clk_mt8192[] = {
+diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
+index 69a9e8069a486..604a367bc498a 100644
+--- a/drivers/clk/rockchip/clk-rk3568.c
++++ b/drivers/clk/rockchip/clk-rk3568.c
+@@ -1038,13 +1038,13 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
+ 			RK3568_CLKGATE_CON(20), 8, GFLAGS),
+ 	GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 0,
+ 			RK3568_CLKGATE_CON(20), 9, GFLAGS),
+-	COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
++	COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
+ 			RK3568_CLKSEL_CON(39), 10, 2, MFLAGS, 0, 8, DFLAGS,
+ 			RK3568_CLKGATE_CON(20), 10, GFLAGS),
+-	COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
++	COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
+ 			RK3568_CLKSEL_CON(40), 10, 2, MFLAGS, 0, 8, DFLAGS,
+ 			RK3568_CLKGATE_CON(20), 11, GFLAGS),
+-	COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, 0,
++	COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT,
+ 			RK3568_CLKSEL_CON(41), 10, 2, MFLAGS, 0, 8, DFLAGS,
+ 			RK3568_CLKGATE_CON(20), 12, GFLAGS),
+ 	GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 0,
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 3da33c786d77c..29eafab4353ef 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
+ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+ {
+ 	struct ti_dt_clk *c;
+-	struct device_node *node, *parent;
++	struct device_node *node, *parent, *child;
+ 	struct clk *clk;
+ 	struct of_phandle_args clkspec;
+ 	char buf[64];
+@@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+ 		node = of_find_node_by_name(NULL, buf);
+ 		if (num_args && compat_mode) {
+ 			parent = node;
+-			node = of_get_child_by_name(parent, "clock");
+-			if (!node)
+-				node = of_get_child_by_name(parent, "clk");
+-			of_node_put(parent);
++			child = of_get_child_by_name(parent, "clock");
++			if (!child)
++				child = of_get_child_by_name(parent, "clk");
++			if (child) {
++				of_node_put(parent);
++				node = child;
++			}
+ 		}
+ 
+ 		clkspec.np = node;
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index db17196266e4b..82d370ae6a4a5 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -303,52 +303,48 @@ static u64 cppc_get_dmi_max_khz(void)
+ 
+ /*
+  * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+- * use them to convert perf to freq and vice versa
+- *
+- * If the perf/freq point lies between Nominal and Lowest, we can treat
+- * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
+- * and extrapolate the rest
+- * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
++ * use them to convert perf to freq and vice versa. The conversion is
++ * extrapolated as an affine function passing through the two points:
++ *  - (Low perf, Low freq)
++ *  - (Nominal perf, Nominal freq)
+  */
+ static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
+ 					     unsigned int perf)
+ {
+ 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
++	s64 retval, offset = 0;
+ 	static u64 max_khz;
+ 	u64 mul, div;
+ 
+ 	if (caps->lowest_freq && caps->nominal_freq) {
+-		if (perf >= caps->nominal_perf) {
+-			mul = caps->nominal_freq;
+-			div = caps->nominal_perf;
+-		} else {
+-			mul = caps->nominal_freq - caps->lowest_freq;
+-			div = caps->nominal_perf - caps->lowest_perf;
+-		}
++		mul = caps->nominal_freq - caps->lowest_freq;
++		div = caps->nominal_perf - caps->lowest_perf;
++		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
+ 	} else {
+ 		if (!max_khz)
+ 			max_khz = cppc_get_dmi_max_khz();
+ 		mul = max_khz;
+ 		div = caps->highest_perf;
+ 	}
+-	return (u64)perf * mul / div;
++
++	retval = offset + div64_u64(perf * mul, div);
++	if (retval >= 0)
++		return retval;
++	return 0;
+ }
+ 
+ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
+ 					     unsigned int freq)
+ {
+ 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
++	s64 retval, offset = 0;
+ 	static u64 max_khz;
+ 	u64  mul, div;
+ 
+ 	if (caps->lowest_freq && caps->nominal_freq) {
+-		if (freq >= caps->nominal_freq) {
+-			mul = caps->nominal_perf;
+-			div = caps->nominal_freq;
+-		} else {
+-			mul = caps->lowest_perf;
+-			div = caps->lowest_freq;
+-		}
++		mul = caps->nominal_perf - caps->lowest_perf;
++		div = caps->nominal_freq - caps->lowest_freq;
++		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
+ 	} else {
+ 		if (!max_khz)
+ 			max_khz = cppc_get_dmi_max_khz();
+@@ -356,7 +352,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
+ 		div = max_khz;
+ 	}
+ 
+-	return (u64)freq * mul / div;
++	retval = offset + div64_u64(freq * mul, div);
++	if (retval >= 0)
++		return retval;
++	return 0;
+ }
+ 
+ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
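
After the hunks above, both conversions use a single affine map through (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq), clamped at zero, instead of switching slopes at the nominal point. A runnable model of the arithmetic, with plain C division standing in for div64_u64() and made-up capability values:

    #include <stdint.h>
    #include <stdio.h>

    struct caps { uint64_t lowest_perf, nominal_perf, lowest_freq, nominal_freq; };

    /* freq(perf) = offset + perf * mul / div, through the two points above */
    static int64_t perf_to_khz(const struct caps *c, uint64_t perf)
    {
            uint64_t mul = c->nominal_freq - c->lowest_freq;
            uint64_t div = c->nominal_perf - c->lowest_perf;
            int64_t offset = (int64_t)c->nominal_freq -
                             (int64_t)(c->nominal_perf * mul / div);
            int64_t khz = offset + (int64_t)(perf * mul / div);

            return khz >= 0 ? khz : 0;  /* extrapolation can go negative */
    }

    int main(void)
    {
            /* Example capabilities; real values come from the CPPC registers. */
            struct caps c = { .lowest_perf = 10, .nominal_perf = 40,
                              .lowest_freq = 500000, .nominal_freq = 2000000 };

            printf("%lld\n", (long long)perf_to_khz(&c, 10));  /* 500000  */
            printf("%lld\n", (long long)perf_to_khz(&c, 40));  /* 2000000 */
            printf("%lld\n", (long long)perf_to_khz(&c, 60));  /* 3000000 */
            return 0;
    }
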
+diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
+index b26ed690f03c8..158e5e7defaeb 100644
+--- a/drivers/dma/sh/shdma-base.c
++++ b/drivers/dma/sh/shdma-base.c
+@@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
+ 		ret = pm_runtime_get(schan->dev);
+ 
+ 		spin_unlock_irq(&schan->chan_lock);
+-		if (ret < 0) {
++		if (ret < 0)
+ 			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+-			pm_runtime_put(schan->dev);
+-		}
+ 
+ 		pm_runtime_barrier(schan->dev);
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 6630d92e30ada..344e376b2ee99 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
+ {
+ 	struct irq_domain *domain = gc->irq.domain;
+ 
++#ifdef CONFIG_GPIOLIB_IRQCHIP
++	/*
++	 * Avoid a race with other code that tries to look up
++	 * an IRQ before the irqchip has been properly registered,
++	 * i.e. while gpiochip is still being brought up.
++	 */
++	if (!gc->irq.initialized)
++		return -EPROBE_DEFER;
++#endif
++
+ 	if (!gpiochip_irqchip_irq_valid(gc, offset))
+ 		return -ENXIO;
+ 
+@@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
+ 
+ 	acpi_gpiochip_request_interrupts(gc);
+ 
++	/*
++	 * Use barrier() to prevent the compiler from reordering the store
++	 * to gc->irq.initialized before the initialization of the GPIO
++	 * chip irq members above.
++	 */
++	barrier();
++
++	gc->irq.initialized = true;
++
+ 	return 0;
+ }
+ 
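
The gpiolib hunks above make gpiochip_to_irq() return -EPROBE_DEFER until gc->irq.initialized is set, and order the flag after the irq members with barrier(). The kernel gets away with a compiler-only barrier in that context; the userspace analogue below deliberately swaps in C11 release/acquire atomics so the sketch is self-contained and well-defined, with all names illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    struct irqchip {
            int domain;                 /* stands in for gc->irq.* members */
            atomic_bool initialized;    /* published last */
    };

    static int to_irq(struct irqchip *c, int offset)
    {
            /* acquire pairs with the release store in add_irqchip() */
            if (!atomic_load_explicit(&c->initialized, memory_order_acquire))
                    return -11;         /* -EPROBE_DEFER: try again later */
            return c->domain + offset;
    }

    static void add_irqchip(struct irqchip *c)
    {
            c->domain = 100;            /* set up everything first... */
            /* ...then publish; the setup cannot be reordered past this */
            atomic_store_explicit(&c->initialized, 1, memory_order_release);
    }

    int main(void)
    {
            struct irqchip c = { 0 };

            printf("%d\n", to_irq(&c, 3));  /* -11 before registration */
            add_irqchip(&c);
            printf("%d\n", to_irq(&c, 3));  /* 103 afterwards */
            return 0;
    }
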
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index f9bab963a948a..5df387c4d7fbb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1813,12 +1813,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ 				true);
+ 	ret = unreserve_bo_and_vms(&ctx, false, false);
+ 
+-	/* Only apply no TLB flush on Aldebaran to
+-	 * workaround regressions on other Asics.
+-	 */
+-	if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
+-		*table_freed = true;
+-
+ 	goto out;
+ 
+ out_unreserve:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 06d07502a1f68..a34be65c9eaac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1509,6 +1509,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ 		return 0;
+ 
+ 	default:
++		dma_fence_put(fence);
+ 		return -EINVAL;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index f18c698137a6b..b87dca6d09fa6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2723,11 +2723,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 
+-	amdgpu_amdkfd_suspend(adev, false);
+-
+ 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ 
++	amdgpu_amdkfd_suspend(adev, false);
++
+ 	/* Workaroud for ASICs need to disable SMC first */
+ 	amdgpu_device_smu_fini_early(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 1916ec84dd71f..e7845df6cad22 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+ 		    * adev->gfx.mec.num_pipe_per_mec
+ 		    * adev->gfx.mec.num_queue_per_pipe;
+ 
+-	while (queue_bit-- >= 0) {
++	while (--queue_bit >= 0) {
+ 		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+ 			continue;
+ 
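
The one-character amdgpu_gfx fix above matters on the final iteration: with post-decrement, when queue_bit reaches 0 the test 0 >= 0 still passes and the body runs with queue_bit == -1, so the kernel loop ends up calling test_bit(-1, ...), an out-of-bounds bitmap read. A tiny runnable demonstration of the two loop shapes:

    #include <stdio.h>

    int main(void)
    {
            int n = 3, bit;

            bit = n;                    /* buggy: post-decrement */
            while (bit-- >= 0)
                    printf("post: probe bit %d\n", bit);  /* 2 1 0 -1 (!) */

            bit = n;                    /* fixed: pre-decrement */
            while (--bit >= 0)
                    printf("pre:  probe bit %d\n", bit);  /* 2 1 0 */
            return 0;
    }
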
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 5661b82d84d46..dda53fe30975d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1303,7 +1303,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+ 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ 		return;
+ 
+-	dma_resv_lock(bo->base.resv, NULL);
++	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
++		return;
+ 
+ 	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
+ 	if (!WARN_ON(r)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index da11ceba06981..0ce2a7aa400b1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -569,8 +569,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
+ 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
+ 
+ 	/* VCN global tiling registers */
+-	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+-		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
++	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
++		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ }
+ 
+ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 4bfc0c8ab764b..70122978bdd0f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1416,6 +1416,12 @@ err_unlock:
+ 	return ret;
+ }
+ 
++static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
++{
++	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
++	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
++	        dev->adev->sdma.instance[0].fw_version >= 18);
++}
++
+ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ 					struct kfd_process *p, void *data)
+ {
+@@ -1503,7 +1509,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ 	}
+ 
+ 	/* Flush TLBs after waiting for the page table updates to complete */
+-	if (table_freed) {
++	if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
+ 		for (i = 0; i < args->n_devices; i++) {
+ 			peer = kfd_device_by_id(devices_arr[i]);
+ 			if (WARN_ON_ONCE(!peer))
+@@ -1603,7 +1609,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 	}
+ 	mutex_unlock(&p->mutex);
+ 
+-	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
++	if (kfd_flush_tlb_after_unmap(dev)) {
+ 		err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
+ 				(struct kgd_mem *) mem, true);
+ 		if (err) {
+@@ -1840,13 +1846,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
+ 	if (!args->start_addr || !args->size)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&p->mutex);
+-
+ 	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
+ 		      args->attrs);
+ 
+-	mutex_unlock(&p->mutex);
+-
+ 	return r;
+ }
+ #else
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+index 9624bbe8b5013..281def1c6c08e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+@@ -1567,7 +1567,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+ 	/* Fetch the CRAT table from ACPI */
+ 	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+ 	if (status == AE_NOT_FOUND) {
+-		pr_warn("CRAT table not found\n");
++		pr_info("CRAT table not found\n");
+ 		return -ENODATA;
+ 	} else if (ACPI_FAILURE(status)) {
+ 		const char *err = acpi_format_exception(status);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index d1145da5348f4..74f162887d3b1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1150,7 +1150,6 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 
+ 	cancel_delayed_work_sync(&p->eviction_work);
+ 	cancel_delayed_work_sync(&p->restore_work);
+-	cancel_delayed_work_sync(&p->svms.restore_work);
+ 
+ 	mutex_lock(&p->mutex);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+index deae12dc777d2..40d0d8cb3fe83 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+@@ -268,15 +268,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
+ 		return ret;
+ 	}
+ 
+-	ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+-			       O_RDWR);
+-	if (ret < 0) {
+-		kfifo_free(&client->fifo);
+-		kfree(client);
+-		return ret;
+-	}
+-	*fd = ret;
+-
+ 	init_waitqueue_head(&client->wait_queue);
+ 	spin_lock_init(&client->lock);
+ 	client->events = 0;
+@@ -286,5 +277,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
+ 	list_add_rcu(&client->list, &dev->smi_clients);
+ 	spin_unlock(&dev->smi_lock);
+ 
++	ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
++			       O_RDWR);
++	if (ret < 0) {
++		spin_lock(&dev->smi_lock);
++		list_del_rcu(&client->list);
++		spin_unlock(&dev->smi_lock);
++
++		synchronize_rcu();
++
++		kfifo_free(&client->fifo);
++		kfree(client);
++		return ret;
++	}
++	*fd = ret;
++
+ 	return 0;
+ }
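
The kfd_smi_events.c reordering above makes anon_inode_getfd() the last step: the client is fully initialized and on the smi_clients list before a user-visible fd can exist, and a late failure is rolled back with list_del_rcu() plus synchronize_rcu() before freeing. A generic init-publish-rollback sketch with the kernel pieces stubbed out and all names illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct client { int events; };

    /* Stand-in for anon_inode_getfd(); fails on request to show rollback. */
    static int fd_create_stub(struct client *c, int fail)
    {
            (void)c;
            return fail ? -1 : 42;
    }

    static void publish(struct client *c)
    {
            printf("client %p added to list\n", (void *)c);
    }

    static void unpublish(struct client *c)
    {
            printf("client %p removed from list\n", (void *)c);
            /* the kernel additionally waits for RCU readers here */
    }

    static int open_event_fd(int inject_failure)
    {
            struct client *c = calloc(1, sizeof(*c));
            int fd;

            if (!c)
                    return -12;         /* -ENOMEM */

            c->events = 0;              /* finish every field first... */
            publish(c);                 /* ...then make it reachable */

            fd = fd_create_stub(c, inject_failure);
            if (fd < 0) {               /* late failure: undo publication */
                    unpublish(c);
                    free(c);
                    return fd;
            }
            return fd;                  /* fd exists only when fully set up */
    }

    int main(void)
    {
            printf("fd=%d\n", open_event_fd(0));
            printf("fd=%d\n", open_event_fd(1));
            return 0;
    }
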
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index f2805ba74c80b..ffec25e642e25 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -1643,13 +1643,14 @@ static void svm_range_restore_work(struct work_struct *work)
+ 
+ 	pr_debug("restore svm ranges\n");
+ 
+-	/* kfd_process_notifier_release destroys this worker thread. So during
+-	 * the lifetime of this thread, kfd_process and mm will be valid.
+-	 */
+ 	p = container_of(svms, struct kfd_process, svms);
+-	mm = p->mm;
+-	if (!mm)
++
++	/* Keep an mm reference while svm_range_validate_and_map runs */
++	mm = get_task_mm(p->lead_thread);
++	if (!mm) {
++		pr_debug("svms 0x%p process mm gone\n", svms);
+ 		return;
++	}
+ 
+ 	svm_range_list_lock_and_flush_work(svms, mm);
+ 	mutex_lock(&svms->lock);
+@@ -1703,6 +1704,7 @@ static void svm_range_restore_work(struct work_struct *work)
+ out_reschedule:
+ 	mutex_unlock(&svms->lock);
+ 	mmap_write_unlock(mm);
++	mmput(mm);
+ 
+ 	/* If validation failed, reschedule another attempt */
+ 	if (evicted_ranges) {
+@@ -1985,10 +1987,9 @@ svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
+ }
+ 
+ static void
+-svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
++svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
++			 struct mm_struct *mm)
+ {
+-	struct mm_struct *mm = prange->work_item.mm;
+-
+ 	switch (prange->work_item.op) {
+ 	case SVM_OP_NULL:
+ 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
+@@ -2065,40 +2066,44 @@ static void svm_range_deferred_list_work(struct work_struct *work)
+ 	struct svm_range_list *svms;
+ 	struct svm_range *prange;
+ 	struct mm_struct *mm;
+-	struct kfd_process *p;
+ 
+ 	svms = container_of(work, struct svm_range_list, deferred_list_work);
+ 	pr_debug("enter svms 0x%p\n", svms);
+ 
+-	p = container_of(svms, struct kfd_process, svms);
+-	/* Avoid mm is gone when inserting mmu notifier */
+-	mm = get_task_mm(p->lead_thread);
+-	if (!mm) {
+-		pr_debug("svms 0x%p process mm gone\n", svms);
+-		return;
+-	}
+-retry:
+-	mmap_write_lock(mm);
+-
+-	/* Checking for the need to drain retry faults must be inside
+-	 * mmap write lock to serialize with munmap notifiers.
+-	 */
+-	if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+-		mmap_write_unlock(mm);
+-		svm_range_drain_retry_fault(svms);
+-		goto retry;
+-	}
+-
+ 	spin_lock(&svms->deferred_list_lock);
+ 	while (!list_empty(&svms->deferred_range_list)) {
+ 		prange = list_first_entry(&svms->deferred_range_list,
+ 					  struct svm_range, deferred_list);
+-		list_del_init(&prange->deferred_list);
+ 		spin_unlock(&svms->deferred_list_lock);
+ 
+ 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
+ 			 prange->start, prange->last, prange->work_item.op);
+ 
++		mm = prange->work_item.mm;
++retry:
++		mmap_write_lock(mm);
++
++		/* Checking for the need to drain retry faults must be inside
++		 * mmap write lock to serialize with munmap notifiers.
++		 */
++		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
++			mmap_write_unlock(mm);
++			svm_range_drain_retry_fault(svms);
++			goto retry;
++		}
++
++		/* Removal from deferred_list must happen inside the mmap
++		 * write lock, to avoid two races:
++		 * 1. unmap_from_cpu may change work_item.op and add the range
++		 *    to deferred_list again, causing a use-after-free.
++		 * 2. svm_range_list_lock_and_flush_work may take the mmap write
++		 *    lock and continue because deferred_list is empty, while
++		 *    the deferred_list work is in fact waiting for the mmap lock.
++		 */
++		spin_lock(&svms->deferred_list_lock);
++		list_del_init(&prange->deferred_list);
++		spin_unlock(&svms->deferred_list_lock);
++
+ 		mutex_lock(&svms->lock);
+ 		mutex_lock(&prange->migrate_mutex);
+ 		while (!list_empty(&prange->child_list)) {
+@@ -2109,19 +2114,20 @@ retry:
+ 			pr_debug("child prange 0x%p op %d\n", pchild,
+ 				 pchild->work_item.op);
+ 			list_del_init(&pchild->child_list);
+-			svm_range_handle_list_op(svms, pchild);
++			svm_range_handle_list_op(svms, pchild, mm);
+ 		}
+ 		mutex_unlock(&prange->migrate_mutex);
+ 
+-		svm_range_handle_list_op(svms, prange);
++		svm_range_handle_list_op(svms, prange, mm);
+ 		mutex_unlock(&svms->lock);
++		mmap_write_unlock(mm);
++
++		/* Pairs with mmget in svm_range_add_list_work */
++		mmput(mm);
+ 
+ 		spin_lock(&svms->deferred_list_lock);
+ 	}
+ 	spin_unlock(&svms->deferred_list_lock);
+-
+-	mmap_write_unlock(mm);
+-	mmput(mm);
+ 	pr_debug("exit svms 0x%p\n", svms);
+ }
+ 
+@@ -2139,6 +2145,9 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
+ 			prange->work_item.op = op;
+ 	} else {
+ 		prange->work_item.op = op;
++
++		/* Pairs with mmput in deferred_list_work */
++		mmget(mm);
+ 		prange->work_item.mm = mm;
+ 		list_add_tail(&prange->deferred_list,
+ 			      &prange->svms->deferred_range_list);
+@@ -2830,6 +2839,8 @@ void svm_range_list_fini(struct kfd_process *p)
+ 
+ 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
+ 
++	cancel_delayed_work_sync(&p->svms.restore_work);
++
+ 	/* Ensure list work is finished before process is destroyed */
+ 	flush_work(&p->svms.deferred_list_work);
+ 
+@@ -2840,7 +2851,6 @@ void svm_range_list_fini(struct kfd_process *p)
+ 	atomic_inc(&p->svms.drain_pagefaults);
+ 	svm_range_drain_retry_fault(&p->svms);
+ 
+-
+ 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
+ 		svm_range_unlink(prange);
+ 		svm_range_remove_notifier(prange);
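
The kfd_svm.c hunks above pin the mm for exactly as long as a queued work item can dereference it: mmget() when the range is queued in svm_range_add_list_work(), mmput() after the deferred work drops the mmap lock, and get_task_mm()/mmput() bracketing the restore worker. A minimal refcount-pairing sketch of that lifetime rule, with simplified types:

    #include <stdio.h>

    struct mm { int users; };

    static void mmget(struct mm *mm) { mm->users++; }
    static void mmput(struct mm *mm)
    {
            if (--mm->users == 0)
                    printf("mm freed\n");
    }

    struct work { struct mm *mm; };

    static void queue_work_item(struct work *w, struct mm *mm)
    {
            mmget(mm);                  /* pin: work may run after task exit */
            w->mm = mm;
    }

    static void run_work_item(struct work *w)
    {
            printf("work uses mm (users=%d)\n", w->mm->users);
            mmput(w->mm);               /* pairs with mmget at queue time */
    }

    int main(void)
    {
            struct mm mm = { .users = 1 };  /* the task's own reference */
            struct work w;

            queue_work_item(&w, &mm);
            mmput(&mm);                 /* task exits first... */
            run_work_item(&w);          /* ...mm stays valid for the work */
            return 0;
    }
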
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index b28b5c4908601..90c017859ad42 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3951,7 +3951,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
+ 				 max - min);
+ }
+ 
+-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
++static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ 					 int bl_idx,
+ 					 u32 user_brightness)
+ {
+@@ -3982,7 +3982,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
+ 	}
+ 
+-	return rc ? 0 : 1;
++	if (rc)
++		dm->actual_brightness[bl_idx] = user_brightness;
+ }
+ 
+ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+@@ -9914,7 +9915,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 	/* restore the backlight level */
+ 	for (i = 0; i < dm->num_of_edps; i++) {
+ 		if (dm->backlight_dev[i] &&
+-		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
++		    (dm->actual_brightness[i] != dm->brightness[i]))
+ 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ 	}
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index b9a69b0cef23b..7a23cd603714b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
+ 	 * cached backlight values.
+ 	 */
+ 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
++	/**
++	 * @actual_brightness:
++	 *
++	 * last successfully applied backlight values.
++	 */
++	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
+ };
+ 
+ enum dsc_clock_force_state {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 26719efa5396d..12d437d9a0e4c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -227,8 +227,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -389,8 +391,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user((*(rd_buf + result)), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -1359,8 +1363,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -1376,8 +1382,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -1546,8 +1554,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -1563,8 +1573,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -1731,8 +1743,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -1748,8 +1762,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -1912,8 +1928,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -1929,8 +1947,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -2088,8 +2108,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -2105,8 +2127,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -2145,8 +2169,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -2162,8 +2188,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -2217,8 +2245,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -2234,8 +2264,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -2289,8 +2321,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ 				break;
+ 	}
+ 
+-	if (!pipe_ctx)
++	if (!pipe_ctx) {
++		kfree(rd_buf);
+ 		return -ENXIO;
++	}
+ 
+ 	dsc = pipe_ctx->stream_res.dsc;
+ 	if (dsc)
+@@ -2306,8 +2340,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ 			break;
+ 
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 
+ 		buf += 1;
+ 		size -= 1;
+@@ -3459,8 +3495,10 @@ static ssize_t dcc_en_bits_read(
+ 	dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
+ 
+ 	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+-	if (!rd_buf)
++	if (!rd_buf) {
++		kfree(dcc_en_bits);
+ 		return -ENOMEM;
++	}
+ 
+ 	for (i = 0; i < num_pipes; i++)
+ 		offset += snprintf(rd_buf + offset, rd_buf_size - offset,
+@@ -3473,8 +3511,10 @@ static ssize_t dcc_en_bits_read(
+ 		if (*pos >= rd_buf_size)
+ 			break;
+ 		r = put_user(*(rd_buf + result), buf);
+-		if (r)
++		if (r) {
++			kfree(rd_buf);
+ 			return r; /* r = -EFAULT */
++		}
+ 		buf += 1;
+ 		size -= 1;
+ 		*pos += 1;
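Every one of the debugfs fixes above is the same bug: these readers stage output in a kmalloc'd buffer and copy it out with put_user() in a loop, and the early returns (a put_user() fault, a missing pipe_ctx) forgot to free the staging buffer. A minimal sketch of the corrected shape, with illustrative names and buffer size (not taken from the driver):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	const u32 rd_buf_size = 32;
	char *rd_buf;
	ssize_t result = 0;

	rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
	if (!rd_buf)
		return -ENOMEM;

	while (size) {
		if (*pos >= rd_buf_size)
			break;

		if (put_user(rd_buf[result], buf)) {
			kfree(rd_buf);	/* free on every exit path */
			return -EFAULT;
		}

		buf += 1;
		size -= 1;
		*pos += 1;
		result += 1;
	}

	kfree(rd_buf);
	return result;
}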
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index c510638b4f997..a009fc654ac95 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -149,10 +149,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+ 
+ 	link = stream->link;
+ 
+-	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
+-
+-	if (psr_config.psr_version > 0) {
+-		psr_config.psr_exit_link_training_required = 0x1;
++	if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
++		psr_config.psr_version = link->psr_settings.psr_version;
+ 		psr_config.psr_frame_capture_indication_req = 0;
+ 		psr_config.psr_rfb_setup_time = 0x37;
+ 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index ba1aa994db4b7..62bc6ce887535 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -76,6 +76,8 @@
+ 
+ #include "dc_trace.h"
+ 
++#include "dce/dmub_outbox.h"
++
+ #define CTX \
+ 	dc->ctx
+ 
+@@ -3707,13 +3709,23 @@ void dc_hardware_release(struct dc *dc)
+ }
+ #endif
+ 
+-/**
+- * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
+- * @dc: dc structure
++/*
++ *****************************************************************************
++ * Function: dc_is_dmub_outbox_supported
++ *
++ * @brief
++ *	Checks whether the DMUB FW supports outbox notifications. If it does,
++ *	the DM should register the outbox interrupt prior to actually enabling
++ *	interrupts via dc_enable_dmub_outbox.
+  *
+- * Returns: True to enable dmub notifications, False otherwise
++ *  @param
++ *		[in] dc: dc structure
++ *
++ *  @return
++ *		True if DMUB FW supports outbox notifications, False otherwise
++ *****************************************************************************
+  */
+-bool dc_enable_dmub_notifications(struct dc *dc)
++bool dc_is_dmub_outbox_supported(struct dc *dc)
+ {
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ 	/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+@@ -3728,6 +3740,48 @@ bool dc_enable_dmub_notifications(struct dc *dc)
+ 
+ /**
+  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
++ *  Function: dc_enable_dmub_notifications
++ *
++ *  @brief
++ *		Calls dc_is_dmub_outbox_supported to check whether the DMUB FW
++ *		supports outbox notifications. All DMs shall switch to
++ *		dc_is_dmub_outbox_supported; this API will be removed afterwards.
++ *
++ *  @param
++ *		[in] dc: dc structure
++ *
++ *  @return
++ *		True if DMUB FW supports outbox notifications, False otherwise
++ *****************************************************************************
++ */
++bool dc_enable_dmub_notifications(struct dc *dc)
++{
++	return dc_is_dmub_outbox_supported(dc);
++}
++
++/**
++ *****************************************************************************
++ *  Function: dc_enable_dmub_outbox
++ *
++ *  @brief
++ *		Enables DMUB unsolicited notifications to x86 via outbox
++ *
++ *  @param
++ *		[in] dc: dc structure
++ *
++ *  @return
++ *		None
++ *****************************************************************************
++ */
++void dc_enable_dmub_outbox(struct dc *dc)
++{
++	struct dc_context *dc_ctx = dc->ctx;
++
++	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
++}
++
++/**
++ *****************************************************************************
+  *                                      Sets port index appropriately for legacy DDC
+  * @dc: dc structure
+  * @link_index: link index
+@@ -3829,7 +3883,7 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+  *		[in] payload: aux payload
+  *		[out] notify: set_config immediate reply
+  *
+- *	@return
++ *  @return
+  *		True if successful, False if failure
+  *****************************************************************************
+  */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 61b8f29a0c303..49d5271dcfdc8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2378,22 +2378,27 @@ static enum link_training_result dp_perform_8b_10b_link_training(
+ 				repeater_id--) {
+ 			status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+ 
+-			if (status != LINK_TRAINING_SUCCESS)
++			if (status != LINK_TRAINING_SUCCESS) {
++				repeater_training_done(link, repeater_id);
+ 				break;
++			}
+ 
+ 			status = perform_channel_equalization_sequence(link,
+ 					link_res,
+ 					lt_settings,
+ 					repeater_id);
+ 
++			repeater_training_done(link, repeater_id);
++
+ 			if (status != LINK_TRAINING_SUCCESS)
+ 				break;
+ 
+-			repeater_training_done(link, repeater_id);
++			for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
++				lt_settings->dpcd_lane_settings[lane].raw = 0;
++				lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
++				lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
++			}
+ 		}
+-
+-		for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
+-			lt_settings->dpcd_lane_settings[lane].raw = 0;
+ 	}
+ 
+ 	if (status == LINK_TRAINING_SUCCESS) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 18757c1585232..ac3071e38e4a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1640,6 +1640,9 @@ static bool are_stream_backends_same(
+ 	if (is_timing_changed(stream_a, stream_b))
+ 		return false;
+ 
++	if (stream_a->signal != stream_b->signal)
++		return false;
++
+ 	if (stream_a->dpms_off != stream_b->dpms_off)
+ 		return false;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index b518648906212..6a8c100a3688d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -1448,8 +1448,11 @@ void dc_z10_restore(const struct dc *dc);
+ void dc_z10_save_init(struct dc *dc);
+ #endif
+ 
++bool dc_is_dmub_outbox_supported(struct dc *dc);
+ bool dc_enable_dmub_notifications(struct dc *dc);
+ 
++void dc_enable_dmub_outbox(struct dc *dc);
++
+ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
+ 				uint32_t link_index,
+ 				struct aux_payload *payload);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+index faad8555ddbb6..fff1d07d865d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+@@ -22,20 +22,23 @@
+  * Authors: AMD
+  */
+ 
+-#include "dmub_outbox.h"
++#include "dc.h"
+ #include "dc_dmub_srv.h"
++#include "dmub_outbox.h"
+ #include "dmub/inc/dmub_cmd.h"
+ 
+-/**
+- *  dmub_enable_outbox_notification - Sends inbox cmd to dmub to enable outbox1
+- *                                    messages with interrupt. Dmub sends outbox1
+- *                                    message and triggers outbox1 interrupt.
+- * @dc: dc structure
++/*
++ *  Function: dmub_enable_outbox_notification
++ *
++ *  @brief
++ *		Sends inbox cmd to dmub for enabling outbox notifications to x86.
++ *
++ *  @param
++ *		[in] dmub_srv: dmub_srv structure
+  */
+-void dmub_enable_outbox_notification(struct dc *dc)
++void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
+ {
+ 	union dmub_rb_cmd cmd;
+-	struct dc_context *dc_ctx = dc->ctx;
+ 
+ 	memset(&cmd, 0x0, sizeof(cmd));
+ 	cmd.outbox1_enable.header.type = DMUB_CMD__OUTBOX1_ENABLE;
+@@ -45,7 +48,7 @@ void dmub_enable_outbox_notification(struct dc *dc)
+ 		sizeof(cmd.outbox1_enable.header);
+ 	cmd.outbox1_enable.enable = true;
+ 
+-	dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
+-	dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
+-	dc_dmub_srv_wait_idle(dc_ctx->dmub_srv);
++	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
++	dc_dmub_srv_cmd_execute(dmub_srv);
++	dc_dmub_srv_wait_idle(dmub_srv);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
+index 4e0aa0d1a2d5c..58ceabb9d497d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h
+@@ -26,8 +26,8 @@
+ #ifndef _DMUB_OUTBOX_H_
+ #define _DMUB_OUTBOX_H_
+ 
+-#include "dc.h"
++struct dc_dmub_srv;
+ 
+-void dmub_enable_outbox_notification(struct dc *dc);
++void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv);
+ 
+ #endif /* _DMUB_OUTBOX_H_ */
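Dropping the dc.h include in favor of a forward declaration works because this header only ever names struct dc_dmub_srv through a pointer; the full definition is needed only where members are dereferenced. The general idiom, with illustrative names:

/* widget.h: a pointer-only interface needs no #include "engine.h" */
struct engine;				/* forward declaration is enough */

void widget_start(struct engine *e);

Beyond breaking the include cycle with dc.h, this also cuts rebuild fan-out whenever the heavyweight header changes.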
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 530a72e3eefe2..2cefdd96d0cbb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1493,17 +1493,6 @@ void dcn10_init_hw(struct dc *dc)
+ 			link->link_status.link_active = true;
+ 	}
+ 
+-	/* Power gate DSCs */
+-	if (!is_optimized_init_done) {
+-		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
+-			if (hws->funcs.dsc_pg_control != NULL)
+-				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+-	}
+-
+-	/* Enable outbox notification feature of dmub */
+-	if (dc->debug.enable_dmub_aux_for_legacy_ddc)
+-		dmub_enable_outbox_notification(dc);
+-
+ 	/* we want to turn off all dp displays before doing detection */
+ 	if (dc->config.power_down_display_on_boot)
+ 		dc_link_blank_all_dp_displays(dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index e5cc6bf45743a..ca1bbc942fd40 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -873,7 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 		.clock_trace = true,
+ 		.disable_pplib_clock_request = true,
+ 		.min_disp_clk_khz = 100000,
+-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
++		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ 		.force_single_disp_pipe_split = false,
+ 		.disable_dcc = DCC_ENABLE,
+ 		.vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+index 4206ce5bf9a92..1e156f3980656 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+@@ -194,7 +194,7 @@ void dcn31_init_hw(struct dc *dc)
+ 
+ 	/* Enables outbox notifications for usb4 dpia */
+ 	if (dc->res_pool->usb4_dpia_count)
+-		dmub_enable_outbox_notification(dc);
++		dmub_enable_outbox_notification(dc->ctx->dmub_srv);
+ 
+ 	/* we want to turn off all dp displays before doing detection */
+ 	if (dc->config.power_down_display_on_boot)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 8d64187478e42..0e11285035b63 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -2025,7 +2025,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	DC_FP_START();
+ 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++	DC_FP_END();
+ 
+ 	// Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
+ 	if (pipe_cnt == 0)
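dcn30_internal_validate_bw() does floating-point math, and FP/SIMD registers may only be touched from kernel code inside an explicit save/restore section; DC_FP_START()/DC_FP_END() are DC's wrappers for that, funneling into kernel_fpu_begin()/kernel_fpu_end() on x86. A sketch of the underlying primitive, assuming the object file is built with FP-enabled CFLAGS as the DC/DML files are:

#include <asm/fpu/api.h>

static void scale_values(float *vals, int n, float factor)
{
	int i;

	kernel_fpu_begin();	/* saves FPU state, disables preemption */
	for (i = 0; i < n; i++)
		vals[i] *= factor;
	kernel_fpu_end();	/* restores FPU state */
}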
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+index 08362d506534b..a68496b3f9296 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+@@ -1045,6 +1045,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
+ 
+ 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
+ 		return false;
++	/* Don't use baco for reset in S3.
++	 * This is a workaround for some platforms
++	 * where entering BACO during suspend
++	 * seems to cause reboots or hangs.
++	 * This might be related to the fact that BACO controls
++	 * power to the whole GPU including devices like audio and USB.
++	 * Powering down/up everything may adversely affect these other
++	 * devices.  Needs more investigation.
++	 */
++	if (adev->in_s3)
++		return false;
+ 
+ 	if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
+ 		return false;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+index 9ddd8491ff008..ede71de2343dc 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetHardMinFclkByFreq,
+ 						hwmgr->display_config->num_display > 3 ?
+-						data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
++						(data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
+ 						min_mclk,
+ 						NULL);
+ 
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetHardMinSocclkByFreq,
+-						data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
++						data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
+ 						NULL);
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetHardMinVcn,
+@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ 						NULL);
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetSoftMaxFclkByFreq,
+-						data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
++						data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
+ 						NULL);
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetSoftMaxSocclkByFreq,
+-						data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
++						data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
+ 						NULL);
+ 		smum_send_msg_to_smc_with_parameter(hwmgr,
+ 						PPSMC_MSG_SetSoftMaxVcn,
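The divide-by-100 reads as a unit fix: judging from the min_mclk path above it, the vdd_dep table entries hold clocks in 10 kHz units while the SMU messages expect MHz. A hypothetical helper that would make the conversion self-documenting:

/* Hypothetical: hwmgr clock tables store 10 kHz units, SMU msgs take MHz. */
static inline u32 clk_10khz_to_mhz(u32 clk_10khz)
{
	return clk_10khz / 100;
}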
+diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
+index 6e484d836cfe2..691039aba87f4 100644
+--- a/drivers/gpu/drm/bridge/nwl-dsi.c
++++ b/drivers/gpu/drm/bridge/nwl-dsi.c
+@@ -861,18 +861,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ 	memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+ 	drm_mode_debug_printmodeline(adjusted_mode);
+ 
+-	pm_runtime_get_sync(dev);
++	if (pm_runtime_resume_and_get(dev) < 0)
++		return;
+ 
+ 	if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+-		return;
++		goto runtime_put;
+ 	if (clk_prepare_enable(dsi->core_clk) < 0)
+-		return;
++		goto runtime_put;
+ 
+ 	/* Step 1 from DSI reset-out instructions */
+ 	ret = reset_control_deassert(dsi->rst_pclk);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
+-		return;
++		goto runtime_put;
+ 	}
+ 
+ 	/* Step 2 from DSI reset-out instructions */
+@@ -882,13 +883,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ 	ret = reset_control_deassert(dsi->rst_esc);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
+-		return;
++		goto runtime_put;
+ 	}
+ 	ret = reset_control_deassert(dsi->rst_byte);
+ 	if (ret < 0) {
+ 		DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
+-		return;
++		goto runtime_put;
+ 	}
++
++	return;
++
++runtime_put:
++	pm_runtime_put_sync(dev);
+ }
+ 
+ static void
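Two fixes land in the mode_set hunk: pm_runtime_resume_and_get() replaces the unchecked pm_runtime_get_sync() (on failure it drops the usage count itself, so the caller does not have to), and each later failure now unwinds through a single runtime_put label instead of returning with the reference held. The skeleton, with a hypothetical setup step:

#include <linux/pm_runtime.h>

int example_setup(struct device *dev);	/* hypothetical step that can fail */

static void example_mode_set(struct device *dev)
{
	if (pm_runtime_resume_and_get(dev) < 0)
		return;			/* no usage count held on failure */

	if (example_setup(dev) < 0)
		goto runtime_put;

	return;				/* success: keep the reference */

runtime_put:
	pm_runtime_put_sync(dev);	/* drop the reference on error */
}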
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b8f5419e514ae..83e5c115e7547 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -212,9 +212,7 @@ static const struct edid_quirk {
+ 
+ 	/* Windows Mixed Reality Headsets */
+ 	EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+-	EDID_QUIRK('H', 'P', 'N', 0x3515, EDID_QUIRK_NON_DESKTOP),
+ 	EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
+-	EDID_QUIRK('L', 'E', 'N', 0xb800, EDID_QUIRK_NON_DESKTOP),
+ 	EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
+ 	EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ 	EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
+@@ -5327,17 +5325,13 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ 	info->width_mm = edid->width_cm * 10;
+ 	info->height_mm = edid->height_cm * 10;
+ 
+-	info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+-
+ 	drm_get_monitor_range(connector, edid);
+ 
+-	DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+-
+ 	if (edid->revision < 3)
+-		return quirks;
++		goto out;
+ 
+ 	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+-		return quirks;
++		goto out;
+ 
+ 	info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ 	drm_parse_cea_ext(connector, edid);
+@@ -5358,7 +5352,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ 
+ 	/* Only defined for 1.4 with digital displays */
+ 	if (edid->revision < 4)
+-		return quirks;
++		goto out;
+ 
+ 	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ 	case DRM_EDID_DIGITAL_DEPTH_6:
+@@ -5395,6 +5389,13 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ 
+ 	drm_update_mso(connector, edid);
+ 
++out:
++	if (quirks & EDID_QUIRK_NON_DESKTOP) {
++		drm_dbg_kms(connector->dev, "Non-desktop display%s\n",
++			    info->non_desktop ? " (redundant quirk)" : "");
++		info->non_desktop = true;
++	}
++
+ 	return quirks;
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index b910978d3e480..4e853acfd1e8a 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -180,6 +180,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
+ 		},
+ 		.driver_data = (void *)&lcd720x1280_rightside_up,
++	}, {	/* GPD Win Max */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/*
+ 		 * GPD Pocket, note that the the DMI data is less generic then
+ 		 * it seems, devices with a board-vendor of "AMI Corporation"
+diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+index 87428fb23d9ff..a2277a0d6d06f 100644
+--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
++++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
+ 	struct device_node *np = pdev->dev.of_node;
+ 	const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
+ 	struct imx_hdmi *hdmi;
++	int ret;
+ 
+ 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ 	if (!hdmi)
+@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
+ 	hdmi->bridge = of_drm_find_bridge(np);
+ 	if (!hdmi->bridge) {
+ 		dev_err(hdmi->dev, "Unable to find bridge\n");
++		dw_hdmi_remove(hdmi->hdmi);
+ 		return -ENODEV;
+ 	}
+ 
+-	return component_add(&pdev->dev, &dw_hdmi_imx_ops);
++	ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
++	if (ret)
++		dw_hdmi_remove(hdmi->hdmi);
++
++	return ret;
+ }
+ 
+ static int dw_hdmi_imx_remove(struct platform_device *pdev)
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index e5078d03020d9..fb0e951248f68 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
+ 		edidp = of_get_property(child, "edid", &edid_len);
+ 		if (edidp) {
+ 			channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
++			if (!channel->edid)
++				return -ENOMEM;
+ 		} else if (!channel->panel) {
+ 			/* fallback to display-timings node */
+ 			ret = of_get_drm_display_mode(child,
+diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
+index 06cb1a59b9bcd..63ba2ad846791 100644
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
+ 		ret = of_get_drm_display_mode(np, &imxpd->mode,
+ 					      &imxpd->bus_flags,
+ 					      OF_USE_NATIVE_MODE);
+-		if (ret)
++		if (ret) {
++			drm_mode_destroy(connector->dev, mode);
+ 			return ret;
++		}
+ 
+ 		drm_mode_copy(mode, &imxpd->mode);
+ 		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 6b3ced4aaaf5d..3a3f53f0c8ae1 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1877,7 +1877,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+ 
+ 	/* do not autoenable, will be enabled later */
+ 	ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
+-			IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
++			IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
+ 			"dsi_isr", msm_host);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+index e1772211b0a4b..612310d5d4812 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+@@ -216,6 +216,7 @@ gm20b_pmu = {
+ 	.intr = gt215_pmu_intr,
+ 	.recv = gm20b_pmu_recv,
+ 	.initmsg = gm20b_pmu_initmsg,
++	.reset = gf100_pmu_reset,
+ };
+ 
+ #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+index 6bf7fc1bd1e3b..1a6f9c3af5ecd 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+@@ -23,7 +23,7 @@
+  */
+ #include "priv.h"
+ 
+-static void
++void
+ gp102_pmu_reset(struct nvkm_pmu *pmu)
+ {
+ 	struct nvkm_device *device = pmu->subdev.device;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+index ba1583bb618b2..94cfb1791af6e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+@@ -83,6 +83,7 @@ gp10b_pmu = {
+ 	.intr = gt215_pmu_intr,
+ 	.recv = gm20b_pmu_recv,
+ 	.initmsg = gm20b_pmu_initmsg,
++	.reset = gp102_pmu_reset,
+ };
+ 
+ #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+index bcaade758ff72..21abf31f44420 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+ 
+ bool gf100_pmu_enabled(struct nvkm_pmu *);
+ void gf100_pmu_reset(struct nvkm_pmu *);
++void gp102_pmu_reset(struct nvkm_pmu *pmu);
+ 
+ void gk110_pmu_pgob(struct nvkm_pmu *, bool);
+ 
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+index 2c3378a259b1e..e1542451ef9d0 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
+ 	int ret;
+ 
+ 	vcc = devm_regulator_get_optional(dev, "vcc");
+-	if (IS_ERR(vcc))
++	if (IS_ERR(vcc)) {
+ 		dev_err(dev, "get optional vcc failed\n");
++		vcc = NULL;
++	}
+ 
+ 	dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
+ 				    struct mipi_dbi_dev, drm);
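The ili9341 fix stops treating an ERR_PTR as a usable regulator. For reference, the fuller idiom for optional supplies also separates -ENODEV (supply simply not described) from real failures such as -EPROBE_DEFER, which a blanket NULLing swallows; a sketch:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_vcc(struct device *dev, struct regulator **vcc)
{
	*vcc = devm_regulator_get_optional(dev, "vcc");
	if (IS_ERR(*vcc)) {
		int ret = PTR_ERR(*vcc);

		if (ret != -ENODEV)
			return ret;	/* real error, e.g. -EPROBE_DEFER */
		*vcc = NULL;		/* not described: run without it */
	}
	return 0;
}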
+diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
+index 06a3414ee43a3..1637203ea1036 100644
+--- a/drivers/gpu/drm/sprd/sprd_dpu.c
++++ b/drivers/gpu/drm/sprd/sprd_dpu.c
+@@ -790,6 +790,11 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
+ 	int ret;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(dev, "failed to get I/O resource\n");
++		return -EINVAL;
++	}
++
+ 	ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ 	if (!ctx->base) {
+ 		dev_err(dev, "failed to map dpu registers\n");
+diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
+index a077e2d4d7217..af2be97d5ed08 100644
+--- a/drivers/gpu/drm/sprd/sprd_drm.c
++++ b/drivers/gpu/drm/sprd/sprd_drm.c
+@@ -155,7 +155,7 @@ static void sprd_drm_shutdown(struct platform_device *pdev)
+ 	struct drm_device *drm = platform_get_drvdata(pdev);
+ 
+ 	if (!drm) {
+-		drm_warn(drm, "drm device is not available, no shutdown\n");
++		dev_warn(&pdev->dev, "drm device is not available, no shutdown\n");
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
+index 911b3cddc2640..12b67a5d59231 100644
+--- a/drivers/gpu/drm/sprd/sprd_dsi.c
++++ b/drivers/gpu/drm/sprd/sprd_dsi.c
+@@ -907,6 +907,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
+ 	struct resource *res;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		dev_err(dev, "failed to get I/O resource\n");
++		return -EINVAL;
++	}
++
+ 	ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+ 	if (!ctx->base) {
+ 		drm_err(dsi->drm, "failed to map dsi host registers\n");
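Both sprd hunks add the missing NULL check on platform_get_resource() before devm_ioremap(). Where the register range is exclusively owned, devm_platform_ioremap_resource() folds lookup, request and map into one checked call; a sketch with a hypothetical context struct (the driver's plain devm_ioremap() may be deliberate if the range is shared):

#include <linux/err.h>
#include <linux/platform_device.h>

struct example_ctx {
	void __iomem *base;
};

static int example_map_regs(struct platform_device *pdev,
			    struct example_ctx *ctx)
{
	ctx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);	/* covers a missing resource */
	return 0;
}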
+diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
+index c7ed2e1cbab6e..92bc0faee84f3 100644
+--- a/drivers/gpu/drm/v3d/v3d_gem.c
++++ b/drivers/gpu/drm/v3d/v3d_gem.c
+@@ -798,7 +798,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ 
+ 		if (!render->base.perfmon) {
+ 			ret = -ENOENT;
+-			goto fail;
++			goto fail_perfmon;
+ 		}
+ 	}
+ 
+@@ -847,6 +847,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ 
+ fail_unreserve:
+ 	mutex_unlock(&v3d->sched_lock);
++fail_perfmon:
+ 	drm_gem_unlock_reservations(last_job->bo,
+ 				    last_job->bo_count, &acquire_ctx);
+ fail:
+@@ -1027,7 +1028,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ 						     args->perfmon_id);
+ 		if (!job->base.perfmon) {
+ 			ret = -ENOENT;
+-			goto fail;
++			goto fail_perfmon;
+ 		}
+ 	}
+ 
+@@ -1056,6 +1057,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ 
+ fail_unreserve:
+ 	mutex_unlock(&v3d->sched_lock);
++fail_perfmon:
+ 	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ 				    &acquire_ctx);
+ fail:
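The v3d fix is about jumping to the right rung of the unwind ladder: the perfmon lookup fails after the BO reservations are taken but before sched_lock is, so the old "goto fail" skipped drm_gem_unlock_reservations(). The rule is that each error label releases exactly what is held at its jump sites; a compact sketch with hypothetical helpers:

int take_reservations(void);	/* hypothetical acquire/release helpers */
int lookup_perfmon(void);
int take_sched_lock(void);
int push_job(void);
void drop_sched_lock(void);
void drop_reservations(void);

static int example_submit(void)
{
	int ret;

	ret = take_reservations();
	if (ret)
		goto fail;		/* nothing held yet */

	ret = lookup_perfmon();
	if (ret)
		goto fail_perfmon;	/* reservations held, no lock */

	ret = take_sched_lock();
	if (ret)
		goto fail_perfmon;

	ret = push_job();
	if (ret)
		goto fail_unreserve;	/* both held */

	drop_sched_lock();
	return 0;

fail_unreserve:
	drop_sched_lock();
fail_perfmon:
	drop_reservations();
fail:
	return ret;
}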
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 7dc89dc6b0f0e..590376d776a18 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -748,15 +748,15 @@ static const struct hid_device_id apple_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
+ 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
+-		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+-		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
+-		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
++		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
+ 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ 
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 60375879612f3..67be81208a2d9 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
+ 	 * execute:
+ 	 *
+ 	 *  (a) In the "normal (i.e., not resuming from hibernation)" path,
+-	 *      the full barrier in smp_store_mb() guarantees that the store
++	 *      the full barrier in virt_store_mb() guarantees that the store
+ 	 *      is propagated to all CPUs before the add_channel_work work
+ 	 *      is queued.  In turn, add_channel_work is queued before the
+ 	 *      channel's ring buffer is allocated/initialized and the
+@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
+ 	 *      recv_int_page before retrieving the channel pointer from the
+ 	 *      array of channels.
+ 	 *
+-	 *  (b) In the "resuming from hibernation" path, the smp_store_mb()
++	 *  (b) In the "resuming from hibernation" path, the virt_store_mb()
+ 	 *      guarantees that the store is propagated to all CPUs before
+ 	 *      the VMBus connection is marked as ready for the resume event
+ 	 *      (cf. check_ready_for_resume_event()).  The interrupt handler
+ 	 *      of the VMBus driver and vmbus_chan_sched() can not run before
+ 	 *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
+ 	 */
+-	smp_store_mb(
++	virt_store_mb(
+ 		vmbus_connection.channels[channel->offermsg.child_relid],
+ 		channel);
+ }
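The barrier swap matters on CONFIG_SMP=n builds: smp_store_mb() degrades to a plain store plus compiler barrier there, but the peer in VMBus is the hypervisor rather than another local CPU, so the ordering must survive regardless; the virt_*() barriers always emit the real fence. Sketch of the publish step (array size illustrative):

#include <linux/types.h>
#include <asm/barrier.h>

struct vmbus_channel;			/* opaque for the sketch */

static struct vmbus_channel *channels[2048];

static void publish_channel(u32 relid, struct vmbus_channel *channel)
{
	/*
	 * Store the pointer with a full barrier so it is visible to the
	 * host-facing side before any subsequent step (such as queueing
	 * work) that assumes it, even in a uniprocessor guest.
	 */
	virt_store_mb(channels[relid], channel);
}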
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 12a2b37e87f30..4bea1dfa41cdc 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2097,6 +2097,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ 	child_device_obj->device.parent = &hv_acpi_dev->dev;
+ 	child_device_obj->device.release = vmbus_device_release;
+ 
++	child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
++	child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
++	dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
++
+ 	/*
+ 	 * Register with the LDM. This will kick off the driver/device
+ 	 * binding...which will eventually call vmbus_match() and vmbus_probe()
+@@ -2122,9 +2126,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ 	}
+ 	hv_debug_add_dev_dir(child_device_obj);
+ 
+-	child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+-	child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+-	dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+ 	return 0;
+ 
+ err_kset_unregister:
+@@ -2780,10 +2781,15 @@ static void __exit vmbus_exit(void)
+ 	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ 		kmsg_dump_unregister(&hv_kmsg_dumper);
+ 		unregister_die_notifier(&hyperv_die_block);
+-		atomic_notifier_chain_unregister(&panic_notifier_list,
+-						 &hyperv_panic_block);
+ 	}
+ 
++	/*
++	 * The panic notifier is always registered, hence we should
++	 * also unconditionally unregister it here as well.
++	 */
++	atomic_notifier_chain_unregister(&panic_notifier_list,
++					 &hyperv_panic_block);
++
+ 	free_page((unsigned long)hv_panic_page);
+ 	unregister_sysctl_table(hv_ctl_table_hdr);
+ 	hv_ctl_table_hdr = NULL;
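Two independent fixes here: the DMA parms/mask setup moves to before device_register(), since the driver core may bind and probe the device before device_register() returns and probe can already issue DMA; and the panic notifier is now unregistered unconditionally, mirroring its unconditional registration. The ordering rule in sketch form (struct example_dev is hypothetical):

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct example_dev {
	struct device device;
	struct device_dma_parameters dma_parms;
	u64 dma_mask;
};

static int example_register(struct example_dev *edev)
{
	struct device *dev = &edev->device;

	/* Fill in everything probe() may rely on *before* registering. */
	dev->dma_parms = &edev->dma_parms;
	dev->dma_mask = &edev->dma_mask;
	dma_set_mask(dev, DMA_BIT_MASK(64));

	return device_register(dev);	/* probe may run before this returns */
}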
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 35f0d5e7533d6..1c107d6d03b99 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
+ 	switch (cm_id_priv->id.state) {
+ 	case IB_CM_REP_SENT:
+ 	case IB_CM_DREQ_SENT:
++	case IB_CM_MRA_REP_RCVD:
+ 		ib_cancel_mad(cm_id_priv->msg);
+ 		break;
+ 	case IB_CM_ESTABLISHED:
+@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
+ 		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+ 			ib_cancel_mad(cm_id_priv->msg);
+ 		break;
+-	case IB_CM_MRA_REP_RCVD:
+-		break;
+ 	case IB_CM_TIMEWAIT:
+ 		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
+ 						     [CM_DREQ_COUNTER]);
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 876cc78a22cca..7333646021bb8 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+ 	unsigned long flags;
+ 	struct list_head del_list;
+ 
++	/* Prevent freeing of mm until we are completely finished. */
++	mmgrab(handler->mn.mm);
++
+ 	/* Unregister first so we don't get any more notifications. */
+ 	mmu_notifier_unregister(&handler->mn, handler->mn.mm);
+ 
+@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+ 
+ 	do_remove(handler, &del_list);
+ 
++	/* Now the mm may be freed. */
++	mmdrop(handler->mn.mm);
++
+ 	kfree(handler);
+ }
+ 
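mmgrab()/mmdrop() pin the mm_struct itself, as opposed to mmget()/mmput() which pin the whole address space; that is enough here because the teardown only needs handler->mn.mm to stay a valid pointer while the notifier is unregistered and pending work is flushed. The pairing in isolation:

#include <linux/sched/mm.h>

static void example_teardown(struct mm_struct *mm)
{
	mmgrab(mm);	/* the mm_struct cannot be freed from here on */

	/* ... unregister notifiers, flush work that dereferences mm ... */

	mmdrop(mm);	/* drop our pin; the last one frees it */
}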
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 2910d78333130..d40a1460ef971 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -541,8 +541,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
+ 		spin_lock_irq(&ent->lock);
+ 		if (ent->disabled)
+ 			goto out;
+-		if (need_delay)
++		if (need_delay) {
+ 			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
++			goto out;
++		}
+ 		remove_cache_mr_locked(ent);
+ 		queue_adjust_cache_locked(ent);
+ 	}
+@@ -630,6 +632,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ {
+ 	struct mlx5_cache_ent *ent = mr->cache_ent;
+ 
++	WRITE_ONCE(dev->cache.last_add, jiffies);
+ 	spin_lock_irq(&ent->lock);
+ 	list_add_tail(&mr->list, &ent->head);
+ 	ent->available_mrs++;
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index ae50b56e89132..8ef112f883a77 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -3190,7 +3190,11 @@ serr_no_r_lock:
+ 	spin_lock_irqsave(&sqp->s_lock, flags);
+ 	rvt_send_complete(sqp, wqe, send_status);
+ 	if (sqp->ibqp.qp_type == IB_QPT_RC) {
+-		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
++		int lastwqe;
++
++		spin_lock(&sqp->r_lock);
++		lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
++		spin_unlock(&sqp->r_lock);
+ 
+ 		sqp->s_flags &= ~RVT_S_BUSY;
+ 		spin_unlock_irqrestore(&sqp->s_lock, flags);
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 759b85f033315..df4d06d4d183a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -297,6 +297,7 @@ static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
+ 	return changed;
+ }
+ 
++static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
+ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+ {
+ 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+@@ -304,16 +305,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+ 	if (rtrs_clt_change_state_from_to(clt_path,
+ 					   RTRS_CLT_CONNECTED,
+ 					   RTRS_CLT_RECONNECTING)) {
+-		struct rtrs_clt_sess *clt = clt_path->clt;
+-		unsigned int delay_ms;
+-
+-		/*
+-		 * Normal scenario, reconnect if we were successfully connected
+-		 */
+-		delay_ms = clt->reconnect_delay_sec * 1000;
+-		queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
+-				   msecs_to_jiffies(delay_ms +
+-						    prandom_u32() % RTRS_RECONNECT_SEED));
++		queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ 	} else {
+ 		/*
+ 		 * Error can happen just on establishing new connection,
+@@ -1511,6 +1503,22 @@ static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
+ static void rtrs_clt_reconnect_work(struct work_struct *work);
+ static void rtrs_clt_close_work(struct work_struct *work);
+ 
++static void rtrs_clt_err_recovery_work(struct work_struct *work)
++{
++	struct rtrs_clt_path *clt_path;
++	struct rtrs_clt_sess *clt;
++	int delay_ms;
++
++	clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
++	clt = clt_path->clt;
++	delay_ms = clt->reconnect_delay_sec * 1000;
++	rtrs_clt_stop_and_destroy_conns(clt_path);
++	queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
++			   msecs_to_jiffies(delay_ms +
++					    prandom_u32() %
++					    RTRS_RECONNECT_SEED));
++}
++
+ static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+ 					const struct rtrs_addr *path,
+ 					size_t con_num, u32 nr_poll_queues)
+@@ -1562,6 +1570,7 @@ static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+ 	clt_path->state = RTRS_CLT_CONNECTING;
+ 	atomic_set(&clt_path->connected_cnt, 0);
+ 	INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
++	INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
+ 	INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ 	rtrs_clt_init_hb(clt_path);
+ 
+@@ -2326,6 +2335,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
+ 
+ 	clt_path = container_of(work, struct rtrs_clt_path, close_work);
+ 
++	cancel_work_sync(&clt_path->err_recovery_work);
+ 	cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ 	rtrs_clt_stop_and_destroy_conns(clt_path);
+ 	rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
+@@ -2638,7 +2648,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
+ {
+ 	struct rtrs_clt_path *clt_path;
+ 	struct rtrs_clt_sess *clt;
+-	unsigned int delay_ms;
+ 	int err;
+ 
+ 	clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+@@ -2655,8 +2664,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
+ 	}
+ 	clt_path->reconnect_attempts++;
+ 
+-	/* Stop everything */
+-	rtrs_clt_stop_and_destroy_conns(clt_path);
+ 	msleep(RTRS_RECONNECT_BACKOFF);
+ 	if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ 		err = init_path(clt_path);
+@@ -2669,11 +2676,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
+ reconnect_again:
+ 	if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ 		clt_path->stats->reconnects.fail_cnt++;
+-		delay_ms = clt->reconnect_delay_sec * 1000;
+-		queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
+-				   msecs_to_jiffies(delay_ms +
+-						    prandom_u32() %
+-						    RTRS_RECONNECT_SEED));
++		queue_work(rtrs_wq, &clt_path->err_recovery_work);
+ 	}
+ }
+ 
+@@ -2908,6 +2911,7 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
+ 						 &old_state);
+ 	if (changed) {
+ 		clt_path->reconnect_attempts = 0;
++		rtrs_clt_stop_and_destroy_conns(clt_path);
+ 		queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
+ 	}
+ 	if (changed || old_state == RTRS_CLT_RECONNECTING) {
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+index d1b18a154ae03..f848c0392d982 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+@@ -134,6 +134,7 @@ struct rtrs_clt_path {
+ 	struct rtrs_clt_io_req	*reqs;
+ 	struct delayed_work	reconnect_dwork;
+ 	struct work_struct	close_work;
++	struct work_struct	err_recovery_work;
+ 	unsigned int		reconnect_attempts;
+ 	bool			established;
+ 	struct rtrs_rbuf	*rbufs;
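The recovery rework across both rtrs files routes all error handling through one queue_work(): detection contexts no longer call rtrs_clt_stop_and_destroy_conns() inline, the sleeping teardown plus reconnect scheduling live in err_recovery_work, and close_work can cancel_work_sync() it. The shape of the pattern, with illustrative my_* names:

#include <linux/workqueue.h>

struct my_path {
	struct work_struct	err_recovery_work;
	struct delayed_work	reconnect_dwork;
	unsigned int		reconnect_delay_ms;
};

static struct workqueue_struct *my_wq;	/* created at module init */

void my_stop_and_destroy_conns(struct my_path *p);	/* may sleep */

static void my_err_recovery_work(struct work_struct *work)
{
	struct my_path *p = container_of(work, struct my_path,
					 err_recovery_work);

	my_stop_and_destroy_conns(p);
	queue_delayed_work(my_wq, &p->reconnect_dwork,
			   msecs_to_jiffies(p->reconnect_delay_ms));
}

/* Error detection paths just queue; they never tear down inline. */
static void my_on_error(struct my_path *p)
{
	queue_work(my_wq, &p->err_recovery_work);
}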
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 6dc6d8b6b3686..f60381cdf1c48 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -1558,6 +1558,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
+ 				dev_info(smmu->dev, "\t0x%016llx\n",
+ 					 (unsigned long long)evt[i]);
+ 
++			cond_resched();
+ 		}
+ 
+ 		/*
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index 980e4af3f06b6..d2e82a1b56d8a 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
+ 	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
+ 						     sizeof(phandle));
+ 	if (num_iommus < 0)
+-		return 0;
++		return ERR_PTR(-ENODEV);
+ 
+ 	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
+ 	if (!arch_data)
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index cd772973114af..a0fc764ec9dc6 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
+ 	return 0;
+ }
+ 
+-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
++static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
+ {
+ 	u32 count = 1000000;	/* 1s! */
+ 	bool clean;
+ 	u64 val;
+ 
+-	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+-	val &= ~GICR_VPENDBASER_Valid;
+-	val &= ~clr;
+-	val |= set;
+-	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+-
+ 	do {
+ 		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ 		clean = !(val & GICR_VPENDBASER_Dirty);
+@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+ 		}
+ 	} while (!clean && count);
+ 
+-	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
++	if (unlikely(!clean))
+ 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
++
++	return val;
++}
++
++static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
++{
++	u64 val;
++
++	/* Make sure we wait until the RD is done with the initial scan */
++	val = read_vpend_dirty_clear(vlpi_base);
++	val &= ~GICR_VPENDBASER_Valid;
++	val &= ~clr;
++	val |= set;
++	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
++
++	val = read_vpend_dirty_clear(vlpi_base);
++	if (unlikely(val & GICR_VPENDBASER_Dirty))
+ 		val |= GICR_VPENDBASER_PendingLast;
+-	}
+ 
+ 	return val;
+ }
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 5e935d97207dc..907af63d1bba9 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
+ 	}
+ }
+ 
+-static void gic_do_wait_for_rwp(void __iomem *base)
++static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
+ {
+ 	u32 count = 1000000;	/* 1s! */
+ 
+-	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
++	while (readl_relaxed(base + GICD_CTLR) & bit) {
+ 		count--;
+ 		if (!count) {
+ 			pr_err_ratelimited("RWP timeout, gone fishing\n");
+@@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base)
+ /* Wait for completion of a distributor change */
+ static void gic_dist_wait_for_rwp(void)
+ {
+-	gic_do_wait_for_rwp(gic_data.dist_base);
++	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
+ }
+ 
+ /* Wait for completion of a redistributor change */
+ static void gic_redist_wait_for_rwp(void)
+ {
+-	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
++	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
+ }
+ 
+ #ifdef CONFIG_ARM64
+@@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
+ 		if(fwspec->param_count != 2)
+ 			return -EINVAL;
+ 
++		if (fwspec->param[0] < 16) {
++			pr_err(FW_BUG "Illegal GSI%d translation request\n",
++			       fwspec->param[0]);
++			return -EINVAL;
++		}
++
+ 		*hwirq = fwspec->param[0];
+ 		*type = fwspec->param[1];
+ 
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index b8bb46c65a97a..3dbac62c932dd 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -1085,6 +1085,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
+ 		if(fwspec->param_count != 2)
+ 			return -EINVAL;
+ 
++		if (fwspec->param[0] < 16) {
++			pr_err(FW_BUG "Illegal GSI%d translation request\n",
++			       fwspec->param[0]);
++			return -EINVAL;
++		}
++
+ 		*hwirq = fwspec->param[0];
+ 		*type = fwspec->param[1];
+ 
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 21fe8652b095b..901abd6dea419 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -18,6 +18,7 @@
+ #include <linux/dm-ioctl.h>
+ #include <linux/hdreg.h>
+ #include <linux/compat.h>
++#include <linux/nospec.h>
+ 
+ #include <linux/uaccess.h>
+ #include <linux/ima.h>
+@@ -1788,6 +1789,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
+ 	if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
+ 		return NULL;
+ 
++	cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
+ 	*ioctl_flags = _ioctls[cmd].flags;
+ 	return _ioctls[cmd].fn;
+ }
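array_index_nospec() is the Spectre v1 mitigation: the range check alone does not stop a mispredicted branch from speculatively loading _ioctls[cmd] out of bounds with a user-controlled cmd, so the index is additionally clamped under speculation. The generic form:

#include <linux/nospec.h>

static long example_lookup(unsigned long idx, const long *table, size_t nr)
{
	if (idx >= nr)
		return -EINVAL;

	/*
	 * Clamp idx so that even if the bounds check above was
	 * mispredicted, the speculative load cannot go past table[nr - 1].
	 */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}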
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 579ab6183d4d8..dffeb47a9efbc 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -499,8 +499,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 
+ 	if (unlikely(!ti)) {
+ 		int srcu_idx;
+-		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
++		struct dm_table *map;
+ 
++		map = dm_get_live_table(md, &srcu_idx);
++		if (unlikely(!map)) {
++			dm_put_live_table(md, srcu_idx);
++			return BLK_STS_RESOURCE;
++		}
+ 		ti = dm_table_find_target(map, 0);
+ 		dm_put_live_table(md, srcu_idx);
+ 	}
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 394778d8bf549..dcb8d8fc7877a 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1519,15 +1519,10 @@ static void dm_submit_bio(struct bio *bio)
+ 	struct dm_table *map;
+ 
+ 	map = dm_get_live_table(md, &srcu_idx);
+-	if (unlikely(!map)) {
+-		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+-			    dm_device_name(md));
+-		bio_io_error(bio);
+-		goto out;
+-	}
+ 
+-	/* If suspended, queue this IO for later */
+-	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
++	/* If suspended, or map not yet available, queue this IO for later */
++	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
++	    unlikely(!map)) {
+ 		if (bio->bi_opf & REQ_NOWAIT)
+ 			bio_wouldblock_error(bio);
+ 		else if (bio->bi_opf & REQ_RAHEAD)
+diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
+index c1eefaebacb64..bcc6e547e0714 100644
+--- a/drivers/misc/habanalabs/common/memory.c
++++ b/drivers/misc/habanalabs/common/memory.c
+@@ -1967,16 +1967,15 @@ err_dec_exporting_cnt:
+ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+ {
+ 	struct hl_device *hdev = hpriv->hdev;
+-	struct hl_ctx *ctx = hpriv->ctx;
+ 	u64 block_handle, device_addr = 0;
++	struct hl_ctx *ctx = hpriv->ctx;
+ 	u32 handle = 0, block_size;
+-	int rc, dmabuf_fd = -EBADF;
++	int rc;
+ 
+ 	switch (args->in.op) {
+ 	case HL_MEM_OP_ALLOC:
+ 		if (args->in.alloc.mem_size == 0) {
+-			dev_err(hdev->dev,
+-				"alloc size must be larger than 0\n");
++			dev_err(hdev->dev, "alloc size must be larger than 0\n");
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+@@ -1997,15 +1996,14 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+ 
+ 	case HL_MEM_OP_MAP:
+ 		if (args->in.flags & HL_MEM_USERPTR) {
+-			device_addr = args->in.map_host.host_virt_addr;
+-			rc = 0;
++			dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
++			rc = -EPERM;
+ 		} else {
+-			rc = get_paddr_from_handle(ctx, &args->in,
+-							&device_addr);
++			rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
++			memset(args, 0, sizeof(*args));
++			args->out.device_virt_addr = device_addr;
+ 		}
+ 
+-		memset(args, 0, sizeof(*args));
+-		args->out.device_virt_addr = device_addr;
+ 		break;
+ 
+ 	case HL_MEM_OP_UNMAP:
+@@ -2013,20 +2011,14 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+ 		break;
+ 
+ 	case HL_MEM_OP_MAP_BLOCK:
+-		rc = map_block(hdev, args->in.map_block.block_addr,
+-				&block_handle, &block_size);
++		rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
+ 		args->out.block_handle = block_handle;
+ 		args->out.block_size = block_size;
+ 		break;
+ 
+ 	case HL_MEM_OP_EXPORT_DMABUF_FD:
+-		rc = export_dmabuf_from_addr(ctx,
+-				args->in.export_dmabuf_fd.handle,
+-				args->in.export_dmabuf_fd.mem_size,
+-				args->in.flags,
+-				&dmabuf_fd);
+-		memset(args, 0, sizeof(*args));
+-		args->out.fd = dmabuf_fd;
++		dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
++		rc = -EPERM;
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+index 6134b6ae76157..3cadef97817d6 100644
+--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
++++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+@@ -467,7 +467,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
+ {
+ 	/* MMU H/W fini was already done in device hw_fini() */
+ 
+-	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
++	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+ 		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+ 		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+ 
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index 013c6da2e3ca1..b4dacea801511 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -7819,6 +7819,48 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev,
+ 		fw_alive->thread_id, fw_alive->uptime_seconds);
+ }
+ 
++static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
++						void *data)
++{
++	char desc[64] = "", *type;
++	struct eq_nic_sei_event *eq_nic_sei = data;
++	u16 nic_id = event_type - GAUDI_EVENT_NIC_SEI_0;
++
++	switch (eq_nic_sei->axi_error_cause) {
++	case RXB:
++		type = "RXB";
++		break;
++	case RXE:
++		type = "RXE";
++		break;
++	case TXS:
++		type = "TXS";
++		break;
++	case TXE:
++		type = "TXE";
++		break;
++	case QPC_RESP:
++		type = "QPC_RESP";
++		break;
++	case NON_AXI_ERR:
++		type = "NON_AXI_ERR";
++		break;
++	case TMR:
++		type = "TMR";
++		break;
++	default:
++		dev_err(hdev->dev, "unknown NIC AXI cause %d\n",
++			eq_nic_sei->axi_error_cause);
++		type = "N/A";
++		break;
++	}
++
++	snprintf(desc, sizeof(desc), "NIC%d_%s%d", nic_id, type,
++			eq_nic_sei->id);
++	dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
++		event_type, desc);
++}
++
+ static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
+ {
+ 	/* GAUDI doesn't support any reset except hard-reset */
+@@ -8066,6 +8108,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
+ 				struct hl_eq_entry *eq_entry)
+ {
+ 	struct gaudi_device *gaudi = hdev->asic_specific;
++	u64 data = le64_to_cpu(eq_entry->data[0]);
+ 	u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ 	u32 fw_fatal_err_flag = 0;
+ 	u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
+@@ -8263,6 +8306,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
+ 		hl_fw_unmask_irq(hdev, event_type);
+ 		break;
+ 
++	case GAUDI_EVENT_NIC_SEI_0 ... GAUDI_EVENT_NIC_SEI_4:
++		gaudi_print_nic_axi_irq_info(hdev, event_type, &data);
++		hl_fw_unmask_irq(hdev, event_type);
++		break;
++
+ 	case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
+ 		gaudi_print_irq_info(hdev, event_type, false);
+ 		gaudi_print_sm_sei_info(hdev, event_type,
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 4e67c1403cc93..db99882c95d86 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
+ 	       brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+ }
+ 
++static int mmc_spi_err_check(struct mmc_card *card)
++{
++	u32 status = 0;
++	int err;
++
++	/*
++	 * SPI does not have a TRAN state we have to wait on, instead the
++	 * card is ready again when it no longer holds the line LOW.
++	 * We still have to ensure two things here before we know the write
++	 * was successful:
++	 * 1. The card may have disconnected during busy, leaving us reading
++	 * back our own pull-up as if the card were still there, so ensure
++	 * it still responds.
++	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
++	 * just reconnected card after being disconnected during busy.
++	 */
++	err = __mmc_send_status(card, &status, 0);
++	if (err)
++		return err;
++	/* All R1 and R2 bits of SPI are errors in our case */
++	if (status)
++		return -EIO;
++	return 0;
++}
++
+ static int mmc_blk_busy_cb(void *cb_data, bool *busy)
+ {
+ 	struct mmc_blk_busy_data *data = cb_data;
+@@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+ 	struct mmc_blk_busy_data cb_data;
+ 	int err;
+ 
+-	if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
++	if (rq_data_dir(req) == READ)
+ 		return 0;
+ 
++	if (mmc_host_is_spi(card->host)) {
++		err = mmc_spi_err_check(card);
++		if (err)
++			mqrq->brq.data.bytes_xfered = 0;
++		return err;
++	}
++
+ 	cb_data.card = card;
+ 	cb_data.status = 0;
+ 	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
+@@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 	struct mmc_blk_data *md;
+ 	int devidx, ret;
+ 	char cap_str[10];
++	bool cache_enabled = false;
++	bool fua_enabled = false;
+ 
+ 	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ 	if (devidx < 0) {
+@@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 			md->flags |= MMC_BLK_CMD23;
+ 	}
+ 
+-	if (mmc_card_mmc(card) &&
+-	    md->flags & MMC_BLK_CMD23 &&
++	if (md->flags & MMC_BLK_CMD23 &&
+ 	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ 	     card->ext_csd.rel_sectors)) {
+ 		md->flags |= MMC_BLK_REL_WR;
+-		blk_queue_write_cache(md->queue.queue, true, true);
++		fua_enabled = true;
++		cache_enabled = true;
+ 	}
++	if (mmc_cache_enabled(card->host))
++		cache_enabled  = true;
++
++	blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
+ 
+ 	string_get_size((u64)size, 512, STRING_UNITS_2,
+ 			cap_str, sizeof(cap_str));
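The queue flags are now derived from two separate facts instead of riding together on the eMMC reliable-write condition: cache_enabled tells the block layer a volatile cache exists (so REQ_PREFLUSH must be issued) and fua_enabled tells it REQ_FUA is honored natively (backed by reliable write). Condensed, with the types as in block.c:

	bool cache_enabled = false;
	bool fua_enabled = false;

	if (md->flags & MMC_BLK_REL_WR) {
		fua_enabled = true;	/* reliable write backs REQ_FUA */
		cache_enabled = true;
	}
	if (mmc_cache_enabled(card->host))
		cache_enabled = true;	/* card/host cache: flushes needed */

	blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);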
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index 20f5687272778..f879dc63d9364 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -149,6 +149,11 @@ static const struct mmc_fixup __maybe_unused sdio_fixup_methods[] = {
+ static const struct mmc_fixup __maybe_unused sdio_card_init_methods[] = {
+ 	SDIO_FIXUP_COMPATIBLE("ti,wl1251", wl1251_quirk, 0),
+ 
++	SDIO_FIXUP_COMPATIBLE("silabs,wf200", add_quirk,
++			      MMC_QUIRK_BROKEN_BYTE_MODE_512 |
++			      MMC_QUIRK_LENIENT_FN0 |
++			      MMC_QUIRK_BLKSZ_FOR_BYTE_MODE),
++
+ 	END_FIXUP
+ };
+ 
+diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
+index 9c13f2c313658..4566d7fc9055a 100644
+--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
++++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
+@@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
+ 	 * excepted the last element which has no constraint on idmasize
+ 	 */
+ 	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
+-		if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
+-		    !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
++		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
++		    !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
+ 			dev_err(mmc_dev(host->mmc),
+ 				"unaligned scatterlist: ofst:%x length:%d\n",
+ 				data->sg->offset, data->sg->length);
+@@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
+ 		}
+ 	}
+ 
+-	if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
++	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
+ 		dev_err(mmc_dev(host->mmc),
+ 			"unaligned last scatterlist: ofst:%x length:%d\n",
+ 			data->sg->offset, data->sg->length);
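
The bug fixed here is easy to reintroduce: the loop walked the scatterlist with for_each_sg() but kept testing data->sg, i.e. only the first element, so a misaligned middle element passed validation. Inside for_each_sg() the iterator variable must be dereferenced, and after the loop it conveniently points at the last element. A generic sketch of the corrected pattern (demo names; 'burst' stands in for SDMMC_IDMA_BURST):

    #include <linux/scatterlist.h>

    /* Every element except the last must have a 32-bit aligned offset and a
     * burst-aligned length; the last element only needs an aligned offset. */
    static bool demo_sg_aligned(struct scatterlist *sgl, int nents, u32 burst)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents - 1, i) {
                    if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
                        !IS_ALIGNED(sg->length, burst))
                            return false; /* test sg, never sgl, inside the loop */
            }
            /* after the loop, sg points at the last element */
            return IS_ALIGNED(sg->offset, sizeof(u32));
    }
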
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index 2797a9c0f17d8..ddb5ca2f559e2 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
+ 		return clk_get_rate(priv->clk);
+ 
+ 	if (priv->clkh) {
++		/* HS400 with 4TAP needs different clock settings */
+ 		bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+-		bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
+-				      (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
++		bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
+ 		clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
+ 		ref_clk = priv->clkh;
+ 	}
+@@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
+ 			SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
+ 			sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+ 
+-	/* Set the sampling clock selection range of HS400 mode */
+ 	sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
+ 		       SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
+-		       0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
++		       sd_scc_read32(host, priv,
++				     SH_MOBILE_SDHI_SCC_DTCNTL));
+ 
+ 	/* Avoid bad TAP */
+ 	if (bad_taps & BIT(priv->tap_set)) {
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 666cee4c7f7c6..08e838400b526 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
+ {
+ 	/* Wait for 5ms after set 1.8V signal enable bit */
+ 	usleep_range(5000, 5500);
+-
+-	/*
+-	 * For some reason the controller's Host Control2 register reports
+-	 * the bit representing 1.8V signaling as 0 when read after it was
+-	 * written as 1. Subsequent read reports 1.
+-	 *
+-	 * Since this may cause some issues, do an empty read of the Host
+-	 * Control2 register here to circumvent this.
+-	 */
+-	sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ }
+ 
+ static unsigned int xenon_get_max_clock(struct sdhci_host *host)
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
+index ec87126e1a7df..8ccda748fd084 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
+@@ -172,12 +172,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
+ 	const struct es58x_fd_rx_event_msg *rx_event_msg;
+ 	int ret;
+ 
++	rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
+ 	ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
+ 	if (ret)
+ 		return ret;
+ 
+-	rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
+-
+ 	return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
+ 				rx_event_msg->event_code,
+ 				get_unaligned_le64(&rx_event_msg->timestamp));
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 33f0ceae381d4..2875b52508567 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1940,6 +1940,10 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ 		case FLOW_ACTION_GATE:
+ 			size = struct_size(sgi, entries, a->gate.num_entries);
+ 			sgi = kzalloc(size, GFP_KERNEL);
++			if (!sgi) {
++				ret = -ENOMEM;
++				goto err;
++			}
+ 			vsc9959_psfp_parse_gate(a, sgi);
+ 			ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ 			if (ret) {
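
The hunk adds the allocation-failure check that was missing after kzalloc(). struct_size() is the overflow-safe way to size a structure that ends in a flexible array. A minimal sketch of the idiom, with hypothetical types:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_gate {
            int num_entries;
            u64 intervals[]; /* flexible array member */
    };

    static struct demo_gate *demo_gate_alloc(int n)
    {
            struct demo_gate *g;

            /* struct_size() guards header + n * sizeof(u64) against overflow;
             * kzalloc() can fail and the result must be checked. */
            g = kzalloc(struct_size(g, intervals, n), GFP_KERNEL);
            if (!g)
                    return NULL; /* caller maps this to -ENOMEM */
            g->num_entries = n;
            return g;
    }
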
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index b1c98d1408b82..6af0ae1d0c462 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3224,6 +3224,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
+ 		}
+ 		qidx = bp->tc_to_qidx[j];
+ 		ring->queue_id = bp->q_info[qidx].queue_id;
++		spin_lock_init(&txr->xdp_tx_lock);
+ 		if (i < bp->tx_nr_rings_xdp)
+ 			continue;
+ 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+@@ -10294,6 +10295,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ 	if (irq_re_init)
+ 		udp_tunnel_nic_reset_ntf(bp->dev);
+ 
++	if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
++		if (!static_key_enabled(&bnxt_xdp_locking_key))
++			static_branch_enable(&bnxt_xdp_locking_key);
++	} else if (static_key_enabled(&bnxt_xdp_locking_key)) {
++		static_branch_disable(&bnxt_xdp_locking_key);
++	}
+ 	set_bit(BNXT_STATE_OPEN, &bp->state);
+ 	bnxt_enable_int(bp);
+ 	/* Enable TX queues */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 666fc1e7a7d2f..d57bff46b5878 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -593,7 +593,8 @@ struct nqe_cn {
+ #define BNXT_MAX_MTU		9500
+ #define BNXT_MAX_PAGE_MODE_MTU	\
+ 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
+-	 XDP_PACKET_HEADROOM)
++	 XDP_PACKET_HEADROOM - \
++	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
+ 
+ #define BNXT_MIN_PKT_SIZE	52
+ 
+@@ -800,6 +801,8 @@ struct bnxt_tx_ring_info {
+ 	u32			dev_state;
+ 
+ 	struct bnxt_ring_struct	tx_ring_struct;
++	/* Synchronize simultaneous xdp_xmit on same ring */
++	spinlock_t		xdp_tx_lock;
+ };
+ 
+ #define BNXT_LEGACY_COAL_CMPL_PARAMS					\
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 8aaa2335f848a..f09b04556c32e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2101,9 +2101,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
+ 		}
+ 
+ 		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+-		if (bp->hwrm_spec_code >= 0x10201)
+-			link_info->req_flow_ctrl =
+-				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
++		link_info->req_flow_ctrl = 0;
+ 	} else {
+ 		/* when transition from auto pause to force pause,
+ 		 * force a link change
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+index 52fad0fdeacf3..03b1d6c045048 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+@@ -20,6 +20,8 @@
+ #include "bnxt.h"
+ #include "bnxt_xdp.h"
+ 
++DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
++
+ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ 				   struct bnxt_tx_ring_info *txr,
+ 				   dma_addr_t mapping, u32 len)
+@@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ 	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
+ 	txr = &bp->tx_ring[ring];
+ 
++	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
++		return -EINVAL;
++
++	if (static_branch_unlikely(&bnxt_xdp_locking_key))
++		spin_lock(&txr->xdp_tx_lock);
++
+ 	for (i = 0; i < num_frames; i++) {
+ 		struct xdp_frame *xdp = frames[i];
+ 
+-		if (!txr || !bnxt_tx_avail(bp, txr) ||
+-		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
++		if (!bnxt_tx_avail(bp, txr))
+ 			break;
+ 
+ 		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
+@@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+ 		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ 	}
+ 
++	if (static_branch_unlikely(&bnxt_xdp_locking_key))
++		spin_unlock(&txr->xdp_tx_lock);
++
+ 	return nxmit;
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+index 0df40c3beb050..067bb5e821f54 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+@@ -10,6 +10,8 @@
+ #ifndef BNXT_XDP_H
+ #define BNXT_XDP_H
+ 
++DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
++
+ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
+ 				   struct bnxt_tx_ring_info *txr,
+ 				   dma_addr_t mapping, u32 len);
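
The bnxt changes combine two mechanisms: a per-ring spinlock serializes concurrent XDP_REDIRECT transmits that hash to the same ring, and a static key keeps that lock cost-free when every CPU owns a dedicated XDP TX ring. static_branch_enable() patches the branch in at open time only when rings must be shared. A reduced sketch of the pattern (demo names):

    #include <linux/jump_label.h>
    #include <linux/spinlock.h>

    DEFINE_STATIC_KEY_FALSE(demo_xdp_locking_key);

    struct demo_ring {
            spinlock_t xdp_tx_lock; /* spin_lock_init() at ring setup */
    };

    static void demo_xdp_xmit(struct demo_ring *txr)
    {
            /* near-zero cost when disabled: the branch is patched out */
            if (static_branch_unlikely(&demo_xdp_locking_key))
                    spin_lock(&txr->xdp_tx_lock);

            /* ... post descriptors ... */

            if (static_branch_unlikely(&demo_xdp_locking_key))
                    spin_unlock(&txr->xdp_tx_lock);
    }

    /* at open time, enable only if rings may be shared between CPUs:
     *   if (nr_xdp_rings < num_possible_cpus())
     *           static_branch_enable(&demo_xdp_locking_key);
     */
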
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+index 5f5f8c53c4a0f..c8cb541572ffe 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+@@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+ 	base = of_iomap(node, 0);
+ 	if (!base) {
+ 		err = -ENOMEM;
+-		goto err_close;
++		goto err_put;
+ 	}
+ 
+ 	err = fsl_mc_allocate_irqs(mc_dev);
+@@ -210,6 +210,8 @@ err_free_mc_irq:
+ 	fsl_mc_free_irqs(mc_dev);
+ err_unmap:
+ 	iounmap(base);
++err_put:
++	of_node_put(node);
+ err_close:
+ 	dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ err_free_mcp:
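
The leak fixed here is a classic unwind-order bug: an early failure jumped to a label below of_node_put(), so the device-node reference taken earlier was never dropped. Probe-style functions release resources through a ladder of labels in reverse order of acquisition. A sketch of the shape, where demo_request_irqs() is a hypothetical second setup step:

    static int demo_request_irqs(struct device *dev)
    {
            return 0; /* stub so the sketch stands alone */
    }

    static int demo_probe(struct device *dev, struct device_node *node)
    {
            void __iomem *base;
            int err;

            base = of_iomap(node, 0);
            if (!base) {
                    err = -ENOMEM;
                    goto err_put; /* must still drop the node reference */
            }

            err = demo_request_irqs(dev);
            if (err)
                    goto err_unmap;

            of_node_put(node); /* done with the node on success, too */
            return 0;

    err_unmap:
            iounmap(base);
    err_put:
            of_node_put(node); /* balances the earlier of_find_*() */
            return err;
    }
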
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 9ddeb015eb7eb..e830987a8c6dd 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1899,8 +1899,9 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ 
+ 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ 
+-	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+-				    sizeof(vsi_ctx->info), cmd_details);
++	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
++					      sizeof(vsi_ctx->info),
++					      cmd_details, true);
+ 
+ 	if (status)
+ 		goto aq_add_vsi_exit;
+@@ -2287,8 +2288,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ 
+ 	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ 
+-	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+-				    sizeof(vsi_ctx->info), cmd_details);
++	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
++					      sizeof(vsi_ctx->info),
++					      cmd_details, true);
+ 
+ 	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ 	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+@@ -2673,8 +2675,8 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ 	if (buf_size > I40E_AQ_LARGE_BUF)
+ 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ 
+-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+-				       cmd_details);
++	status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
++					      cmd_details, true);
+ 
+ 	return status;
+ }
+@@ -2715,8 +2717,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ 	if (buf_size > I40E_AQ_LARGE_BUF)
+ 		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ 
+-	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+-				       cmd_details);
++	status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
++					      cmd_details, true);
+ 
+ 	return status;
+ }
+@@ -3868,7 +3870,8 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ 
+ 	cmd->seid = cpu_to_le16(seid);
+ 
+-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
++	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
++					      cmd_details, true);
+ 
+ 	return status;
+ }
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 4babe4705a552..358a9b3031d5c 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -44,6 +44,9 @@
+ #define DEFAULT_DEBUG_LEVEL_SHIFT 3
+ #define PFX "iavf: "
+ 
++int iavf_status_to_errno(enum iavf_status status);
++int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
++
+ /* VSI state flags shared with common code */
+ enum iavf_vsi_state_t {
+ 	__IAVF_VSI_DOWN,
+@@ -515,7 +518,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
+ void iavf_del_vlans(struct iavf_adapter *adapter);
+ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
+ void iavf_request_stats(struct iavf_adapter *adapter);
+-void iavf_request_reset(struct iavf_adapter *adapter);
++int iavf_request_reset(struct iavf_adapter *adapter);
+ void iavf_get_hena(struct iavf_adapter *adapter);
+ void iavf_set_hena(struct iavf_adapter *adapter);
+ void iavf_set_rss_key(struct iavf_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 0e178a0a59c5d..d10e9a8e8011f 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2");
+ static const struct net_device_ops iavf_netdev_ops;
+ struct workqueue_struct *iavf_wq;
+ 
++int iavf_status_to_errno(enum iavf_status status)
++{
++	switch (status) {
++	case IAVF_SUCCESS:
++		return 0;
++	case IAVF_ERR_PARAM:
++	case IAVF_ERR_MAC_TYPE:
++	case IAVF_ERR_INVALID_MAC_ADDR:
++	case IAVF_ERR_INVALID_LINK_SETTINGS:
++	case IAVF_ERR_INVALID_PD_ID:
++	case IAVF_ERR_INVALID_QP_ID:
++	case IAVF_ERR_INVALID_CQ_ID:
++	case IAVF_ERR_INVALID_CEQ_ID:
++	case IAVF_ERR_INVALID_AEQ_ID:
++	case IAVF_ERR_INVALID_SIZE:
++	case IAVF_ERR_INVALID_ARP_INDEX:
++	case IAVF_ERR_INVALID_FPM_FUNC_ID:
++	case IAVF_ERR_QP_INVALID_MSG_SIZE:
++	case IAVF_ERR_INVALID_FRAG_COUNT:
++	case IAVF_ERR_INVALID_ALIGNMENT:
++	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
++	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
++	case IAVF_ERR_INVALID_VF_ID:
++	case IAVF_ERR_INVALID_HMCFN_ID:
++	case IAVF_ERR_INVALID_PBLE_INDEX:
++	case IAVF_ERR_INVALID_SD_INDEX:
++	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
++	case IAVF_ERR_INVALID_SD_TYPE:
++	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
++	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
++	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
++		return -EINVAL;
++	case IAVF_ERR_NVM:
++	case IAVF_ERR_NVM_CHECKSUM:
++	case IAVF_ERR_PHY:
++	case IAVF_ERR_CONFIG:
++	case IAVF_ERR_UNKNOWN_PHY:
++	case IAVF_ERR_LINK_SETUP:
++	case IAVF_ERR_ADAPTER_STOPPED:
++	case IAVF_ERR_MASTER_REQUESTS_PENDING:
++	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
++	case IAVF_ERR_RESET_FAILED:
++	case IAVF_ERR_BAD_PTR:
++	case IAVF_ERR_SWFW_SYNC:
++	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
++	case IAVF_ERR_QUEUE_EMPTY:
++	case IAVF_ERR_FLUSHED_QUEUE:
++	case IAVF_ERR_OPCODE_MISMATCH:
++	case IAVF_ERR_CQP_COMPL_ERROR:
++	case IAVF_ERR_BACKING_PAGE_ERROR:
++	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
++	case IAVF_ERR_MEMCPY_FAILED:
++	case IAVF_ERR_SRQ_ENABLED:
++	case IAVF_ERR_ADMIN_QUEUE_ERROR:
++	case IAVF_ERR_ADMIN_QUEUE_FULL:
++	case IAVF_ERR_BAD_IWARP_CQE:
++	case IAVF_ERR_NVM_BLANK_MODE:
++	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
++	case IAVF_ERR_DIAG_TEST_FAILED:
++	case IAVF_ERR_FIRMWARE_API_VERSION:
++	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
++		return -EIO;
++	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
++		return -ENODEV;
++	case IAVF_ERR_NO_AVAILABLE_VSI:
++	case IAVF_ERR_RING_FULL:
++		return -ENOSPC;
++	case IAVF_ERR_NO_MEMORY:
++		return -ENOMEM;
++	case IAVF_ERR_TIMEOUT:
++	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
++		return -ETIMEDOUT;
++	case IAVF_ERR_NOT_IMPLEMENTED:
++	case IAVF_NOT_SUPPORTED:
++		return -EOPNOTSUPP;
++	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
++		return -EALREADY;
++	case IAVF_ERR_NOT_READY:
++		return -EBUSY;
++	case IAVF_ERR_BUF_TOO_SHORT:
++		return -EMSGSIZE;
++	}
++
++	return -EIO;
++}
++
++int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
++{
++	switch (v_status) {
++	case VIRTCHNL_STATUS_SUCCESS:
++		return 0;
++	case VIRTCHNL_STATUS_ERR_PARAM:
++	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
++		return -EINVAL;
++	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
++		return -ENOMEM;
++	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
++	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
++	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
++		return -EIO;
++	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
++		return -EOPNOTSUPP;
++	}
++
++	return -EIO;
++}
++
+ /**
+  * iavf_pdev_to_adapter - go from pci_dev to adapter
+  * @pdev: pci_dev pointer
+@@ -1421,7 +1528,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
+ 	struct iavf_aqc_get_set_rss_key_data *rss_key =
+ 		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
+ 	struct iavf_hw *hw = &adapter->hw;
+-	int ret = 0;
++	enum iavf_status status;
+ 
+ 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ 		/* bail because we already have a command pending */
+@@ -1430,24 +1537,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
+ 		return -EBUSY;
+ 	}
+ 
+-	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+-	if (ret) {
++	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
++	if (status) {
+ 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+-			iavf_stat_str(hw, ret),
++			iavf_stat_str(hw, status),
+ 			iavf_aq_str(hw, hw->aq.asq_last_status));
+-		return ret;
++		return iavf_status_to_errno(status);
+ 
+ 	}
+ 
+-	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+-				  adapter->rss_lut, adapter->rss_lut_size);
+-	if (ret) {
++	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
++				     adapter->rss_lut, adapter->rss_lut_size);
++	if (status) {
+ 		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+-			iavf_stat_str(hw, ret),
++			iavf_stat_str(hw, status),
+ 			iavf_aq_str(hw, hw->aq.asq_last_status));
++		return iavf_status_to_errno(status);
+ 	}
+ 
+-	return ret;
++	return 0;
+ 
+ }
+ 
+@@ -2003,23 +2111,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
+ {
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct iavf_hw *hw = &adapter->hw;
+-	int err;
++	enum iavf_status status;
++	int ret;
+ 
+ 	WARN_ON(adapter->state != __IAVF_STARTUP);
+ 
+ 	/* driver loaded, probe complete */
+ 	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
+-	err = iavf_set_mac_type(hw);
+-	if (err) {
+-		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
++	status = iavf_set_mac_type(hw);
++	if (status) {
++		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
+ 		goto err;
+ 	}
+ 
+-	err = iavf_check_reset_complete(hw);
+-	if (err) {
++	ret = iavf_check_reset_complete(hw);
++	if (ret) {
+ 		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
+-			 err);
++			 ret);
+ 		goto err;
+ 	}
+ 	hw->aq.num_arq_entries = IAVF_AQ_LEN;
+@@ -2027,14 +2136,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
+ 	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ 	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ 
+-	err = iavf_init_adminq(hw);
+-	if (err) {
+-		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
++	status = iavf_init_adminq(hw);
++	if (status) {
++		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
++			status);
+ 		goto err;
+ 	}
+-	err = iavf_send_api_ver(adapter);
+-	if (err) {
+-		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
++	ret = iavf_send_api_ver(adapter);
++	if (ret) {
++		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
+ 		iavf_shutdown_adminq(hw);
+ 		goto err;
+ 	}
+@@ -2070,7 +2180,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
+ 	/* aq msg sent, awaiting reply */
+ 	err = iavf_verify_api_ver(adapter);
+ 	if (err) {
+-		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
++		if (err == -EALREADY)
+ 			err = iavf_send_api_ver(adapter);
+ 		else
+ 			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+@@ -2171,11 +2281,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
+ 		}
+ 	}
+ 	err = iavf_get_vf_config(adapter);
+-	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
++	if (err == -EALREADY) {
+ 		err = iavf_send_vf_config_msg(adapter);
+ 		goto err_alloc;
+-	} else if (err == IAVF_ERR_PARAM) {
+-		/* We only get ERR_PARAM if the device is in a very bad
++	} else if (err == -EINVAL) {
++		/* We only get -EINVAL if the device is in a very bad
+ 		 * state or if we've been disabled for previous bad
+ 		 * behavior. Either way, we're done now.
+ 		 */
+@@ -2626,6 +2736,7 @@ static void iavf_reset_task(struct work_struct *work)
+ 	struct iavf_hw *hw = &adapter->hw;
+ 	struct iavf_mac_filter *f, *ftmp;
+ 	struct iavf_cloud_filter *cf;
++	enum iavf_status status;
+ 	u32 reg_val;
+ 	int i = 0, err;
+ 	bool running;
+@@ -2727,10 +2838,12 @@ continue_reset:
+ 	/* kill and reinit the admin queue */
+ 	iavf_shutdown_adminq(hw);
+ 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+-	err = iavf_init_adminq(hw);
+-	if (err)
++	status = iavf_init_adminq(hw);
++	if (status) {
+ 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
+-			 err);
++			 status);
++		goto reset_err;
++	}
+ 	adapter->aq_required = 0;
+ 
+ 	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
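
The large additions here exist because enum iavf_status values are positive, driver-private codes, while the rest of the stack expects negative errnos; returning one where the other is expected silently breaks comparisons like err == -EBUSY and the error codes user space sees. The two mappers confine the translation to the AdminQueue boundary. Usage sketch (hw, dev, vsi_id and rss_key assumed in scope):

    enum iavf_status status;

    status = iavf_aq_set_rss_key(hw, vsi_id, rss_key);
    if (status) {
            dev_err(dev, "set RSS key failed: %s\n", iavf_stat_str(hw, status));
            return iavf_status_to_errno(status); /* e.g. IAVF_ERR_NO_MEMORY -> -ENOMEM */
    }
    return 0;
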
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 5263cefe46f5f..b8c5837f8b505 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -22,17 +22,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
+ 			    enum virtchnl_ops op, u8 *msg, u16 len)
+ {
+ 	struct iavf_hw *hw = &adapter->hw;
+-	enum iavf_status err;
++	enum iavf_status status;
+ 
+ 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+ 		return 0; /* nothing to see here, move along */
+ 
+-	err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+-	if (err)
+-		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+-			op, iavf_stat_str(hw, err),
++	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
++	if (status)
++		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
++			op, iavf_stat_str(hw, status),
+ 			iavf_aq_str(hw, hw->aq.asq_last_status));
+-	return err;
++	return iavf_status_to_errno(status);
+ }
+ 
+ /**
+@@ -1827,11 +1827,13 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
+  *
+  * Request that the PF reset this VF. No response is expected.
+  **/
+-void iavf_request_reset(struct iavf_adapter *adapter)
++int iavf_request_reset(struct iavf_adapter *adapter)
+ {
++	int err;
+ 	/* Don't check CURRENT_OP - this is always higher priority */
+-	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
++	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
++	return err;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 2f60230d332a6..9c04a71a9fca3 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -674,7 +674,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
+ 
+ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+ {
+-	return !!vsi->xdp_prog;
++	return !!READ_ONCE(vsi->xdp_prog);
+ }
+ 
+ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
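
ice_is_xdp_ena_vsi() can race with the path that installs or tears down the XDP program, so the load is wrapped in READ_ONCE() to get a single, untorn read the compiler cannot cache or refetch; the writer side pairs it with WRITE_ONCE(). Generic sketch (demo names):

    #include <linux/compiler.h>

    struct bpf_prog; /* opaque here; the fast path only tests the pointer */

    struct demo_vsi {
            struct bpf_prog *xdp_prog; /* set by config path, read in hot path */
    };

    static bool demo_is_xdp_ena(struct demo_vsi *vsi)
    {
            /* single untorn load; the compiler may not cache or refetch it */
            return !!READ_ONCE(vsi->xdp_prog);
    }

    static void demo_set_prog(struct demo_vsi *vsi, struct bpf_prog *prog)
    {
            /* pair with READ_ONCE: readers see old or new, never a torn value */
            WRITE_ONCE(vsi->xdp_prog, prog);
    }
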
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 53256aca27c78..5fd2bbeab2d15 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1452,6 +1452,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+ 		ring->tx_tstamps = &pf->ptp.port.tx;
+ 		ring->dev = dev;
+ 		ring->count = vsi->num_tx_desc;
++		ring->txq_teid = ICE_INVAL_TEID;
+ 		WRITE_ONCE(vsi->tx_rings[i], ring);
+ 	}
+ 
+@@ -3147,6 +3148,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ 		}
+ 	}
+ 
++	if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
++		ice_clear_dflt_vsi(pf->first_sw);
+ 	ice_fltr_remove_all(vsi);
+ 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ 	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 296f9d5f74084..db2e02e673a77 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2546,7 +2546,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+ 		spin_lock_init(&xdp_ring->tx_lock);
+ 		for (j = 0; j < xdp_ring->count; j++) {
+ 			tx_desc = ICE_TX_DESC(xdp_ring, j);
+-			tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
++			tx_desc->cmd_type_offset_bsz = 0;
+ 		}
+ 	}
+ 
+@@ -2742,8 +2742,10 @@ free_qmap:
+ 
+ 	ice_for_each_xdp_txq(vsi, i)
+ 		if (vsi->xdp_rings[i]) {
+-			if (vsi->xdp_rings[i]->desc)
++			if (vsi->xdp_rings[i]->desc) {
++				synchronize_rcu();
+ 				ice_free_tx_ring(vsi->xdp_rings[i]);
++			}
+ 			kfree_rcu(vsi->xdp_rings[i], rcu);
+ 			vsi->xdp_rings[i] = NULL;
+ 		}
+@@ -5432,16 +5434,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
+ 
+ 	/* Add filter for new MAC. If filter exists, return success */
+ 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+-	if (err == -EEXIST)
++	if (err == -EEXIST) {
+ 		/* Although this MAC filter is already present in hardware it's
+ 		 * possible in some cases (e.g. bonding) that dev_addr was
+ 		 * modified outside of the driver and needs to be restored back
+ 		 * to this value.
+ 		 */
+ 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+-	else if (err)
++
++		return 0;
++	} else if (err) {
+ 		/* error if the new filter addition failed */
+ 		err = -EADDRNOTAVAIL;
++	}
+ 
+ err_update_filters:
+ 	if (err) {
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 1be3cd4b2bef7..2bee8f10ad89c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -3351,9 +3351,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+ 				goto error_param;
+ 			}
+ 
+-			/* Skip queue if not enabled */
+ 			if (!test_bit(vf_q_id, vf->txq_ena))
+-				continue;
++				dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
++					vf_q_id, vsi->vsi_num);
+ 
+ 			ice_fill_txq_meta(vsi, ring, &txq_meta);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index feb874bde171f..30620b942fa09 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+ static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+ {
+ 	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+-	if (ice_is_xdp_ena_vsi(vsi))
++	if (ice_is_xdp_ena_vsi(vsi)) {
++		synchronize_rcu();
+ 		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
++	}
+ 	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+ }
+ 
+@@ -763,7 +765,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ 	struct ice_vsi *vsi = np->vsi;
+ 	struct ice_tx_ring *ring;
+ 
+-	if (test_bit(ICE_DOWN, vsi->state))
++	if (test_bit(ICE_VSI_DOWN, vsi->state))
+ 		return -ENETDOWN;
+ 
+ 	if (!ice_is_xdp_ena_vsi(vsi))
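
Both ice hunks insert synchronize_rcu() before a ring is cleaned: the XDP transmit path runs under rcu_read_lock(), so teardown must wait for every in-flight reader to leave its critical section before the ring is torn down. The canonical unpublish-then-wait shape (demo names; demo_clean_ring() is a hypothetical destructor):

    #include <linux/rcupdate.h>

    struct demo_ring { int dummy; };

    static void demo_clean_ring(struct demo_ring *ring) { /* hypothetical */ }

    static void demo_retire_ring(struct demo_ring **slot)
    {
            struct demo_ring *old = *slot;

            WRITE_ONCE(*slot, NULL); /* unpublish so new readers miss it */
            synchronize_rcu();       /* wait out all rcu_read_lock() sections */
            demo_clean_ring(old);    /* now safe: no reader still holds it */
    }
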
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 143ca8be5eb59..4008596963be4 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ 	}
+ 
+ 	ret = of_get_mac_address(pnp, ppd.mac_addr);
+-	if (ret)
++	if (ret == -EPROBE_DEFER)
+ 		return ret;
+ 
+ 	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
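
The subtlety behind this one-line fix: of_get_mac_address() can fail for two very different reasons. -EPROBE_DEFER means an NVMEM provider holding the MAC is not ready yet, and the whole probe should be retried; any other error just means no address in the DT, which the driver can satisfy another way. Sketch (the fallback shown is illustrative, not this driver's):

    ret = of_get_mac_address(np, mac);
    if (ret == -EPROBE_DEFER)
            return ret; /* provider not ready: let the core re-probe us */
    if (ret)
            eth_hw_addr_random(dev); /* any other failure: fall back */
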
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+index 6699bdf5cf012..b895c378cfaf3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+@@ -27,11 +27,7 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ 		    struct mlx5e_priv *priv,
+ 		    struct mlx5_flow_attr *attr)
+ {
+-	struct mlx5e_sample_attr *sample_attr;
+-
+-	sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
+-	if (!sample_attr)
+-		return -ENOMEM;
++	struct mlx5e_sample_attr *sample_attr = &attr->sample_attr;
+ 
+ 	sample_attr->rate = act->sample.rate;
+ 	sample_attr->group_num = act->sample.psample_group->group_num;
+@@ -39,7 +35,6 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ 	if (act->sample.truncate)
+ 		sample_attr->trunc_size = act->sample.trunc_size;
+ 
+-	attr->sample_attr = sample_attr;
+ 	flow_flag_set(parse_state->flow, SAMPLE);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+index ff4b4f8a5a9db..0faaf9a4b5317 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+@@ -513,7 +513,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 	sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
+ 	if (!sample_flow)
+ 		return ERR_PTR(-ENOMEM);
+-	sample_attr = attr->sample_attr;
++	sample_attr = &attr->sample_attr;
+ 	sample_attr->sample_flow = sample_flow;
+ 
+ 	/* For NICs with reg_c_preserve support or decap action, use
+@@ -546,6 +546,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 		err = PTR_ERR(sample_flow->sampler);
+ 		goto err_sampler;
+ 	}
++	sample_attr->sampler_id = sample_flow->sampler->sampler_id;
+ 
+ 	/* Create an id mapping reg_c0 value to sample object. */
+ 	restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
+@@ -585,8 +586,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ 	pre_attr->outer_match_level = attr->outer_match_level;
+ 	pre_attr->chain = attr->chain;
+ 	pre_attr->prio = attr->prio;
+-	pre_attr->sample_attr = attr->sample_attr;
+-	sample_attr->sampler_id = sample_flow->sampler->sampler_id;
++	pre_attr->sample_attr = *sample_attr;
+ 	pre_esw_attr = pre_attr->esw_attr;
+ 	pre_esw_attr->in_mdev = esw_attr->in_mdev;
+ 	pre_esw_attr->in_rep = esw_attr->in_rep;
+@@ -633,11 +633,11 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ 	 * will hit fw syndromes.
+ 	 */
+ 	esw = tc_psample->esw;
+-	sample_flow = attr->sample_attr->sample_flow;
++	sample_flow = attr->sample_attr.sample_flow;
+ 	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
+ 
+ 	sample_restore_put(tc_psample, sample_flow->restore);
+-	mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
++	mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
+ 	sampler_put(tc_psample, sample_flow->sampler);
+ 	if (sample_flow->post_act_handle)
+ 		mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3667f5ef5990f..169e3524bb1c7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5345,6 +5345,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
+ 	}
+ 
+ 	netif_carrier_off(netdev);
++	netif_tx_disable(netdev);
+ 	dev_net_set(netdev, mlx5_core_net(mdev));
+ 
+ 	return netdev;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index b27532a9301e7..7e5c00349ccf9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1634,7 +1634,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ 	if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ 		mlx5e_detach_decap(priv, flow);
+ 
+-	kfree(attr->sample_attr);
+ 	kvfree(attr->esw_attr->rx_tun_attr);
+ 	kvfree(attr->parse_attr);
+ 	kfree(flow->attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+index 5ffae9b130665..2f09e34db9ffe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+@@ -71,7 +71,7 @@ struct mlx5_flow_attr {
+ 	struct mlx5_fc *counter;
+ 	struct mlx5_modify_hdr *modify_hdr;
+ 	struct mlx5_ct_attr ct_attr;
+-	struct mlx5e_sample_attr *sample_attr;
++	struct mlx5e_sample_attr sample_attr;
+ 	struct mlx5e_tc_flow_parse_attr *parse_attr;
+ 	u32 chain;
+ 	u16 prio;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index cfcd72bad9af6..e7e7b4b0dcdb5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -201,12 +201,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ static int
+ esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
+ 		       struct mlx5_flow_act *flow_act,
+-		       struct mlx5_flow_attr *attr,
++		       u32 sampler_id,
+ 		       int i)
+ {
+ 	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+-	dest[i].sampler_id = attr->sample_attr->sampler_id;
++	dest[i].sampler_id = sampler_id;
+ 
+ 	return 0;
+ }
+@@ -466,7 +466,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
+ 		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ 
+ 	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
+-		esw_setup_sampler_dest(dest, flow_act, attr, *i);
++		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
+ 		(*i)++;
+ 	} else if (attr->dest_ft) {
+ 		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 7b16a1188aabb..fd79860de723b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -433,35 +433,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ 				     struct mlx5_module_eeprom_query_params *params,
+ 				     u8 *data)
+ {
+-	u8 module_id;
+ 	int err;
+ 
+ 	err = mlx5_query_module_num(dev, &params->module_number);
+ 	if (err)
+ 		return err;
+ 
+-	err = mlx5_query_module_id(dev, params->module_number, &module_id);
+-	if (err)
+-		return err;
+-
+-	switch (module_id) {
+-	case MLX5_MODULE_ID_SFP:
+-		if (params->page > 0)
+-			return -EINVAL;
+-		break;
+-	case MLX5_MODULE_ID_QSFP:
+-	case MLX5_MODULE_ID_QSFP28:
+-	case MLX5_MODULE_ID_QSFP_PLUS:
+-		if (params->page > 3)
+-			return -EINVAL;
+-		break;
+-	case MLX5_MODULE_ID_DSFP:
+-		break;
+-	default:
+-		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+-		return -EINVAL;
+-	}
+-
+ 	if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
+ 	    params->i2c_address != MLX5_I2C_ADDR_LOW) {
+ 		mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index aa411dec62f00..eb1319d63613e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2148,13 +2148,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+ 	struct mlxsw_sp *mlxsw_sp = priv;
+ 	struct mlxsw_sp_port *mlxsw_sp_port;
+ 	enum mlxsw_reg_pude_oper_status status;
+-	unsigned int max_ports;
+ 	u16 local_port;
+ 
+-	max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ 	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ 
+-	if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
++	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ 		return;
+ 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ 	if (!mlxsw_sp_port)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index bb2442e1f7052..30942b6ffcf99 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -481,6 +481,13 @@ int
+ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ 				      bool is_8021ad_tagged,
+ 				      bool is_8021q_tagged);
++static inline bool
++mlxsw_sp_local_port_is_valid(struct mlxsw_sp *mlxsw_sp, u16 local_port)
++{
++	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
++
++	return local_port < max_ports && local_port;
++}
+ 
+ /* spectrum_buffers.c */
+ struct mlxsw_sp_hdroom_prio {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+index 0ff163fbc7750..35422e64d89fc 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+@@ -568,12 +568,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
+ 				 u8 domain_number, u16 sequence_id,
+ 				 u64 timestamp)
+ {
+-	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ 	struct mlxsw_sp_port *mlxsw_sp_port;
+ 	struct mlxsw_sp1_ptp_key key;
+ 	u8 types;
+ 
+-	if (WARN_ON_ONCE(local_port >= max_ports))
++	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ 		return;
+ 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ 	if (!mlxsw_sp_port)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 65c1724c63b0a..bffdb41fc4edc 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -2616,7 +2616,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ 					    char *sfn_pl, int rec_index,
+ 					    bool adding)
+ {
+-	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ 	struct mlxsw_sp_bridge_device *bridge_device;
+ 	struct mlxsw_sp_bridge_port *bridge_port;
+@@ -2630,7 +2629,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ 
+ 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
+ 
+-	if (WARN_ON_ONCE(local_port >= max_ports))
++	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
+ 		return;
+ 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ 	if (!mlxsw_sp_port) {
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index e3edca187ddfa..5250d1d1e49ca 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -489,7 +489,7 @@ struct split_type_defs {
+ 
+ #define STATIC_DEBUG_LINE_DWORDS	9
+ 
+-#define NUM_COMMON_GLOBAL_PARAMS	11
++#define NUM_COMMON_GLOBAL_PARAMS	10
+ 
+ #define MAX_RECURSION_DEPTH		10
+ 
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+index b242000a77fd8..b7cc36589f592 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
+@@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
+ 	buf = page_address(bd->data) + bd->page_offset;
+ 	skb = build_skb(buf, rxq->rx_buf_seg_size);
+ 
++	if (unlikely(!skb))
++		return NULL;
++
+ 	skb_reserve(skb, pad);
+ 	skb_put(skb, len);
+ 
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index ead550ae27097..40bfd0ad7d053 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -764,6 +764,85 @@ void efx_remove_channels(struct efx_nic *efx)
+ 	kfree(efx->xdp_tx_queues);
+ }
+ 
++static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
++				struct efx_tx_queue *tx_queue)
++{
++	if (xdp_queue_number >= efx->xdp_tx_queue_count)
++		return -EINVAL;
++
++	netif_dbg(efx, drv, efx->net_dev,
++		  "Channel %u TXQ %u is XDP %u, HW %u\n",
++		  tx_queue->channel->channel, tx_queue->label,
++		  xdp_queue_number, tx_queue->queue);
++	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
++	return 0;
++}
++
++static void efx_set_xdp_channels(struct efx_nic *efx)
++{
++	struct efx_tx_queue *tx_queue;
++	struct efx_channel *channel;
++	unsigned int next_queue = 0;
++	int xdp_queue_number = 0;
++	int rc;
++
++	/* We need to mark which channels really have RX and TX
++	 * queues, and adjust the TX queue numbers if we have separate
++	 * RX-only and TX-only channels.
++	 */
++	efx_for_each_channel(channel, efx) {
++		if (channel->channel < efx->tx_channel_offset)
++			continue;
++
++		if (efx_channel_is_xdp_tx(channel)) {
++			efx_for_each_channel_tx_queue(tx_queue, channel) {
++				tx_queue->queue = next_queue++;
++				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
++							  tx_queue);
++				if (rc == 0)
++					xdp_queue_number++;
++			}
++		} else {
++			efx_for_each_channel_tx_queue(tx_queue, channel) {
++				tx_queue->queue = next_queue++;
++				netif_dbg(efx, drv, efx->net_dev,
++					  "Channel %u TXQ %u is HW %u\n",
++					  channel->channel, tx_queue->label,
++					  tx_queue->queue);
++			}
++
++			/* If XDP is borrowing queues from the net stack, it
++			 * must use the queue with no csum offload, which is
++			 * the first one of the channel
++			 * (note: tx_queue_by_type is not initialized yet).
++			 */
++			if (efx->xdp_txq_queues_mode ==
++			    EFX_XDP_TX_QUEUES_BORROWED) {
++				tx_queue = &channel->tx_queue[0];
++				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
++							  tx_queue);
++				if (rc == 0)
++					xdp_queue_number++;
++			}
++		}
++	}
++	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
++		xdp_queue_number != efx->xdp_tx_queue_count);
++	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
++		xdp_queue_number > efx->xdp_tx_queue_count);
++
++	/* If we have more CPUs than assigned XDP TX queues, assign the
++	 * already existing queues to the excess CPUs
++	 */
++	next_queue = 0;
++	while (xdp_queue_number < efx->xdp_tx_queue_count) {
++		tx_queue = efx->xdp_tx_queues[next_queue++];
++		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
++		if (rc == 0)
++			xdp_queue_number++;
++	}
++}
++
+ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ {
+ 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+@@ -835,6 +914,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ 		efx_init_napi_channel(efx->channel[i]);
+ 	}
+ 
++	efx_set_xdp_channels(efx);
+ out:
+ 	/* Destroy unused channel structures */
+ 	for (i = 0; i < efx->n_channels; i++) {
+@@ -867,26 +947,9 @@ rollback:
+ 	goto out;
+ }
+ 
+-static inline int
+-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+-		     struct efx_tx_queue *tx_queue)
+-{
+-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+-		return -EINVAL;
+-
+-	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+-		  tx_queue->channel->channel, tx_queue->label,
+-		  xdp_queue_number, tx_queue->queue);
+-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+-	return 0;
+-}
+-
+ int efx_set_channels(struct efx_nic *efx)
+ {
+-	struct efx_tx_queue *tx_queue;
+ 	struct efx_channel *channel;
+-	unsigned int next_queue = 0;
+-	int xdp_queue_number;
+ 	int rc;
+ 
+ 	efx->tx_channel_offset =
+@@ -904,61 +967,14 @@ int efx_set_channels(struct efx_nic *efx)
+ 			return -ENOMEM;
+ 	}
+ 
+-	/* We need to mark which channels really have RX and TX
+-	 * queues, and adjust the TX queue numbers if we have separate
+-	 * RX-only and TX-only channels.
+-	 */
+-	xdp_queue_number = 0;
+ 	efx_for_each_channel(channel, efx) {
+ 		if (channel->channel < efx->n_rx_channels)
+ 			channel->rx_queue.core_index = channel->channel;
+ 		else
+ 			channel->rx_queue.core_index = -1;
+-
+-		if (channel->channel >= efx->tx_channel_offset) {
+-			if (efx_channel_is_xdp_tx(channel)) {
+-				efx_for_each_channel_tx_queue(tx_queue, channel) {
+-					tx_queue->queue = next_queue++;
+-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+-					if (rc == 0)
+-						xdp_queue_number++;
+-				}
+-			} else {
+-				efx_for_each_channel_tx_queue(tx_queue, channel) {
+-					tx_queue->queue = next_queue++;
+-					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
+-						  channel->channel, tx_queue->label,
+-						  tx_queue->queue);
+-				}
+-
+-				/* If XDP is borrowing queues from net stack, it must use the queue
+-				 * with no csum offload, which is the first one of the channel
+-				 * (note: channel->tx_queue_by_type is not initialized yet)
+-				 */
+-				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+-					tx_queue = &channel->tx_queue[0];
+-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+-					if (rc == 0)
+-						xdp_queue_number++;
+-				}
+-			}
+-		}
+ 	}
+-	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+-		xdp_queue_number != efx->xdp_tx_queue_count);
+-	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+-		xdp_queue_number > efx->xdp_tx_queue_count);
+ 
+-	/* If we have more CPUs than assigned XDP TX queues, assign the already
+-	 * existing queues to the exceeding CPUs
+-	 */
+-	next_queue = 0;
+-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+-		tx_queue = efx->xdp_tx_queues[next_queue++];
+-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+-		if (rc == 0)
+-			xdp_queue_number++;
+-	}
++	efx_set_xdp_channels(efx);
+ 
+ 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ 	if (rc)
+@@ -1102,7 +1118,7 @@ void efx_start_channels(struct efx_nic *efx)
+ 	struct efx_rx_queue *rx_queue;
+ 	struct efx_channel *channel;
+ 
+-	efx_for_each_channel(channel, efx) {
++	efx_for_each_channel_rev(channel, efx) {
+ 		efx_for_each_channel_tx_queue(tx_queue, channel) {
+ 			efx_init_tx_queue(tx_queue);
+ 			atomic_inc(&efx->active_queues);
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index 633ca77a26fd1..b925de9b43028 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -166,6 +166,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+ 	struct efx_nic *efx = rx_queue->efx;
+ 	int i;
+ 
++	if (unlikely(!rx_queue->page_ring))
++		return;
++
+ 	/* Unmap and release the pages in the recycle ring. Remove the ring. */
+ 	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ 		struct page *page = rx_queue->page_ring[i];
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index d16e031e95f44..6983799e1c05d 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ 	if (unlikely(!tx_queue))
+ 		return -EINVAL;
+ 
++	if (!tx_queue->initialised)
++		return -EINVAL;
++
+ 	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ 		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
+ 
+diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
+index d530cde2b8648..9bc8281b7f5bd 100644
+--- a/drivers/net/ethernet/sfc/tx_common.c
++++ b/drivers/net/ethernet/sfc/tx_common.c
+@@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+ 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ 		  "shutting down TX queue %d\n", tx_queue->queue);
+ 
++	tx_queue->initialised = false;
++
+ 	if (!tx_queue->buffer)
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 5d29f336315b7..11e1055e8260f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ 	plat->phylink_node = np;
+ 
+ 	/* Get max speed of operation from device tree */
+-	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
+-		plat->max_speed = -1;
++	of_property_read_u32(np, "max-speed", &plat->max_speed);
+ 
+ 	plat->bus_id = of_alias_get_id(np, "ethernet");
+ 	if (plat->bus_id < 0)
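
The removed fallback was actively harmful because of_property_read_u32() leaves its output untouched on failure: writing -1 into max_speed afterwards clobbered the sane default the caller had already set. The idiom is to pre-seed the default and let only a successful read overwrite it:

    u32 max_speed = SPEED_1000; /* assumed default set by earlier code */

    /* On failure (property absent or malformed) max_speed keeps its
     * previous value; only a successful read overwrites it. */
    of_property_read_u32(np, "max-speed", &max_speed);
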
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 6b12902a803f0..cecf8c63096cd 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev)
+ 	dev->tx_queue_len = TUN_READQ_SIZE;
+ }
+ 
++static struct net *macvtap_link_net(const struct net_device *dev)
++{
++	return dev_net(macvlan_dev_real_dev(dev));
++}
++
+ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+ 	.kind		= "macvtap",
+ 	.setup		= macvtap_setup,
+ 	.newlink	= macvtap_newlink,
+ 	.dellink	= macvtap_dellink,
++	.get_link_net	= macvtap_link_net,
+ 	.priv_size      = sizeof(struct macvtap_dev),
+ };
+ 
+diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
+index 64fb76c1e3959..08381038810d6 100644
+--- a/drivers/net/mdio/mdio-mscc-miim.c
++++ b/drivers/net/mdio/mdio-mscc-miim.c
+@@ -93,6 +93,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
+ 	u32 val;
+ 	int ret;
+ 
++	if (regnum & MII_ADDR_C45)
++		return -EOPNOTSUPP;
++
+ 	ret = mscc_miim_wait_pending(bus);
+ 	if (ret)
+ 		goto out;
+@@ -136,6 +139,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
+ 	struct mscc_miim_dev *miim = bus->priv;
+ 	int ret;
+ 
++	if (regnum & MII_ADDR_C45)
++		return -EOPNOTSUPP;
++
+ 	ret = mscc_miim_wait_pending(bus);
+ 	if (ret < 0)
+ 		goto out;
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index c1512c9925a66..15aa5ac1ff49c 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -74,6 +74,12 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 		.vendor = "HUAWEI",
+ 		.part = "MA5671A",
+ 		.modes = sfp_quirk_2500basex,
++	}, {
++		// The Lantech 8330-262D-E can operate at 2500base-X, but
++		// incorrectly reports 2500MBd NRZ in its EEPROM
++		.vendor = "Lantech",
++		.part = "8330-262D-E",
++		.modes = sfp_quirk_2500basex,
+ 	}, {
+ 		.vendor = "UBNT",
+ 		.part = "UF-INSTANT",
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 8e3a28ba6b282..ba2ef5437e167 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1198,7 +1198,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m,
+ 	struct xdp_buff *xdp;
+ 	int i;
+ 
+-	if (ctl && (ctl->type == TUN_MSG_PTR)) {
++	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
++	    ctl && ctl->type == TUN_MSG_PTR) {
+ 		for (i = 0; i < ctl->num; i++) {
+ 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
+ 			tap_get_user_xdp(q, xdp);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index fed85447701a5..de999e0fedbca 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2489,7 +2489,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+ 	if (!tun)
+ 		return -EBADFD;
+ 
+-	if (ctl && (ctl->type == TUN_MSG_PTR)) {
++	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
++	    ctl && ctl->type == TUN_MSG_PTR) {
+ 		struct tun_page tpage;
+ 		int n = ctl->num;
+ 		int flush = 0;
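
tap_sendmsg() and tun_sendmsg() receive msg_control from in-kernel callers such as vhost-net; before interpreting it as a struct tun_msg_ctl, the fix demands that msg_controllen equal the structure's size, so a caller handing over some other control payload cannot be misparsed as a TUN_MSG_PTR batch of XDP buffers. The general validate-before-cast shape:

    struct tun_msg_ctl *ctl = m->msg_control;

    /* Trust the pointer only if the declared length proves the caller
     * really passed a tun_msg_ctl. */
    if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
        ctl && ctl->type == TUN_MSG_PTR) {
            /* safe: ctl->num entries at ctl->ptr are struct xdp_buff */
    }
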
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index e0b1ab99a359e..f37adcef4bef3 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1266,6 +1266,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
+ 	eth = (struct ethhdr *)skb->data;
+ 
+ 	skb_reset_mac_header(skb);
++	skb_reset_mac_len(skb);
+ 
+ 	/* we set the ethernet destination and the source addresses to the
+ 	 * address of the VRF device.
+@@ -1295,9 +1296,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
+  */
+ static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
+ 				       struct net_device *vrf_dev,
+-				       u16 proto)
++				       u16 proto, struct net_device *orig_dev)
+ {
+-	if (skb_mac_header_was_set(skb))
++	if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
+ 		return 0;
+ 
+ 	return vrf_prepare_mac_header(skb, vrf_dev, proto);
+@@ -1403,6 +1404,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 
+ 	/* if packet is NDISC then keep the ingress interface */
+ 	if (!is_ndisc) {
++		struct net_device *orig_dev = skb->dev;
++
+ 		vrf_rx_stats(vrf_dev, skb->len);
+ 		skb->dev = vrf_dev;
+ 		skb->skb_iif = vrf_dev->ifindex;
+@@ -1411,7 +1414,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 			int err;
+ 
+ 			err = vrf_add_mac_header_if_unset(skb, vrf_dev,
+-							  ETH_P_IPV6);
++							  ETH_P_IPV6,
++							  orig_dev);
+ 			if (likely(!err)) {
+ 				skb_push(skb, skb->mac_len);
+ 				dev_queue_xmit_nit(skb, vrf_dev);
+@@ -1441,6 +1445,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ 				  struct sk_buff *skb)
+ {
++	struct net_device *orig_dev = skb->dev;
++
+ 	skb->dev = vrf_dev;
+ 	skb->skb_iif = vrf_dev->ifindex;
+ 	IPCB(skb)->flags |= IPSKB_L3SLAVE;
+@@ -1461,7 +1467,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ 	if (!list_empty(&vrf_dev->ptype_all)) {
+ 		int err;
+ 
+-		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
++		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
++						  orig_dev);
+ 		if (likely(!err)) {
+ 			skb_push(skb, skb->mac_len);
+ 			dev_queue_xmit_nit(skb, vrf_dev);
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 3fb0aa0008259..24bd0520926bf 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+ 
+ 		for (j = 0; j < irq_grp->num_irq; j++)
+ 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
++
++		netif_napi_del(&irq_grp->napi);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 08e33778f63b8..28de877ad6c47 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -5574,7 +5574,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ 
+ 	skb_queue_tail(q, skb);
+ 	atomic_inc(&ar->num_pending_mgmt_tx);
+-	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
++	queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index cccaa348cf212..8b21438028169 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -561,7 +561,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
+ 		ret = 0;
+ 		break;
+ 	case ATH11K_MHI_POWER_ON:
+-		ret = mhi_async_power_up(ab_pci->mhi_ctrl);
++		ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
+ 		break;
+ 	case ATH11K_MHI_POWER_OFF:
+ 		mhi_power_down(ab_pci->mhi_ctrl, true);
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index de71ad594f347..903758751c99a 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -1571,6 +1571,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
+ 	struct ath11k_base *ab = dev_get_drvdata(dev);
+ 	int ret;
+ 
++	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
++		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
++		return 0;
++	}
++
+ 	ret = ath11k_core_suspend(ab);
+ 	if (ret)
+ 		ath11k_warn(ab, "failed to suspend core: %d\n", ret);
+@@ -1583,6 +1588,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
+ 	struct ath11k_base *ab = dev_get_drvdata(dev);
+ 	int ret;
+ 
++	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
++		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
++		return 0;
++	}
++
+ 	ret = ath11k_core_resume(ab);
+ 	if (ret)
+ 		ath11k_warn(ab, "failed to resume core: %d\n", ret);
+diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
+index 1fbc2c19848f2..d444b3d70ba2e 100644
+--- a/drivers/net/wireless/ath/ath5k/eeprom.c
++++ b/drivers/net/wireless/ath/ath5k/eeprom.c
+@@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
+ 			}
+ 		}
+ 
++		if (idx == AR5K_EEPROM_N_PD_CURVES)
++			goto err_out;
++
+ 		ee->ee_pd_gains[mode] = 1;
+ 
+ 		pd = &chinfo[pier].pd_curves[idx];
+diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
+index 85e7042837556..a647a406b87be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
++++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
+@@ -139,6 +139,7 @@ config IWLMEI
+ 	tristate "Intel Management Engine communication over WLAN"
+ 	depends on INTEL_MEI
+ 	depends on PM
++	depends on CFG80211
+ 	help
+ 	  Enables the iwlmei kernel module.
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+index 456b7eaac5700..061fe6cc6cf5b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2018-2021 Intel Corporation
++ * Copyright (C) 2018-2022 Intel Corporation
+  */
+ #ifndef __iwl_fw_dbg_tlv_h__
+ #define __iwl_fw_dbg_tlv_h__
+@@ -249,11 +249,10 @@ struct iwl_fw_ini_hcmd_tlv {
+ } __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
+ 
+ /**
+-* struct iwl_fw_ini_conf_tlv - preset configuration TLV
++* struct iwl_fw_ini_addr_val - Address and value to set it to
+ *
+ * @address: the base address
+ * @value: value to set at address
+-
+ */
+ struct iwl_fw_ini_addr_val {
+ 	__le32 address;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+index 9af40b0fa37ae..a6e6673bf4ee0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
++ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+  * Copyright (C) 2017 Intel Deutschland GmbH
+  */
+@@ -349,18 +349,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+ 	 * otherwise we might not be able to reuse this phy.
+ 	 */
+ 	if (ctxt->ref == 0) {
+-		struct ieee80211_channel *chan;
++		struct ieee80211_channel *chan = NULL;
+ 		struct cfg80211_chan_def chandef;
+-		struct ieee80211_supported_band *sband = NULL;
+-		enum nl80211_band band = NL80211_BAND_2GHZ;
++		struct ieee80211_supported_band *sband;
++		enum nl80211_band band;
++		int channel;
+ 
+-		while (!sband && band < NUM_NL80211_BANDS)
+-			sband = mvm->hw->wiphy->bands[band++];
++		for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
++			sband = mvm->hw->wiphy->bands[band];
+ 
+-		if (WARN_ON(!sband))
+-			return;
++			if (!sband)
++				continue;
++
++			for (channel = 0; channel < sband->n_channels; channel++)
++				if (!(sband->channels[channel].flags &
++						IEEE80211_CHAN_DISABLED)) {
++					chan = &sband->channels[channel];
++					break;
++				}
+ 
+-		chan = &sband->channels[0];
++			if (chan)
++				break;
++		}
++
++		if (WARN_ON(!chan))
++			return;
+ 
+ 		cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+ 		iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 5f92a09db3742..4cd507cb412de 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1893,7 +1893,10 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
+ 			IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+ 
+ 	/* set fragmented ebs for fragmented scan on HB channels */
+-	if (iwl_mvm_is_scan_fragmented(params->hb_type))
++	if ((!iwl_mvm_is_cdb_supported(mvm) &&
++	     iwl_mvm_is_scan_fragmented(params->type)) ||
++	    (iwl_mvm_is_cdb_supported(mvm) &&
++	     iwl_mvm_is_scan_fragmented(params->hb_type)))
+ 		flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ 
+ 	return flags;
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 3a9af8931c35a..3d644925a4e04 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -465,6 +465,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 		qbuf.addr = addr + offset;
+ 		qbuf.len = len - offset;
++		qbuf.skip_unmap = false;
+ 		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ 		frames++;
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 1f6f7a44d3f00..5197fcb066492 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -19,7 +19,7 @@
+ 
+ #define MT_MCU_RING_SIZE	32
+ #define MT_RX_BUF_SIZE		2048
+-#define MT_SKB_HEAD_LEN		128
++#define MT_SKB_HEAD_LEN		256
+ 
+ #define MT_MAX_NON_AQL_PKT	16
+ #define MT_TXQ_FREE_THR		32
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index ba31bb7caaf90..5d69e77814c9d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1841,7 +1841,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
+ 	struct mt7615_dev *dev = phy->dev;
+ 	int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
+ 	bool ext_phy = phy != &dev->phy;
+-	u16 def_th = ofdm ? -98 : -110;
++	s16 def_th = ofdm ? -98 : -110;
+ 	bool update = false;
+ 	s8 *sensitivity;
+ 	int signal;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index db267642924d0..e4c300aa15260 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1085,6 +1085,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
+ 		val = MT_TXD3_SN_VALID |
+ 		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ 		txwi[3] |= cpu_to_le32(val);
++		txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
+ 	}
+ 
+ 	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 7a8d2596c2265..4abb7a6e775af 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -273,6 +273,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
+ 
+ 	cancel_delayed_work_sync(&dev->pm.ps_work);
+ 	cancel_work_sync(&dev->pm.wake_work);
++	cancel_work_sync(&dev->reset_work);
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+ 
+ 	mt7921_mutex_acquire(dev);
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index e429428232c15..e7e9f17df96a3 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -390,7 +390,7 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
+ 		     &param[0], &param[1], &param[2], &param[3],
+ 		     &param[4], &param[5], &param[6], &param[7]);
+ 	if (num != 8) {
+-		rtw_info(rtwdev, "invalid H2C command format for debug\n");
++		rtw_warn(rtwdev, "invalid H2C command format for debug\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
+index 61f8369fe2d61..066792dd96afb 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.h
++++ b/drivers/net/wireless/realtek/rtw88/debug.h
+@@ -23,6 +23,7 @@ enum rtw_debug_mask {
+ 	RTW_DBG_PATH_DIV	= 0x00004000,
+ 	RTW_DBG_ADAPTIVITY	= 0x00008000,
+ 	RTW_DBG_HW_SCAN		= 0x00010000,
++	RTW_DBG_STATE		= 0x00020000,
+ 
+ 	RTW_DBG_ALL		= 0xffffffff
+ };
+diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
+index 4c8e5ea5d069c..db90d75a86339 100644
+--- a/drivers/net/wireless/realtek/rtw88/fw.c
++++ b/drivers/net/wireless/realtek/rtw88/fw.c
+@@ -2131,7 +2131,7 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
+ 	rtw_hw_scan_complete(rtwdev, vif, aborted);
+ 
+ 	if (aborted)
+-		rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
++		rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
+ }
+ 
+ void rtw_store_op_chan(struct rtw_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index 647d2662955ba..5cdc54c9a9aae 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -208,7 +208,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
+ 
+ 	mutex_unlock(&rtwdev->mutex);
+ 
+-	rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
++	rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ 	return 0;
+ }
+ 
+@@ -219,7 +219,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
+ 	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ 	u32 config = 0;
+ 
+-	rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
++	rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ 
+ 	mutex_lock(&rtwdev->mutex);
+ 
+@@ -245,8 +245,8 @@ static int rtw_ops_change_interface(struct ieee80211_hw *hw,
+ {
+ 	struct rtw_dev *rtwdev = hw->priv;
+ 
+-	rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+-		 vif->addr, vif->type, type, vif->p2p, p2p);
++	rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
++		vif->addr, vif->type, type, vif->p2p, p2p);
+ 
+ 	rtw_ops_remove_interface(hw, vif);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 39c223a2e3e2d..b00200f81db7d 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -314,8 +314,8 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 
+ 	rtwdev->sta_cnt++;
+ 	rtwdev->beacon_loss = false;
+-	rtw_info(rtwdev, "sta %pM joined with macid %d\n",
+-		 sta->addr, si->mac_id);
++	rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n",
++		sta->addr, si->mac_id);
+ 
+ 	return 0;
+ }
+@@ -336,8 +336,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ 	kfree(si->mask);
+ 
+ 	rtwdev->sta_cnt--;
+-	rtw_info(rtwdev, "sta %pM with macid %d left\n",
+-		 sta->addr, si->mac_id);
++	rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n",
++		sta->addr, si->mac_id);
+ }
+ 
+ struct rtw_fwcd_hdr {
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index db078df63f855..80d4761796b15 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -499,7 +499,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
+ 	}
+ 
+ 	if (lna_idx >= lna_gain_table_size) {
+-		rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
++		rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ 		return -120;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+index dd4fbb82750d5..a23806b69b0fa 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+@@ -1012,12 +1012,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
+ 		antenna_tx, antenna_rx);
+ 
+ 	if (!rtw8822b_check_rf_path(antenna_tx)) {
+-		rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
++		rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (!rtw8822b_check_rf_path(antenna_rx)) {
+-		rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
++		rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index 35c46e5209de3..ddf4d1a23e605 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -2798,7 +2798,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
+ 	case BB_PATH_AB:
+ 		break;
+ 	default:
+-		rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
++		rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2808,7 +2808,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
+ 	case BB_PATH_AB:
+ 		break;
+ 	default:
+-		rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
++		rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
+index 3383726c4d90f..c472f1502b82a 100644
+--- a/drivers/net/wireless/realtek/rtw88/sar.c
++++ b/drivers/net/wireless/realtek/rtw88/sar.c
+@@ -91,10 +91,10 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
+ 			return -EINVAL;
+ 
+ 		power = sar->sub_specs[i].power;
+-		rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+-			 rtw_common_sar_freq_ranges[idx].start_freq,
+-			 rtw_common_sar_freq_ranges[idx].end_freq,
+-			 power, BIT(RTW_COMMON_SAR_FCT));
++		rtw_dbg(rtwdev, RTW_DBG_REGD, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
++			rtw_common_sar_freq_ranges[idx].start_freq,
++			rtw_common_sar_freq_ranges[idx].end_freq,
++			power, BIT(RTW_COMMON_SAR_FCT));
+ 
+ 		for (j = 0; j < RTW_RF_PATH_MAX; j++) {
+ 			for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index a0737eea9f81d..9632e7f218dda 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -1509,11 +1509,12 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
+ 	unsigned long i;
+ 	int ret;
+ 
++	rcu_read_lock();
+ 	for (i = 0; i < frame_cnt; i++) {
+ 		skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
+ 		if (!skb) {
+ 			rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
+-			return;
++			goto out;
+ 		}
+ 		rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
+ 		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
+@@ -1523,6 +1524,8 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
+ 			break;
+ 		}
+ 	}
++out:
++	rcu_read_unlock();
+ }
+ 
+ static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
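/*
 * A minimal sketch (not part of the upstream patch) of the rtw89 fix
 * above: the dequeue loop runs inside an RCU read-side critical
 * section, and the early return becomes "goto out" so the unlock runs
 * on every exit path. next_item()/consume_item() are hypothetical
 * stand-ins for the ieee80211 dequeue and driver TX-write calls.
 */
#include <linux/rcupdate.h>

static void *next_item(void);		/* hypothetical */
static void consume_item(void *item);	/* hypothetical */

static void drain_queue_sketch(unsigned long cnt)
{
	unsigned long i;

	rcu_read_lock();
	for (i = 0; i < cnt; i++) {
		void *item = next_item();

		if (!item)
			goto out;	/* still unlocks below */
		consume_item(item);
	}
out:
	rcu_read_unlock();
}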
+diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
+index 596c185b5dda4..b5f2f9f393926 100644
+--- a/drivers/opp/debugfs.c
++++ b/drivers/opp/debugfs.c
+@@ -10,6 +10,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
++#include <linux/of.h>
+ #include <linux/init.h>
+ #include <linux/limits.h>
+ #include <linux/slab.h>
+@@ -131,9 +132,13 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+ 	debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+ 	debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+ 	debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
++	debugfs_create_u32("level", S_IRUGO, d, &opp->level);
+ 	debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ 			     &opp->clock_latency_ns);
+ 
++	opp->of_name = of_node_full_name(opp->np);
++	debugfs_create_str("of_name", S_IRUGO, d, (char **)&opp->of_name);
++
+ 	opp_debug_create_supplies(opp, opp_table, d);
+ 	opp_debug_create_bw(opp, opp_table, d);
+ 
+diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
+index 407c3bfe51d96..45e3a55239a13 100644
+--- a/drivers/opp/opp.h
++++ b/drivers/opp/opp.h
+@@ -96,6 +96,7 @@ struct dev_pm_opp {
+ 
+ #ifdef CONFIG_DEBUG_FS
+ 	struct dentry *dentry;
++	const char *of_name;
+ #endif
+ };
+ 
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 952a92504df69..e33036281327d 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -142,9 +142,8 @@ struct dino_device
+ {
+ 	struct pci_hba_data	hba;	/* 'C' inheritance - must be first */
+ 	spinlock_t		dinosaur_pen;
+-	unsigned long		txn_addr; /* EIR addr to generate interrupt */ 
+-	u32			txn_data; /* EIR data assign to each dino */ 
+ 	u32 			imr;	  /* IRQ's which are enabled */ 
++	struct gsc_irq		gsc_irq;
+ 	int			global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
+ #ifdef DINO_DEBUG
+ 	unsigned int		dino_irr0; /* save most recent IRQ line stat */
+@@ -339,14 +338,43 @@ static void dino_unmask_irq(struct irq_data *d)
+ 	if (tmp & DINO_MASK_IRQ(local_irq)) {
+ 		DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
+ 				__func__, tmp);
+-		gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
++		gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr);
+ 	}
+ }
+ 
++#ifdef CONFIG_SMP
++static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
++				bool force)
++{
++	struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
++	struct cpumask tmask;
++	int cpu_irq;
++	u32 eim;
++
++	if (!cpumask_and(&tmask, dest, cpu_online_mask))
++		return -EINVAL;
++
++	cpu_irq = cpu_check_affinity(d, &tmask);
++	if (cpu_irq < 0)
++		return cpu_irq;
++
++	dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
++	eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
++	__raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
++
++	irq_data_update_effective_affinity(d, &tmask);
++
++	return IRQ_SET_MASK_OK;
++}
++#endif
++
+ static struct irq_chip dino_interrupt_type = {
+ 	.name		= "GSC-PCI",
+ 	.irq_unmask	= dino_unmask_irq,
+ 	.irq_mask	= dino_mask_irq,
++#ifdef CONFIG_SMP
++	.irq_set_affinity = dino_set_affinity_irq,
++#endif
+ };
+ 
+ 
+@@ -806,7 +834,6 @@ static int __init dino_common_init(struct parisc_device *dev,
+ {
+ 	int status;
+ 	u32 eim;
+-	struct gsc_irq gsc_irq;
+ 	struct resource *res;
+ 
+ 	pcibios_register_hba(&dino_dev->hba);
+@@ -821,10 +848,8 @@ static int __init dino_common_init(struct parisc_device *dev,
+ 	**   still only has 11 IRQ input lines - just map some of them
+ 	**   to a different processor.
+ 	*/
+-	dev->irq = gsc_alloc_irq(&gsc_irq);
+-	dino_dev->txn_addr = gsc_irq.txn_addr;
+-	dino_dev->txn_data = gsc_irq.txn_data;
+-	eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
++	dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq);
++	eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
+ 
+ 	/* 
+ 	** Dino needs a PA "IRQ" to get a processor's attention.
+diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
+index ed9371acf37eb..ec175ae998733 100644
+--- a/drivers/parisc/gsc.c
++++ b/drivers/parisc/gsc.c
+@@ -135,10 +135,41 @@ static void gsc_asic_unmask_irq(struct irq_data *d)
+ 	 */
+ }
+ 
++#ifdef CONFIG_SMP
++static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
++				bool force)
++{
++	struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d);
++	struct cpumask tmask;
++	int cpu_irq;
++
++	if (!cpumask_and(&tmask, dest, cpu_online_mask))
++		return -EINVAL;
++
++	cpu_irq = cpu_check_affinity(d, &tmask);
++	if (cpu_irq < 0)
++		return cpu_irq;
++
++	gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
++	gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
++
++	/* switch IRQ's for devices below LASI/WAX to other CPU */
++	gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
++
++	irq_data_update_effective_affinity(d, &tmask);
++
++	return IRQ_SET_MASK_OK;
++}
++#endif
++
++
+ static struct irq_chip gsc_asic_interrupt_type = {
+ 	.name		=	"GSC-ASIC",
+ 	.irq_unmask	=	gsc_asic_unmask_irq,
+ 	.irq_mask	=	gsc_asic_mask_irq,
++#ifdef CONFIG_SMP
++	.irq_set_affinity =	gsc_set_affinity_irq,
++#endif
+ };
+ 
+ int gsc_assign_irq(struct irq_chip *type, void *data)
+diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
+index 86abad3fa2150..73cbd0bb1975a 100644
+--- a/drivers/parisc/gsc.h
++++ b/drivers/parisc/gsc.h
+@@ -31,6 +31,7 @@ struct gsc_asic {
+ 	int version;
+ 	int type;
+ 	int eim;
++	struct gsc_irq gsc_irq;
+ 	int global_irq[32];
+ };
+ 
+diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
+index 4e4fd12c2112e..6ef621adb63a8 100644
+--- a/drivers/parisc/lasi.c
++++ b/drivers/parisc/lasi.c
+@@ -163,7 +163,6 @@ static int __init lasi_init_chip(struct parisc_device *dev)
+ {
+ 	extern void (*chassis_power_off)(void);
+ 	struct gsc_asic *lasi;
+-	struct gsc_irq gsc_irq;
+ 	int ret;
+ 
+ 	lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
+@@ -185,7 +184,7 @@ static int __init lasi_init_chip(struct parisc_device *dev)
+ 	lasi_init_irq(lasi);
+ 
+ 	/* the IRQ lasi should use */
+-	dev->irq = gsc_alloc_irq(&gsc_irq);
++	dev->irq = gsc_alloc_irq(&lasi->gsc_irq);
+ 	if (dev->irq < 0) {
+ 		printk(KERN_ERR "%s(): cannot get GSC irq\n",
+ 				__func__);
+@@ -193,9 +192,9 @@ static int __init lasi_init_chip(struct parisc_device *dev)
+ 		return -EBUSY;
+ 	}
+ 
+-	lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
++	lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data;
+ 
+-	ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
++	ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
+ 	if (ret < 0) {
+ 		kfree(lasi);
+ 		return ret;
+diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
+index 5b6df15162354..73a2b01f8d9ca 100644
+--- a/drivers/parisc/wax.c
++++ b/drivers/parisc/wax.c
+@@ -68,7 +68,6 @@ static int __init wax_init_chip(struct parisc_device *dev)
+ {
+ 	struct gsc_asic *wax;
+ 	struct parisc_device *parent;
+-	struct gsc_irq gsc_irq;
+ 	int ret;
+ 
+ 	wax = kzalloc(sizeof(*wax), GFP_KERNEL);
+@@ -85,7 +84,7 @@ static int __init wax_init_chip(struct parisc_device *dev)
+ 	wax_init_irq(wax);
+ 
+ 	/* the IRQ wax should use */
+-	dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
++	dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ);
+ 	if (dev->irq < 0) {
+ 		printk(KERN_ERR "%s(): cannot get GSC irq\n",
+ 				__func__);
+@@ -93,9 +92,9 @@ static int __init wax_init_chip(struct parisc_device *dev)
+ 		return -EBUSY;
+ 	}
+ 
+-	wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
++	wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data;
+ 
+-	ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
++	ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
+ 	if (ret < 0) {
+ 		kfree(wax);
+ 		return ret;
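/*
 * A minimal sketch (not part of the upstream patch) of the common
 * thread in the dino/gsc/lasi/wax hunks: the gsc_irq transaction data
 * moves from a probe-time stack variable into the long-lived device
 * struct, so the new .irq_set_affinity callbacks can re-derive and
 * reprogram the EIM value after probe. Assumes the gsc_irq layout from
 * drivers/parisc/gsc.h; write_iar() is a hypothetical stand-in for the
 * gsc_writel()/__raw_writel() to the IAR register.
 */
struct sketch_gsc_dev {
	struct gsc_irq gsc_irq;		/* persists beyond probe */
	u32 eim;
};

static void write_iar(u32 eim);		/* hypothetical */

static void sketch_retarget_irq(struct sketch_gsc_dev *d,
				unsigned long new_txn_addr)
{
	d->gsc_irq.txn_addr = new_txn_addr;
	d->eim = ((u32)d->gsc_irq.txn_addr) | d->gsc_irq.txn_data;
	write_iar(d->eim);		/* route the IRQ to the new CPU */
}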
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 82e2c618d532d..15348be1a8aa5 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -1186,7 +1186,7 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+ 
+ 	msg->address_lo = lower_32_bits(msi_msg);
+ 	msg->address_hi = upper_32_bits(msi_msg);
+-	msg->data = data->irq;
++	msg->data = data->hwirq;
+ }
+ 
+ static int advk_msi_set_affinity(struct irq_data *irq_data,
+@@ -1203,15 +1203,11 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+ 	int hwirq, i;
+ 
+ 	mutex_lock(&pcie->msi_used_lock);
+-	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
+-					   0, nr_irqs, 0);
+-	if (hwirq >= MSI_IRQ_NUM) {
+-		mutex_unlock(&pcie->msi_used_lock);
+-		return -ENOSPC;
+-	}
+-
+-	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
++	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
++					order_base_2(nr_irqs));
+ 	mutex_unlock(&pcie->msi_used_lock);
++	if (hwirq < 0)
++		return -ENOSPC;
+ 
+ 	for (i = 0; i < nr_irqs; i++)
+ 		irq_domain_set_info(domain, virq + i, hwirq + i,
+@@ -1229,7 +1225,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain,
+ 	struct advk_pcie *pcie = domain->host_data;
+ 
+ 	mutex_lock(&pcie->msi_used_lock);
+-	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
++	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
+ 	mutex_unlock(&pcie->msi_used_lock);
+ }
+ 
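/*
 * A minimal sketch (not part of the upstream patch) of the aardvark
 * MSI fix above: bitmap_find_free_region()/bitmap_release_region()
 * hand out naturally aligned power-of-two blocks, which multi-MSI
 * requires, unlike the bitmap_find_next_zero_area() call they replace.
 * Assumes a 32-entry vector table for illustration.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/log2.h>

#define SKETCH_MSI_NUM 32

static DECLARE_BITMAP(sketch_msi_used, SKETCH_MSI_NUM);

static int sketch_alloc_msi(unsigned int nr_irqs)
{
	int hwirq = bitmap_find_free_region(sketch_msi_used, SKETCH_MSI_NUM,
					    order_base_2(nr_irqs));

	return hwirq < 0 ? -ENOSPC : hwirq;
}

static void sketch_free_msi(int hwirq, unsigned int nr_irqs)
{
	bitmap_release_region(sketch_msi_used, hwirq, order_base_2(nr_irqs));
}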
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 90d84d3bc868f..5b833f00e9800 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -285,7 +285,17 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+ 		if (ret)
+ 			dev_err(dev, "Data transfer failed\n");
+ 	} else {
+-		memcpy(dst_addr, src_addr, reg->size);
++		void *buf;
++
++		buf = kzalloc(reg->size, GFP_KERNEL);
++		if (!buf) {
++			ret = -ENOMEM;
++			goto err_map_addr;
++		}
++
++		memcpy_fromio(buf, src_addr, reg->size);
++		memcpy_toio(dst_addr, buf, reg->size);
++		kfree(buf);
+ 	}
+ 	ktime_get_ts64(&end);
+ 	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);
+@@ -441,7 +451,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
+ 		if (!epf_test->dma_supported) {
+ 			dev_err(dev, "Cannot transfer data using DMA\n");
+ 			ret = -EINVAL;
+-			goto err_map_addr;
++			goto err_dma_map;
+ 		}
+ 
+ 		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
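/*
 * A minimal sketch (not part of the upstream patch) of the pci-epf-test
 * copy fix above: two __iomem mappings must not be copied with a plain
 * memcpy(), so the data is bounced through an ordinary kernel buffer
 * with memcpy_fromio()/memcpy_toio().
 */
#include <linux/io.h>
#include <linux/slab.h>

static int copy_mmio_sketch(void __iomem *dst, const void __iomem *src,
			    size_t size)
{
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	memcpy_fromio(buf, src, size);	/* MMIO -> RAM */
	memcpy_toio(dst, buf, size);	/* RAM -> MMIO */
	kfree(buf);
	return 0;
}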
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 85dce560831a8..040ae076ec0e9 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -1086,6 +1086,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
++			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
+diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
+index 7640491aab123..30234c261b05c 100644
+--- a/drivers/perf/qcom_l2_pmu.c
++++ b/drivers/perf/qcom_l2_pmu.c
+@@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
+ {
+ 	u64 mpidr;
+ 	int cpu_cluster_id;
+-	struct cluster_pmu *cluster = NULL;
++	struct cluster_pmu *cluster;
+ 
+ 	/*
+ 	 * This assumes that the cluster_id is in MPIDR[aff1] for
+@@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
+ 			 cluster->cluster_id);
+ 		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
+ 		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
+-		break;
++		return cluster;
+ 	}
+ 
+-	return cluster;
++	return NULL;
+ }
+ 
+ static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+index 2b3c0d730f20f..db17c3448bfed 100644
+--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
++++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+@@ -114,8 +114,10 @@ static int phy_meson_gxl_usb2_init(struct phy *phy)
+ 		return ret;
+ 
+ 	ret = clk_prepare_enable(priv->clk);
+-	if (ret)
++	if (ret) {
++		reset_control_rearm(priv->reset);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+@@ -125,6 +127,7 @@ static int phy_meson_gxl_usb2_exit(struct phy *phy)
+ 	struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy);
+ 
+ 	clk_disable_unprepare(priv->clk);
++	reset_control_rearm(priv->reset);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
+index cf10bed40528a..dd96763911b8b 100644
+--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
++++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
+@@ -154,6 +154,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
+ 	ret = clk_prepare_enable(priv->clk_usb_general);
+ 	if (ret) {
+ 		dev_err(&phy->dev, "Failed to enable USB general clock\n");
++		reset_control_rearm(priv->reset);
+ 		return ret;
+ 	}
+ 
+@@ -161,6 +162,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
+ 	if (ret) {
+ 		dev_err(&phy->dev, "Failed to enable USB DDR clock\n");
+ 		clk_disable_unprepare(priv->clk_usb_general);
++		reset_control_rearm(priv->reset);
+ 		return ret;
+ 	}
+ 
+@@ -199,6 +201,7 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
+ 				dev_warn(&phy->dev, "USB ID detect failed!\n");
+ 				clk_disable_unprepare(priv->clk_usb);
+ 				clk_disable_unprepare(priv->clk_usb_general);
++				reset_control_rearm(priv->reset);
+ 				return -EINVAL;
+ 			}
+ 		}
+@@ -218,6 +221,7 @@ static int phy_meson8b_usb2_power_off(struct phy *phy)
+ 
+ 	clk_disable_unprepare(priv->clk_usb);
+ 	clk_disable_unprepare(priv->clk_usb_general);
++	reset_control_rearm(priv->reset);
+ 
+ 	/* power off the PHY by putting it into reset mode */
+ 	regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+@@ -265,8 +269,9 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev)
+ 		return PTR_ERR(priv->clk_usb);
+ 
+ 	priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+-	if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
+-		return PTR_ERR(priv->reset);
++	if (IS_ERR(priv->reset))
++		return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
++				     "Failed to get the reset line");
+ 
+ 	priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
+ 	if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 24deeeb29af21..53abd553b842e 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1027,7 +1027,7 @@ config TOUCHSCREEN_DMI
+ 
+ config X86_ANDROID_TABLETS
+ 	tristate "X86 Android tablet support"
+-	depends on I2C && SERIAL_DEV_BUS && ACPI && GPIOLIB
++	depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB
+ 	help
+ 	  X86 tablets which ship with Android as (part of) the factory image
+ 	  typically have various problems with their DSDTs. The factory kernels
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index 48a46466f0862..88f0bfd6ecf1a 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -35,10 +35,6 @@ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
+ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+ 
+-static int enable_tablet_mode_sw = -1;
+-module_param(enable_tablet_mode_sw, int, 0444);
+-MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+-
+ #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
+ #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+ #define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+@@ -107,6 +103,7 @@ enum hp_wmi_commandtype {
+ 	HPWMI_FEATURE2_QUERY		= 0x0d,
+ 	HPWMI_WIRELESS2_QUERY		= 0x1b,
+ 	HPWMI_POSTCODEERROR_QUERY	= 0x2a,
++	HPWMI_SYSTEM_DEVICE_MODE	= 0x40,
+ 	HPWMI_THERMAL_PROFILE_QUERY	= 0x4c,
+ };
+ 
+@@ -217,6 +214,19 @@ struct rfkill2_device {
+ static int rfkill2_count;
+ static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
+ 
++/*
++ * Chassis Types values were obtained from SMBIOS reference
++ * specification version 3.00. A complete list of system enclosures
++ * and chassis types is available on Table 17.
++ */
++static const char * const tablet_chassis_types[] = {
++	"30", /* Tablet*/
++	"31", /* Convertible */
++	"32"  /* Detachable */
++};
++
++#define DEVICE_MODE_TABLET	0x06
++
+ /* map output size to the corresponding WMI method id */
+ static inline int encode_outsize_for_pvsz(int outsize)
+ {
+@@ -320,7 +330,7 @@ static int hp_wmi_get_fan_speed(int fan)
+ 	char fan_data[4] = { fan, 0, 0, 0 };
+ 
+ 	int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
+-				       &fan_data, sizeof(fan_data),
++				       &fan_data, sizeof(char),
+ 				       sizeof(fan_data));
+ 
+ 	if (ret != 0)
+@@ -345,14 +355,39 @@ static int hp_wmi_read_int(int query)
+ 	return val;
+ }
+ 
+-static int hp_wmi_hw_state(int mask)
++static int hp_wmi_get_dock_state(void)
+ {
+ 	int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);
+ 
+ 	if (state < 0)
+ 		return state;
+ 
+-	return !!(state & mask);
++	return !!(state & HPWMI_DOCK_MASK);
++}
++
++static int hp_wmi_get_tablet_mode(void)
++{
++	char system_device_mode[4] = { 0 };
++	const char *chassis_type;
++	bool tablet_found;
++	int ret;
++
++	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
++	if (!chassis_type)
++		return -ENODEV;
++
++	tablet_found = match_string(tablet_chassis_types,
++				    ARRAY_SIZE(tablet_chassis_types),
++				    chassis_type) >= 0;
++	if (!tablet_found)
++		return -ENODEV;
++
++	ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
++				   system_device_mode, 0, sizeof(system_device_mode));
++	if (ret < 0)
++		return ret;
++
++	return system_device_mode[0] == DEVICE_MODE_TABLET;
+ }
+ 
+ static int omen_thermal_profile_set(int mode)
+@@ -364,7 +399,7 @@ static int omen_thermal_profile_set(int mode)
+ 		return -EINVAL;
+ 
+ 	ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
+-				   &buffer, sizeof(buffer), sizeof(buffer));
++				   &buffer, sizeof(buffer), 0);
+ 
+ 	if (ret)
+ 		return ret < 0 ? ret : -EINVAL;
+@@ -401,7 +436,7 @@ static int hp_wmi_fan_speed_max_set(int enabled)
+ 	int ret;
+ 
+ 	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
+-				   &enabled, sizeof(enabled), sizeof(enabled));
++				   &enabled, sizeof(enabled), 0);
+ 
+ 	if (ret)
+ 		return ret < 0 ? ret : -EINVAL;
+@@ -414,7 +449,7 @@ static int hp_wmi_fan_speed_max_get(void)
+ 	int val = 0, ret;
+ 
+ 	ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
+-				   &val, sizeof(val), sizeof(val));
++				   &val, 0, sizeof(val));
+ 
+ 	if (ret)
+ 		return ret < 0 ? ret : -EINVAL;
+@@ -426,7 +461,7 @@ static int __init hp_wmi_bios_2008_later(void)
+ {
+ 	int state = 0;
+ 	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
+-				       sizeof(state), sizeof(state));
++				       0, sizeof(state));
+ 	if (!ret)
+ 		return 1;
+ 
+@@ -437,7 +472,7 @@ static int __init hp_wmi_bios_2009_later(void)
+ {
+ 	u8 state[128];
+ 	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
+-				       sizeof(state), sizeof(state));
++				       0, sizeof(state));
+ 	if (!ret)
+ 		return 1;
+ 
+@@ -515,7 +550,7 @@ static int hp_wmi_rfkill2_refresh(void)
+ 	int err, i;
+ 
+ 	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+-				   sizeof(state), sizeof(state));
++				   0, sizeof(state));
+ 	if (err)
+ 		return err;
+ 
+@@ -568,7 +603,7 @@ static ssize_t als_show(struct device *dev, struct device_attribute *attr,
+ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ 			 char *buf)
+ {
+-	int value = hp_wmi_hw_state(HPWMI_DOCK_MASK);
++	int value = hp_wmi_get_dock_state();
+ 	if (value < 0)
+ 		return value;
+ 	return sprintf(buf, "%d\n", value);
+@@ -577,7 +612,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
+ 			   char *buf)
+ {
+-	int value = hp_wmi_hw_state(HPWMI_TABLET_MASK);
++	int value = hp_wmi_get_tablet_mode();
+ 	if (value < 0)
+ 		return value;
+ 	return sprintf(buf, "%d\n", value);
+@@ -604,7 +639,7 @@ static ssize_t als_store(struct device *dev, struct device_attribute *attr,
+ 		return ret;
+ 
+ 	ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
+-				       sizeof(tmp), sizeof(tmp));
++				       sizeof(tmp), 0);
+ 	if (ret)
+ 		return ret < 0 ? ret : -EINVAL;
+ 
+@@ -625,9 +660,9 @@ static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
+ 	if (clear == false)
+ 		return -EINVAL;
+ 
+-	/* Clear the POST error code. It is kept until until cleared. */
++	/* Clear the POST error code. It is kept until cleared. */
+ 	ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
+-				       sizeof(tmp), sizeof(tmp));
++				       sizeof(tmp), 0);
+ 	if (ret)
+ 		return ret < 0 ? ret : -EINVAL;
+ 
+@@ -699,10 +734,10 @@ static void hp_wmi_notify(u32 value, void *context)
+ 	case HPWMI_DOCK_EVENT:
+ 		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ 			input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-					    hp_wmi_hw_state(HPWMI_DOCK_MASK));
++					    hp_wmi_get_dock_state());
+ 		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ 			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-					    hp_wmi_hw_state(HPWMI_TABLET_MASK));
++					    hp_wmi_get_tablet_mode());
+ 		input_sync(hp_wmi_input_dev);
+ 		break;
+ 	case HPWMI_PARK_HDD:
+@@ -780,19 +815,17 @@ static int __init hp_wmi_input_setup(void)
+ 	__set_bit(EV_SW, hp_wmi_input_dev->evbit);
+ 
+ 	/* Dock */
+-	val = hp_wmi_hw_state(HPWMI_DOCK_MASK);
++	val = hp_wmi_get_dock_state();
+ 	if (!(val < 0)) {
+ 		__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ 		input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ 	}
+ 
+ 	/* Tablet mode */
+-	if (enable_tablet_mode_sw > 0) {
+-		val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+-		if (val >= 0) {
+-			__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+-			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+-		}
++	val = hp_wmi_get_tablet_mode();
++	if (!(val < 0)) {
++		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
++		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ 	}
+ 
+ 	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
+@@ -919,7 +952,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
+ 	int err, i;
+ 
+ 	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
+-				   sizeof(state), sizeof(state));
++				   0, sizeof(state));
+ 	if (err)
+ 		return err < 0 ? err : -EINVAL;
+ 
+@@ -1227,10 +1260,10 @@ static int hp_wmi_resume_handler(struct device *device)
+ 	if (hp_wmi_input_dev) {
+ 		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ 			input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-					    hp_wmi_hw_state(HPWMI_DOCK_MASK));
++					    hp_wmi_get_dock_state());
+ 		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ 			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-					    hp_wmi_hw_state(HPWMI_TABLET_MASK));
++					    hp_wmi_get_tablet_mode());
+ 		input_sync(hp_wmi_input_dev);
+ 	}
+ 
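/*
 * A minimal sketch (not part of the upstream patch) of the DMI gate the
 * hp-wmi tablet-mode rework above adds: SW_TABLET_MODE is only probed
 * on SMBIOS chassis types 30/31/32 (tablet, convertible, detachable).
 */
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/string.h>

static const char * const sketch_tablet_types[] = { "30", "31", "32" };

static bool sketch_is_tablet_chassis(void)
{
	const char *type = dmi_get_system_info(DMI_CHASSIS_TYPE);

	/* match_string() returns the matched index, or -EINVAL if none */
	return type && match_string(sketch_tablet_types,
				    ARRAY_SIZE(sketch_tablet_types),
				    type) >= 0;
}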
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 3424b080db772..3fb8cda31eb9e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8699,10 +8699,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL),	/* P53 / P73 */
+ 	TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (1st gen) */
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
+-	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3nd gen) */
+-	TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (4nd gen) */
+ 	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
+-	TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),	/* X1 Carbon (9th gen) */
+ 	TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL),  /* T15g (2nd gen) */
+ 	TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN),	/* X1 Tablet (2nd gen) */
+ };
+@@ -8746,6 +8743,9 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 		 * ThinkPad ECs supports the fan control register */
+ 		if (likely(acpi_ec_read(fan_status_offset,
+ 					&fan_control_initial_status))) {
++			int res;
++			unsigned int speed;
++
+ 			fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+ 			if (quirks & TPACPI_FAN_Q1)
+ 				fan_quirk1_setup();
+@@ -8758,6 +8758,15 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ 				tp_features.second_fan_ctl = 1;
+ 				pr_info("secondary fan control enabled\n");
+ 			}
++			/* Try and probe the 2nd fan */
++			res = fan2_get_speed(&speed);
++			if (res >= 0) {
++				/* It responded - so let's assume it's there */
++				tp_features.second_fan = 1;
++				tp_features.second_fan_ctl = 1;
++				pr_info("secondary fan control detected & enabled\n");
++			}
++
+ 		} else {
+ 			pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
+ 			return -ENODEV;
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index b366e2fd8e97f..5e4a693528111 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -351,14 +351,14 @@ config AXP20X_POWER
+ 
+ config AXP288_CHARGER
+ 	tristate "X-Powers AXP288 Charger"
+-	depends on MFD_AXP20X && EXTCON_AXP288 && IOSF_MBI
++	depends on MFD_AXP20X && EXTCON_AXP288 && IOSF_MBI && ACPI
+ 	help
+ 	  Say yes here to have support X-Power AXP288 power management IC (PMIC)
+ 	  integrated charger.
+ 
+ config AXP288_FUEL_GAUGE
+ 	tristate "X-Powers AXP288 Fuel Gauge"
+-	depends on MFD_AXP20X && IIO && IOSF_MBI
++	depends on MFD_AXP20X && IIO && IOSF_MBI && ACPI
+ 	help
+ 	  Say yes here to have support for X-Power power management IC (PMIC)
+ 	  Fuel Gauge. The device provides battery statistics and status
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index 5d197141f4760..9106077c0dbb4 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -186,7 +186,6 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
+ 				   union power_supply_propval *val)
+ {
+ 	struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
+-	struct iio_channel *chan;
+ 	int ret = 0, reg, val1;
+ 
+ 	switch (psp) {
+@@ -266,12 +265,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
+ 		if (ret)
+ 			return ret;
+ 
+-		if (reg & AXP20X_PWR_STATUS_BAT_CHARGING)
+-			chan = axp20x_batt->batt_chrg_i;
+-		else
+-			chan = axp20x_batt->batt_dischrg_i;
+-
+-		ret = iio_read_channel_processed(chan, &val->intval);
++		if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) {
++			ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval);
++		} else {
++			ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1);
++			val->intval = -val1;
++		}
+ 		if (ret)
+ 			return ret;
+ 
+diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
+index ec41f6cd3f93f..19746e658a6a8 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -42,11 +42,11 @@
+ #define VBUS_ISPOUT_CUR_LIM_1500MA	0x1	/* 1500mA */
+ #define VBUS_ISPOUT_CUR_LIM_2000MA	0x2	/* 2000mA */
+ #define VBUS_ISPOUT_CUR_NO_LIM		0x3	/* 2500mA */
+-#define VBUS_ISPOUT_VHOLD_SET_MASK	0x31
++#define VBUS_ISPOUT_VHOLD_SET_MASK	0x38
+ #define VBUS_ISPOUT_VHOLD_SET_BIT_POS	0x3
+ #define VBUS_ISPOUT_VHOLD_SET_OFFSET	4000	/* 4000mV */
+ #define VBUS_ISPOUT_VHOLD_SET_LSB_RES	100	/* 100mV */
+-#define VBUS_ISPOUT_VHOLD_SET_4300MV	0x3	/* 4300mV */
++#define VBUS_ISPOUT_VHOLD_SET_4400MV	0x4	/* 4400mV */
+ #define VBUS_ISPOUT_VBUS_PATH_DIS	BIT(7)
+ 
+ #define CHRG_CCCV_CC_MASK		0xf		/* 4 bits */
+@@ -769,6 +769,16 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
+ 		ret = axp288_charger_vbus_path_select(info, true);
+ 		if (ret < 0)
+ 			return ret;
++	} else {
++		/* Set Vhold to the factory default / recommended 4.4V */
++		val = VBUS_ISPOUT_VHOLD_SET_4400MV << VBUS_ISPOUT_VHOLD_SET_BIT_POS;
++		ret = regmap_update_bits(info->regmap, AXP20X_VBUS_IPSOUT_MGMT,
++					 VBUS_ISPOUT_VHOLD_SET_MASK, val);
++		if (ret < 0) {
++			dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
++				AXP20X_VBUS_IPSOUT_MGMT, ret);
++			return ret;
++		}
+ 	}
+ 
+ 	/* Read current charge voltage and current limit */
+@@ -828,6 +838,13 @@ static int axp288_charger_probe(struct platform_device *pdev)
+ 	struct power_supply_config charger_cfg = {};
+ 	unsigned int val;
+ 
++	/*
++	 * Normally the native AXP288 fg/charger drivers are preferred but
++	 * on some devices the ACPI drivers should be used instead.
++	 */
++	if (!acpi_quirk_skip_acpi_ac_and_battery())
++		return -ENODEV;
++
+ 	/*
+ 	 * On some devices the fuelgauge and charger parts of the axp288 are
+ 	 * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
+index c1da217fdb0e2..ce8ffd0a41b5a 100644
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -9,6 +9,7 @@
+  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  */
+ 
++#include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -560,12 +561,6 @@ static const struct dmi_system_id axp288_no_battery_list[] = {
+ 			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ 		},
+ 	},
+-	{
+-		/* ECS EF20EA */
+-		.matches = {
+-			DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
+-		},
+-	},
+ 	{
+ 		/* Intel Cherry Trail Compute Stick, Windows version */
+ 		.matches = {
+@@ -624,6 +619,13 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
+ 	};
+ 	unsigned int val;
+ 
++	/*
++	 * Normally the native AXP288 fg/charger drivers are preferred but
++	 * on some devices the ACPI drivers should be used instead.
++	 */
++	if (!acpi_quirk_skip_acpi_ac_and_battery())
++		return -ENODEV;
++
+ 	if (dmi_check_system(axp288_no_battery_list))
+ 		return -ENODEV;
+ 
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 41b92dc2f011a..9233bfedeb174 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -14,7 +14,7 @@ static ssize_t clock_name_show(struct device *dev,
+ 			       struct device_attribute *attr, char *page)
+ {
+ 	struct ptp_clock *ptp = dev_get_drvdata(dev);
+-	return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
++	return sysfs_emit(page, "%s\n", ptp->info->name);
+ }
+ static DEVICE_ATTR_RO(clock_name);
+ 
+@@ -387,7 +387,7 @@ static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
+ 
+ 	mutex_unlock(&ptp->pincfg_mux);
+ 
+-	return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
++	return sysfs_emit(page, "%u %u\n", func, chan);
+ }
+ 
+ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
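/*
 * A minimal sketch (not part of the upstream patch) of the sysfs_emit()
 * conversions above (also used in the bfad_attr.c hunks further down):
 * sysfs_emit() enforces the PAGE_SIZE bound itself, so show() callbacks
 * drop the manual snprintf(buf, PAGE_SIZE, ...) bookkeeping. The
 * attribute and value here are illustrative only.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t name_show_sketch(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return sysfs_emit(page, "%s\n", "example");
}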
+diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
+index 05147d2c38428..485e58b264c04 100644
+--- a/drivers/regulator/atc260x-regulator.c
++++ b/drivers/regulator/atc260x-regulator.c
+@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
+ 	.bypass_mask = BIT(5), \
+ 	.active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
+ 	.active_discharge_mask = BIT(1), \
++	.active_discharge_on = BIT(1), \
+ 	.owner = THIS_MODULE, \
+ }
+ 
+diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
+index f21e3f8b21f23..8e13dea354a21 100644
+--- a/drivers/regulator/rtq2134-regulator.c
++++ b/drivers/regulator/rtq2134-regulator.c
+@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
+ 		.enable_mask = RTQ2134_VOUTEN_MASK, \
+ 		.active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
+ 		.active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
++		.active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
+ 		.ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
+ 		.ramp_mask = RTQ2134_RSPUP_MASK, \
+ 		.ramp_delay_table = rtq2134_buck_ramp_delay_table, \
+diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
+index 2018614f258f6..6eaa9321c0741 100644
+--- a/drivers/rtc/rtc-wm8350.c
++++ b/drivers/rtc/rtc-wm8350.c
+@@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
++	ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
+ 			    wm8350_rtc_update_handler, 0,
+ 			    "RTC Seconds", wm8350);
++	if (ret)
++		return ret;
++
+ 	wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
+ 
+-	wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
++	ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
+ 			    wm8350_rtc_alarm_handler, 0,
+ 			    "RTC Alarm", wm8350);
++	if (ret) {
++		wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
++		return ret;
++	}
+ 
+ 	return 0;
+ }
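/*
 * A minimal sketch (not part of the upstream patch) of the unwind
 * ladder the wm8350 RTC fix above adopts: both IRQ registrations are
 * checked, and the first is released if the second fails.
 * register_a()/register_b()/unregister_a() are hypothetical stand-ins
 * for the wm8350_register_irq()/wm8350_free_irq() calls.
 */
static int register_a(void);		/* hypothetical */
static int register_b(void);		/* hypothetical */
static void unregister_a(void);		/* hypothetical */

static int register_two_irqs_sketch(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret) {
		unregister_a();		/* undo step 1 before bailing out */
		return ret;
	}
	return 0;
}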
+diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
+index d17880b57d17b..2449b4215b32d 100644
+--- a/drivers/scsi/aha152x.c
++++ b/drivers/scsi/aha152x.c
+@@ -3375,13 +3375,11 @@ static int __init aha152x_setup(char *str)
+ 	setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
+ 	setup[setup_count].delay       = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
+ 	setup[setup_count].ext_trans   = ints[0] >= 8 ? ints[8] : 0;
+-	if (ints[0] > 8) {                                                /*}*/
++	if (ints[0] > 8)
+ 		printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ 		       "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
+-	} else {
++	else
+ 		setup_count++;
+-		return 0;
+-	}
+ 
+ 	return 1;
+ }
+diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
+index f46989bd083cc..5a85401e9e2d3 100644
+--- a/drivers/scsi/bfa/bfad_attr.c
++++ b/drivers/scsi/bfa/bfad_attr.c
+@@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
+ 	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ 
+ 	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
++	return sysfs_emit(buf, "%s\n", serial_num);
+ }
+ 
+ static ssize_t
+@@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
+ 	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ 
+ 	bfa_get_adapter_model(&bfad->bfa, model);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", model);
++	return sysfs_emit(buf, "%s\n", model);
+ }
+ 
+ static ssize_t
+@@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
+ 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ 			"Invalid Model");
+ 
+-	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
++	return sysfs_emit(buf, "%s\n", model_descr);
+ }
+ 
+ static ssize_t
+@@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
+ 	u64        nwwn;
+ 
+ 	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
+-	return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
++	return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
+ }
+ 
+ static ssize_t
+@@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
+ 	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+ 	strlcpy(symname, port_attr.port_cfg.sym_name.symname,
+ 			BFA_SYMNAME_MAXLEN);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", symname);
++	return sysfs_emit(buf, "%s\n", symname);
+ }
+ 
+ static ssize_t
+@@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
+ 	char hw_ver[BFA_VERSION_LEN];
+ 
+ 	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
++	return sysfs_emit(buf, "%s\n", hw_ver);
+ }
+ 
+ static ssize_t
+ bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
+ 				char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
++	return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
+ }
+ 
+ static ssize_t
+@@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev,
+ 	char optrom_ver[BFA_VERSION_LEN];
+ 
+ 	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
++	return sysfs_emit(buf, "%s\n", optrom_ver);
+ }
+ 
+ static ssize_t
+@@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
+ 	char fw_ver[BFA_VERSION_LEN];
+ 
+ 	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+-	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
++	return sysfs_emit(buf, "%s\n", fw_ver);
+ }
+ 
+ static ssize_t
+@@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
+ 			(struct bfad_im_port_s *) shost->hostdata[0];
+ 	struct bfad_s *bfad = im_port->bfad;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n",
++	return sysfs_emit(buf, "%d\n",
+ 			bfa_get_nports(&bfad->bfa));
+ }
+ 
+@@ -905,7 +905,7 @@ static ssize_t
+ bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
+ 				char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
++	return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
+ }
+ 
+ static ssize_t
+@@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
+ 	rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
+ 			 GFP_ATOMIC);
+ 	if (rports == NULL)
+-		return snprintf(buf, PAGE_SIZE, "Failed\n");
++		return sysfs_emit(buf, "Failed\n");
+ 
+ 	spin_lock_irqsave(&bfad->bfad_lock, flags);
+ 	bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
+ 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ 	kfree(rports);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
++	return sysfs_emit(buf, "%d\n", nrports);
+ }
+ 
+ static          DEVICE_ATTR(serial_number, S_IRUGO,
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 70173389f6ebd..52089538e9de6 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2398,17 +2398,25 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
+ 	return IRQ_WAKE_THREAD;
+ }
+ 
++static void hisi_sas_v3_free_vectors(void *data)
++{
++	struct pci_dev *pdev = data;
++
++	pci_free_irq_vectors(pdev);
++}
++
+ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ 	int vectors;
+ 	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+ 	struct Scsi_Host *shost = hisi_hba->shost;
++	struct pci_dev *pdev = hisi_hba->pci_dev;
+ 	struct irq_affinity desc = {
+ 		.pre_vectors = BASE_VECTORS_V3_HW,
+ 	};
+ 
+ 	min_msi = MIN_AFFINE_VECTORS_V3_HW;
+-	vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
++	vectors = pci_alloc_irq_vectors_affinity(pdev,
+ 						 min_msi, max_msi,
+ 						 PCI_IRQ_MSI |
+ 						 PCI_IRQ_AFFINITY,
+@@ -2420,6 +2428,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ 	shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ 
++	devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+ 	return 0;
+ }
+ 
+@@ -3967,6 +3976,54 @@ static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
+ 	.owner = THIS_MODULE,
+ };
+ 
++static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
++					const char __user *buf,
++					size_t count, loff_t *ppos)
++{
++	struct seq_file *m = filp->private_data;
++	struct hisi_hba *hisi_hba = m->private;
++	unsigned int cnt;
++	int val;
++
++	if (hisi_hba->debugfs_bist_enable)
++		return -EPERM;
++
++	val = kstrtouint_from_user(buf, count, 0, &cnt);
++	if (val)
++		return val;
++
++	if (cnt)
++		return -EINVAL;
++
++	hisi_hba->debugfs_bist_cnt = 0;
++	return count;
++}
++
++static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
++{
++	struct hisi_hba *hisi_hba = s->private;
++
++	seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt);
++
++	return 0;
++}
++
++static int debugfs_bist_cnt_v3_hw_open(struct inode *inode,
++					  struct file *filp)
++{
++	return single_open(filp, debugfs_bist_cnt_v3_hw_show,
++			   inode->i_private);
++}
++
++static const struct file_operations debugfs_bist_cnt_v3_hw_ops = {
++	.open = debugfs_bist_cnt_v3_hw_open,
++	.read = seq_read,
++	.write = debugfs_bist_cnt_v3_hw_write,
++	.llseek = seq_lseek,
++	.release = single_release,
++	.owner = THIS_MODULE,
++};
++
+ static const struct {
+ 	int		value;
+ 	char		*name;
+@@ -4604,8 +4661,8 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ 	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
+ 			    hisi_hba, &debugfs_bist_phy_v3_hw_fops);
+ 
+-	debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+-			   &hisi_hba->debugfs_bist_cnt);
++	debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
++			    hisi_hba, &debugfs_bist_cnt_v3_hw_ops);
+ 
+ 	debugfs_create_file("loopback_mode", 0600,
+ 			    hisi_hba->debugfs_bist_dentry,
+@@ -4769,7 +4826,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	rc = scsi_add_host(shost, dev);
+ 	if (rc)
+-		goto err_out_free_irq_vectors;
++		goto err_out_debugfs;
+ 
+ 	rc = sas_register_ha(sha);
+ 	if (rc)
+@@ -4800,8 +4857,6 @@ err_out_hw_init:
+ 	sas_unregister_ha(sha);
+ err_out_register_ha:
+ 	scsi_remove_host(shost);
+-err_out_free_irq_vectors:
+-	pci_free_irq_vectors(pdev);
+ err_out_debugfs:
+ 	debugfs_exit_v3_hw(hisi_hba);
+ err_out_ha:
+@@ -4825,7 +4880,6 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
+ 
+ 		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
+ 	}
+-	pci_free_irq_vectors(pdev);
+ }
+ 
+ static void hisi_sas_v3_remove(struct pci_dev *pdev)
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 841000445b9a1..aa223db4cf53c 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1701,6 +1701,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+ 	if (cancel_delayed_work_sync(&ep->timeout_work)) {
+ 		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
+ 		fc_exch_release(ep);	/* release from pending timer hold */
++		return;
+ 	}
+ 
+ 	spin_lock_bh(&ep->ex_lock);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index fc4eaf6d1e47e..d892ade421bf9 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -866,6 +866,8 @@ struct mpi3mr_ioc {
+  * @send_ack: Event acknowledgment required or not
+  * @process_evt: Bottomhalf processing required or not
+  * @evt_ctx: Event context to send in Ack
++ * @pending_at_sml: waiting for device add/remove API to complete
++ * @discard: discard this event
+  * @ref_count: kref count
+  * @event_data: Actual MPI3 event data
+  */
+@@ -877,6 +879,8 @@ struct mpi3mr_fwevt {
+ 	bool send_ack;
+ 	bool process_evt;
+ 	u32 evt_ctx;
++	bool pending_at_sml;
++	bool discard;
+ 	struct kref ref_count;
+ 	char event_data[0] __aligned(4);
+ };
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 15bdc21ead669..e44868230197f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1520,7 +1520,7 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+ 			    MPI3MR_MAX_SEG_LIST_SIZE,
+ 			    mrioc->req_qinfo[q_idx].q_segment_list,
+ 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
+-			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
++			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
+ 		}
+ 	} else
+ 		size = mrioc->req_qinfo[q_idx].segment_qd *
+@@ -4353,8 +4353,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ 	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
+-	mpi3mr_cleanup_fwevt_list(mrioc);
+ 	mpi3mr_flush_host_io(mrioc);
++	mpi3mr_cleanup_fwevt_list(mrioc);
+ 	mpi3mr_invalidate_devhandles(mrioc);
+ 	if (mrioc->prepare_for_reset) {
+ 		mrioc->prepare_for_reset = 0;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 284117da9086a..f7893de35b26b 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -285,6 +285,35 @@ static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
+ 	return fwevt;
+ }
+ 
++/**
++ * mpi3mr_cancel_work - cancel firmware event
++ * @fwevt: fwevt object which needs to be canceled
++ *
++ * Return: Nothing.
++ */
++static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
++{
++	/*
++	 * Wait on the fwevt to complete. If this returns 1, then
++	 * the event was never executed.
++	 *
++	 * If it did execute, we wait for it to finish, and the put will
++	 * happen from mpi3mr_process_fwevt()
++	 */
++	if (cancel_work_sync(&fwevt->work)) {
++		/*
++		 * Put fwevt reference count after
++		 * dequeuing it from worker queue
++		 */
++		mpi3mr_fwevt_put(fwevt);
++		/*
++		 * Put fwevt reference count to neutralize
++		 * kref_init increment
++		 */
++		mpi3mr_fwevt_put(fwevt);
++	}
++}
++
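[Editor's note: the helper above factors out the two-put teardown. cancel_work_sync() returns true only when the work was still queued and never ran; in that case the reference the queue held must be dropped in addition to the kref_init() reference. A compact sketch of that ownership rule; struct my_event and my_event_put() are hypothetical stand-ins for the driver's fwevt type.]

#include <linux/kref.h>
#include <linux/workqueue.h>

struct my_event {
	struct work_struct work;
	struct kref ref;	/* one ref from kref_init(), one per queue */
};

static void my_event_release(struct kref *ref)
{
	/* kfree() the containing object here */
}

static void my_event_put(struct my_event *ev)
{
	kref_put(&ev->ref, my_event_release);
}

static void my_event_cancel(struct my_event *ev)
{
	if (cancel_work_sync(&ev->work)) {
		my_event_put(ev);	/* drop the queue's reference */
		my_event_put(ev);	/* drop the kref_init() reference */
	}
	/* If cancel_work_sync() returned false, the handler ran (or is
	 * running) and is responsible for its own puts. */
}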
+ /**
+  * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
+  * @mrioc: Adapter instance reference
+@@ -302,28 +331,25 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
+ 	    !mrioc->fwevt_worker_thread)
+ 		return;
+ 
+-	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
+-	    (fwevt = mrioc->current_event)) {
++	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
++		mpi3mr_cancel_work(fwevt);
++
++	if (mrioc->current_event) {
++		fwevt = mrioc->current_event;
+ 		/*
+-		 * Wait on the fwevt to complete. If this returns 1, then
+-		 * the event was never executed, and we need a put for the
+-		 * reference the work had on the fwevt.
+-		 *
+-		 * If it did execute, we wait for it to finish, and the put will
+-		 * happen from mpi3mr_process_fwevt()
++		 * Don't call the cancel_work_sync() API for the
++		 * fwevt work if the controller reset is
++		 * invoked as part of processing the
++		 * same fwevt work, or when the worker thread
++		 * is waiting for device add/remove APIs to
++		 * complete. Otherwise we will deadlock.
+ 		 */
+-		if (cancel_work_sync(&fwevt->work)) {
+-			/*
+-			 * Put fwevt reference count after
+-			 * dequeuing it from worker queue
+-			 */
+-			mpi3mr_fwevt_put(fwevt);
+-			/*
+-			 * Put fwevt reference count to neutralize
+-			 * kref_init increment
+-			 */
+-			mpi3mr_fwevt_put(fwevt);
++		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
++			fwevt->discard = 1;
++			return;
+ 		}
++
++		mpi3mr_cancel_work(fwevt);
+ 	}
+ }
+ 
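[Editor's note: the current_event branch above avoids a classic self-deadlock. cancel_work_sync() blocks until the handler finishes, so it must never be called from the handler itself, nor while the handler is blocked inside the SCSI midlayer (which pending_at_sml tracks); current_work() identifies whether we are running inside the work item in question. A minimal sketch, extending the hypothetical struct my_event above with two assumed flags, bool pending_at_sml and bool discard.]

#include <linux/workqueue.h>

static void my_event_flush(struct my_event *ev)
{
	if (current_work() == &ev->work || ev->pending_at_sml) {
		/* We are inside the handler, or it is blocked in a
		 * lower layer: waiting for it would deadlock. Ask it
		 * to bail out instead. */
		ev->discard = true;
		return;
	}
	my_event_cancel(ev);	/* safe: not called from the handler */
}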
+@@ -690,6 +716,24 @@ static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_from_tgtpriv(
+ 	return tgtdev;
+ }
+ 
++/**
++ * mpi3mr_print_device_event_notice - print notice related to post processing of
++ *					device event after controller reset.
++ *
++ * @mrioc: Adapter instance reference
++ * @device_add: true for device add event and false for device removal event
++ *
++ * Return: None.
++ */
++static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
++	bool device_add)
++{
++	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
++	    (device_add ? "addition" : "removal"));
++	ioc_notice(mrioc, "completed after the reset; verify that the exposed\n");
++	ioc_notice(mrioc, "devices match the attached devices\n");
++}
++
+ /**
+  * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
+  * @mrioc: Adapter instance reference
+@@ -714,8 +758,17 @@ static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ 	}
+ 
+ 	if (tgtdev->starget) {
++		if (mrioc->current_event)
++			mrioc->current_event->pending_at_sml = 1;
+ 		scsi_remove_target(&tgtdev->starget->dev);
+ 		tgtdev->host_exposed = 0;
++		if (mrioc->current_event) {
++			mrioc->current_event->pending_at_sml = 0;
++			if (mrioc->current_event->discard) {
++				mpi3mr_print_device_event_notice(mrioc, false);
++				return;
++			}
++		}
+ 	}
+ 	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
+ 	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+@@ -749,11 +802,20 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
+ 	}
+ 	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ 		tgtdev->host_exposed = 1;
++		if (mrioc->current_event)
++			mrioc->current_event->pending_at_sml = 1;
+ 		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
+ 		    tgtdev->perst_id,
+ 		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ 		if (!tgtdev->starget)
+ 			tgtdev->host_exposed = 0;
++		if (mrioc->current_event) {
++			mrioc->current_event->pending_at_sml = 0;
++			if (mrioc->current_event->discard) {
++				mpi3mr_print_device_event_notice(mrioc, true);
++				goto out;
++			}
++		}
+ 	}
+ out:
+ 	if (tgtdev)
+@@ -1193,6 +1255,8 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ 
+ 	for (i = 0; i < event_data->num_entries; i++) {
++		if (fwevt->discard)
++			return;
+ 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ 		if (!handle)
+ 			continue;
+@@ -1324,6 +1388,8 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
+ 
+ 	for (i = 0; i < event_data->num_entries; i++) {
++		if (fwevt->discard)
++			return;
+ 		handle =
+ 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ 		if (!handle)
+@@ -1362,8 +1428,8 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
+ 	struct mpi3mr_fwevt *fwevt)
+ {
+-	mrioc->current_event = fwevt;
+ 	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
++	mrioc->current_event = fwevt;
+ 
+ 	if (mrioc->stop_drv_processing)
+ 		goto out;
+@@ -2551,6 +2617,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ 		scmd->result = DID_OK << 16;
+ 		goto out_success;
+ 	}
++
++	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
+ 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
+ 	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
+ 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 00792767c620d..7e476f50935b8 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ {
+ 	struct _sas_port *mpt3sas_port, *next;
+ 	unsigned long flags;
++	int port_id;
+ 
+ 	/* remove sibling ports attached to this expander */
+ 	list_for_each_entry_safe(mpt3sas_port, next,
+@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ 			    mpt3sas_port->hba_port);
+ 	}
+ 
++	port_id = sas_expander->port->port_id;
++
+ 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ 	    sas_expander->sas_address_parent, sas_expander->port);
+ 
+@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ 	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ 	    sas_expander->handle, (unsigned long long)
+ 	    sas_expander->sas_address,
+-	    sas_expander->port->port_id);
++	    port_id);
+ 
+ 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ 	list_del(&sas_expander->list);
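[Editor's note: the mpt3sas hunk above fixes a use-after-free: mpt3sas_transport_port_remove() can free sas_expander->port, so port_id must be copied to a local before the call if it is still needed for the log message afterwards. A minimal sketch of the rule, with hypothetical names.]

#include <linux/printk.h>

struct my_port { int port_id; };

void my_port_remove(struct my_port *port);	/* may free 'port' */

void my_remove(struct my_port *port)
{
	int port_id = port->port_id;	/* save before teardown */

	my_port_remove(port);
	pr_info("removed port %d\n", port_id);	/* safe: uses the copy */
}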
+diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
+index dcae2d4464f90..44df7c03aab8d 100644
+--- a/drivers/scsi/mvsas/mv_init.c
++++ b/drivers/scsi/mvsas/mv_init.c
+@@ -696,7 +696,7 @@ static struct pci_driver mvs_pci_driver = {
+ static ssize_t driver_version_show(struct device *cdev,
+ 				   struct device_attribute *attr, char *buffer)
+ {
+-	return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
++	return sysfs_emit(buffer, "%s\n", DRV_VERSION);
+ }
+ 
+ static DEVICE_ATTR_RO(driver_version);
+@@ -744,7 +744,7 @@ static ssize_t interrupt_coalescing_store(struct device *cdev,
+ static ssize_t interrupt_coalescing_show(struct device *cdev,
+ 					 struct device_attribute *attr, char *buffer)
+ {
+-	return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
++	return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
+ }
+ 
+ static DEVICE_ATTR_RW(interrupt_coalescing);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index d853e8d0195a6..27ead825c2bb6 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -1522,7 +1522,6 @@ void pm8001_work_fn(struct work_struct *work)
+ 	case IO_XFER_ERROR_BREAK:
+ 	{	/* This one stashes the sas_task instead */
+ 		struct sas_task *t = (struct sas_task *)pm8001_dev;
+-		u32 tag;
+ 		struct pm8001_ccb_info *ccb;
+ 		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ 		unsigned long flags, flags1;
+@@ -1544,8 +1543,8 @@ void pm8001_work_fn(struct work_struct *work)
+ 		/* Search for a possible ccb that matches the task */
+ 		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ 			ccb = &pm8001_ha->ccb_info[i];
+-			tag = ccb->ccb_tag;
+-			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
++			if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
++			    (ccb->task == t))
+ 				break;
+ 		}
+ 		if (!ccb) {
+@@ -1567,11 +1566,11 @@ void pm8001_work_fn(struct work_struct *work)
+ 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+ 			pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ 				   t, pw->handler, ts->resp, ts->stat);
+-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, ccb->ccb_tag);
+ 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 		} else {
+ 			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, ccb->ccb_tag);
+ 			mb();/* in order to force CPU ordering */
+ 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
+@@ -1580,7 +1579,6 @@ void pm8001_work_fn(struct work_struct *work)
+ 	case IO_XFER_OPEN_RETRY_TIMEOUT:
+ 	{	/* This one stashes the sas_task instead */
+ 		struct sas_task *t = (struct sas_task *)pm8001_dev;
+-		u32 tag;
+ 		struct pm8001_ccb_info *ccb;
+ 		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+ 		unsigned long flags, flags1;
+@@ -1614,8 +1612,8 @@ void pm8001_work_fn(struct work_struct *work)
+ 		/* Search for a possible ccb that matches the task */
+ 		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ 			ccb = &pm8001_ha->ccb_info[i];
+-			tag = ccb->ccb_tag;
+-			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
++			if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
++			    (ccb->task == t))
+ 				break;
+ 		}
+ 		if (!ccb) {
+@@ -1686,19 +1684,13 @@ void pm8001_work_fn(struct work_struct *work)
+ 		struct task_status_struct *ts;
+ 		struct sas_task *task;
+ 		int i;
+-		u32 tag, device_id;
++		u32 device_id;
+ 
+ 		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+ 			ccb = &pm8001_ha->ccb_info[i];
+ 			task = ccb->task;
+ 			ts = &task->task_status;
+-			tag = ccb->ccb_tag;
+-			/* check if tag is NULL */
+-			if (!tag) {
+-				pm8001_dbg(pm8001_ha, FAIL,
+-					"tag Null\n");
+-				continue;
+-			}
++
+ 			if (task != NULL) {
+ 				dev = task->dev;
+ 				if (!dev) {
+@@ -1707,10 +1699,11 @@ void pm8001_work_fn(struct work_struct *work)
+ 					continue;
+ 				}
+ 				/*complete sas task and update to top layer */
+-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
++				pm8001_ccb_task_free(pm8001_ha, task, ccb,
++						     ccb->ccb_tag);
+ 				ts->resp = SAS_TASK_COMPLETE;
+ 				task->task_done(task);
+-			} else if (tag != 0xFFFFFFFF) {
++			} else if (ccb->ccb_tag != PM8001_INVALID_TAG) {
+ 				/* complete the internal commands/non-sas task */
+ 				pm8001_dev = ccb->device;
+ 				if (pm8001_dev->dcompletion) {
+@@ -1718,7 +1711,7 @@ void pm8001_work_fn(struct work_struct *work)
+ 					pm8001_dev->dcompletion = NULL;
+ 				}
+ 				complete(pm8001_ha->nvmd_completion);
+-				pm8001_tag_free(pm8001_ha, tag);
++				pm8001_tag_free(pm8001_ha, ccb->ccb_tag);
+ 			}
+ 		}
+ 		/* Deregister all the device ids  */
+@@ -1772,7 +1765,6 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+ 	}
+ 
+ 	task = sas_alloc_slow_task(GFP_ATOMIC);
+-
+ 	if (!task) {
+ 		pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
+ 		return;
+@@ -1781,8 +1773,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+ 	task->task_done = pm8001_task_done;
+ 
+ 	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+-	if (res)
++	if (res) {
++		sas_free_task(task);
+ 		return;
++	}
+ 
+ 	ccb = &pm8001_ha->ccb_info[ccb_tag];
+ 	ccb->device = pm8001_ha_dev;
+@@ -1799,8 +1793,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+ 
+ 	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ 			sizeof(task_abort), 0);
+-	if (ret)
++	if (ret) {
++		sas_free_task(task);
+ 		pm8001_tag_free(pm8001_ha, ccb_tag);
++	}
+ 
+ }
+ 
+@@ -2316,11 +2312,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	param = le32_to_cpu(psataPayload->param);
+ 	tag = le32_to_cpu(psataPayload->tag);
+ 
+-	if (!tag) {
+-		pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
+-		return;
+-	}
+-
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	t = ccb->task;
+ 	pm8001_dev = ccb->device;
+@@ -3056,7 +3047,7 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+ 		   device_id, pds, nds, status);
+ 	complete(pm8001_dev->setds_completion);
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	pm8001_tag_free(pm8001_ha, tag);
+ }
+ 
+@@ -3074,7 +3065,7 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 				dlen_status);
+ 	}
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	pm8001_tag_free(pm8001_ha, tag);
+ }
+ 
+@@ -3101,7 +3092,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		 * freed by requesting path anywhere.
+ 		 */
+ 		ccb->task = NULL;
+-		ccb->ccb_tag = 0xFFFFFFFF;
++		ccb->ccb_tag = PM8001_INVALID_TAG;
+ 		pm8001_tag_free(pm8001_ha, tag);
+ 		return;
+ 	}
+@@ -3147,7 +3138,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	complete(pm8001_ha->nvmd_completion);
+ 	pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	pm8001_tag_free(pm8001_ha, tag);
+ }
+ 
+@@ -3560,7 +3551,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	}
+ 	complete(pm8001_dev->dcompletion);
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	pm8001_tag_free(pm8001_ha, htag);
+ 	return 0;
+ }
+@@ -3632,7 +3623,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+ 	}
+ 	kfree(ccb->fw_control_context);
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	pm8001_tag_free(pm8001_ha, tag);
+ 	complete(pm8001_ha->nvmd_completion);
+ 	return 0;
+@@ -3668,10 +3659,6 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 
+ 	status = le32_to_cpu(pPayload->status);
+ 	tag = le32_to_cpu(pPayload->tag);
+-	if (!tag) {
+-		pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
+-		return -1;
+-	}
+ 
+ 	scp = le32_to_cpu(pPayload->scp);
+ 	ccb = &pm8001_ha->ccb_info[tag];
+@@ -3706,12 +3693,11 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	mb();
+ 
+ 	if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
+-		pm8001_tag_free(pm8001_ha, tag);
+ 		sas_free_task(t);
+-		/* clear the flag */
+-		pm8001_dev->id &= 0xBFFFFFFF;
+-	} else
++		pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG;
++	} else {
+ 		t->task_done(t);
++	}
+ 
+ 	return 0;
+ }
+@@ -4479,6 +4465,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ 		SAS_ADDR_SIZE);
+ 	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ 			sizeof(payload), 0);
++	if (rc)
++		pm8001_tag_free(pm8001_ha, tag);
++
+ 	return rc;
+ }
+ 
+@@ -4891,6 +4880,11 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 	ccb->ccb_tag = tag;
+ 	rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
+ 		tag);
++	if (rc) {
++		kfree(fw_control_context);
++		pm8001_tag_free(pm8001_ha, tag);
++	}
++
+ 	return rc;
+ }
+ 
+@@ -4995,6 +4989,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ 	payload.nds = cpu_to_le32(state);
+ 	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ 			sizeof(payload), 0);
++	if (rc)
++		pm8001_tag_free(pm8001_ha, tag);
++
+ 	return rc;
+ 
+ }
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index d8a2121cb8d93..d2a2593f669d6 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -1216,10 +1216,11 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
+ 			goto err_out;
+ 		}
+ 		pm8001_ha->ccb_info[i].task = NULL;
+-		pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
++		pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG;
+ 		pm8001_ha->ccb_info[i].device = NULL;
+ 		++pm8001_ha->tags_num;
+ 	}
++
+ 	return 0;
+ 
+ err_out_noccb:
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 32edda3e55c6c..b68c8400ca158 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -567,7 +567,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+ 
+ 	task->lldd_task = NULL;
+ 	ccb->task = NULL;
+-	ccb->ccb_tag = 0xFFFFFFFF;
++	ccb->ccb_tag = PM8001_INVALID_TAG;
+ 	ccb->open_retry = 0;
+ 	pm8001_tag_free(pm8001_ha, ccb_idx);
+ }
+@@ -847,10 +847,10 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 
+ 		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
+ 			pm8001_dev, flag, task_tag, ccb_tag);
+-
+ 		if (res) {
+ 			del_timer(&task->slow_task->timer);
+ 			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
++			pm8001_tag_free(pm8001_ha, ccb_tag);
+ 			goto ex_err;
+ 		}
+ 		wait_for_completion(&task->slow_task->completion);
+@@ -952,9 +952,11 @@ void pm8001_open_reject_retry(
+ 		struct task_status_struct *ts;
+ 		struct pm8001_device *pm8001_dev;
+ 		unsigned long flags1;
+-		u32 tag;
+ 		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+ 
++		if (ccb->ccb_tag == PM8001_INVALID_TAG)
++			continue;
++
+ 		pm8001_dev = ccb->device;
+ 		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
+ 			continue;
+@@ -966,9 +968,6 @@ void pm8001_open_reject_retry(
+ 				continue;
+ 		} else if (pm8001_dev != device_to_close)
+ 			continue;
+-		tag = ccb->ccb_tag;
+-		if (!tag || (tag == 0xFFFFFFFF))
+-			continue;
+ 		task = ccb->task;
+ 		if (!task || !task->task_done)
+ 			continue;
+@@ -989,11 +988,11 @@ void pm8001_open_reject_retry(
+ 				& SAS_TASK_STATE_ABORTED))) {
+ 			spin_unlock_irqrestore(&task->task_state_lock,
+ 				flags1);
+-			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
++			pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb->ccb_tag);
+ 		} else {
+ 			spin_unlock_irqrestore(&task->task_state_lock,
+ 				flags1);
+-			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
++			pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb->ccb_tag);
+ 			mb();/* in order to force CPU ordering */
+ 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			task->task_done(task);
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index a17da1cebce17..1791cdf302762 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -738,6 +738,8 @@ void pm8001_free_dev(struct pm8001_device *pm8001_dev);
+ /* ctl shared API */
+ extern const struct attribute_group *pm8001_host_groups[];
+ 
++#define PM8001_INVALID_TAG	((u32)-1)
++
+ static inline void
+ pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ 			struct sas_task *task, struct pm8001_ccb_info *ccb,
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 908dbac20b483..55163469030d3 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -67,18 +67,16 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+ }
+ 
+ static void pm80xx_pci_mem_copy(struct pm8001_hba_info  *pm8001_ha, u32 soffset,
+-				const void *destination,
++				__le32 *destination,
+ 				u32 dw_count, u32 bus_base_number)
+ {
+ 	u32 index, value, offset;
+-	u32 *destination1;
+-	destination1 = (u32 *)destination;
+ 
+-	for (index = 0; index < dw_count; index += 4, destination1++) {
++	for (index = 0; index < dw_count; index += 4, destination++) {
+ 		offset = (soffset + index);
+ 		if (offset < (64 * 1024)) {
+ 			value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+-			*destination1 =  cpu_to_le32(value);
++			*destination = cpu_to_le32(value);
+ 		}
+ 	}
+ 	return;
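[Editor's note: typing the destination as __le32 * instead of casting through a plain u32 * lets sparse verify the endianness conversion: the register value read back is CPU-endian and must be stored little-endian. A minimal sketch of the convention; read_reg() is a hypothetical accessor, not this driver's API.]

#include <linux/types.h>
#include <asm/byteorder.h>

u32 read_reg(u32 offset);	/* returns a CPU-endian value */

static void copy_to_le(__le32 *dst, u32 offset, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++)
		/* sparse would flag a missing cpu_to_le32() here */
		dst[i] = cpu_to_le32(read_reg(offset + i * 4));
}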
+@@ -2406,11 +2404,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
+ 	param = le32_to_cpu(psataPayload->param);
+ 	tag = le32_to_cpu(psataPayload->tag);
+ 
+-	if (!tag) {
+-		pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
+-		return;
+-	}
+-
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	t = ccb->task;
+ 	pm8001_dev = ccb->device;
+@@ -4927,8 +4920,13 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ 	payload.tag = cpu_to_le32(tag);
+ 	payload.phyop_phyid =
+ 		cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+-	return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+-			sizeof(payload), 0);
++
++	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
++				  sizeof(payload), 0);
++	if (rc)
++		pm8001_tag_free(pm8001_ha, tag);
++
++	return rc;
+ }
+ 
+ static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
+diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
+index 1f8f80b2dbfcb..a9f8de5e9639a 100644
+--- a/drivers/scsi/scsi_logging.c
++++ b/drivers/scsi/scsi_logging.c
+@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+ {
+ 	struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+ 
+-	if (!rq->q->disk)
++	if (!rq->q || !rq->q->disk)
+ 		return NULL;
+ 	return rq->q->disk->disk_name;
+ }
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index f4e6c68ac99ed..2ef78083f1eff 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ 	int ret;
+ 	struct sbitmap sb_backup;
+ 
++	depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
++
+ 	/*
+ 	 * realloc if new shift is calculated, which is caused by setting
+ 	 * up one new default queue depth after calling ->slave_configure
+@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ 				scsi_device_max_queue_depth(sdev),
+ 				new_shift, GFP_KERNEL,
+ 				sdev->request_queue->node, false, true);
++	if (!ret)
++		sbitmap_resize(&sdev->budget_map, depth);
++
+ 	if (need_free) {
+ 		if (ret)
+ 			sdev->budget_map = sb_backup;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 66056806159a6..8b5d2a4076c21 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3320,6 +3320,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 			sd_read_block_limits(sdkp);
+ 			sd_read_block_characteristics(sdkp);
+ 			sd_zbc_read_zones(sdkp, buffer);
++			sd_read_cpr(sdkp);
+ 		}
+ 
+ 		sd_print_capacity(sdkp, old_capacity);
+@@ -3329,7 +3330,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		sd_read_app_tag_own(sdkp, buffer);
+ 		sd_read_write_same(sdkp, buffer);
+ 		sd_read_security(sdkp, buffer);
+-		sd_read_cpr(sdkp);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index f0897d587454a..f3749e5086737 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -2513,17 +2513,15 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+ 	struct pqi_scsi_dev *device;
+ 	struct pqi_scsi_dev *next;
+ 
+-	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+-
+ 	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+ 		scsi_device_list_entry) {
+ 		if (pqi_is_device_added(device))
+ 			pqi_remove_device(ctrl_info, device);
++		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ 		list_del(&device->scsi_device_list_entry);
+ 		pqi_free_device(device);
++		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ 	}
+-
+-	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ }
+ 
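[Editor's note: pqi_remove_device() can sleep, so it must not run under the scsi_device_list_lock spinlock; the fix above narrows the lock to the list-manipulation step. One common shape of that change, as a minimal sketch with hypothetical names, assuming no concurrent additions race with this teardown path.]

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_dev { struct list_head entry; };

void my_teardown(struct my_dev *dev);	/* may sleep */

static void remove_all(struct list_head *devs, spinlock_t *lock)
{
	struct my_dev *dev, *next;
	unsigned long flags;

	list_for_each_entry_safe(dev, next, devs, entry) {
		my_teardown(dev);		/* no lock held here */
		spin_lock_irqsave(lock, flags);
		list_del(&dev->entry);		/* short critical section */
		spin_unlock_irqrestore(lock, flags);
		kfree(dev);
	}
}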
+ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+@@ -7857,6 +7855,21 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
+ 	return pqi_revert_to_sis_mode(ctrl_info);
+ }
+ 
++static void pqi_perform_lockup_action(void)
++{
++	switch (pqi_lockup_action) {
++	case PANIC:
++		panic("FATAL: Smart Family Controller lockup detected");
++		break;
++	case REBOOT:
++		emergency_restart();
++		break;
++	case NONE:
++	default:
++		break;
++	}
++}
++
+ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+ {
+ 	int rc;
+@@ -7881,8 +7894,15 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+ 	 * commands.
+ 	 */
+ 	rc = sis_wait_for_ctrl_ready(ctrl_info);
+-	if (rc)
++	if (rc) {
++		if (reset_devices) {
++			dev_err(&ctrl_info->pci_dev->dev,
++				"kdump init failed with error %d\n", rc);
++			pqi_lockup_action = REBOOT;
++			pqi_perform_lockup_action();
++		}
+ 		return rc;
++	}
+ 
+ 	/*
+ 	 * Get the controller properties.  This allows us to determine
+@@ -8607,21 +8627,6 @@ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int de
+ 	return pqi_ctrl_init_resume(ctrl_info);
+ }
+ 
+-static void pqi_perform_lockup_action(void)
+-{
+-	switch (pqi_lockup_action) {
+-	case PANIC:
+-		panic("FATAL: Smart Family Controller lockup detected");
+-		break;
+-	case REBOOT:
+-		emergency_restart();
+-		break;
+-	case NONE:
+-	default:
+-		break;
+-	}
+-}
+-
+ static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
+ 	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
+ 	.status = SAM_STAT_CHECK_CONDITION,
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index f925b1f1f9ada..a0beb11abdc9d 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -578,7 +578,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ 
+ 	scsi_autopm_get_device(sdev);
+ 
+-	if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
++	if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+ 		ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ 		if (ret != -ENOSYS)
+ 			goto put;
+diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
+index f76692053ca17..e892b9feffb11 100644
+--- a/drivers/scsi/ufs/ufshcd-pci.c
++++ b/drivers/scsi/ufs/ufshcd-pci.c
+@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
+ 	return ufs_intel_common_init(hba);
+ }
+ 
++static int ufs_intel_mtl_init(struct ufs_hba *hba)
++{
++	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
++	return ufs_intel_common_init(hba);
++}
++
+ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
+ 	.name                   = "intel-pci",
+ 	.init			= ufs_intel_common_init,
+@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+ 	.device_reset		= ufs_intel_device_reset,
+ };
+ 
++static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
++	.name                   = "intel-pci",
++	.init			= ufs_intel_mtl_init,
++	.exit			= ufs_intel_common_exit,
++	.hce_enable_notify	= ufs_intel_hce_enable_notify,
++	.link_startup_notify	= ufs_intel_link_startup_notify,
++	.resume			= ufs_intel_resume,
++	.device_reset		= ufs_intel_device_reset,
++};
++
+ #ifdef CONFIG_PM_SLEEP
+ static int ufshcd_pci_restore(struct device *dev)
+ {
+@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+ 	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ 	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
++	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ 	{ }	/* terminate list */
+ };
+ 
+diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
+index 2d36a0715fca6..b34feba1f53de 100644
+--- a/drivers/scsi/ufs/ufshpb.c
++++ b/drivers/scsi/ufs/ufshpb.c
+@@ -869,12 +869,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+ 	struct ufshpb_region *rgn, *victim_rgn = NULL;
+ 
+ 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
+-		if (!rgn) {
+-			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+-				"%s: no region allocated\n",
+-				__func__);
+-			return NULL;
+-		}
+ 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
+ 			continue;
+ 
+@@ -890,6 +884,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+ 		break;
+ 	}
+ 
++	if (!victim_rgn)
++		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
++			"%s: no region allocated\n",
++			__func__);
++
+ 	return victim_rgn;
+ }
+ 
+diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
+index 27b9e2baab1a6..7acf9193a9e80 100644
+--- a/drivers/scsi/zorro7xx.c
++++ b/drivers/scsi/zorro7xx.c
+@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
+ 	scsi_remove_host(host);
+ 
+ 	NCR_700_release(host);
++	if (host->base > 0x01000000)
++		iounmap(hostdata->base);
+ 	kfree(hostdata);
+ 	free_irq(host->irq, host);
+ 	zorro_release_device(z);
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 86c76211b3d3d..cad2d55dcd3d2 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ 	addr = op->addr.val;
+ 	len = op->data.nbytes;
+ 
+-	if (bcm_qspi_bspi_ver_three(qspi) == true) {
++	if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
+ 		/*
+ 		 * The address coming into this function is a raw flash offset.
+ 		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
+@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ 	    len < 4)
+ 		mspi_read = true;
+ 
+-	if (mspi_read)
++	if (!has_bspi(qspi) || mspi_read)
+ 		return bcm_qspi_mspi_exec_mem_op(spi, op);
+ 
+ 	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
+diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
+index fe82f3575df4f..24ec1c83f379c 100644
+--- a/drivers/spi/spi-rpc-if.c
++++ b/drivers/spi/spi-rpc-if.c
+@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
+ 
+ 	error = rpcif_hw_init(rpc, false);
+ 	if (error)
+-		return error;
++		goto out_disable_rpm;
+ 
+ 	error = spi_register_controller(ctlr);
+ 	if (error) {
+ 		dev_err(&pdev->dev, "spi_register_controller failed\n");
+-		rpcif_disable_rpm(rpc);
++		goto out_disable_rpm;
+ 	}
+ 
++	return 0;
++
++out_disable_rpm:
++	rpcif_disable_rpm(rpc);
+ 	return error;
+ }
+ 
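[Editor's note: the rpcif probe fix above converges both failure paths on a single out_disable_rpm label, so runtime PM is disabled exactly once whether hardware init or controller registration fails. The standard kernel unwinding shape, sketched with hypothetical step_a/step_b/undo_a helpers.]

int step_a(void);
int step_b(void);
void undo_a(void);

static int my_probe(void)
{
	int error;

	error = step_a();
	if (error)
		return error;		/* nothing to undo yet */

	error = step_b();
	if (error)
		goto out_undo_a;	/* undo step_a on any later failure */

	return 0;

out_undo_a:
	undo_a();
	return error;
}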
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index d96082dc3340d..bbf977a0d2c57 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1149,11 +1149,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ 
+ 	if (ctlr->dma_tx)
+ 		tx_dev = ctlr->dma_tx->device->dev;
++	else if (ctlr->dma_map_dev)
++		tx_dev = ctlr->dma_map_dev;
+ 	else
+ 		tx_dev = ctlr->dev.parent;
+ 
+ 	if (ctlr->dma_rx)
+ 		rx_dev = ctlr->dma_rx->device->dev;
++	else if (ctlr->dma_map_dev)
++		rx_dev = ctlr->dma_map_dev;
+ 	else
+ 		rx_dev = ctlr->dev.parent;
+ 
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 3a2e4582db8e1..a3e3c9f9aa181 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -1209,6 +1209,9 @@ int vchiq_dump_platform_instances(void *dump_context)
+ 	int len;
+ 	int i;
+ 
++	if (!state)
++		return -ENOTCONN;
++
+ 	/*
+ 	 * There is no list of instances, so instead scan all services,
+ 	 * marking those that have been dumped.
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+index 7fe20d4b7ba28..b7295236671c1 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+@@ -2306,6 +2306,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
+ 	struct vchiq_service *service = find_service_by_handle(handle);
+ 	int pos;
+ 
++	if (!service)
++		return;
++
+ 	while (service->msg_queue_write == service->msg_queue_read +
+ 		VCHIQ_MAX_SLOTS) {
+ 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
+@@ -2326,6 +2329,9 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle)
+ 	struct vchiq_header *header;
+ 	int pos;
+ 
++	if (!service)
++		return NULL;
++
+ 	if (service->msg_queue_write == service->msg_queue_read)
+ 		return NULL;
+ 
+diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
+index a670176ba06f0..0612f8a7c0857 100644
+--- a/drivers/staging/wfx/bus_sdio.c
++++ b/drivers/staging/wfx/bus_sdio.c
+@@ -207,9 +207,6 @@ static int wfx_sdio_probe(struct sdio_func *func,
+ 
+ 	bus->func = func;
+ 	sdio_set_drvdata(func, bus);
+-	func->card->quirks |= MMC_QUIRK_LENIENT_FN0 |
+-			      MMC_QUIRK_BLKSZ_FOR_BYTE_MODE |
+-			      MMC_QUIRK_BROKEN_BYTE_MODE_512;
+ 
+ 	sdio_claim_host(func);
+ 	ret = sdio_enable_func(func);
+diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
+index 858d778cc5897..e3999e95ce851 100644
+--- a/drivers/staging/wfx/main.c
++++ b/drivers/staging/wfx/main.c
+@@ -322,7 +322,8 @@ struct wfx_dev *wfx_init_common(struct device *dev,
+ 	wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup",
+ 							  GPIOD_OUT_LOW);
+ 	if (IS_ERR(wdev->pdata.gpio_wakeup))
+-		return NULL;
++		goto err;
++
+ 	if (wdev->pdata.gpio_wakeup)
+ 		gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
+ 
+@@ -341,6 +342,10 @@ struct wfx_dev *wfx_init_common(struct device *dev,
+ 		return NULL;
+ 
+ 	return wdev;
++
++err:
++	ieee80211_free_hw(hw);
++	return NULL;
+ }
+ 
+ int wfx_probe(struct wfx_dev *wdev)
+diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
+index d002a4e48ed93..0d94a7cb275e5 100644
+--- a/drivers/tty/serial/samsung_tty.c
++++ b/drivers/tty/serial/samsung_tty.c
+@@ -921,11 +921,8 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport)
+ 		return;
+ 	}
+ 
+-	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
+-		spin_unlock(&port->lock);
++	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ 		uart_write_wakeup(port);
+-		spin_lock(&port->lock);
+-	}
+ 
+ 	if (uart_circ_empty(xmit))
+ 		s3c24xx_serial_stop_tx(port);
+diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
+index a8776df2d4e0c..f0ca865cce2a0 100644
+--- a/drivers/usb/cdns3/cdnsp-debug.h
++++ b/drivers/usb/cdns3/cdnsp-debug.h
+@@ -182,208 +182,211 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+ 	int ep_id = TRB_TO_EP_INDEX(field3) - 1;
+ 	int type = TRB_FIELD_TO_TYPE(field3);
+ 	unsigned int ep_num;
+-	int ret = 0;
++	int ret;
+ 	u32 temp;
+ 
+ 	ep_num = DIV_ROUND_UP(ep_id, 2);
+ 
+ 	switch (type) {
+ 	case TRB_LINK:
+-		ret += snprintf(str, size,
+-				"LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+-				field1, field0, GET_INTR_TARGET(field2),
+-				cdnsp_trb_type_string(type),
+-				field3 & TRB_IOC ? 'I' : 'i',
+-				field3 & TRB_CHAIN ? 'C' : 'c',
+-				field3 & TRB_TC ? 'T' : 't',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
++			       field1, field0, GET_INTR_TARGET(field2),
++			       cdnsp_trb_type_string(type),
++			       field3 & TRB_IOC ? 'I' : 'i',
++			       field3 & TRB_CHAIN ? 'C' : 'c',
++			       field3 & TRB_TC ? 'T' : 't',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_TRANSFER:
+ 	case TRB_COMPLETION:
+ 	case TRB_PORT_STATUS:
+ 	case TRB_HC_EVENT:
+-		ret += snprintf(str, size,
+-				"ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+-				" len %ld slot %ld flags %c:%c",
+-				ep_num, ep_id % 2 ? "out" : "in",
+-				TRB_TO_EP_INDEX(field3),
+-				cdnsp_trb_type_string(type), field1, field0,
+-				cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+-				EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+-				field3 & EVENT_DATA ? 'E' : 'e',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
++			       " len %ld slot %ld flags %c:%c",
++			       ep_num, ep_id % 2 ? "out" : "in",
++			       TRB_TO_EP_INDEX(field3),
++			       cdnsp_trb_type_string(type), field1, field0,
++			       cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
++			       EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
++			       field3 & EVENT_DATA ? 'E' : 'e',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_MFINDEX_WRAP:
+-		ret += snprintf(str, size, "%s: flags %c",
+-				cdnsp_trb_type_string(type),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size, "%s: flags %c",
++			       cdnsp_trb_type_string(type),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_SETUP:
+-		ret += snprintf(str, size,
+-				"type '%s' bRequestType %02x bRequest %02x "
+-				"wValue %02x%02x wIndex %02x%02x wLength %d "
+-				"length %ld TD size %ld intr %ld Setup ID %ld "
+-				"flags %c:%c:%c",
+-				cdnsp_trb_type_string(type),
+-				field0 & 0xff,
+-				(field0 & 0xff00) >> 8,
+-				(field0 & 0xff000000) >> 24,
+-				(field0 & 0xff0000) >> 16,
+-				(field1 & 0xff00) >> 8,
+-				field1 & 0xff,
+-				(field1 & 0xff000000) >> 16 |
+-				(field1 & 0xff0000) >> 16,
+-				TRB_LEN(field2), GET_TD_SIZE(field2),
+-				GET_INTR_TARGET(field2),
+-				TRB_SETUPID_TO_TYPE(field3),
+-				field3 & TRB_IDT ? 'D' : 'd',
+-				field3 & TRB_IOC ? 'I' : 'i',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "type '%s' bRequestType %02x bRequest %02x "
++			       "wValue %02x%02x wIndex %02x%02x wLength %d "
++			       "length %ld TD size %ld intr %ld Setup ID %ld "
++			       "flags %c:%c:%c",
++			       cdnsp_trb_type_string(type),
++			       field0 & 0xff,
++			       (field0 & 0xff00) >> 8,
++			       (field0 & 0xff000000) >> 24,
++			       (field0 & 0xff0000) >> 16,
++			       (field1 & 0xff00) >> 8,
++			       field1 & 0xff,
++			       (field1 & 0xff000000) >> 16 |
++			       (field1 & 0xff0000) >> 16,
++			       TRB_LEN(field2), GET_TD_SIZE(field2),
++			       GET_INTR_TARGET(field2),
++			       TRB_SETUPID_TO_TYPE(field3),
++			       field3 & TRB_IDT ? 'D' : 'd',
++			       field3 & TRB_IOC ? 'I' : 'i',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_DATA:
+-		ret += snprintf(str, size,
+-				"type '%s' Buffer %08x%08x length %ld TD size %ld "
+-				"intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+-				cdnsp_trb_type_string(type),
+-				field1, field0, TRB_LEN(field2),
+-				GET_TD_SIZE(field2),
+-				GET_INTR_TARGET(field2),
+-				field3 & TRB_IDT ? 'D' : 'i',
+-				field3 & TRB_IOC ? 'I' : 'i',
+-				field3 & TRB_CHAIN ? 'C' : 'c',
+-				field3 & TRB_NO_SNOOP ? 'S' : 's',
+-				field3 & TRB_ISP ? 'I' : 'i',
+-				field3 & TRB_ENT ? 'E' : 'e',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "type '%s' Buffer %08x%08x length %ld TD size %ld "
++			       "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
++			       cdnsp_trb_type_string(type),
++			       field1, field0, TRB_LEN(field2),
++			       GET_TD_SIZE(field2),
++			       GET_INTR_TARGET(field2),
++			       field3 & TRB_IDT ? 'D' : 'i',
++			       field3 & TRB_IOC ? 'I' : 'i',
++			       field3 & TRB_CHAIN ? 'C' : 'c',
++			       field3 & TRB_NO_SNOOP ? 'S' : 's',
++			       field3 & TRB_ISP ? 'I' : 'i',
++			       field3 & TRB_ENT ? 'E' : 'e',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_STATUS:
+-		ret += snprintf(str, size,
+-				"Buffer %08x%08x length %ld TD size %ld intr"
+-				"%ld type '%s' flags %c:%c:%c:%c",
+-				field1, field0, TRB_LEN(field2),
+-				GET_TD_SIZE(field2),
+-				GET_INTR_TARGET(field2),
+-				cdnsp_trb_type_string(type),
+-				field3 & TRB_IOC ? 'I' : 'i',
+-				field3 & TRB_CHAIN ? 'C' : 'c',
+-				field3 & TRB_ENT ? 'E' : 'e',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "Buffer %08x%08x length %ld TD size %ld intr"
++			       "%ld type '%s' flags %c:%c:%c:%c",
++			       field1, field0, TRB_LEN(field2),
++			       GET_TD_SIZE(field2),
++			       GET_INTR_TARGET(field2),
++			       cdnsp_trb_type_string(type),
++			       field3 & TRB_IOC ? 'I' : 'i',
++			       field3 & TRB_CHAIN ? 'C' : 'c',
++			       field3 & TRB_ENT ? 'E' : 'e',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_NORMAL:
+ 	case TRB_ISOC:
+ 	case TRB_EVENT_DATA:
+ 	case TRB_TR_NOOP:
+-		ret += snprintf(str, size,
+-				"type '%s' Buffer %08x%08x length %ld "
+-				"TD size %ld intr %ld "
+-				"flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+-				cdnsp_trb_type_string(type),
+-				field1, field0, TRB_LEN(field2),
+-				GET_TD_SIZE(field2),
+-				GET_INTR_TARGET(field2),
+-				field3 & TRB_BEI ? 'B' : 'b',
+-				field3 & TRB_IDT ? 'T' : 't',
+-				field3 & TRB_IOC ? 'I' : 'i',
+-				field3 & TRB_CHAIN ? 'C' : 'c',
+-				field3 & TRB_NO_SNOOP ? 'S' : 's',
+-				field3 & TRB_ISP ? 'I' : 'i',
+-				field3 & TRB_ENT ? 'E' : 'e',
+-				field3 & TRB_CYCLE ? 'C' : 'c',
+-				!(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
++		ret = snprintf(str, size,
++			       "type '%s' Buffer %08x%08x length %ld "
++			       "TD size %ld intr %ld "
++			       "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
++			       cdnsp_trb_type_string(type),
++			       field1, field0, TRB_LEN(field2),
++			       GET_TD_SIZE(field2),
++			       GET_INTR_TARGET(field2),
++			       field3 & TRB_BEI ? 'B' : 'b',
++			       field3 & TRB_IDT ? 'T' : 't',
++			       field3 & TRB_IOC ? 'I' : 'i',
++			       field3 & TRB_CHAIN ? 'C' : 'c',
++			       field3 & TRB_NO_SNOOP ? 'S' : 's',
++			       field3 & TRB_ISP ? 'I' : 'i',
++			       field3 & TRB_ENT ? 'E' : 'e',
++			       field3 & TRB_CYCLE ? 'C' : 'c',
++			       !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+ 		break;
+ 	case TRB_CMD_NOOP:
+ 	case TRB_ENABLE_SLOT:
+-		ret += snprintf(str, size, "%s: flags %c",
+-				cdnsp_trb_type_string(type),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size, "%s: flags %c",
++			       cdnsp_trb_type_string(type),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_DISABLE_SLOT:
+-		ret += snprintf(str, size, "%s: slot %ld flags %c",
+-				cdnsp_trb_type_string(type),
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size, "%s: slot %ld flags %c",
++			       cdnsp_trb_type_string(type),
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_ADDR_DEV:
+-		ret += snprintf(str, size,
+-				"%s: ctx %08x%08x slot %ld flags %c:%c",
+-				cdnsp_trb_type_string(type), field1, field0,
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_BSR ? 'B' : 'b',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ctx %08x%08x slot %ld flags %c:%c",
++			       cdnsp_trb_type_string(type), field1, field0,
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_BSR ? 'B' : 'b',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_CONFIG_EP:
+-		ret += snprintf(str, size,
+-				"%s: ctx %08x%08x slot %ld flags %c:%c",
+-				cdnsp_trb_type_string(type), field1, field0,
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_DC ? 'D' : 'd',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ctx %08x%08x slot %ld flags %c:%c",
++			       cdnsp_trb_type_string(type), field1, field0,
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_DC ? 'D' : 'd',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_EVAL_CONTEXT:
+-		ret += snprintf(str, size,
+-				"%s: ctx %08x%08x slot %ld flags %c",
+-				cdnsp_trb_type_string(type), field1, field0,
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ctx %08x%08x slot %ld flags %c",
++			       cdnsp_trb_type_string(type), field1, field0,
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_RESET_EP:
+ 	case TRB_HALT_ENDPOINT:
+ 	case TRB_FLUSH_ENDPOINT:
+-		ret += snprintf(str, size,
+-				"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+-				cdnsp_trb_type_string(type),
+-				ep_num, ep_id % 2 ? "out" : "in",
+-				TRB_TO_EP_INDEX(field3), field1, field0,
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
++			       cdnsp_trb_type_string(type),
++			       ep_num, ep_id % 2 ? "out" : "in",
++			       TRB_TO_EP_INDEX(field3), field1, field0,
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_STOP_RING:
+-		ret += snprintf(str, size,
+-				"%s: ep%d%s(%d) slot %ld sp %d flags %c",
+-				cdnsp_trb_type_string(type),
+-				ep_num, ep_id % 2 ? "out" : "in",
+-				TRB_TO_EP_INDEX(field3),
+-				TRB_TO_SLOT_ID(field3),
+-				TRB_TO_SUSPEND_PORT(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ep%d%s(%d) slot %ld sp %d flags %c",
++			       cdnsp_trb_type_string(type),
++			       ep_num, ep_id % 2 ? "out" : "in",
++			       TRB_TO_EP_INDEX(field3),
++			       TRB_TO_SLOT_ID(field3),
++			       TRB_TO_SUSPEND_PORT(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_SET_DEQ:
+-		ret += snprintf(str, size,
+-				"%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld  flags %c",
+-				cdnsp_trb_type_string(type),
+-				ep_num, ep_id % 2 ? "out" : "in",
+-				TRB_TO_EP_INDEX(field3), field1, field0,
+-				TRB_TO_STREAM_ID(field2),
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size,
++			       "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld  flags %c",
++			       cdnsp_trb_type_string(type),
++			       ep_num, ep_id % 2 ? "out" : "in",
++			       TRB_TO_EP_INDEX(field3), field1, field0,
++			       TRB_TO_STREAM_ID(field2),
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_RESET_DEV:
+-		ret += snprintf(str, size, "%s: slot %ld flags %c",
+-				cdnsp_trb_type_string(type),
+-				TRB_TO_SLOT_ID(field3),
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		ret = snprintf(str, size, "%s: slot %ld flags %c",
++			       cdnsp_trb_type_string(type),
++			       TRB_TO_SLOT_ID(field3),
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	case TRB_ENDPOINT_NRDY:
+-		temp  = TRB_TO_HOST_STREAM(field2);
+-
+-		ret += snprintf(str, size,
+-				"%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+-				cdnsp_trb_type_string(type),
+-				ep_num, ep_id % 2 ? "out" : "in",
+-				TRB_TO_EP_INDEX(field3), temp,
+-				temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+-				temp == STREAM_REJECTED ? "(REJECTED)" : "",
+-				TRB_TO_DEV_STREAM(field0),
+-				field3 & TRB_STAT ? 'S' : 's',
+-				field3 & TRB_CYCLE ? 'C' : 'c');
++		temp = TRB_TO_HOST_STREAM(field2);
++
++		ret = snprintf(str, size,
++			       "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
++			       cdnsp_trb_type_string(type),
++			       ep_num, ep_id % 2 ? "out" : "in",
++			       TRB_TO_EP_INDEX(field3), temp,
++			       temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
++			       temp == STREAM_REJECTED ? "(REJECTED)" : "",
++			       TRB_TO_DEV_STREAM(field0),
++			       field3 & TRB_STAT ? 'S' : 's',
++			       field3 & TRB_CYCLE ? 'C' : 'c');
+ 		break;
+ 	default:
+-		ret += snprintf(str, size,
+-				"type '%s' -> raw %08x %08x %08x %08x",
+-				cdnsp_trb_type_string(type),
+-				field0, field1, field2, field3);
++		ret = snprintf(str, size,
++			       "type '%s' -> raw %08x %08x %08x %08x",
++			       cdnsp_trb_type_string(type),
++			       field0, field1, field2, field3);
+ 	}
+ 
++	if (ret >= size)
++		pr_info("CDNSP: buffer overflowed.\n");
++
+ 	return str;
+ }
+ 
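[Editor's note: switching from "ret +=" to "ret =" in the cdnsp hunk above matters because each case writes the string exactly once; accumulating return values only makes sense for successive appends at str + ret. The retained value is then compared against the buffer size, relying on snprintf() returning the length it would have written rather than the length it actually wrote. A minimal sketch of the truncation check.]

#include <linux/kernel.h>
#include <linux/printk.h>

static int format_into(char *buf, size_t size, int value)
{
	/* snprintf() returns the would-be length, which can exceed
	 * 'size' when the output was truncated */
	int ret = snprintf(buf, size, "value=%d", value);

	if (ret >= (int)size)
		pr_info("output truncated\n");
	return ret;
}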
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index e196673f5c647..efaf0db595f46 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -242,7 +242,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ 		break;
+ 
+ 	case OMAP_DWC3_ID_FLOAT:
+-		if (omap->vbus_reg)
++		if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg))
+ 			regulator_disable(omap->vbus_reg);
+ 		val = dwc3_omap_read_utmi_ctrl(omap);
+ 		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 06d0e88ec8af9..4d9608cc55f73 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -185,7 +185,8 @@ static const struct software_node dwc3_pci_amd_mr_swnode = {
+ 	.properties = dwc3_pci_mr_properties,
+ };
+ 
+-static int dwc3_pci_quirks(struct dwc3_pci *dwc)
++static int dwc3_pci_quirks(struct dwc3_pci *dwc,
++			   const struct software_node *swnode)
+ {
+ 	struct pci_dev			*pdev = dwc->pci;
+ 
+@@ -242,7 +243,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
+ 		}
+ 	}
+ 
+-	return 0;
++	return device_add_software_node(&dwc->dwc3->dev, swnode);
+ }
+ 
+ #ifdef CONFIG_PM
+@@ -307,11 +308,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	dwc->dwc3->dev.parent = dev;
+ 	ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
+ 
+-	ret = device_add_software_node(&dwc->dwc3->dev, (void *)id->driver_data);
+-	if (ret < 0)
+-		goto err;
+-
+-	ret = dwc3_pci_quirks(dwc);
++	ret = dwc3_pci_quirks(dwc, (void *)id->driver_data);
+ 	if (ret)
+ 		goto err;
+ 
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 43f1b0d461c1e..be76f891b9c52 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -32,9 +32,6 @@
+ #include <linux/workqueue.h>
+ 
+ /* XUSB_DEV registers */
+-#define SPARAM 0x000
+-#define  SPARAM_ERSTMAX_MASK GENMASK(20, 16)
+-#define  SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
+ #define DB 0x004
+ #define  DB_TARGET_MASK GENMASK(15, 8)
+ #define  DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+@@ -275,8 +272,10 @@ BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+ BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+ BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+ BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+-BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
++BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1)
+ BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
++BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1)
++BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f)
+ BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+ BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+ BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
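[Editor's note: the BUILD_EP_CONTEXT_RW() lines above redefine the layout of rsvd[0]: what used to be treated as an 8-bit seq_num at bit 24 is actually a reserved bit, partial_td, splitxstate, and a 5-bit seq_num at bit 27, and the reset paths later in this hunk clear each field individually. A generic sketch of such shift/mask accessor generation, written here with the kernel's GENMASK()/FIELD_GET() helpers rather than this driver's macro; BUILD_FIELD_RW, my_ctx, and the field layout are assumptions for illustration.]

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

struct my_ctx { u32 info; };

#define BUILD_FIELD_RW(name, lo, hi)				\
static inline u32 ctx_read_##name(struct my_ctx *c)		\
{								\
	return FIELD_GET(GENMASK(hi, lo), c->info);		\
}								\
static inline void ctx_write_##name(struct my_ctx *c, u32 v)	\
{								\
	c->info &= ~GENMASK(hi, lo);				\
	c->info |= FIELD_PREP(GENMASK(hi, lo), v);		\
}

BUILD_FIELD_RW(partial_td, 25, 25)	/* single bit */
BUILD_FIELD_RW(seq_num, 27, 31)		/* 5-bit field at bit 27 */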
+@@ -1557,6 +1556,9 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+ 		ep_reload(xudc, ep->index);
+ 
+ 		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
++		ep_ctx_write_rsvd(ep->context, 0);
++		ep_ctx_write_partial_td(ep->context, 0);
++		ep_ctx_write_splitxstate(ep->context, 0);
+ 		ep_ctx_write_seq_num(ep->context, 0);
+ 
+ 		ep_reload(xudc, ep->index);
+@@ -2812,7 +2814,10 @@ static void tegra_xudc_reset(struct tegra_xudc *xudc)
+ 	xudc->setup_seq_num = 0;
+ 	xudc->queued_setup_packet = false;
+ 
+-	ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
++	ep_ctx_write_rsvd(ep0->context, 0);
++	ep_ctx_write_partial_td(ep0->context, 0);
++	ep_ctx_write_splitxstate(ep0->context, 0);
++	ep_ctx_write_seq_num(ep0->context, 0);
+ 
+ 	deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+ 
+@@ -3295,11 +3300,6 @@ static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+ 	unsigned int i;
+ 	u32 val;
+ 
+-	val = xudc_readl(xudc, SPARAM);
+-	val &= ~(SPARAM_ERSTMAX_MASK);
+-	val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
+-	xudc_writel(xudc, val, SPARAM);
+-
+ 	for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ 		memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ 		       sizeof(*xudc->event_ring[i]));
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index e87cf3a00fa4b..638f03b897394 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -21,6 +21,9 @@ static const char hcd_name[] = "ehci-pci";
+ /* defined here to avoid adding to pci_ids.h for single instance use */
+ #define PCI_DEVICE_ID_INTEL_CE4100_USB	0x2e70
+ 
++#define PCI_VENDOR_ID_ASPEED		0x1a03
++#define PCI_DEVICE_ID_ASPEED_EHCI	0x2603
++
+ /*-------------------------------------------------------------------------*/
+ #define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC		0x0939
+ static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+@@ -222,6 +225,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ 			ehci->has_synopsys_hc_bug = 1;
+ 		}
+ 		break;
++	case PCI_VENDOR_ID_ASPEED:
++		if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) {
++			ehci_info(ehci, "applying Aspeed HC workaround\n");
++			ehci->is_aspeed = 1;
++		}
++		break;
+ 	}
+ 
+ 	/* optional debug port, normally in the first BAR */
+diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
+index 19b8c7ed74cb1..4ed3ee328a4a6 100644
+--- a/drivers/usb/host/xen-hcd.c
++++ b/drivers/usb/host/xen-hcd.c
+@@ -51,6 +51,7 @@ struct vdevice_status {
+ struct usb_shadow {
+ 	struct xenusb_urb_request req;
+ 	struct urb *urb;
++	bool in_flight;
+ };
+ 
+ struct xenhcd_info {
+@@ -722,6 +723,12 @@ static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
+ 	int nr_segs = 0;
+ 	int i;
+ 
++	if (!shadow->in_flight) {
++		xenhcd_set_error(info, "Illegal request id");
++		return;
++	}
++	shadow->in_flight = false;
++
+ 	nr_segs = shadow->req.nr_buffer_segs;
+ 
+ 	if (xenusb_pipeisoc(shadow->req.pipe))
+@@ -805,6 +812,7 @@ static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
+ 
+ 	info->urb_ring.req_prod_pvt++;
+ 	info->shadow[id].urb = urb;
++	info->shadow[id].in_flight = true;
+ 
+ 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
+ 	if (notify)
+@@ -933,10 +941,27 @@ static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
+ 	return ret;
+ }
+ 
+-static int xenhcd_urb_request_done(struct xenhcd_info *info)
++static void xenhcd_res_to_urb(struct xenhcd_info *info,
++			      struct xenusb_urb_response *res, struct urb *urb)
++{
++	if (unlikely(!urb))
++		return;
++
++	if (res->actual_length > urb->transfer_buffer_length)
++		urb->actual_length = urb->transfer_buffer_length;
++	else if (res->actual_length < 0)
++		urb->actual_length = 0;
++	else
++		urb->actual_length = res->actual_length;
++	urb->error_count = res->error_count;
++	urb->start_frame = res->start_frame;
++	xenhcd_giveback_urb(info, urb, res->status);
++}
++
++static int xenhcd_urb_request_done(struct xenhcd_info *info,
++				   unsigned int *eoiflag)
+ {
+ 	struct xenusb_urb_response res;
+-	struct urb *urb;
+ 	RING_IDX i, rp;
+ 	__u16 id;
+ 	int more_to_do = 0;
+@@ -963,16 +988,12 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
+ 			xenhcd_gnttab_done(info, id);
+ 			if (info->error)
+ 				goto err;
+-			urb = info->shadow[id].urb;
+-			if (likely(urb)) {
+-				urb->actual_length = res.actual_length;
+-				urb->error_count = res.error_count;
+-				urb->start_frame = res.start_frame;
+-				xenhcd_giveback_urb(info, urb, res.status);
+-			}
++			xenhcd_res_to_urb(info, &res, info->shadow[id].urb);
+ 		}
+ 
+ 		xenhcd_add_id_to_freelist(info, id);
++
++		*eoiflag = 0;
+ 	}
+ 	info->urb_ring.rsp_cons = i;
+ 
+@@ -990,7 +1011,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
+ 	return 0;
+ }
+ 
+-static int xenhcd_conn_notify(struct xenhcd_info *info)
++static int xenhcd_conn_notify(struct xenhcd_info *info, unsigned int *eoiflag)
+ {
+ 	struct xenusb_conn_response res;
+ 	struct xenusb_conn_request *req;
+@@ -1035,6 +1056,8 @@ static int xenhcd_conn_notify(struct xenhcd_info *info)
+ 				       info->conn_ring.req_prod_pvt);
+ 		req->id = id;
+ 		info->conn_ring.req_prod_pvt++;
++
++		*eoiflag = 0;
+ 	}
+ 
+ 	if (rc != info->conn_ring.req_prod_pvt)
+@@ -1057,14 +1080,19 @@ static int xenhcd_conn_notify(struct xenhcd_info *info)
+ static irqreturn_t xenhcd_int(int irq, void *dev_id)
+ {
+ 	struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
++	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ 
+-	if (unlikely(info->error))
++	if (unlikely(info->error)) {
++		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ 		return IRQ_HANDLED;
++	}
+ 
+-	while (xenhcd_urb_request_done(info) | xenhcd_conn_notify(info))
++	while (xenhcd_urb_request_done(info, &eoiflag) |
++	       xenhcd_conn_notify(info, &eoiflag))
+ 		/* Yield point for this unbounded loop. */
+ 		cond_resched();
+ 
++	xen_irq_lateeoi(irq, eoiflag);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -1141,9 +1169,9 @@ static int xenhcd_setup_rings(struct xenbus_device *dev,
+ 		goto fail;
+ 	}
+ 
+-	err = bind_evtchn_to_irq(info->evtchn);
++	err = bind_evtchn_to_irq_lateeoi(info->evtchn);
+ 	if (err <= 0) {
+-		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
++		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq_lateeoi");
+ 		goto fail;
+ 	}
+ 
+@@ -1496,6 +1524,7 @@ static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
+ 	for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
+ 		info->shadow[i].req.id = i + 1;
+ 		info->shadow[i].urb = NULL;
++		info->shadow[i].in_flight = false;
+ 	}
+ 	info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
+ 
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 9fe1071a96444..1b5de3af1a627 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
+ 	u32 cur_num_vqs;
+ 	struct notifier_block nb;
+ 	struct vdpa_callback config_cb;
++	struct mlx5_vdpa_wq_ent cvq_ent;
+ };
+ 
+ static void free_resources(struct mlx5_vdpa_net *ndev);
+@@ -1616,10 +1617,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
+ 	ndev = to_mlx5_vdpa_ndev(mvdev);
+ 	cvq = &mvdev->cvq;
+ 	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+-		goto out;
++		return;
+ 
+ 	if (!cvq->ready)
+-		goto out;
++		return;
+ 
+ 	while (true) {
+ 		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
+@@ -1653,9 +1654,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
+ 
+ 		if (vringh_need_notify_iotlb(&cvq->vring))
+ 			vringh_notify(&cvq->vring);
++
++		queue_work(mvdev->wq, &wqent->work);
++		break;
+ 	}
+-out:
+-	kfree(wqent);
+ }
+ 
+ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+@@ -1663,7 +1665,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+ 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 	struct mlx5_vdpa_virtqueue *mvq;
+-	struct mlx5_vdpa_wq_ent *wqent;
+ 
+ 	if (!is_index_valid(mvdev, idx))
+ 		return;
+@@ -1672,13 +1673,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+ 		if (!mvdev->wq || !mvdev->cvq.ready)
+ 			return;
+ 
+-		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+-		if (!wqent)
+-			return;
+-
+-		wqent->mvdev = mvdev;
+-		INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
+-		queue_work(mvdev->wq, &wqent->work);
++		queue_work(mvdev->wq, &ndev->cvq_ent.work);
+ 		return;
+ 	}
+ 
+@@ -2668,6 +2663,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ 	if (err)
+ 		goto err_mr;
+ 
++	ndev->cvq_ent.mvdev = mvdev;
++	INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
+ 	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
+ 	if (!mvdev->wq) {
+ 		err = -ENOMEM;
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index 57d3b2cbbd8e5..82ac1569deb05 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -288,6 +288,7 @@ out:
+ 	return done;
+ }
+ 
++#ifdef CONFIG_VFIO_PCI_VGA
+ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ 			       size_t count, loff_t *ppos, bool iswrite)
+ {
+@@ -355,6 +356,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ 
+ 	return done;
+ }
++#endif
+ 
+ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
+ 					bool test_mem)
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 28ef323882fb2..792ab5f236471 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -473,6 +473,7 @@ static void vhost_tx_batch(struct vhost_net *net,
+ 		goto signal_used;
+ 
+ 	msghdr->msg_control = &ctl;
++	msghdr->msg_controllen = sizeof(ctl);
+ 	err = sock->ops->sendmsg(sock, msghdr, 0);
+ 	if (unlikely(err < 0)) {
+ 		vq_err(&nvq->vq, "Fail to batch sending packets\n");
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index ad9aac06427a0..00f0f282e7a13 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1583,7 +1583,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
+ 			 * If it's not a platform device, at least print a warning. A
+ 			 * fix would add code to remove the device from the system.
+ 			 */
+-			if (dev_is_platform(device)) {
++			if (!device) {
++				/* TODO: Represent each OF framebuffer as its own
++				 * device in the device hierarchy. For now, offb
++				 * doesn't have such a device, so unregister the
++				 * framebuffer as before without warning.
++				 */
++				do_unregister_framebuffer(registered_fb[i]);
++			} else if (dev_is_platform(device)) {
+ 				registered_fb[i]->forced_out = true;
+ 				platform_device_unregister(to_platform_device(device));
+ 			} else {
+diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
+index 565578002d79e..c7b8a8e787e23 100644
+--- a/drivers/w1/slaves/w1_therm.c
++++ b/drivers/w1/slaves/w1_therm.c
+@@ -2089,16 +2089,20 @@ static ssize_t w1_seq_show(struct device *device,
+ 		if (sl->reg_num.id == reg_num->id)
+ 			seq = i;
+ 
++		if (w1_reset_bus(sl->master))
++			goto error;
++
++		/* Put the device into chain DONE state */
++		w1_write_8(sl->master, W1_MATCH_ROM);
++		w1_write_block(sl->master, (u8 *)&rn, 8);
+ 		w1_write_8(sl->master, W1_42_CHAIN);
+ 		w1_write_8(sl->master, W1_42_CHAIN_DONE);
+ 		w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
+-		w1_read_block(sl->master, &ack, sizeof(ack));
+ 
+ 		/* check for acknowledgment */
+ 		ack = w1_read_8(sl->master);
+ 		if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
+ 			goto error;
+-
+ 	}
+ 
+ 	/* Exit from CHAIN state */
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index 0399cf8e3c32c..151e9da5da2dc 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -118,7 +118,7 @@ struct btrfs_bio_ctrl {
+  */
+ struct extent_changeset {
+ 	/* How many bytes are set/cleared in this operation */
+-	unsigned int bytes_changed;
++	u64 bytes_changed;
+ 
+ 	/* Changed ranges */
+ 	struct ulist range_changed;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0f4408f9daddc..ec4950cfe1ea9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4466,6 +4466,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
+ 			   dest->root_key.objectid);
+ 		return -EPERM;
+ 	}
++	if (atomic_read(&dest->nr_swapfiles)) {
++		spin_unlock(&dest->root_item_lock);
++		btrfs_warn(fs_info,
++			   "attempt to delete subvolume %llu with active swapfile",
++			   root->root_key.objectid);
++		return -EPERM;
++	}
+ 	root_flags = btrfs_root_flags(&dest->root_item);
+ 	btrfs_set_root_flags(&dest->root_item,
+ 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+@@ -10424,8 +10431,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	 * set. We use this counter to prevent snapshots. We must increment it
+ 	 * before walking the extents because we don't want a concurrent
+ 	 * snapshot to run after we've already checked the extents.
++	 *
++	 * It is possible that the subvolume is marked for deletion but not
++	 * yet removed. To prevent this race, we check the root status before
++	 * activating the swapfile.
+ 	 */
++	spin_lock(&root->root_item_lock);
++	if (btrfs_root_dead(root)) {
++		spin_unlock(&root->root_item_lock);
++
++		btrfs_exclop_finish(fs_info);
++		btrfs_warn(fs_info,
++		"cannot activate swapfile because subvolume %llu is being deleted",
++			root->root_key.objectid);
++		return -EPERM;
++	}
+ 	atomic_inc(&root->nr_swapfiles);
++	spin_unlock(&root->root_item_lock);
+ 
+ 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8d47ec5fc4f44..8fe9d55d68622 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1215,7 +1215,7 @@ static u32 get_extent_max_capacity(const struct extent_map *em)
+ }
+ 
+ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
+-				     bool locked)
++				     u32 extent_thresh, u64 newer_than, bool locked)
+ {
+ 	struct extent_map *next;
+ 	bool ret = false;
+@@ -1225,11 +1225,12 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
+ 		return false;
+ 
+ 	/*
+-	 * We want to check if the next extent can be merged with the current
+-	 * one, which can be an extent created in a past generation, so we pass
+-	 * a minimum generation of 0 to defrag_lookup_extent().
++	 * Here we need to pass @newer_than when checking the next extent, or
++	 * we will hit a case where we mark the current extent for defrag but
++	 * the next one will not be a target.
++	 * This will just cause extra IO without really reducing the fragments.
+ 	 */
+-	next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
++	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
+ 	/* No more em or hole */
+ 	if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
+ 		goto out;
+@@ -1241,6 +1242,13 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
+ 	 */
+ 	if (next->len >= get_extent_max_capacity(em))
+ 		goto out;
++	/* Skip older extent */
++	if (next->generation < newer_than)
++		goto out;
++	/* Also check extent size */
++	if (next->len >= extent_thresh)
++		goto out;
++
+ 	ret = true;
+ out:
+ 	free_extent_map(next);
+@@ -1446,7 +1454,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
+ 			goto next;
+ 
+ 		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
+-							  locked);
++						extent_thresh, newer_than, locked);
+ 		if (!next_mergeable) {
+ 			struct defrag_target_range *last;
+ 
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f5b11c1a000aa..f5688a25fca35 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1945,23 +1945,18 @@ static void update_dev_time(const char *device_path)
+ 	path_put(&path);
+ }
+ 
+-static int btrfs_rm_dev_item(struct btrfs_device *device)
++static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
++			     struct btrfs_device *device)
+ {
+ 	struct btrfs_root *root = device->fs_info->chunk_root;
+ 	int ret;
+ 	struct btrfs_path *path;
+ 	struct btrfs_key key;
+-	struct btrfs_trans_handle *trans;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	trans = btrfs_start_transaction(root, 0);
+-	if (IS_ERR(trans)) {
+-		btrfs_free_path(path);
+-		return PTR_ERR(trans);
+-	}
+ 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+ 	key.type = BTRFS_DEV_ITEM_KEY;
+ 	key.offset = device->devid;
+@@ -1972,21 +1967,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
+ 	if (ret) {
+ 		if (ret > 0)
+ 			ret = -ENOENT;
+-		btrfs_abort_transaction(trans, ret);
+-		btrfs_end_transaction(trans);
+ 		goto out;
+ 	}
+ 
+ 	ret = btrfs_del_item(trans, root, path);
+-	if (ret) {
+-		btrfs_abort_transaction(trans, ret);
+-		btrfs_end_transaction(trans);
+-	}
+-
+ out:
+ 	btrfs_free_path(path);
+-	if (!ret)
+-		ret = btrfs_commit_transaction(trans);
+ 	return ret;
+ }
+ 
+@@ -2127,6 +2113,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ 		    struct btrfs_dev_lookup_args *args,
+ 		    struct block_device **bdev, fmode_t *mode)
+ {
++	struct btrfs_trans_handle *trans;
+ 	struct btrfs_device *device;
+ 	struct btrfs_fs_devices *cur_devices;
+ 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+@@ -2142,7 +2129,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ 
+ 	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	device = btrfs_find_device(fs_info->fs_devices, args);
+ 	if (!device) {
+@@ -2150,27 +2137,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
+ 		else
+ 			ret = -ENOENT;
+-		goto out;
++		return ret;
+ 	}
+ 
+ 	if (btrfs_pinned_by_swapfile(fs_info, device)) {
+ 		btrfs_warn_in_rcu(fs_info,
+ 		  "cannot remove device %s (devid %llu) due to active swapfile",
+ 				  rcu_str_deref(device->name), device->devid);
+-		ret = -ETXTBSY;
+-		goto out;
++		return -ETXTBSY;
+ 	}
+ 
+-	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
+-		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
+-		goto out;
+-	}
++	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
++		return BTRFS_ERROR_DEV_TGT_REPLACE;
+ 
+ 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+-	    fs_info->fs_devices->rw_devices == 1) {
+-		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
+-		goto out;
+-	}
++	    fs_info->fs_devices->rw_devices == 1)
++		return BTRFS_ERROR_DEV_ONLY_WRITABLE;
+ 
+ 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+ 		mutex_lock(&fs_info->chunk_mutex);
+@@ -2183,14 +2165,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ 	if (ret)
+ 		goto error_undo;
+ 
+-	/*
+-	 * TODO: the superblock still includes this device in its num_devices
+-	 * counter although write_all_supers() is not locked out. This
+-	 * could give a filesystem state which requires a degraded mount.
+-	 */
+-	ret = btrfs_rm_dev_item(device);
+-	if (ret)
++	trans = btrfs_start_transaction(fs_info->chunk_root, 0);
++	if (IS_ERR(trans)) {
++		ret = PTR_ERR(trans);
+ 		goto error_undo;
++	}
++
++	ret = btrfs_rm_dev_item(trans, device);
++	if (ret) {
++		/* Any error in dev item removal is critical */
++		btrfs_crit(fs_info,
++			   "failed to remove device item for devid %llu: %d",
++			   device->devid, ret);
++		btrfs_abort_transaction(trans, ret);
++		btrfs_end_transaction(trans);
++		return ret;
++	}
+ 
+ 	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ 	btrfs_scrub_cancel_dev(device);
+@@ -2273,7 +2263,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+ 		free_fs_devices(cur_devices);
+ 	}
+ 
+-out:
++	ret = btrfs_commit_transaction(trans);
++
+ 	return ret;
+ 
+ error_undo:
+@@ -2284,7 +2275,7 @@ error_undo:
+ 		device->fs_devices->rw_devices++;
+ 		mutex_unlock(&fs_info->chunk_mutex);
+ 	}
+-	goto out;
++	return ret;
+ }
+ 
+ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index f559d517c7c44..f03705d2f8a8c 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1927,18 +1927,19 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
+ 
+ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
+ {
++	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
+ 	struct btrfs_device *device;
+ 	bool ret = false;
+ 
+-	if (!btrfs_is_zoned(fs_devices->fs_info))
++	if (!btrfs_is_zoned(fs_info))
+ 		return true;
+ 
+ 	/* Non-single profiles are not supported yet */
+ 	ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
+ 
+ 	/* Check if there is a device with active zones left */
+-	mutex_lock(&fs_devices->device_list_mutex);
+-	list_for_each_entry(device, &fs_devices->devices, dev_list) {
++	mutex_lock(&fs_info->chunk_mutex);
++	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
+ 		struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ 
+ 		if (!device->bdev)
+@@ -1950,7 +1951,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
+ 			break;
+ 		}
+ 	}
+-	mutex_unlock(&fs_devices->device_list_mutex);
++	mutex_unlock(&fs_info->chunk_mutex);
+ 
+ 	return ret;
+ }
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 133dbd9338e73..d91fa53e12b33 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -478,8 +478,11 @@ more:
+ 					2 : (fpos_off(rde->offset) + 1);
+ 			err = note_last_dentry(dfi, rde->name, rde->name_len,
+ 					       next_offset);
+-			if (err)
++			if (err) {
++				ceph_mdsc_put_request(dfi->last_readdir);
++				dfi->last_readdir = NULL;
+ 				return err;
++			}
+ 		} else if (req->r_reply_info.dir_end) {
+ 			dfi->next_offset = 2;
+ 			/* keep last name */
+@@ -520,6 +523,12 @@ more:
+ 		if (!dir_emit(ctx, rde->name, rde->name_len,
+ 			      ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
+ 			      le32_to_cpu(rde->inode.in->mode) >> 12)) {
++			/*
++			 * NOTE: There is no need to put 'dfi->last_readdir'
++			 * here, because when dir_emit stops us it most likely
++			 * doesn't have enough memory, etc., so the next
++			 * readdir will continue from where it left off.
++			 */
+ 			dout("filldir stopping us...\n");
+ 			return 0;
+ 		}
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index ef4a980a7bf37..c092dce0485c7 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -87,13 +87,13 @@ struct inode *ceph_get_snapdir(struct inode *parent)
+ 	if (!S_ISDIR(parent->i_mode)) {
+ 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
+ 			     parent->i_mode);
+-		return ERR_PTR(-ENOTDIR);
++		goto err;
+ 	}
+ 
+ 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
+ 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
+ 			     inode->i_mode);
+-		return ERR_PTR(-ENOTDIR);
++		goto err;
+ 	}
+ 
+ 	inode->i_mode = parent->i_mode;
+@@ -113,6 +113,12 @@ struct inode *ceph_get_snapdir(struct inode *parent)
+ 	}
+ 
+ 	return inode;
++err:
++	if ((inode->i_state & I_NEW))
++		discard_new_inode(inode);
++	else
++		iput(inode);
++	return ERR_PTR(-ENOTDIR);
+ }
+ 
+ const struct inode_operations ceph_file_iops = {
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index d6f8ccc7bfe2f..c3be6a541c8fc 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -453,9 +453,7 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
+ 	return rc;
+ }
+ 
+-static int
+-reconnect_dfs_server(struct TCP_Server_Info *server,
+-		     bool mark_smb_session)
++static int reconnect_dfs_server(struct TCP_Server_Info *server)
+ {
+ 	int rc = 0;
+ 	const char *refpath = server->current_fullpath + 1;
+@@ -479,7 +477,12 @@ reconnect_dfs_server(struct TCP_Server_Info *server,
+ 	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
+ 		return 0;
+ 
+-	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
++	/*
++	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
++	 * different server or share during failover.  This could be improved by adding some logic
++	 * to only do that when we actually connect to a different server or share, though.
++	 */
++	cifs_mark_tcp_ses_conns_for_reconnect(server, true);
+ 
+ 	cifs_abort_connection(server);
+ 
+@@ -537,7 +540,7 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+-	return reconnect_dfs_server(server, mark_smb_session);
++	return reconnect_dfs_server(server);
+ }
+ #else
+ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+@@ -4465,7 +4468,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
+ 	 */
+ 	if (rc && server->current_fullpath != server->origin_fullpath) {
+ 		server->current_fullpath = server->origin_fullpath;
+-		cifs_reconnect(tcon->ses->server, true);
++		cifs_signal_cifsd_for_reconnect(server, true);
+ 	}
+ 
+ 	dfs_cache_free_tgts(tl);
+diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
+index ebe236b9d9f56..235aa1b395ebc 100644
+--- a/fs/cifs/netmisc.c
++++ b/fs/cifs/netmisc.c
+@@ -896,7 +896,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
+ 		if (class == ERRSRV && code == ERRbaduid) {
+ 			cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
+ 				code);
+-			cifs_reconnect(mid->server, false);
++			cifs_signal_cifsd_for_reconnect(mid->server, false);
+ 		}
+ 	}
+ 
+diff --git a/fs/file_table.c b/fs/file_table.c
+index 7d2e692b66a94..ada8fe814db97 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -412,6 +412,7 @@ void __fput_sync(struct file *file)
+ }
+ 
+ EXPORT_SYMBOL(fput);
++EXPORT_SYMBOL(__fput_sync);
+ 
+ void __init files_init(void)
+ {
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index dbecd27656c7c..04d374e65e546 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -155,6 +155,7 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
+ struct io_wq_work {
+ 	struct io_wq_work_node list;
+ 	unsigned flags;
++	int fd;
+ };
+ 
+ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 5e6788ab188fa..1a9630dc5361b 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -112,8 +112,7 @@
+ 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
+ 
+ #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
+-				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
+-				REQ_F_ASYNC_DATA)
++				REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
+ 
+ #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
+ 
+@@ -469,7 +468,6 @@ struct io_uring_task {
+ 	const struct io_ring_ctx *last;
+ 	struct io_wq		*io_wq;
+ 	struct percpu_counter	inflight;
+-	atomic_t		inflight_tracked;
+ 	atomic_t		in_idle;
+ 
+ 	spinlock_t		task_lock;
+@@ -560,7 +558,8 @@ struct io_rw {
+ 	/* NOTE: kiocb has the file as the first member, so don't do it here */
+ 	struct kiocb			kiocb;
+ 	u64				addr;
+-	u64				len;
++	u32				len;
++	u32				flags;
+ };
+ 
+ struct io_connect {
+@@ -621,10 +620,10 @@ struct io_epoll {
+ 
+ struct io_splice {
+ 	struct file			*file_out;
+-	struct file			*file_in;
+ 	loff_t				off_out;
+ 	loff_t				off_in;
+ 	u64				len;
++	int				splice_fd_in;
+ 	unsigned int			flags;
+ };
+ 
+@@ -1127,8 +1126,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
+ 				     struct io_uring_rsrc_update2 *up,
+ 				     unsigned nr_args);
+ static void io_clean_op(struct io_kiocb *req);
+-static struct file *io_file_get(struct io_ring_ctx *ctx,
+-				struct io_kiocb *req, int fd, bool fixed);
++static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
++					     unsigned issue_flags);
++static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
++static void io_drop_inflight_file(struct io_kiocb *req);
++static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
+ static void __io_queue_sqe(struct io_kiocb *req);
+ static void io_rsrc_put_work(struct work_struct *work);
+ 
+@@ -1257,13 +1259,20 @@ static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
+ }
+ 
+ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
+-					struct io_ring_ctx *ctx)
++					struct io_ring_ctx *ctx,
++					unsigned int issue_flags)
+ {
+ 	if (!req->fixed_rsrc_refs) {
+ 		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
+-		ctx->rsrc_cached_refs--;
+-		if (unlikely(ctx->rsrc_cached_refs < 0))
+-			io_rsrc_refs_refill(ctx);
++
++		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
++			lockdep_assert_held(&ctx->uring_lock);
++			ctx->rsrc_cached_refs--;
++			if (unlikely(ctx->rsrc_cached_refs < 0))
++				io_rsrc_refs_refill(ctx);
++		} else {
++			percpu_ref_get(req->fixed_rsrc_refs);
++		}
+ 	}
+ }
+ 
+@@ -1303,29 +1312,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+ 			  bool cancel_all)
+ 	__must_hold(&req->ctx->timeout_lock)
+ {
+-	struct io_kiocb *req;
+-
+ 	if (task && head->task != task)
+ 		return false;
+-	if (cancel_all)
+-		return true;
+-
+-	io_for_each_link(req, head) {
+-		if (req->flags & REQ_F_INFLIGHT)
+-			return true;
+-	}
+-	return false;
+-}
+-
+-static bool io_match_linked(struct io_kiocb *head)
+-{
+-	struct io_kiocb *req;
+-
+-	io_for_each_link(req, head) {
+-		if (req->flags & REQ_F_INFLIGHT)
+-			return true;
+-	}
+-	return false;
++	return cancel_all;
+ }
+ 
+ /*
+@@ -1335,24 +1324,9 @@ static bool io_match_linked(struct io_kiocb *head)
+ static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ 			       bool cancel_all)
+ {
+-	bool matched;
+-
+ 	if (task && head->task != task)
+ 		return false;
+-	if (cancel_all)
+-		return true;
+-
+-	if (head->flags & REQ_F_LINK_TIMEOUT) {
+-		struct io_ring_ctx *ctx = head->ctx;
+-
+-		/* protect against races with linked timeouts */
+-		spin_lock_irq(&ctx->timeout_lock);
+-		matched = io_match_linked(head);
+-		spin_unlock_irq(&ctx->timeout_lock);
+-	} else {
+-		matched = io_match_linked(head);
+-	}
+-	return matched;
++	return cancel_all;
+ }
+ 
+ static inline bool req_has_async_data(struct io_kiocb *req)
+@@ -1500,14 +1474,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
+ 	return req->flags & REQ_F_FIXED_FILE;
+ }
+ 
+-static inline void io_req_track_inflight(struct io_kiocb *req)
+-{
+-	if (!(req->flags & REQ_F_INFLIGHT)) {
+-		req->flags |= REQ_F_INFLIGHT;
+-		atomic_inc(&current->io_uring->inflight_tracked);
+-	}
+-}
+-
+ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
+ {
+ 	if (WARN_ON_ONCE(!req->link))
+@@ -1551,14 +1517,6 @@ static void io_prep_async_work(struct io_kiocb *req)
+ 		if (def->unbound_nonreg_file)
+ 			req->work.flags |= IO_WQ_WORK_UNBOUND;
+ 	}
+-
+-	switch (req->opcode) {
+-	case IORING_OP_SPLICE:
+-	case IORING_OP_TEE:
+-		if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
+-			req->work.flags |= IO_WQ_WORK_UNBOUND;
+-		break;
+-	}
+ }
+ 
+ static void io_prep_async_link(struct io_kiocb *req)
+@@ -1652,12 +1610,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ 	__must_hold(&ctx->completion_lock)
+ {
+ 	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++	struct io_kiocb *req, *tmp;
+ 
+ 	spin_lock_irq(&ctx->timeout_lock);
+-	while (!list_empty(&ctx->timeout_list)) {
++	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+ 		u32 events_needed, events_got;
+-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
+-						struct io_kiocb, timeout.list);
+ 
+ 		if (io_is_timeout_noseq(req))
+ 			break;
+@@ -1674,7 +1631,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ 		if (events_got < events_needed)
+ 			break;
+ 
+-		list_del_init(&req->timeout.list);
+ 		io_kill_timeout(req, 0);
+ 	}
+ 	ctx->cq_last_tm_flush = seq;
+@@ -2381,6 +2337,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
+ 
+ 	WARN_ON_ONCE(!tctx);
+ 
++	io_drop_inflight_file(req);
++
+ 	spin_lock_irqsave(&tctx->task_lock, flags);
+ 	if (priority)
+ 		wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
+@@ -2994,50 +2952,11 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
+ 
+ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+-	struct io_ring_ctx *ctx = req->ctx;
+ 	struct kiocb *kiocb = &req->rw.kiocb;
+-	struct file *file = req->file;
+ 	unsigned ioprio;
+ 	int ret;
+ 
+-	if (!io_req_ffs_set(req))
+-		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
+-
+ 	kiocb->ki_pos = READ_ONCE(sqe->off);
+-	if (kiocb->ki_pos == -1) {
+-		if (!(file->f_mode & FMODE_STREAM)) {
+-			req->flags |= REQ_F_CUR_POS;
+-			kiocb->ki_pos = file->f_pos;
+-		} else {
+-			kiocb->ki_pos = 0;
+-		}
+-	}
+-	kiocb->ki_flags = iocb_flags(file);
+-	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
+-	if (unlikely(ret))
+-		return ret;
+-
+-	/*
+-	 * If the file is marked O_NONBLOCK, still allow retry for it if it
+-	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
+-	 * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
+-	 */
+-	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+-	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+-		req->flags |= REQ_F_NOWAIT;
+-
+-	if (ctx->flags & IORING_SETUP_IOPOLL) {
+-		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
+-			return -EOPNOTSUPP;
+-
+-		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
+-		kiocb->ki_complete = io_complete_rw_iopoll;
+-		req->iopoll_completed = 0;
+-	} else {
+-		if (kiocb->ki_flags & IOCB_HIPRI)
+-			return -EINVAL;
+-		kiocb->ki_complete = io_complete_rw;
+-	}
+ 
+ 	ioprio = READ_ONCE(sqe->ioprio);
+ 	if (ioprio) {
+@@ -3053,6 +2972,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	req->imu = NULL;
+ 	req->rw.addr = READ_ONCE(sqe->addr);
+ 	req->rw.len = READ_ONCE(sqe->len);
++	req->rw.flags = READ_ONCE(sqe->rw_flags);
+ 	req->buf_index = READ_ONCE(sqe->buf_index);
+ 	return 0;
+ }
+@@ -3169,7 +3089,8 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
+ 	return 0;
+ }
+ 
+-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
++static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
++			   unsigned int issue_flags)
+ {
+ 	struct io_mapped_ubuf *imu = req->imu;
+ 	u16 index, buf_index = req->buf_index;
+@@ -3179,7 +3100,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+ 
+ 		if (unlikely(buf_index >= ctx->nr_user_bufs))
+ 			return -EFAULT;
+-		io_req_set_rsrc_node(req, ctx);
++		io_req_set_rsrc_node(req, ctx, issue_flags);
+ 		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+ 		imu = READ_ONCE(ctx->user_bufs[index]);
+ 		req->imu = imu;
+@@ -3335,7 +3256,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
+ 	ssize_t ret;
+ 
+ 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
+-		ret = io_import_fixed(req, rw, iter);
++		ret = io_import_fixed(req, rw, iter, issue_flags);
+ 		if (ret)
+ 			return ERR_PTR(ret);
+ 		return NULL;
+@@ -3533,13 +3454,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
+ 	return 0;
+ }
+ 
+-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+-{
+-	if (unlikely(!(req->file->f_mode & FMODE_READ)))
+-		return -EBADF;
+-	return io_prep_rw(req, sqe);
+-}
+-
+ /*
+  * This is our waitqueue callback handler, registered through __folio_lock_async()
+  * when we initially tried to do the IO with the iocb armed our waitqueue.
+@@ -3627,6 +3541,58 @@ static bool need_read_all(struct io_kiocb *req)
+ 		S_ISBLK(file_inode(req->file)->i_mode);
+ }
+ 
++static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
++{
++	struct kiocb *kiocb = &req->rw.kiocb;
++	struct io_ring_ctx *ctx = req->ctx;
++	struct file *file = req->file;
++	int ret;
++
++	if (unlikely(!file || !(file->f_mode & mode)))
++		return -EBADF;
++
++	if (!io_req_ffs_set(req))
++		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
++
++	if (kiocb->ki_pos == -1) {
++		if (!(file->f_mode & FMODE_STREAM)) {
++			req->flags |= REQ_F_CUR_POS;
++			kiocb->ki_pos = file->f_pos;
++		} else {
++			kiocb->ki_pos = 0;
++		}
++	}
++
++	kiocb->ki_flags = iocb_flags(file);
++	ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
++	if (unlikely(ret))
++		return ret;
++
++	/*
++	 * If the file is marked O_NONBLOCK, still allow retry for it if it
++	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
++	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
++	 */
++	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
++	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
++		req->flags |= REQ_F_NOWAIT;
++
++	if (ctx->flags & IORING_SETUP_IOPOLL) {
++		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
++			return -EOPNOTSUPP;
++
++		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
++		kiocb->ki_complete = io_complete_rw_iopoll;
++		req->iopoll_completed = 0;
++	} else {
++		if (kiocb->ki_flags & IOCB_HIPRI)
++			return -EINVAL;
++		kiocb->ki_complete = io_complete_rw;
++	}
++
++	return 0;
++}
++
+ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct io_rw_state __s, *s = &__s;
+@@ -3651,6 +3617,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 		iov_iter_restore(&s->iter, &s->iter_state);
+ 		iovec = NULL;
+ 	}
++	ret = io_rw_init_file(req, FMODE_READ);
++	if (unlikely(ret))
++		return ret;
+ 	req->result = iov_iter_count(&s->iter);
+ 
+ 	if (force_nonblock) {
+@@ -3749,14 +3718,6 @@ out_free:
+ 	return 0;
+ }
+ 
+-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+-{
+-	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
+-		return -EBADF;
+-	req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file));
+-	return io_prep_rw(req, sqe);
+-}
+-
+ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct io_rw_state __s, *s = &__s;
+@@ -3776,6 +3737,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
+ 		iov_iter_restore(&s->iter, &s->iter_state);
+ 		iovec = NULL;
+ 	}
++	ret = io_rw_init_file(req, FMODE_WRITE);
++	if (unlikely(ret))
++		return ret;
+ 	req->result = iov_iter_count(&s->iter);
+ 
+ 	if (force_nonblock) {
+@@ -4144,18 +4108,11 @@ static int __io_splice_prep(struct io_kiocb *req,
+ 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ 		return -EINVAL;
+ 
+-	sp->file_in = NULL;
+ 	sp->len = READ_ONCE(sqe->len);
+ 	sp->flags = READ_ONCE(sqe->splice_flags);
+-
+ 	if (unlikely(sp->flags & ~valid_flags))
+ 		return -EINVAL;
+-
+-	sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
+-				  (sp->flags & SPLICE_F_FD_IN_FIXED));
+-	if (!sp->file_in)
+-		return -EBADF;
+-	req->flags |= REQ_F_NEED_CLEANUP;
++	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
+ 	return 0;
+ }
+ 
+@@ -4170,20 +4127,29 @@ static int io_tee_prep(struct io_kiocb *req,
+ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct io_splice *sp = &req->splice;
+-	struct file *in = sp->file_in;
+ 	struct file *out = sp->file_out;
+ 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
++	struct file *in;
+ 	long ret = 0;
+ 
+ 	if (issue_flags & IO_URING_F_NONBLOCK)
+ 		return -EAGAIN;
++
++	if (sp->flags & SPLICE_F_FD_IN_FIXED)
++		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
++	else
++		in = io_file_get_normal(req, sp->splice_fd_in);
++	if (!in) {
++		ret = -EBADF;
++		goto done;
++	}
++
+ 	if (sp->len)
+ 		ret = do_tee(in, out, sp->len, flags);
+ 
+ 	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ 		io_put_file(in);
+-	req->flags &= ~REQ_F_NEED_CLEANUP;
+-
++done:
+ 	if (ret != sp->len)
+ 		req_set_fail(req);
+ 	io_req_complete(req, ret);
+@@ -4202,15 +4168,24 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	struct io_splice *sp = &req->splice;
+-	struct file *in = sp->file_in;
+ 	struct file *out = sp->file_out;
+ 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+ 	loff_t *poff_in, *poff_out;
++	struct file *in;
+ 	long ret = 0;
+ 
+ 	if (issue_flags & IO_URING_F_NONBLOCK)
+ 		return -EAGAIN;
+ 
++	if (sp->flags & SPLICE_F_FD_IN_FIXED)
++		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
++	else
++		in = io_file_get_normal(req, sp->splice_fd_in);
++	if (!in) {
++		ret = -EBADF;
++		goto done;
++	}
++
+ 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
+ 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
+ 
+@@ -4219,8 +4194,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ 		io_put_file(in);
+-	req->flags &= ~REQ_F_NEED_CLEANUP;
+-
++done:
+ 	if (ret != sp->len)
+ 		req_set_fail(req);
+ 	io_req_complete(req, ret);
+@@ -4245,9 +4219,6 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
+-	if (!req->file)
+-		return -EBADF;
+-
+ 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ 		return -EINVAL;
+ 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
+@@ -5514,7 +5485,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
+  * either spurious wakeup or multishot CQE is served. 0 when it's done with
+  * the request, then the mask is stored in req->result.
+  */
+-static int io_poll_check_events(struct io_kiocb *req)
++static int io_poll_check_events(struct io_kiocb *req, bool locked)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	struct io_poll_iocb *poll = io_poll_get_single(req);
+@@ -5536,7 +5507,10 @@ static int io_poll_check_events(struct io_kiocb *req)
+ 		if (!req->result) {
+ 			struct poll_table_struct pt = { ._key = poll->events };
+ 
+-			req->result = vfs_poll(req->file, &pt) & poll->events;
++			if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
++				req->result = -EBADF;
++			else
++				req->result = vfs_poll(req->file, &pt) & poll->events;
+ 		}
+ 
+ 		/* multishot, just fill an CQE and proceed */
+@@ -5570,7 +5544,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	int ret;
+ 
+-	ret = io_poll_check_events(req);
++	ret = io_poll_check_events(req, *locked);
+ 	if (ret > 0)
+ 		return;
+ 
+@@ -5595,7 +5569,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	int ret;
+ 
+-	ret = io_poll_check_events(req);
++	ret = io_poll_check_events(req, *locked);
+ 	if (ret > 0)
+ 		return;
+ 
+@@ -6281,6 +6255,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ 	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+ 		return -EINVAL;
+ 
++	INIT_LIST_HEAD(&req->timeout.list);
+ 	data->mode = io_translate_timeout_mode(flags);
+ 	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
+ 
+@@ -6507,11 +6482,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	case IORING_OP_READV:
+ 	case IORING_OP_READ_FIXED:
+ 	case IORING_OP_READ:
+-		return io_read_prep(req, sqe);
+ 	case IORING_OP_WRITEV:
+ 	case IORING_OP_WRITE_FIXED:
+ 	case IORING_OP_WRITE:
+-		return io_write_prep(req, sqe);
++		return io_prep_rw(req, sqe);
+ 	case IORING_OP_POLL_ADD:
+ 		return io_poll_add_prep(req, sqe);
+ 	case IORING_OP_POLL_REMOVE:
+@@ -6689,11 +6663,6 @@ static void io_clean_op(struct io_kiocb *req)
+ 			kfree(io->free_iov);
+ 			break;
+ 			}
+-		case IORING_OP_SPLICE:
+-		case IORING_OP_TEE:
+-			if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
+-				io_put_file(req->splice.file_in);
+-			break;
+ 		case IORING_OP_OPENAT:
+ 		case IORING_OP_OPENAT2:
+ 			if (req->open.filename)
+@@ -6724,11 +6693,6 @@ static void io_clean_op(struct io_kiocb *req)
+ 		kfree(req->apoll);
+ 		req->apoll = NULL;
+ 	}
+-	if (req->flags & REQ_F_INFLIGHT) {
+-		struct io_uring_task *tctx = req->task->io_uring;
+-
+-		atomic_dec(&tctx->inflight_tracked);
+-	}
+ 	if (req->flags & REQ_F_CREDS)
+ 		put_cred(req->creds);
+ 	if (req->flags & REQ_F_ASYNC_DATA) {
+@@ -6738,6 +6702,23 @@ static void io_clean_op(struct io_kiocb *req)
+ 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
+ }
+ 
++static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
++{
++	if (req->file || !io_op_defs[req->opcode].needs_file)
++		return true;
++
++	if (req->flags & REQ_F_FIXED_FILE)
++		req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
++	else
++		req->file = io_file_get_normal(req, req->work.fd);
++	if (req->file)
++		return true;
++
++	req_set_fail(req);
++	req->result = -EBADF;
++	return false;
++}
++
+ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ {
+ 	const struct cred *creds = NULL;
+@@ -6748,6 +6729,8 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	if (!io_op_defs[req->opcode].audit_skip)
+ 		audit_uring_entry(req->opcode);
++	if (unlikely(!io_assign_file(req, issue_flags)))
++		return -EBADF;
+ 
+ 	switch (req->opcode) {
+ 	case IORING_OP_NOP:
+@@ -6889,10 +6872,11 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
+ static void io_wq_submit_work(struct io_wq_work *work)
+ {
+ 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
++	const struct io_op_def *def = &io_op_defs[req->opcode];
+ 	unsigned int issue_flags = IO_URING_F_UNLOCKED;
+ 	bool needs_poll = false;
+ 	struct io_kiocb *timeout;
+-	int ret = 0;
++	int ret = 0, err = -ECANCELED;
+ 
+ 	/* one will be dropped by ->io_free_work() after returning to io-wq */
+ 	if (!(req->flags & REQ_F_REFCOUNT))
+@@ -6904,14 +6888,18 @@ static void io_wq_submit_work(struct io_wq_work *work)
+ 	if (timeout)
+ 		io_queue_linked_timeout(timeout);
+ 
++	if (!io_assign_file(req, issue_flags)) {
++		err = -EBADF;
++		work->flags |= IO_WQ_WORK_CANCEL;
++	}
++
+ 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
+ 	if (work->flags & IO_WQ_WORK_CANCEL) {
+-		io_req_task_queue_fail(req, -ECANCELED);
++		io_req_task_queue_fail(req, err);
+ 		return;
+ 	}
+ 
+ 	if (req->flags & REQ_F_FORCE_ASYNC) {
+-		const struct io_op_def *def = &io_op_defs[req->opcode];
+ 		bool opcode_poll = def->pollin || def->pollout;
+ 
+ 		if (opcode_poll && file_can_poll(req->file)) {
+@@ -6968,46 +6956,56 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
+ 	file_slot->file_ptr = file_ptr;
+ }
+ 
+-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
+-					     struct io_kiocb *req, int fd)
++static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
++					     unsigned int issue_flags)
+ {
+-	struct file *file;
++	struct io_ring_ctx *ctx = req->ctx;
++	struct file *file = NULL;
+ 	unsigned long file_ptr;
+ 
++	if (issue_flags & IO_URING_F_UNLOCKED)
++		mutex_lock(&ctx->uring_lock);
++
+ 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
+-		return NULL;
++		goto out;
+ 	fd = array_index_nospec(fd, ctx->nr_user_files);
+ 	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+ 	file = (struct file *) (file_ptr & FFS_MASK);
+ 	file_ptr &= ~FFS_MASK;
+ 	/* mask in overlapping REQ_F and FFS bits */
+ 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
+-	io_req_set_rsrc_node(req, ctx);
++	io_req_set_rsrc_node(req, ctx, 0);
++out:
++	if (issue_flags & IO_URING_F_UNLOCKED)
++		mutex_unlock(&ctx->uring_lock);
+ 	return file;
+ }
+ 
+-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
+-				       struct io_kiocb *req, int fd)
++/*
++ * Drop the file for requeue operations. Only used if req->file is the
++ * io_uring descriptor itself.
++ */
++static void io_drop_inflight_file(struct io_kiocb *req)
++{
++	if (unlikely(req->flags & REQ_F_INFLIGHT)) {
++		fput(req->file);
++		req->file = NULL;
++		req->flags &= ~REQ_F_INFLIGHT;
++	}
++}
++
++static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
+ {
+ 	struct file *file = fget(fd);
+ 
+-	trace_io_uring_file_get(ctx, fd);
++	trace_io_uring_file_get(req->ctx, fd);
+ 
+ 	/* we don't allow fixed io_uring files */
+-	if (file && unlikely(file->f_op == &io_uring_fops))
+-		io_req_track_inflight(req);
++	if (file && file->f_op == &io_uring_fops)
++		req->flags |= REQ_F_INFLIGHT;
+ 	return file;
+ }
+ 
+-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
+-				       struct io_kiocb *req, int fd, bool fixed)
+-{
+-	if (fixed)
+-		return io_file_get_fixed(ctx, req, fd);
+-	else
+-		return io_file_get_normal(ctx, req, fd);
+-}
+-
+ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+ {
+ 	struct io_kiocb *prev = req->timeout.prev;
+@@ -7245,6 +7243,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	if (io_op_defs[opcode].needs_file) {
+ 		struct io_submit_state *state = &ctx->submit_state;
+ 
++		req->work.fd = READ_ONCE(sqe->fd);
++
+ 		/*
+ 		 * Plug now if we have more than 2 IO left after this, and the
+ 		 * target is potentially a read/write to block based storage.
+@@ -7254,11 +7254,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 			state->need_plug = false;
+ 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
+ 		}
+-
+-		req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
+-					(sqe_flags & IOSQE_FIXED_FILE));
+-		if (unlikely(!req->file))
+-			return -EBADF;
+ 	}
+ 
+ 	personality = READ_ONCE(sqe->personality);
+@@ -8237,8 +8232,12 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
+ 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ 		skb_queue_head(&sk->sk_receive_queue, skb);
+ 
+-		for (i = 0; i < nr_files; i++)
+-			fput(fpl->fp[i]);
++		for (i = 0; i < nr; i++) {
++			struct file *file = io_file_from_index(ctx, i + offset);
++
++			if (file)
++				fput(file);
++		}
+ 	} else {
+ 		kfree_skb(skb);
+ 		free_uid(fpl->user);
+@@ -8700,7 +8699,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ 				err = -EBADF;
+ 				break;
+ 			}
+-			*io_get_tag_slot(data, up->offset + done) = tag;
++			*io_get_tag_slot(data, i) = tag;
+ 			io_fixed_file_set(file_slot, file);
+ 			err = io_sqe_file_register(ctx, file, i);
+ 			if (err) {
+@@ -8775,7 +8774,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
+ 	xa_init(&tctx->xa);
+ 	init_waitqueue_head(&tctx->wait);
+ 	atomic_set(&tctx->in_idle, 0);
+-	atomic_set(&tctx->inflight_tracked, 0);
+ 	task->io_uring = tctx;
+ 	spin_lock_init(&tctx->task_lock);
+ 	INIT_WQ_LIST(&tctx->task_list);
+@@ -9913,7 +9911,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
+ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+ {
+ 	if (tracked)
+-		return atomic_read(&tctx->inflight_tracked);
++		return 0;
+ 	return percpu_counter_sum(&tctx->inflight);
+ }
+ 
+@@ -10866,7 +10864,15 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ 	if (len > cpumask_size())
+ 		len = cpumask_size();
+ 
+-	if (copy_from_user(new_mask, arg, len)) {
++	if (in_compat_syscall()) {
++		ret = compat_get_bitmap(cpumask_bits(new_mask),
++					(const compat_ulong_t __user *)arg,
++					len * 8 /* CHAR_BIT */);
++	} else {
++		ret = copy_from_user(new_mask, arg, len);
++	}
++
++	if (ret) {
+ 		free_cpumask_var(new_mask);
+ 		return -EFAULT;
+ 	}
+diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
+index 57ab424c05ff0..072821b50ab91 100644
+--- a/fs/jfs/inode.c
++++ b/fs/jfs/inode.c
+@@ -146,12 +146,13 @@ void jfs_evict_inode(struct inode *inode)
+ 		dquot_initialize(inode);
+ 
+ 		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
++			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
+ 			truncate_inode_pages_final(&inode->i_data);
+ 
+ 			if (test_cflag(COMMIT_Freewmap, inode))
+ 				jfs_free_zero_link(inode);
+ 
+-			if (JFS_SBI(inode->i_sb)->ipimap)
++			if (ipimap && JFS_IP(ipimap)->i_imap)
+ 				diFree(inode);
+ 
+ 			/*
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index a71f1cf894b9f..d4bd94234ef73 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -447,7 +447,8 @@ static const struct address_space_operations minix_aops = {
+ 	.writepage = minix_writepage,
+ 	.write_begin = minix_write_begin,
+ 	.write_end = generic_write_end,
+-	.bmap = minix_bmap
++	.bmap = minix_bmap,
++	.direct_IO = noop_direct_IO
+ };
+ 
+ static const struct inode_operations minix_symlink_inode_operations = {
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 75cb1cbe4cdea..911bdb35eb085 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1853,16 +1853,6 @@ const struct dentry_operations nfs4_dentry_operations = {
+ };
+ EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
+ 
+-static fmode_t flags_to_mode(int flags)
+-{
+-	fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+-	if ((flags & O_ACCMODE) != O_WRONLY)
+-		res |= FMODE_READ;
+-	if ((flags & O_ACCMODE) != O_RDONLY)
+-		res |= FMODE_WRITE;
+-	return res;
+-}
+-
+ static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
+ {
+ 	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index eabfdab543c8c..11c566d8769f6 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -173,8 +173,8 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ 	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
+ 
+ 	if (iov_iter_rw(iter) == READ)
+-		return nfs_file_direct_read(iocb, iter);
+-	return nfs_file_direct_write(iocb, iter);
++		return nfs_file_direct_read(iocb, iter, true);
++	return nfs_file_direct_write(iocb, iter, true);
+ }
+ 
+ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
+@@ -425,6 +425,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+  * nfs_file_direct_read - file direct read operation for NFS files
+  * @iocb: target I/O control block
+  * @iter: vector of user buffers into which to read data
++ * @swap: flag indicating this is swap IO, not O_DIRECT IO
+  *
+  * We use this function for direct reads instead of calling
+  * generic_file_aio_read() in order to avoid gfar's check to see if
+@@ -440,7 +441,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+  * client must read the updated atime from the server back into its
+  * cache.
+  */
+-ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
++ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
++			     bool swap)
+ {
+ 	struct file *file = iocb->ki_filp;
+ 	struct address_space *mapping = file->f_mapping;
+@@ -482,12 +484,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
+ 	if (iter_is_iovec(iter))
+ 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+ 
+-	nfs_start_io_direct(inode);
++	if (!swap)
++		nfs_start_io_direct(inode);
+ 
+ 	NFS_I(inode)->read_io += count;
+ 	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
+ 
+-	nfs_end_io_direct(inode);
++	if (!swap)
++		nfs_end_io_direct(inode);
+ 
+ 	if (requested > 0) {
+ 		result = nfs_direct_wait(dreq);
+@@ -790,7 +794,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+  */
+ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ 					       struct iov_iter *iter,
+-					       loff_t pos)
++					       loff_t pos, int ioflags)
+ {
+ 	struct nfs_pageio_descriptor desc;
+ 	struct inode *inode = dreq->inode;
+@@ -798,7 +802,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ 	size_t requested_bytes = 0;
+ 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
+ 
+-	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
++	nfs_pageio_init_write(&desc, inode, ioflags, false,
+ 			      &nfs_direct_write_completion_ops);
+ 	desc.pg_dreq = dreq;
+ 	get_dreq(dreq);
+@@ -876,6 +880,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+  * nfs_file_direct_write - file direct write operation for NFS files
+  * @iocb: target I/O control block
+  * @iter: vector of user buffers from which to write data
++ * @swap: flag indicating this is swap IO, not O_DIRECT IO
+  *
+  * We use this function for direct writes instead of calling
+  * generic_file_aio_write() in order to avoid taking the inode
+@@ -892,7 +897,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+  * Note that O_APPEND is not supported for NFS direct writes, as there
+  * is no atomic O_APPEND write facility in the NFS protocol.
+  */
+-ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
++ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
++			      bool swap)
+ {
+ 	ssize_t result, requested;
+ 	size_t count;
+@@ -906,7 +912,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
+ 	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
+ 		file, iov_iter_count(iter), (long long) iocb->ki_pos);
+ 
+-	result = generic_write_checks(iocb, iter);
++	if (swap)
++		/* bypass generic checks */
++		result = iov_iter_count(iter);
++	else
++		result = generic_write_checks(iocb, iter);
+ 	if (result <= 0)
+ 		return result;
+ 	count = result;
+@@ -937,16 +947,22 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
+ 		dreq->iocb = iocb;
+ 	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
+ 
+-	nfs_start_io_direct(inode);
++	if (swap) {
++		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
++							    FLUSH_STABLE);
++	} else {
++		nfs_start_io_direct(inode);
+ 
+-	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
++		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
++							    FLUSH_COND_STABLE);
+ 
+-	if (mapping->nrpages) {
+-		invalidate_inode_pages2_range(mapping,
+-					      pos >> PAGE_SHIFT, end);
+-	}
++		if (mapping->nrpages) {
++			invalidate_inode_pages2_range(mapping,
++						      pos >> PAGE_SHIFT, end);
++		}
+ 
+-	nfs_end_io_direct(inode);
++		nfs_end_io_direct(inode);
++	}
+ 
+ 	if (requested > 0) {
+ 		result = nfs_direct_wait(dreq);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 76d76acbc5943..d8583f57ff99f 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -162,7 +162,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
+ 	ssize_t result;
+ 
+ 	if (iocb->ki_flags & IOCB_DIRECT)
+-		return nfs_file_direct_read(iocb, to);
++		return nfs_file_direct_read(iocb, to, false);
+ 
+ 	dprintk("NFS: read(%pD2, %zu@%lu)\n",
+ 		iocb->ki_filp,
+@@ -619,7 +619,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ 		return result;
+ 
+ 	if (iocb->ki_flags & IOCB_DIRECT)
+-		return nfs_file_direct_write(iocb, from);
++		return nfs_file_direct_write(iocb, from, false);
+ 
+ 	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
+ 		file, iov_iter_count(from), (long long) iocb->ki_pos);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index d96baa4450e39..e4fb939a2904b 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1180,7 +1180,6 @@ int nfs_open(struct inode *inode, struct file *filp)
+ 	nfs_fscache_open_file(inode, filp);
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(nfs_open);
+ 
+ /*
+  * This function is called whenever some part of NFS notices that
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 2de7c56a1fbed..465e39ff018d4 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
+ 	return true;
+ }
+ 
++static inline fmode_t flags_to_mode(int flags)
++{
++	fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
++	if ((flags & O_ACCMODE) != O_WRONLY)
++		res |= FMODE_READ;
++	if ((flags & O_ACCMODE) != O_RDONLY)
++		res |= FMODE_WRITE;
++	return res;
++}
++
+ /*
+  * Note: RFC 1813 doesn't limit the number of auth flavors that
+  * a server can return, so make something up.
+@@ -573,6 +583,13 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
+ 		!nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier);
+ }
+ 
++static inline gfp_t nfs_io_gfp_mask(void)
++{
++	if (current->flags & PF_WQ_WORKER)
++		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
++	return GFP_KERNEL;
++}
++
+ /* unlink.c */
+ extern struct rpc_task *
+ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
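
As a quick illustration of the mapping the new flags_to_mode() helper
implements (assuming the standard fcntl.h encodings O_RDONLY=0,
O_WRONLY=1, O_RDWR=2; FMODE_EXEC passes straight through because the
open-flag bit and the fmode_t bit share a value):

    /* illustrative truth table for flags_to_mode() */
    /*   flags & O_ACCMODE == 0 (O_RDONLY) -> FMODE_READ               */
    /*   flags & O_ACCMODE == 1 (O_WRONLY) -> FMODE_WRITE              */
    /*   flags & O_ACCMODE == 2 (O_RDWR)   -> FMODE_READ | FMODE_WRITE */
    /*   flags & O_ACCMODE == 3            -> FMODE_READ | FMODE_WRITE */

nfs4_file_open() below uses exactly the last row: an (O_ACCMODE == 3)
open now gets an open context with both modes instead of falling back to
nfs_open(). nfs_io_gfp_mask(), the other new helper, relaxes allocations
to fail fast (no retry, no warning) when the caller is a workqueue
worker.
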
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 32129446beca6..ca878d021faba 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -591,8 +591,10 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
+ 
+ 	ctx = get_nfs_open_context(nfs_file_open_context(src));
+ 	l_ctx = nfs_get_lock_context(ctx);
+-	if (IS_ERR(l_ctx))
+-		return PTR_ERR(l_ctx);
++	if (IS_ERR(l_ctx)) {
++		status = PTR_ERR(l_ctx);
++		goto out;
++	}
+ 
+ 	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
+ 				     FMODE_READ);
+@@ -600,7 +602,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
+ 	if (status) {
+ 		if (status == -EAGAIN)
+ 			status = -NFS4ERR_BAD_STATEID;
+-		return status;
++		goto out;
+ 	}
+ 
+ 	status = nfs4_call_sync(src_server->client, src_server, &msg,
+@@ -609,6 +611,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
+ 	if (status == -ENOTSUPP)
+ 		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
+ 
++out:
+ 	put_nfs_open_context(nfs_file_open_context(src));
+ 	return status;
+ }
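
The fix converts the two early returns into the usual kernel
goto-cleanup idiom so that the open context taken by
get_nfs_open_context() is always released; a generic sketch of the
pattern (types and helper names hypothetical):

    int sketch(void)
    {
            struct foo *f = acquire_foo();  /* must be released */
            int status;

            status = step_one(f);
            if (status)
                    goto out;   /* early 'return status' would leak f */
            status = step_two(f);
    out:
            release_foo(f);     /* runs on every exit path */
            return status;
    }
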
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index e79ae4cbc395e..e34af48fb4f41 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ 	struct dentry *parent = NULL;
+ 	struct inode *dir;
+ 	unsigned openflags = filp->f_flags;
++	fmode_t f_mode;
+ 	struct iattr attr;
+ 	int err;
+ 
+@@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ 	if (err)
+ 		return err;
+ 
++	f_mode = filp->f_mode;
+ 	if ((openflags & O_ACCMODE) == 3)
+-		return nfs_open(inode, filp);
++		f_mode |= flags_to_mode(openflags);
+ 
+ 	/* We can't create new files here */
+ 	openflags &= ~(O_CREAT|O_EXCL);
+@@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ 	parent = dget_parent(dentry);
+ 	dir = d_inode(parent);
+ 
+-	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
++	ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp);
+ 	err = PTR_ERR(ctx);
+ 	if (IS_ERR(ctx))
+ 		goto out;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index f5a62c0d999b4..0f4818627ef0c 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -49,6 +49,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/bitops.h>
+ #include <linux/jiffies.h>
++#include <linux/sched/mm.h>
+ 
+ #include <linux/sunrpc/clnt.h>
+ 
+@@ -2560,9 +2561,17 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
+ 
+ static void nfs4_state_manager(struct nfs_client *clp)
+ {
++	unsigned int memflags;
+ 	int status = 0;
+ 	const char *section = "", *section_sep = "";
+ 
++	/*
++	 * State recovery can deadlock if the direct reclaim code tries
++	 * to start NFS writeback. So ensure memory allocations are all
++	 * GFP_NOFS.
++	 */
++	memflags = memalloc_nofs_save();
++
+ 	/* Ensure exclusive access to NFSv4 state */
+ 	do {
+ 		trace_nfs4_state_mgr(clp);
+@@ -2657,6 +2666,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 			clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
+ 		}
+ 
++		memalloc_nofs_restore(memflags);
+ 		nfs4_end_drain_session(clp);
+ 		nfs4_clear_state_manager_bit(clp);
+ 
+@@ -2674,6 +2684,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ 			return;
+ 		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+ 			return;
++		memflags = memalloc_nofs_save();
+ 	} while (refcount_read(&clp->cl_count) > 1 && !signalled());
+ 	goto out_drain;
+ 
+@@ -2686,6 +2697,7 @@ out_error:
+ 			clp->cl_hostname, -status);
+ 	ssleep(1);
+ out_drain:
++	memalloc_nofs_restore(memflags);
+ 	nfs4_end_drain_session(clp);
+ 	nfs4_clear_state_manager_bit(clp);
+ }
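
memalloc_nofs_save()/memalloc_nofs_restore() are the scoped API this
change relies on: between the two calls, every allocation made by the
task behaves as if GFP_NOFS had been passed, so direct reclaim cannot
recurse back into NFS writeback from the state manager. A minimal sketch
of the pattern:

    unsigned int flags = memalloc_nofs_save();
    /* ... any allocation here implicitly gets GFP_NOFS semantics,
     * so reclaim will not start filesystem writeback ... */
    memalloc_nofs_restore(flags);

Note how the hunks restore the mask before draining the session and
re-save it when the manager loop re-arms, so the NOFS scope is dropped
while the thread may sleep waiting for new work.
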
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 815d630802451..9157dd19b8b4f 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -90,10 +90,10 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+ 	}
+ }
+ 
+-static inline struct nfs_page *
+-nfs_page_alloc(void)
++static inline struct nfs_page *nfs_page_alloc(void)
+ {
+-	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
++	struct nfs_page *p =
++		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
+ 	if (p)
+ 		INIT_LIST_HEAD(&p->wb_list);
+ 	return p;
+@@ -892,7 +892,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
+ 	struct nfs_commit_info cinfo;
+ 	struct nfs_page_array *pg_array = &hdr->page_array;
+ 	unsigned int pagecount, pageused;
+-	gfp_t gfp_flags = GFP_KERNEL;
++	gfp_t gfp_flags = nfs_io_gfp_mask();
+ 
+ 	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
+ 	pg_array->npages = pagecount;
+@@ -979,7 +979,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
+ 	desc->pg_mirrors_dynamic = NULL;
+ 	if (mirror_count == 1)
+ 		return desc->pg_mirrors_static;
+-	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL);
++	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
+ 	if (ret != NULL) {
+ 		for (i = 0; i < mirror_count; i++)
+ 			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 316f68f96e573..657c242a18ff1 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -419,7 +419,7 @@ static struct nfs_commit_data *
+ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
+ 			     struct nfs_commit_info *cinfo)
+ {
+-	struct nfs_commit_data *data = nfs_commitdata_alloc(false);
++	struct nfs_commit_data *data = nfs_commitdata_alloc();
+ 
+ 	if (!data)
+ 		return NULL;
+@@ -515,7 +515,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ 	unsigned int nreq = 0;
+ 
+ 	if (!list_empty(mds_pages)) {
+-		data = nfs_commitdata_alloc(true);
++		data = nfs_commitdata_alloc();
++		if (!data) {
++			nfs_retry_commit(mds_pages, NULL, cinfo, -1);
++			return -ENOMEM;
++		}
+ 		data->ds_commit_index = -1;
+ 		list_splice_init(mds_pages, &data->pages);
+ 		list_add_tail(&data->list, &list);
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 60693ab6a0325..9388503030992 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -70,27 +70,17 @@ static mempool_t *nfs_wdata_mempool;
+ static struct kmem_cache *nfs_cdata_cachep;
+ static mempool_t *nfs_commit_mempool;
+ 
+-struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
++struct nfs_commit_data *nfs_commitdata_alloc(void)
+ {
+ 	struct nfs_commit_data *p;
+ 
+-	if (never_fail)
+-		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
+-	else {
+-		/* It is OK to do some reclaim, not no safe to wait
+-		 * for anything to be returned to the pool.
+-		 * mempool_alloc() cannot handle that particular combination,
+-		 * so we need two separate attempts.
+-		 */
++	p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask());
++	if (!p) {
+ 		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
+-		if (!p)
+-			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
+-					     __GFP_NOWARN | __GFP_NORETRY);
+ 		if (!p)
+ 			return NULL;
++		memset(p, 0, sizeof(*p));
+ 	}
+-
+-	memset(p, 0, sizeof(*p));
+ 	INIT_LIST_HEAD(&p->pages);
+ 	return p;
+ }
+@@ -104,9 +94,15 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
+ 
+ static struct nfs_pgio_header *nfs_writehdr_alloc(void)
+ {
+-	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
++	struct nfs_pgio_header *p;
+ 
+-	memset(p, 0, sizeof(*p));
++	p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask());
++	if (!p) {
++		p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT);
++		if (!p)
++			return NULL;
++		memset(p, 0, sizeof(*p));
++	}
+ 	p->rw_mode = FMODE_WRITE;
+ 	return p;
+ }
+@@ -1826,7 +1822,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+ 	if (list_empty(head))
+ 		return 0;
+ 
+-	data = nfs_commitdata_alloc(true);
++	data = nfs_commitdata_alloc();
++	if (!data) {
++		nfs_retry_commit(head, NULL, cinfo, -1);
++		return -ENOMEM;
++	}
+ 
+ 	/* Set up the argument struct */
+ 	nfs_init_commit(data, head, NULL, cinfo);
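
Both allocators above now follow one pattern: try the slab with
nfs_io_gfp_mask() (which fails fast instead of kicking writeback when
called from a workqueue), fall back to the mempool reserve without
sleeping, and make the caller handle NULL, which nfs_commit_list() and
the pNFS variant do by requeueing via nfs_retry_commit(). Condensed
sketch (cachep/pool stand in for the NFS caches):

    p = kmem_cache_zalloc(cachep, nfs_io_gfp_mask());
    if (!p) {
            p = mempool_alloc(pool, GFP_NOWAIT); /* reserve, no sleep */
            if (!p)
                    return NULL;  /* caller requeues and retries later */
            memset(p, 0, sizeof(*p));
    }
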
+diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
+index b0728c8ad90ce..f8996b46f430e 100644
+--- a/include/linux/gpio/driver.h
++++ b/include/linux/gpio/driver.h
+@@ -218,6 +218,15 @@ struct gpio_irq_chip {
+ 	 */
+ 	bool per_parent_data;
+ 
++	/**
++	 * @initialized:
++	 *
++	 * Flag to track GPIO chip irq member's initialization.
++	 * This flag will make sure GPIO chip irq members are not used
++	 * before they are initialized.
++	 */
++	bool initialized;
++
+ 	/**
+ 	 * @init_hw: optional routine to initialize hardware before
+ 	 * an IRQ chip will be added. This is quite useful when
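
A hedged sketch of the guard this flag is meant to enable; the exact
consumer is outside this hunk, so the check below is illustrative only:

    /* don't touch gc->irq.domain and friends until the core has
     * finished wiring them up */
    if (!gc->irq.initialized)
            return -EPROBE_DEFER;
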
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index a59d25f193857..b8641dc0ee661 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -51,7 +51,7 @@ struct ipv6_devconf {
+ 	__s32		use_optimistic;
+ #endif
+ #ifdef CONFIG_IPV6_MROUTE
+-	__s32		mc_forwarding;
++	atomic_t	mc_forwarding;
+ #endif
+ 	__s32		disable_ipv6;
+ 	__s32		drop_unicast_in_l2_multicast;
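
Making mc_forwarding an atomic_t lets the forwarding fast path read it
locklessly while the mroute control path flips it; accessors change
accordingly (illustrative, consumer name hypothetical):

    atomic_set(&devconf->mc_forwarding, 1);     /* control path */
    if (atomic_read(&devconf->mc_forwarding))   /* datapath, lockless */
            do_forwarding(skb);                 /* hypothetical */
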
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index aed44e9b5d899..c7a0d500b396f 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1389,13 +1389,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
+ 
+ static inline struct mem_section *__nr_to_section(unsigned long nr)
+ {
++	unsigned long root = SECTION_NR_TO_ROOT(nr);
++
++	if (unlikely(root >= NR_SECTION_ROOTS))
++		return NULL;
++
+ #ifdef CONFIG_SPARSEMEM_EXTREME
+-	if (!mem_section)
++	if (!mem_section || !mem_section[root])
+ 		return NULL;
+ #endif
+-	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+-		return NULL;
+-	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
++	return &mem_section[root][nr & SECTION_ROOT_MASK];
+ }
+ extern size_t mem_section_usage_size(void);
+ 
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 68f81d8d36def..c9d3dc79d5876 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -513,10 +513,10 @@ static inline const struct cred *nfs_file_cred(struct file *file)
+  * linux/fs/nfs/direct.c
+  */
+ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
+-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
+-			struct iov_iter *iter);
+-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+-			struct iov_iter *iter);
++ssize_t nfs_file_direct_read(struct kiocb *iocb,
++			     struct iov_iter *iter, bool swap);
++ssize_t nfs_file_direct_write(struct kiocb *iocb,
++			      struct iov_iter *iter, bool swap);
+ 
+ /*
+  * linux/fs/nfs/dir.c
+@@ -585,7 +585,7 @@ extern int nfs_wb_all(struct inode *inode);
+ extern int nfs_wb_page(struct inode *inode, struct page *page);
+ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+ extern int  nfs_commit_inode(struct inode *, int);
+-extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
++extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+ extern void nfs_commit_free(struct nfs_commit_data *data);
+ bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
+ 
+diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
+index 60f3453be23e6..a443abda937d8 100644
+--- a/include/linux/ref_tracker.h
++++ b/include/linux/ref_tracker.h
+@@ -13,6 +13,7 @@ struct ref_tracker_dir {
+ 	spinlock_t		lock;
+ 	unsigned int		quarantine_avail;
+ 	refcount_t		untracked;
++	bool			dead;
+ 	struct list_head	list; /* List of active trackers */
+ 	struct list_head	quarantine; /* List of dead trackers */
+ #endif
+@@ -26,6 +27,7 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ 	INIT_LIST_HEAD(&dir->quarantine);
+ 	spin_lock_init(&dir->lock);
+ 	dir->quarantine_avail = quarantine_count;
++	dir->dead = false;
+ 	refcount_set(&dir->untracked, 1);
+ 	stack_depot_init();
+ }
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index 3e56a9751c062..fcc5b48989b3c 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -248,10 +248,7 @@ static inline int static_call_text_reserved(void *start, void *end)
+ 	return 0;
+ }
+ 
+-static inline long __static_call_return0(void)
+-{
+-	return 0;
+-}
++extern long __static_call_return0(void);
+ 
+ #define EXPORT_STATIC_CALL(name)					\
+ 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
+diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
+index ef9a44b6cf5d5..ae6f4838ab755 100644
+--- a/include/linux/vfio_pci_core.h
++++ b/include/linux/vfio_pci_core.h
+@@ -159,8 +159,17 @@ extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
+ extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ 			       size_t count, loff_t *ppos, bool iswrite);
+ 
++#ifdef CONFIG_VFIO_PCI_VGA
+ extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
+ 			       size_t count, loff_t *ppos, bool iswrite);
++#else
++static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
++				      char __user *buf, size_t count,
++				      loff_t *ppos, bool iswrite)
++{
++	return -EINVAL;
++}
++#endif
+ 
+ extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
+ 			       uint64_t data, int count, int fd);
+diff --git a/include/net/arp.h b/include/net/arp.h
+index 031374ac2f222..d7ef4ec71dfeb 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -65,6 +65,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
+ 	      const unsigned char *src_hw, const unsigned char *th);
+ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+ void arp_ifdown(struct net_device *dev);
++int arp_invalidate(struct net_device *dev, __be32 ip, bool force);
+ 
+ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+ 			   struct net_device *dev, __be32 src_ip,
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index a647e5fabdbd6..2aa5e95808a5a 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -204,19 +204,21 @@ void bt_err_ratelimited(const char *fmt, ...);
+ #define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
+ #endif
+ 
++#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null")
++
+ #define bt_dev_info(hdev, fmt, ...)				\
+-	BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ #define bt_dev_warn(hdev, fmt, ...)				\
+-	BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ #define bt_dev_err(hdev, fmt, ...)				\
+-	BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ #define bt_dev_dbg(hdev, fmt, ...)				\
+-	BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ 
+ #define bt_dev_warn_ratelimited(hdev, fmt, ...)			\
+-	bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ #define bt_dev_err_ratelimited(hdev, fmt, ...)			\
+-	bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
++	bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ 
+ /* Connection and socket states */
+ enum {
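
With bt_dev_name() in place, the logging macros tolerate a NULL hdev
instead of dereferencing it, e.g.:

    struct hci_dev *hdev = NULL;        /* e.g. probe failed early */
    bt_dev_err(hdev, "setup failed");   /* logs "null: setup failed" */
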
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e336e9c1dda4f..36d727f94ac29 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -294,6 +294,9 @@ struct adv_monitor {
+ 
+ #define HCI_MAX_SHORT_NAME_LENGTH	10
+ 
++#define HCI_CONN_HANDLE_UNSET		0xffff
++#define HCI_CONN_HANDLE_MAX		0x0eff
++
+ /* Min encryption key size to match with SMP */
+ #define HCI_MIN_ENC_KEY_SIZE		7
+ 
+diff --git a/include/net/mctp.h b/include/net/mctp.h
+index 7e35ec79b909c..204ae3aebc0da 100644
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -36,8 +36,6 @@ struct mctp_hdr {
+ #define MCTP_HDR_TAG_SHIFT	0
+ #define MCTP_HDR_TAG_MASK	GENMASK(2, 0)
+ 
+-#define MCTP_HEADER_MAXLEN	4
+-
+ #define MCTP_INITIAL_DEFAULT_NET	1
+ 
+ static inline bool mctp_address_ok(mctp_eid_t eid)
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 5b61c462e534b..374cc7b260fcd 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -513,4 +513,10 @@ static inline void fnhe_genid_bump(struct net *net)
+ 	atomic_inc(&net->fnhe_genid);
+ }
+ 
++#ifdef CONFIG_NET
++void net_ns_init(void);
++#else
++static inline void net_ns_init(void) {}
++#endif
++
+ #endif /* __NET_NET_NAMESPACE_H */
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 29982d60b68ab..5be3faf88c1a1 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -1005,7 +1005,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
+ DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
+ DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
+ DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
+-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
+ DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
+ 
+ DECLARE_EVENT_CLASS(rpc_xprt_event,
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 015bfec0dbfdb..4b46021eafa99 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -5500,7 +5500,8 @@ struct bpf_sock {
+ 	__u32 src_ip4;
+ 	__u32 src_ip6[4];
+ 	__u32 src_port;		/* host byte order */
+-	__u32 dst_port;		/* network byte order */
++	__be16 dst_port;	/* network byte order */
++	__u16 :16;		/* zero padding */
+ 	__u32 dst_ip4;
+ 	__u32 dst_ip6[4];
+ 	__u32 state;
+@@ -6378,7 +6379,8 @@ struct bpf_sk_lookup {
+ 	__u32 protocol;		/* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+ 	__u32 remote_ip4;	/* Network byte order */
+ 	__u32 remote_ip6[4];	/* Network byte order */
+-	__u32 remote_port;	/* Network byte order */
++	__be16 remote_port;	/* Network byte order */
++	__u16 :16;		/* Zero padding */
+ 	__u32 local_ip4;	/* Network byte order */
+ 	__u32 local_ip6[4];	/* Network byte order */
+ 	__u32 local_port;	/* Host byte order */
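
Both UAPI changes keep the 4-byte footprint and field offsets; they only
make the layout explicit: a big-endian 16-bit port followed by padding
that is defined to load as zero. The practical effect (sketch) is that a
program can use a natural half-word access:

    /* before: the port had to be extracted from a __u32 field;
     * after:  a 2-byte load reads it directly, still in network
     *         byte order, and the adjacent 2 bytes read as zero */
    __u16 dport = sk->dst_port;     /* sk: struct bpf_sock * (context) */
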
+diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
+index c55935b64ccc8..590f8aea2b6d2 100644
+--- a/include/uapi/linux/can/isotp.h
++++ b/include/uapi/linux/can/isotp.h
+@@ -137,20 +137,16 @@ struct can_isotp_ll_options {
+ #define CAN_ISOTP_WAIT_TX_DONE	0x400	/* wait for tx completion */
+ #define CAN_ISOTP_SF_BROADCAST	0x800	/* 1-to-N functional addressing */
+ 
+-/* default values */
++/* protocol machine default values */
+ 
+ #define CAN_ISOTP_DEFAULT_FLAGS		0
+ #define CAN_ISOTP_DEFAULT_EXT_ADDRESS	0x00
+ #define CAN_ISOTP_DEFAULT_PAD_CONTENT	0xCC /* prevent bit-stuffing */
+-#define CAN_ISOTP_DEFAULT_FRAME_TXTIME	0
++#define CAN_ISOTP_DEFAULT_FRAME_TXTIME	50000 /* 50 microseconds */
+ #define CAN_ISOTP_DEFAULT_RECV_BS	0
+ #define CAN_ISOTP_DEFAULT_RECV_STMIN	0x00
+ #define CAN_ISOTP_DEFAULT_RECV_WFTMAX	0
+ 
+-#define CAN_ISOTP_DEFAULT_LL_MTU	CAN_MTU
+-#define CAN_ISOTP_DEFAULT_LL_TX_DL	CAN_MAX_DLEN
+-#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS	0
+-
+ /*
+  * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
+  *
+@@ -162,4 +158,24 @@ struct can_isotp_ll_options {
+  * consistency and copied directly into the flow control (FC) frame.
+  */
+ 
++/* link layer default values => make use of Classical CAN frames */
++
++#define CAN_ISOTP_DEFAULT_LL_MTU	CAN_MTU
++#define CAN_ISOTP_DEFAULT_LL_TX_DL	CAN_MAX_DLEN
++#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS	0
++
++/*
++ * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as
++ * it only makes sense for isotp implementation tests to run without
++ * a N_As value. As user space applications usually do not set the
++ * frame_txtime element of struct can_isotp_options the new in-kernel
++ * default is very likely overwritten with zero when the sockopt()
++ * CAN_ISOTP_OPTS is invoked.
++ * To make sure that an N_As value of zero is only set intentionally, the
++ * value '0' is now interpreted as 'do not change the current value'.
++ * When a frame_txtime of zero is required for testing purposes this
++ * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.
++ */
++#define CAN_ISOTP_FRAME_TXTIME_ZERO	0xFFFFFFFF
++
+ #endif /* !_UAPI_CAN_ISOTP_H */
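
Userspace consequence, as a sketch: a frame_txtime of 0 now means "keep
the 50us kernel default", and forcing a true zero N_As requires the new
magic value. (A real application would read the current options first so
the other fields are not clobbered; that step is omitted here.)

    #include <linux/can.h>
    #include <linux/can/isotp.h>
    #include <sys/socket.h>

    int sk = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
    struct can_isotp_options opts = {
            /* really request N_As = 0, e.g. for implementation tests */
            .frame_txtime = CAN_ISOTP_FRAME_TXTIME_ZERO,
    };
    setsockopt(sk, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));
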
+diff --git a/init/main.c b/init/main.c
+index 65fa2e41a9c09..9a5097b2251a5 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -99,6 +99,7 @@
+ #include <linux/kcsan.h>
+ #include <linux/init_syscalls.h>
+ #include <linux/stackdepot.h>
++#include <net/net_namespace.h>
+ 
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -1116,6 +1117,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+ 	key_init();
+ 	security_init();
+ 	dbg_late_init();
++	net_ns_init();
+ 	vfs_caches_init();
+ 	pagecache_init();
+ 	signals_init();
+@@ -1190,7 +1192,7 @@ static int __init initcall_blacklist(char *str)
+ 		}
+ 	} while (str_entry);
+ 
+-	return 0;
++	return 1;
+ }
+ 
+ static bool __init_or_module initcall_blacklisted(initcall_t fn)
+@@ -1452,7 +1454,9 @@ static noinline void __init kernel_init_freeable(void);
+ bool rodata_enabled __ro_after_init = true;
+ static int __init set_debug_rodata(char *str)
+ {
+-	return strtobool(str, &rodata_enabled);
++	if (strtobool(str, &rodata_enabled))
++		pr_warn("Invalid option string for rodata: '%s'\n", str);
++	return 1;
+ }
+ __setup("rodata=", set_debug_rodata);
+ #endif
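
The first hunk simply calls net_ns_init() directly from start_kernel()
instead of relying on an initcall, so the initial network namespace
exists before any early users. The other two hunks restore the __setup()
handler convention: return 1 to mark the parameter consumed (returning 0
hands it on to init as a bogus argument or environment entry), and warn
rather than fail on a malformed value. Generic sketch (names
hypothetical):

    static bool demo_enabled __initdata;

    static int __init set_demo_opt(char *str)
    {
            if (strtobool(str, &demo_enabled))
                    pr_warn("Invalid option string for demo: '%s'\n", str);
            return 1;       /* consumed either way */
    }
    __setup("demo=", set_demo_opt);
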
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 56f4ee97f3284..a18d169732d2e 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -113,7 +113,8 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o
+ obj-$(CONFIG_BPF) += bpf/
+ obj-$(CONFIG_KCSAN) += kcsan/
+ obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+-obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
++obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
++obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
+ obj-$(CONFIG_CFI_CLANG) += cfi.o
+ 
+ obj-$(CONFIG_PERF_EVENTS) += events/
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 69cf71d973121..0ee9ffceb9764 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11640,6 +11640,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 
+ 	event->state		= PERF_EVENT_STATE_INACTIVE;
+ 
++	if (parent_event)
++		event->event_caps = parent_event->event_caps;
++
+ 	if (event->attr.sigtrap)
+ 		atomic_set(&event->event_limit, 1);
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 1620ae8535dcf..1eec4925b8c64 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5675,6 +5675,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
+ 
+ extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
+ 
++static void queue_core_balance(struct rq *rq);
++
+ static struct task_struct *
+ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ {
+@@ -5724,7 +5726,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ 		}
+ 
+ 		rq->core_pick = NULL;
+-		return next;
++		goto out;
+ 	}
+ 
+ 	put_prev_task_balance(rq, prev, rf);
+@@ -5774,7 +5776,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ 			 */
+ 			WARN_ON_ONCE(fi_before);
+ 			task_vruntime_update(rq, next, false);
+-			goto done;
++			goto out_set_next;
+ 		}
+ 	}
+ 
+@@ -5893,8 +5895,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ 		resched_curr(rq_i);
+ 	}
+ 
+-done:
++out_set_next:
+ 	set_next_task(rq, next);
++out:
++	if (rq->core->core_forceidle_count && next == rq->idle)
++		queue_core_balance(rq);
++
+ 	return next;
+ }
+ 
+@@ -5923,7 +5929,7 @@ static bool try_steal_cookie(int this, int that)
+ 		if (p == src->core_pick || p == src->curr)
+ 			goto next;
+ 
+-		if (!cpumask_test_cpu(this, &p->cpus_mask))
++		if (!is_cpu_allowed(p, this))
+ 			goto next;
+ 
+ 		if (p->core_occupation > dst->idle->core_occupation)
+@@ -5989,7 +5995,7 @@ static void sched_core_balance(struct rq *rq)
+ 
+ static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+ 
+-void queue_core_balance(struct rq *rq)
++static void queue_core_balance(struct rq *rq)
+ {
+ 	if (!sched_core_enabled(rq))
+ 		return;
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index d17b0a5ce6ac3..314c36fc9c42f 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -437,7 +437,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
+ {
+ 	update_idle_core(rq);
+ 	schedstat_inc(rq->sched_goidle);
+-	queue_core_balance(rq);
+ }
+ 
+ #ifdef CONFIG_SMP
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 9b33ba9c3c420..e8a5549488dd1 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1247,8 +1247,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
+ 	return false;
+ }
+ 
+-extern void queue_core_balance(struct rq *rq);
+-
+ static inline bool sched_core_enqueued(struct task_struct *p)
+ {
+ 	return !RB_EMPTY_NODE(&p->core_node);
+@@ -1282,10 +1280,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+ 	return &rq->__lock;
+ }
+ 
+-static inline void queue_core_balance(struct rq *rq)
+-{
+-}
+-
+ static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+ {
+ 	return true;
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 43ba0b1e0edbb..e9c3e69f38379 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -1,548 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+-#include <linux/init.h>
+ #include <linux/static_call.h>
+-#include <linux/bug.h>
+-#include <linux/smp.h>
+-#include <linux/sort.h>
+-#include <linux/slab.h>
+-#include <linux/module.h>
+-#include <linux/cpu.h>
+-#include <linux/processor.h>
+-#include <asm/sections.h>
+-
+-extern struct static_call_site __start_static_call_sites[],
+-			       __stop_static_call_sites[];
+-extern struct static_call_tramp_key __start_static_call_tramp_key[],
+-				    __stop_static_call_tramp_key[];
+-
+-static bool static_call_initialized;
+-
+-/* mutex to protect key modules/sites */
+-static DEFINE_MUTEX(static_call_mutex);
+-
+-static void static_call_lock(void)
+-{
+-	mutex_lock(&static_call_mutex);
+-}
+-
+-static void static_call_unlock(void)
+-{
+-	mutex_unlock(&static_call_mutex);
+-}
+-
+-static inline void *static_call_addr(struct static_call_site *site)
+-{
+-	return (void *)((long)site->addr + (long)&site->addr);
+-}
+-
+-static inline unsigned long __static_call_key(const struct static_call_site *site)
+-{
+-	return (long)site->key + (long)&site->key;
+-}
+-
+-static inline struct static_call_key *static_call_key(const struct static_call_site *site)
+-{
+-	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
+-}
+-
+-/* These assume the key is word-aligned. */
+-static inline bool static_call_is_init(struct static_call_site *site)
+-{
+-	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
+-}
+-
+-static inline bool static_call_is_tail(struct static_call_site *site)
+-{
+-	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
+-}
+-
+-static inline void static_call_set_init(struct static_call_site *site)
+-{
+-	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
+-		    (long)&site->key;
+-}
+-
+-static int static_call_site_cmp(const void *_a, const void *_b)
+-{
+-	const struct static_call_site *a = _a;
+-	const struct static_call_site *b = _b;
+-	const struct static_call_key *key_a = static_call_key(a);
+-	const struct static_call_key *key_b = static_call_key(b);
+-
+-	if (key_a < key_b)
+-		return -1;
+-
+-	if (key_a > key_b)
+-		return 1;
+-
+-	return 0;
+-}
+-
+-static void static_call_site_swap(void *_a, void *_b, int size)
+-{
+-	long delta = (unsigned long)_a - (unsigned long)_b;
+-	struct static_call_site *a = _a;
+-	struct static_call_site *b = _b;
+-	struct static_call_site tmp = *a;
+-
+-	a->addr = b->addr  - delta;
+-	a->key  = b->key   - delta;
+-
+-	b->addr = tmp.addr + delta;
+-	b->key  = tmp.key  + delta;
+-}
+-
+-static inline void static_call_sort_entries(struct static_call_site *start,
+-					    struct static_call_site *stop)
+-{
+-	sort(start, stop - start, sizeof(struct static_call_site),
+-	     static_call_site_cmp, static_call_site_swap);
+-}
+-
+-static inline bool static_call_key_has_mods(struct static_call_key *key)
+-{
+-	return !(key->type & 1);
+-}
+-
+-static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
+-{
+-	if (!static_call_key_has_mods(key))
+-		return NULL;
+-
+-	return key->mods;
+-}
+-
+-static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
+-{
+-	if (static_call_key_has_mods(key))
+-		return NULL;
+-
+-	return (struct static_call_site *)(key->type & ~1);
+-}
+-
+-void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+-{
+-	struct static_call_site *site, *stop;
+-	struct static_call_mod *site_mod, first;
+-
+-	cpus_read_lock();
+-	static_call_lock();
+-
+-	if (key->func == func)
+-		goto done;
+-
+-	key->func = func;
+-
+-	arch_static_call_transform(NULL, tramp, func, false);
+-
+-	/*
+-	 * If uninitialized, we'll not update the callsites, but they still
+-	 * point to the trampoline and we just patched that.
+-	 */
+-	if (WARN_ON_ONCE(!static_call_initialized))
+-		goto done;
+-
+-	first = (struct static_call_mod){
+-		.next = static_call_key_next(key),
+-		.mod = NULL,
+-		.sites = static_call_key_sites(key),
+-	};
+-
+-	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
+-		bool init = system_state < SYSTEM_RUNNING;
+-		struct module *mod = site_mod->mod;
+-
+-		if (!site_mod->sites) {
+-			/*
+-			 * This can happen if the static call key is defined in
+-			 * a module which doesn't use it.
+-			 *
+-			 * It also happens in the has_mods case, where the
+-			 * 'first' entry has no sites associated with it.
+-			 */
+-			continue;
+-		}
+-
+-		stop = __stop_static_call_sites;
+-
+-		if (mod) {
+-#ifdef CONFIG_MODULES
+-			stop = mod->static_call_sites +
+-			       mod->num_static_call_sites;
+-			init = mod->state == MODULE_STATE_COMING;
+-#endif
+-		}
+-
+-		for (site = site_mod->sites;
+-		     site < stop && static_call_key(site) == key; site++) {
+-			void *site_addr = static_call_addr(site);
+-
+-			if (!init && static_call_is_init(site))
+-				continue;
+-
+-			if (!kernel_text_address((unsigned long)site_addr)) {
+-				/*
+-				 * This skips patching built-in __exit, which
+-				 * is part of init_section_contains() but is
+-				 * not part of kernel_text_address().
+-				 *
+-				 * Skipping built-in __exit is fine since it
+-				 * will never be executed.
+-				 */
+-				WARN_ONCE(!static_call_is_init(site),
+-					  "can't patch static call site at %pS",
+-					  site_addr);
+-				continue;
+-			}
+-
+-			arch_static_call_transform(site_addr, NULL, func,
+-						   static_call_is_tail(site));
+-		}
+-	}
+-
+-done:
+-	static_call_unlock();
+-	cpus_read_unlock();
+-}
+-EXPORT_SYMBOL_GPL(__static_call_update);
+-
+-static int __static_call_init(struct module *mod,
+-			      struct static_call_site *start,
+-			      struct static_call_site *stop)
+-{
+-	struct static_call_site *site;
+-	struct static_call_key *key, *prev_key = NULL;
+-	struct static_call_mod *site_mod;
+-
+-	if (start == stop)
+-		return 0;
+-
+-	static_call_sort_entries(start, stop);
+-
+-	for (site = start; site < stop; site++) {
+-		void *site_addr = static_call_addr(site);
+-
+-		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
+-		    (!mod && init_section_contains(site_addr, 1)))
+-			static_call_set_init(site);
+-
+-		key = static_call_key(site);
+-		if (key != prev_key) {
+-			prev_key = key;
+-
+-			/*
+-			 * For vmlinux (!mod) avoid the allocation by storing
+-			 * the sites pointer in the key itself. Also see
+-			 * __static_call_update()'s @first.
+-			 *
+-			 * This allows architectures (eg. x86) to call
+-			 * static_call_init() before memory allocation works.
+-			 */
+-			if (!mod) {
+-				key->sites = site;
+-				key->type |= 1;
+-				goto do_transform;
+-			}
+-
+-			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+-			if (!site_mod)
+-				return -ENOMEM;
+-
+-			/*
+-			 * When the key has a direct sites pointer, extract
+-			 * that into an explicit struct static_call_mod, so we
+-			 * can have a list of modules.
+-			 */
+-			if (static_call_key_sites(key)) {
+-				site_mod->mod = NULL;
+-				site_mod->next = NULL;
+-				site_mod->sites = static_call_key_sites(key);
+-
+-				key->mods = site_mod;
+-
+-				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+-				if (!site_mod)
+-					return -ENOMEM;
+-			}
+-
+-			site_mod->mod = mod;
+-			site_mod->sites = site;
+-			site_mod->next = static_call_key_next(key);
+-			key->mods = site_mod;
+-		}
+-
+-do_transform:
+-		arch_static_call_transform(site_addr, NULL, key->func,
+-				static_call_is_tail(site));
+-	}
+-
+-	return 0;
+-}
+-
+-static int addr_conflict(struct static_call_site *site, void *start, void *end)
+-{
+-	unsigned long addr = (unsigned long)static_call_addr(site);
+-
+-	if (addr <= (unsigned long)end &&
+-	    addr + CALL_INSN_SIZE > (unsigned long)start)
+-		return 1;
+-
+-	return 0;
+-}
+-
+-static int __static_call_text_reserved(struct static_call_site *iter_start,
+-				       struct static_call_site *iter_stop,
+-				       void *start, void *end, bool init)
+-{
+-	struct static_call_site *iter = iter_start;
+-
+-	while (iter < iter_stop) {
+-		if (init || !static_call_is_init(iter)) {
+-			if (addr_conflict(iter, start, end))
+-				return 1;
+-		}
+-		iter++;
+-	}
+-
+-	return 0;
+-}
+-
+-#ifdef CONFIG_MODULES
+-
+-static int __static_call_mod_text_reserved(void *start, void *end)
+-{
+-	struct module *mod;
+-	int ret;
+-
+-	preempt_disable();
+-	mod = __module_text_address((unsigned long)start);
+-	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+-	if (!try_module_get(mod))
+-		mod = NULL;
+-	preempt_enable();
+-
+-	if (!mod)
+-		return 0;
+-
+-	ret = __static_call_text_reserved(mod->static_call_sites,
+-			mod->static_call_sites + mod->num_static_call_sites,
+-			start, end, mod->state == MODULE_STATE_COMING);
+-
+-	module_put(mod);
+-
+-	return ret;
+-}
+-
+-static unsigned long tramp_key_lookup(unsigned long addr)
+-{
+-	struct static_call_tramp_key *start = __start_static_call_tramp_key;
+-	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
+-	struct static_call_tramp_key *tramp_key;
+-
+-	for (tramp_key = start; tramp_key != stop; tramp_key++) {
+-		unsigned long tramp;
+-
+-		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
+-		if (tramp == addr)
+-			return (long)tramp_key->key + (long)&tramp_key->key;
+-	}
+-
+-	return 0;
+-}
+-
+-static int static_call_add_module(struct module *mod)
+-{
+-	struct static_call_site *start = mod->static_call_sites;
+-	struct static_call_site *stop = start + mod->num_static_call_sites;
+-	struct static_call_site *site;
+-
+-	for (site = start; site != stop; site++) {
+-		unsigned long s_key = __static_call_key(site);
+-		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
+-		unsigned long key;
+-
+-		/*
+-		 * Is the key is exported, 'addr' points to the key, which
+-		 * means modules are allowed to call static_call_update() on
+-		 * it.
+-		 *
+-		 * Otherwise, the key isn't exported, and 'addr' points to the
+-		 * trampoline so we need to lookup the key.
+-		 *
+-		 * We go through this dance to prevent crazy modules from
+-		 * abusing sensitive static calls.
+-		 */
+-		if (!kernel_text_address(addr))
+-			continue;
+-
+-		key = tramp_key_lookup(addr);
+-		if (!key) {
+-			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
+-				static_call_addr(site));
+-			return -EINVAL;
+-		}
+-
+-		key |= s_key & STATIC_CALL_SITE_FLAGS;
+-		site->key = key - (long)&site->key;
+-	}
+-
+-	return __static_call_init(mod, start, stop);
+-}
+-
+-static void static_call_del_module(struct module *mod)
+-{
+-	struct static_call_site *start = mod->static_call_sites;
+-	struct static_call_site *stop = mod->static_call_sites +
+-					mod->num_static_call_sites;
+-	struct static_call_key *key, *prev_key = NULL;
+-	struct static_call_mod *site_mod, **prev;
+-	struct static_call_site *site;
+-
+-	for (site = start; site < stop; site++) {
+-		key = static_call_key(site);
+-		if (key == prev_key)
+-			continue;
+-
+-		prev_key = key;
+-
+-		for (prev = &key->mods, site_mod = key->mods;
+-		     site_mod && site_mod->mod != mod;
+-		     prev = &site_mod->next, site_mod = site_mod->next)
+-			;
+-
+-		if (!site_mod)
+-			continue;
+-
+-		*prev = site_mod->next;
+-		kfree(site_mod);
+-	}
+-}
+-
+-static int static_call_module_notify(struct notifier_block *nb,
+-				     unsigned long val, void *data)
+-{
+-	struct module *mod = data;
+-	int ret = 0;
+-
+-	cpus_read_lock();
+-	static_call_lock();
+-
+-	switch (val) {
+-	case MODULE_STATE_COMING:
+-		ret = static_call_add_module(mod);
+-		if (ret) {
+-			WARN(1, "Failed to allocate memory for static calls");
+-			static_call_del_module(mod);
+-		}
+-		break;
+-	case MODULE_STATE_GOING:
+-		static_call_del_module(mod);
+-		break;
+-	}
+-
+-	static_call_unlock();
+-	cpus_read_unlock();
+-
+-	return notifier_from_errno(ret);
+-}
+-
+-static struct notifier_block static_call_module_nb = {
+-	.notifier_call = static_call_module_notify,
+-};
+-
+-#else
+-
+-static inline int __static_call_mod_text_reserved(void *start, void *end)
+-{
+-	return 0;
+-}
+-
+-#endif /* CONFIG_MODULES */
+-
+-int static_call_text_reserved(void *start, void *end)
+-{
+-	bool init = system_state < SYSTEM_RUNNING;
+-	int ret = __static_call_text_reserved(__start_static_call_sites,
+-			__stop_static_call_sites, start, end, init);
+-
+-	if (ret)
+-		return ret;
+-
+-	return __static_call_mod_text_reserved(start, end);
+-}
+-
+-int __init static_call_init(void)
+-{
+-	int ret;
+-
+-	if (static_call_initialized)
+-		return 0;
+-
+-	cpus_read_lock();
+-	static_call_lock();
+-	ret = __static_call_init(NULL, __start_static_call_sites,
+-				 __stop_static_call_sites);
+-	static_call_unlock();
+-	cpus_read_unlock();
+-
+-	if (ret) {
+-		pr_err("Failed to allocate memory for static_call!\n");
+-		BUG();
+-	}
+-
+-	static_call_initialized = true;
+-
+-#ifdef CONFIG_MODULES
+-	register_module_notifier(&static_call_module_nb);
+-#endif
+-	return 0;
+-}
+-early_initcall(static_call_init);
+ 
+ long __static_call_return0(void)
+ {
+ 	return 0;
+ }
+-
+-#ifdef CONFIG_STATIC_CALL_SELFTEST
+-
+-static int func_a(int x)
+-{
+-	return x+1;
+-}
+-
+-static int func_b(int x)
+-{
+-	return x+2;
+-}
+-
+-DEFINE_STATIC_CALL(sc_selftest, func_a);
+-
+-static struct static_call_data {
+-      int (*func)(int);
+-      int val;
+-      int expect;
+-} static_call_data [] __initdata = {
+-      { NULL,   2, 3 },
+-      { func_b, 2, 4 },
+-      { func_a, 2, 3 }
+-};
+-
+-static int __init test_static_call_init(void)
+-{
+-      int i;
+-
+-      for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
+-	      struct static_call_data *scd = &static_call_data[i];
+-
+-              if (scd->func)
+-                      static_call_update(sc_selftest, scd->func);
+-
+-              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
+-      }
+-
+-      return 0;
+-}
+-early_initcall(test_static_call_init);
+-
+-#endif /* CONFIG_STATIC_CALL_SELFTEST */
++EXPORT_SYMBOL_GPL(__static_call_return0);
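
After the split, kernel/static_call.c holds only __static_call_return0
and is built for every CONFIG_HAVE_STATIC_CALL architecture, while the
inline-patching machinery moves verbatim to static_call_inline.c under
CONFIG_HAVE_STATIC_CALL_INLINE (the new file below). The previous
'static inline' definition was the problem: retargeting a static call
needs a real symbol address. Rough illustration (plain C, not kernel
code):

    long ret0(void) { return 0; }   /* out-of-line: patchable target */
    long (*slot)(void) = ret0;      /* trampoline-style indirection  */
    /* a 'static inline' ret0 in a header has no single address that
     * all callers share, so it cannot be installed as the common
     * default target */
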
+diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
+new file mode 100644
+index 0000000000000..dc5665b628140
+--- /dev/null
++++ b/kernel/static_call_inline.c
+@@ -0,0 +1,543 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/init.h>
++#include <linux/static_call.h>
++#include <linux/bug.h>
++#include <linux/smp.h>
++#include <linux/sort.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/cpu.h>
++#include <linux/processor.h>
++#include <asm/sections.h>
++
++extern struct static_call_site __start_static_call_sites[],
++			       __stop_static_call_sites[];
++extern struct static_call_tramp_key __start_static_call_tramp_key[],
++				    __stop_static_call_tramp_key[];
++
++static bool static_call_initialized;
++
++/* mutex to protect key modules/sites */
++static DEFINE_MUTEX(static_call_mutex);
++
++static void static_call_lock(void)
++{
++	mutex_lock(&static_call_mutex);
++}
++
++static void static_call_unlock(void)
++{
++	mutex_unlock(&static_call_mutex);
++}
++
++static inline void *static_call_addr(struct static_call_site *site)
++{
++	return (void *)((long)site->addr + (long)&site->addr);
++}
++
++static inline unsigned long __static_call_key(const struct static_call_site *site)
++{
++	return (long)site->key + (long)&site->key;
++}
++
++static inline struct static_call_key *static_call_key(const struct static_call_site *site)
++{
++	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
++}
++
++/* These assume the key is word-aligned. */
++static inline bool static_call_is_init(struct static_call_site *site)
++{
++	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
++}
++
++static inline bool static_call_is_tail(struct static_call_site *site)
++{
++	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
++}
++
++static inline void static_call_set_init(struct static_call_site *site)
++{
++	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
++		    (long)&site->key;
++}
++
++static int static_call_site_cmp(const void *_a, const void *_b)
++{
++	const struct static_call_site *a = _a;
++	const struct static_call_site *b = _b;
++	const struct static_call_key *key_a = static_call_key(a);
++	const struct static_call_key *key_b = static_call_key(b);
++
++	if (key_a < key_b)
++		return -1;
++
++	if (key_a > key_b)
++		return 1;
++
++	return 0;
++}
++
++static void static_call_site_swap(void *_a, void *_b, int size)
++{
++	long delta = (unsigned long)_a - (unsigned long)_b;
++	struct static_call_site *a = _a;
++	struct static_call_site *b = _b;
++	struct static_call_site tmp = *a;
++
++	a->addr = b->addr  - delta;
++	a->key  = b->key   - delta;
++
++	b->addr = tmp.addr + delta;
++	b->key  = tmp.key  + delta;
++}
++
++static inline void static_call_sort_entries(struct static_call_site *start,
++					    struct static_call_site *stop)
++{
++	sort(start, stop - start, sizeof(struct static_call_site),
++	     static_call_site_cmp, static_call_site_swap);
++}
++
++static inline bool static_call_key_has_mods(struct static_call_key *key)
++{
++	return !(key->type & 1);
++}
++
++static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
++{
++	if (!static_call_key_has_mods(key))
++		return NULL;
++
++	return key->mods;
++}
++
++static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
++{
++	if (static_call_key_has_mods(key))
++		return NULL;
++
++	return (struct static_call_site *)(key->type & ~1);
++}
++
++void __static_call_update(struct static_call_key *key, void *tramp, void *func)
++{
++	struct static_call_site *site, *stop;
++	struct static_call_mod *site_mod, first;
++
++	cpus_read_lock();
++	static_call_lock();
++
++	if (key->func == func)
++		goto done;
++
++	key->func = func;
++
++	arch_static_call_transform(NULL, tramp, func, false);
++
++	/*
++	 * If uninitialized, we'll not update the callsites, but they still
++	 * point to the trampoline and we just patched that.
++	 */
++	if (WARN_ON_ONCE(!static_call_initialized))
++		goto done;
++
++	first = (struct static_call_mod){
++		.next = static_call_key_next(key),
++		.mod = NULL,
++		.sites = static_call_key_sites(key),
++	};
++
++	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
++		bool init = system_state < SYSTEM_RUNNING;
++		struct module *mod = site_mod->mod;
++
++		if (!site_mod->sites) {
++			/*
++			 * This can happen if the static call key is defined in
++			 * a module which doesn't use it.
++			 *
++			 * It also happens in the has_mods case, where the
++			 * 'first' entry has no sites associated with it.
++			 */
++			continue;
++		}
++
++		stop = __stop_static_call_sites;
++
++		if (mod) {
++#ifdef CONFIG_MODULES
++			stop = mod->static_call_sites +
++			       mod->num_static_call_sites;
++			init = mod->state == MODULE_STATE_COMING;
++#endif
++		}
++
++		for (site = site_mod->sites;
++		     site < stop && static_call_key(site) == key; site++) {
++			void *site_addr = static_call_addr(site);
++
++			if (!init && static_call_is_init(site))
++				continue;
++
++			if (!kernel_text_address((unsigned long)site_addr)) {
++				/*
++				 * This skips patching built-in __exit, which
++				 * is part of init_section_contains() but is
++				 * not part of kernel_text_address().
++				 *
++				 * Skipping built-in __exit is fine since it
++				 * will never be executed.
++				 */
++				WARN_ONCE(!static_call_is_init(site),
++					  "can't patch static call site at %pS",
++					  site_addr);
++				continue;
++			}
++
++			arch_static_call_transform(site_addr, NULL, func,
++						   static_call_is_tail(site));
++		}
++	}
++
++done:
++	static_call_unlock();
++	cpus_read_unlock();
++}
++EXPORT_SYMBOL_GPL(__static_call_update);
++
++static int __static_call_init(struct module *mod,
++			      struct static_call_site *start,
++			      struct static_call_site *stop)
++{
++	struct static_call_site *site;
++	struct static_call_key *key, *prev_key = NULL;
++	struct static_call_mod *site_mod;
++
++	if (start == stop)
++		return 0;
++
++	static_call_sort_entries(start, stop);
++
++	for (site = start; site < stop; site++) {
++		void *site_addr = static_call_addr(site);
++
++		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
++		    (!mod && init_section_contains(site_addr, 1)))
++			static_call_set_init(site);
++
++		key = static_call_key(site);
++		if (key != prev_key) {
++			prev_key = key;
++
++			/*
++			 * For vmlinux (!mod) avoid the allocation by storing
++			 * the sites pointer in the key itself. Also see
++			 * __static_call_update()'s @first.
++			 *
++			 * This allows architectures (eg. x86) to call
++			 * static_call_init() before memory allocation works.
++			 */
++			if (!mod) {
++				key->sites = site;
++				key->type |= 1;
++				goto do_transform;
++			}
++
++			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
++			if (!site_mod)
++				return -ENOMEM;
++
++			/*
++			 * When the key has a direct sites pointer, extract
++			 * that into an explicit struct static_call_mod, so we
++			 * can have a list of modules.
++			 */
++			if (static_call_key_sites(key)) {
++				site_mod->mod = NULL;
++				site_mod->next = NULL;
++				site_mod->sites = static_call_key_sites(key);
++
++				key->mods = site_mod;
++
++				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
++				if (!site_mod)
++					return -ENOMEM;
++			}
++
++			site_mod->mod = mod;
++			site_mod->sites = site;
++			site_mod->next = static_call_key_next(key);
++			key->mods = site_mod;
++		}
++
++do_transform:
++		arch_static_call_transform(site_addr, NULL, key->func,
++				static_call_is_tail(site));
++	}
++
++	return 0;
++}
++
++static int addr_conflict(struct static_call_site *site, void *start, void *end)
++{
++	unsigned long addr = (unsigned long)static_call_addr(site);
++
++	if (addr <= (unsigned long)end &&
++	    addr + CALL_INSN_SIZE > (unsigned long)start)
++		return 1;
++
++	return 0;
++}
++
++static int __static_call_text_reserved(struct static_call_site *iter_start,
++				       struct static_call_site *iter_stop,
++				       void *start, void *end, bool init)
++{
++	struct static_call_site *iter = iter_start;
++
++	while (iter < iter_stop) {
++		if (init || !static_call_is_init(iter)) {
++			if (addr_conflict(iter, start, end))
++				return 1;
++		}
++		iter++;
++	}
++
++	return 0;
++}
++
++#ifdef CONFIG_MODULES
++
++static int __static_call_mod_text_reserved(void *start, void *end)
++{
++	struct module *mod;
++	int ret;
++
++	preempt_disable();
++	mod = __module_text_address((unsigned long)start);
++	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
++	if (!try_module_get(mod))
++		mod = NULL;
++	preempt_enable();
++
++	if (!mod)
++		return 0;
++
++	ret = __static_call_text_reserved(mod->static_call_sites,
++			mod->static_call_sites + mod->num_static_call_sites,
++			start, end, mod->state == MODULE_STATE_COMING);
++
++	module_put(mod);
++
++	return ret;
++}
++
++static unsigned long tramp_key_lookup(unsigned long addr)
++{
++	struct static_call_tramp_key *start = __start_static_call_tramp_key;
++	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
++	struct static_call_tramp_key *tramp_key;
++
++	for (tramp_key = start; tramp_key != stop; tramp_key++) {
++		unsigned long tramp;
++
++		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
++		if (tramp == addr)
++			return (long)tramp_key->key + (long)&tramp_key->key;
++	}
++
++	return 0;
++}
++
++static int static_call_add_module(struct module *mod)
++{
++	struct static_call_site *start = mod->static_call_sites;
++	struct static_call_site *stop = start + mod->num_static_call_sites;
++	struct static_call_site *site;
++
++	for (site = start; site != stop; site++) {
++		unsigned long s_key = __static_call_key(site);
++		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
++		unsigned long key;
++
++		/*
++		 * If the key is exported, 'addr' points to the key, which
++		 * means modules are allowed to call static_call_update() on
++		 * it.
++		 *
++		 * Otherwise, the key isn't exported, and 'addr' points to the
++		 * trampoline so we need to lookup the key.
++		 *
++		 * We go through this dance to prevent crazy modules from
++		 * abusing sensitive static calls.
++		 */
++		if (!kernel_text_address(addr))
++			continue;
++
++		key = tramp_key_lookup(addr);
++		if (!key) {
++			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
++				static_call_addr(site));
++			return -EINVAL;
++		}
++
++		key |= s_key & STATIC_CALL_SITE_FLAGS;
++		site->key = key - (long)&site->key;
++	}
++
++	return __static_call_init(mod, start, stop);
++}
++
++static void static_call_del_module(struct module *mod)
++{
++	struct static_call_site *start = mod->static_call_sites;
++	struct static_call_site *stop = mod->static_call_sites +
++					mod->num_static_call_sites;
++	struct static_call_key *key, *prev_key = NULL;
++	struct static_call_mod *site_mod, **prev;
++	struct static_call_site *site;
++
++	for (site = start; site < stop; site++) {
++		key = static_call_key(site);
++		if (key == prev_key)
++			continue;
++
++		prev_key = key;
++
++		for (prev = &key->mods, site_mod = key->mods;
++		     site_mod && site_mod->mod != mod;
++		     prev = &site_mod->next, site_mod = site_mod->next)
++			;
++
++		if (!site_mod)
++			continue;
++
++		*prev = site_mod->next;
++		kfree(site_mod);
++	}
++}
++
++static int static_call_module_notify(struct notifier_block *nb,
++				     unsigned long val, void *data)
++{
++	struct module *mod = data;
++	int ret = 0;
++
++	cpus_read_lock();
++	static_call_lock();
++
++	switch (val) {
++	case MODULE_STATE_COMING:
++		ret = static_call_add_module(mod);
++		if (ret) {
++			WARN(1, "Failed to allocate memory for static calls");
++			static_call_del_module(mod);
++		}
++		break;
++	case MODULE_STATE_GOING:
++		static_call_del_module(mod);
++		break;
++	}
++
++	static_call_unlock();
++	cpus_read_unlock();
++
++	return notifier_from_errno(ret);
++}
++
++static struct notifier_block static_call_module_nb = {
++	.notifier_call = static_call_module_notify,
++};
++
++#else
++
++static inline int __static_call_mod_text_reserved(void *start, void *end)
++{
++	return 0;
++}
++
++#endif /* CONFIG_MODULES */
++
++int static_call_text_reserved(void *start, void *end)
++{
++	bool init = system_state < SYSTEM_RUNNING;
++	int ret = __static_call_text_reserved(__start_static_call_sites,
++			__stop_static_call_sites, start, end, init);
++
++	if (ret)
++		return ret;
++
++	return __static_call_mod_text_reserved(start, end);
++}
++
++int __init static_call_init(void)
++{
++	int ret;
++
++	if (static_call_initialized)
++		return 0;
++
++	cpus_read_lock();
++	static_call_lock();
++	ret = __static_call_init(NULL, __start_static_call_sites,
++				 __stop_static_call_sites);
++	static_call_unlock();
++	cpus_read_unlock();
++
++	if (ret) {
++		pr_err("Failed to allocate memory for static_call!\n");
++		BUG();
++	}
++
++	static_call_initialized = true;
++
++#ifdef CONFIG_MODULES
++	register_module_notifier(&static_call_module_nb);
++#endif
++	return 0;
++}
++early_initcall(static_call_init);
++
++#ifdef CONFIG_STATIC_CALL_SELFTEST
++
++static int func_a(int x)
++{
++	return x+1;
++}
++
++static int func_b(int x)
++{
++	return x+2;
++}
++
++DEFINE_STATIC_CALL(sc_selftest, func_a);
++
++static struct static_call_data {
++      int (*func)(int);
++      int val;
++      int expect;
++} static_call_data [] __initdata = {
++      { NULL,   2, 3 },
++      { func_b, 2, 4 },
++      { func_a, 2, 3 }
++};
++
++static int __init test_static_call_init(void)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
++	      struct static_call_data *scd = &static_call_data[i];
++
++              if (scd->func)
++                      static_call_update(sc_selftest, scd->func);
++
++              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
++      }
++
++      return 0;
++}
++early_initcall(test_static_call_init);
++
++#endif /* CONFIG_STATIC_CALL_SELFTEST */
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 14b89aa37c5c9..440fd666c16d1 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -416,7 +416,8 @@ config SECTION_MISMATCH_WARN_ONLY
+ 	  If unsure, say Y.
+ 
+ config DEBUG_FORCE_FUNCTION_ALIGN_64B
+-	bool "Force all function address 64B aligned" if EXPERT
++	bool "Force all function address 64B aligned"
++	depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
+ 	help
+ 	  There are cases that a commit from one domain changes the function
+ 	  address alignment of other domains, and cause magic performance
+diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
+index 8c3365f26e51d..b247d412ddef7 100644
+--- a/lib/logic_iomem.c
++++ b/lib/logic_iomem.c
+@@ -68,7 +68,7 @@ int logic_iomem_add_region(struct resource *resource,
+ }
+ EXPORT_SYMBOL(logic_iomem_add_region);
+ 
+-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
++#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+ static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+ {
+ 	WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+@@ -81,7 +81,7 @@ static void real_iounmap(volatile void __iomem *addr)
+ 	WARN(1, "invalid iounmap for addr 0x%llx\n",
+ 	     (unsigned long long)(uintptr_t __force)addr);
+ }
+-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
++#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+ 
+ void __iomem *ioremap(phys_addr_t offset, size_t size)
+ {
+@@ -168,7 +168,7 @@ void iounmap(volatile void __iomem *addr)
+ }
+ EXPORT_SYMBOL(iounmap);
+ 
+-#ifndef CONFIG_LOGIC_IOMEM_FALLBACK
++#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+ #define MAKE_FALLBACK(op, sz) 						\
+ static u##sz real_raw_read ## op(const volatile void __iomem *addr)	\
+ {									\
+@@ -213,7 +213,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ 	WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ 	     (unsigned long long)(uintptr_t __force)addr);
+ }
+-#endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
++#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+ 
+ #define MAKE_OP(op, sz) 						\
+ u##sz __raw_read ## op(const volatile void __iomem *addr)		\
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index 926f4823d5eac..fd1728d94babb 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic(
+ 			ip += length;
+ 			op += length;
+ 
+-			/* Necessarily EOF, due to parsing restrictions */
+-			if (!partialDecoding || (cpy == oend))
++			/* Necessarily EOF when !partialDecoding.
++			 * When partialDecoding, it is EOF if we've either
++			 * filled the output buffer or can't proceed with
++			 * reading an offset for the following match.
++			 */
++			if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
+ 				break;
+ 		} else {
+ 			/* may overwrite up to WILDCOPYLENGTH beyond cpy */
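
The hunk above only matters for partial decoding: when a literal run ends
exactly at the block boundary, the decoder must stop rather than read a
two-byte match offset past the input end. As a hedged userspace
illustration of the same entry point (assuming liblz4's
LZ4_decompress_safe_partial(), the userspace counterpart of the kernel
routine patched here):

#include <lz4.h>		/* link with -llz4 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char src[] = "hello hello hello hello hello hello";
	char comp[128], out[64];
	int csz, n, want = 10;

	csz = LZ4_compress_default(src, comp, (int)strlen(src), sizeof(comp));
	if (csz <= 0)
		return 1;

	/* Decode at most `want` bytes; a correct implementation stops at
	 * the requested boundary instead of reading a match offset past
	 * the end of the compressed input. */
	n = LZ4_decompress_safe_partial(comp, out, csz, want, sizeof(out));
	if (n < 0)
		return 1;

	printf("decoded %d bytes: %.*s\n", n, n, out);
	return 0;
}
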
+diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
+index a6789c0c626b0..32ff6bd497f8e 100644
+--- a/lib/ref_tracker.c
++++ b/lib/ref_tracker.c
+@@ -20,6 +20,7 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+ 	unsigned long flags;
+ 	bool leak = false;
+ 
++	dir->dead = true;
+ 	spin_lock_irqsave(&dir->lock, flags);
+ 	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
+ 		list_del(&tracker->head);
+@@ -72,6 +73,8 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ 	gfp_t gfp_mask = gfp;
+ 	unsigned long flags;
+ 
++	WARN_ON_ONCE(dir->dead);
++
+ 	if (gfp & __GFP_DIRECT_RECLAIM)
+ 		gfp_mask |= __GFP_NOFAIL;
+ 	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
+@@ -100,6 +103,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
+ 	unsigned int nr_entries;
+ 	unsigned long flags;
+ 
++	WARN_ON_ONCE(dir->dead);
++
+ 	if (!tracker) {
+ 		refcount_dec(&dir->untracked);
+ 		return -EEXIST;
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 762679050c9a0..916b66e0776c2 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
+ 
+ 		/* With debug all even slots are unmapped and act as guard */
+ 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
+-			WARN_ON_ONCE(!pte_none(pteval));
++			WARN_ON_ONCE(pte_val(pteval) != 0);
+ 			continue;
+ 		}
+ 		if (WARN_ON_ONCE(pte_none(pteval)))
+@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
+ 
+ 		/* With debug all even slots are unmapped and act as guard */
+ 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
+-			WARN_ON_ONCE(!pte_none(pteval));
++			WARN_ON_ONCE(pte_val(pteval) != 0);
+ 			continue;
+ 		}
+ 		if (WARN_ON_ONCE(pte_none(pteval)))
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 13128fa130625..d4c7978cd75e2 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -555,6 +555,8 @@ static bool __init kfence_init_pool(void)
+ 	 * enters __slab_free() slow-path.
+ 	 */
+ 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
++		struct slab *slab = page_slab(&pages[i]);
++
+ 		if (!i || (i % 2))
+ 			continue;
+ 
+@@ -562,7 +564,11 @@ static bool __init kfence_init_pool(void)
+ 		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
+ 			goto err;
+ 
+-		__SetPageSlab(&pages[i]);
++		__folio_set_slab(slab_folio(slab));
++#ifdef CONFIG_MEMCG
++		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
++				   MEMCG_DATA_OBJCGS;
++#endif
+ 	}
+ 
+ 	/*
+@@ -938,6 +944,9 @@ void __kfence_free(void *addr)
+ {
+ 	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
+ 
++#ifdef CONFIG_MEMCG
++	KFENCE_WARN_ON(meta->objcg);
++#endif
+ 	/*
+ 	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
+ 	 * the object, as the object page may be recycled for other-typed
+diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
+index 2a2d5de9d3791..9a6c4b1b12a88 100644
+--- a/mm/kfence/kfence.h
++++ b/mm/kfence/kfence.h
+@@ -89,6 +89,9 @@ struct kfence_metadata {
+ 	struct kfence_track free_track;
+ 	/* For updating alloc_covered on frees. */
+ 	u32 alloc_stack_hash;
++#ifdef CONFIG_MEMCG
++	struct obj_cgroup *objcg;
++#endif
+ };
+ 
+ extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 1628cd90d9fcc..f468bd911c3b9 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2736,6 +2736,7 @@ alloc_new:
+ 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+ 	if (!mpol_new)
+ 		goto err_out;
++	atomic_set(&mpol_new->refcnt, 1);
+ 	goto restart;
+ }
+ 
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 002eec83e91e5..0e175aef536e1 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
+ 	pmd_t *old_pmd, *new_pmd;
+ 	pud_t *old_pud, *new_pud;
+ 
++	if (!len)
++		return 0;
++
+ 	old_end = old_addr + len;
+ 	flush_cache_range(vma, old_addr, old_end);
+ 
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 6a1e8c7f62136..9e27f9f038d39 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1599,7 +1599,30 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 
+ 			/* MADV_FREE page check */
+ 			if (!PageSwapBacked(page)) {
+-				if (!PageDirty(page)) {
++				int ref_count, map_count;
++
++				/*
++				 * Synchronize with gup_pte_range():
++				 * - clear PTE; barrier; read refcount
++				 * - inc refcount; barrier; read PTE
++				 */
++				smp_mb();
++
++				ref_count = page_ref_count(page);
++				map_count = page_mapcount(page);
++
++				/*
++				 * Order reads for page refcount and dirty flag
++				 * (see comments in __remove_mapping()).
++				 */
++				smp_rmb();
++
++				/*
++				 * The only page refs must be one from isolation
++				 * plus the rmap(s) (dropped by discard:).
++				 */
++				if (ref_count == 1 + map_count &&
++				    !PageDirty(page)) {
+ 					/* Invalidate as we cleared the pte */
+ 					mmu_notifier_invalidate_range(mm,
+ 						address, address + PAGE_SIZE);
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index f4004cf0ff6fb..9f311fddfaf9a 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -134,7 +134,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
+ {
+ 	struct inet6_dev *in6_dev = __in6_dev_get(dev);
+ 
+-	if (in6_dev && in6_dev->cnf.mc_forwarding)
++	if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
+ 		return BATADV_NO_FLAGS;
+ 	else
+ 		return BATADV_MCAST_WANT_NO_RTR6;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 3bb2b3b6a1c92..84312c8365493 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -691,6 +691,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 
+ 	bacpy(&conn->dst, dst);
+ 	bacpy(&conn->src, &hdev->bdaddr);
++	conn->handle = HCI_CONN_HANDLE_UNSET;
+ 	conn->hdev  = hdev;
+ 	conn->type  = type;
+ 	conn->role  = role;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index a105b7317560c..d984777c9b58b 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3068,6 +3068,11 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_ev_conn_complete *ev = data;
+ 	struct hci_conn *conn;
+ 
++	if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
++		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle");
++		return;
++	}
++
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+ 	hci_dev_lock(hdev);
+@@ -3106,6 +3111,17 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 		}
+ 	}
+ 
++	/* The HCI_Connection_Complete event is only sent once per connection.
++	 * Processing it more than once per connection can corrupt kernel memory.
++	 *
++	 * As the connection handle is set here for the first time, it indicates
++	 * whether the connection is already set up.
++	 */
++	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
++		goto unlock;
++	}
++
+ 	if (!ev->status) {
+ 		conn->handle = __le16_to_cpu(ev->handle);
+ 
+@@ -4661,6 +4677,24 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_ev_sync_conn_complete *ev = data;
+ 	struct hci_conn *conn;
+ 
++	switch (ev->link_type) {
++	case SCO_LINK:
++	case ESCO_LINK:
++		break;
++	default:
++		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
++		 * for HCI_Synchronous_Connection_Complete is limited to
++		 * either SCO or eSCO
++		 */
++		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
++		return;
++	}
++
++	if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
++		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle");
++		return;
++	}
++
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+ 	hci_dev_lock(hdev);
+@@ -4684,23 +4718,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 			goto unlock;
+ 	}
+ 
++	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
++	 * Processing it more than once per connection can corrupt kernel memory.
++	 *
++	 * As the connection handle is set here for the first time, it indicates
++	 * whether the connection is already set up.
++	 */
++	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
++		goto unlock;
++	}
++
+ 	switch (ev->status) {
+ 	case 0x00:
+-		/* The synchronous connection complete event should only be
+-		 * sent once per new connection. Receiving a successful
+-		 * complete event when the connection status is already
+-		 * BT_CONNECTED means that the device is misbehaving and sent
+-		 * multiple complete event packets for the same new connection.
+-		 *
+-		 * Registering the device more than once can corrupt kernel
+-		 * memory, hence upon detecting this invalid event, we report
+-		 * an error and ignore the packet.
+-		 */
+-		if (conn->state == BT_CONNECTED) {
+-			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
+-			goto unlock;
+-		}
+-
+ 		conn->handle = __le16_to_cpu(ev->handle);
+ 		conn->state  = BT_CONNECTED;
+ 		conn->type   = ev->link_type;
+@@ -5423,8 +5453,9 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+-	if (hcon) {
++	if (hcon && hcon->type == AMP_LINK) {
+ 		hcon->state = BT_CLOSED;
++		hci_disconn_cfm(hcon, ev->reason);
+ 		hci_conn_del(hcon);
+ 	}
+ 
+@@ -5496,6 +5527,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	struct smp_irk *irk;
+ 	u8 addr_type;
+ 
++	if (handle > HCI_CONN_HANDLE_MAX) {
++		bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle");
++		return;
++	}
++
+ 	hci_dev_lock(hdev);
+ 
+ 	/* All controllers implicitly stop advertising in the event of a
+@@ -5537,6 +5573,17 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 		cancel_delayed_work(&conn->le_conn_timeout);
+ 	}
+ 
++	/* The HCI_LE_Connection_Complete event is only sent once per connection.
++	 * Processing it more than once per connection can corrupt kernel memory.
++	 *
++	 * As the connection handle is set here for the first time, it indicates
++	 * whether the connection is already set up.
++	 */
++	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
++		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
++		goto unlock;
++	}
++
+ 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
+ 
+ 	/* Lookup the identity address from the stored connection
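
The handle checks above rely on two constants introduced alongside this
fix in include/net/bluetooth/hci_core.h (not shown in this patch). A
sketch of the assumed definitions, based on the Bluetooth Core spec's
12-bit connection-handle space:

/* Connection handles occupy 12 bits on the wire, so 0x0eff is the
 * largest value a controller may legitimately report; 0xffff is then
 * free to serve as the "no handle assigned yet" marker tested by the
 * hunks above. (Values assumed from the corresponding upstream commit.)
 */
#define HCI_CONN_HANDLE_UNSET	0xffff
#define HCI_CONN_HANDLE_MAX	0x0eff
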
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 405d48c3e63ed..8f4c5698913d7 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -379,6 +379,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ {
+ 	struct hci_cmd_sync_work_entry *entry;
+ 
++	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
++		return -ENODEV;
++
+ 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+ 		return -ENOMEM;
+@@ -5156,8 +5159,8 @@ static void set_ext_conn_params(struct hci_conn *conn,
+ 	p->max_ce_len = cpu_to_le16(0x0000);
+ }
+ 
+-int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+-				u8 own_addr_type)
++static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
++				       struct hci_conn *conn, u8 own_addr_type)
+ {
+ 	struct hci_cp_le_ext_create_conn *cp;
+ 	struct hci_cp_le_ext_conn_param *p;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index e817ff0607a06..8df99c07f2724 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1436,6 +1436,7 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan)
+ 
+ 	l2cap_ecred_init(chan, 0);
+ 
++	memset(&data, 0, sizeof(data));
+ 	data.pdu.req.psm     = chan->psm;
+ 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
+ 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 46dd957559672..eb43aaac0392c 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -960,7 +960,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
+ 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
+ 		goto out;
+ 
+-	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
++	if (user_ctx->local_port > U16_MAX) {
+ 		ret = -ERANGE;
+ 		goto out;
+ 	}
+@@ -968,7 +968,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
+ 	ctx.family = (u16)user_ctx->family;
+ 	ctx.protocol = (u16)user_ctx->protocol;
+ 	ctx.dport = (u16)user_ctx->local_port;
+-	ctx.sport = (__force __be16)user_ctx->remote_port;
++	ctx.sport = user_ctx->remote_port;
+ 
+ 	switch (ctx.family) {
+ 	case AF_INET:
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index a95d171b3a64b..5bce7c66c1219 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -141,6 +141,7 @@ struct isotp_sock {
+ 	struct can_isotp_options opt;
+ 	struct can_isotp_fc_options rxfc, txfc;
+ 	struct can_isotp_ll_options ll;
++	u32 frame_txtime;
+ 	u32 force_tx_stmin;
+ 	u32 force_rx_stmin;
+ 	struct tpcon rx, tx;
+@@ -360,7 +361,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
+ 
+ 		so->tx_gap = ktime_set(0, 0);
+ 		/* add transmission time for CAN frame N_As */
+-		so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime);
++		so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime);
+ 		/* add waiting time for consecutive frames N_Cs */
+ 		if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
+ 			so->tx_gap = ktime_add_ns(so->tx_gap,
+@@ -1247,6 +1248,14 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
+ 		/* no separate rx_ext_address is given => use ext_address */
+ 		if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
+ 			so->opt.rx_ext_address = so->opt.ext_address;
++
++		/* check for frame_txtime changes (0 => no changes) */
++		if (so->opt.frame_txtime) {
++			if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
++				so->frame_txtime = 0;
++			else
++				so->frame_txtime = so->opt.frame_txtime;
++		}
+ 		break;
+ 
+ 	case CAN_ISOTP_RECV_FC:
+@@ -1448,6 +1457,7 @@ static int isotp_init(struct sock *sk)
+ 	so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
+ 	so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
+ 	so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
++	so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
+ 	so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS;
+ 	so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN;
+ 	so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1baab07820f65..91cf709c98b37 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10732,8 +10732,7 @@ static int __net_init netdev_init(struct net *net)
+ 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
+ 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
+ 
+-	if (net != &init_net)
+-		INIT_LIST_HEAD(&net->dev_base_head);
++	INIT_LIST_HEAD(&net->dev_base_head);
+ 
+ 	net->dev_name_head = netdev_create_hash();
+ 	if (net->dev_name_head == NULL)
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 9eb785842258a..af0bafe9dcce2 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -6777,24 +6777,33 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
+ 	if (!th->ack || th->rst || th->syn)
+ 		return -ENOENT;
+ 
++	if (unlikely(iph_len < sizeof(struct iphdr)))
++		return -EINVAL;
++
+ 	if (tcp_synq_no_recent_overflow(sk))
+ 		return -ENOENT;
+ 
+ 	cookie = ntohl(th->ack_seq) - 1;
+ 
+-	switch (sk->sk_family) {
+-	case AF_INET:
+-		if (unlikely(iph_len < sizeof(struct iphdr)))
++	/* Both struct iphdr and struct ipv6hdr have the version field at the
++	 * same offset so we can cast to the shorter header (struct iphdr).
++	 */
++	switch (((struct iphdr *)iph)->version) {
++	case 4:
++		if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
+ 			return -EINVAL;
+ 
+ 		ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
+ 		break;
+ 
+ #if IS_BUILTIN(CONFIG_IPV6)
+-	case AF_INET6:
++	case 6:
+ 		if (unlikely(iph_len < sizeof(struct ipv6hdr)))
+ 			return -EINVAL;
+ 
++		if (sk->sk_family != AF_INET6)
++			return -EINVAL;
++
+ 		ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
+ 		break;
+ #endif /* CONFIG_IPV6 */
+@@ -8033,6 +8042,7 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ 			      struct bpf_insn_access_aux *info)
+ {
+ 	const int size_default = sizeof(__u32);
++	int field_size;
+ 
+ 	if (off < 0 || off >= sizeof(struct bpf_sock))
+ 		return false;
+@@ -8044,7 +8054,6 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ 	case offsetof(struct bpf_sock, family):
+ 	case offsetof(struct bpf_sock, type):
+ 	case offsetof(struct bpf_sock, protocol):
+-	case offsetof(struct bpf_sock, dst_port):
+ 	case offsetof(struct bpf_sock, src_port):
+ 	case offsetof(struct bpf_sock, rx_queue_mapping):
+ 	case bpf_ctx_range(struct bpf_sock, src_ip4):
+@@ -8053,6 +8062,14 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ 	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
+ 		bpf_ctx_record_field_size(info, size_default);
+ 		return bpf_ctx_narrow_access_ok(off, size, size_default);
++	case bpf_ctx_range(struct bpf_sock, dst_port):
++		field_size = size == size_default ?
++			size_default : sizeof_field(struct bpf_sock, dst_port);
++		bpf_ctx_record_field_size(info, field_size);
++		return bpf_ctx_narrow_access_ok(off, size, field_size);
++	case offsetofend(struct bpf_sock, dst_port) ...
++	     offsetof(struct bpf_sock, dst_ip4) - 1:
++		return false;
+ 	}
+ 
+ 	return size == size_default;
+@@ -10604,12 +10621,24 @@ static bool sk_lookup_is_valid_access(int off, int size,
+ 	case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
+ 	case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
+ 	case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
+-	case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
+ 	case bpf_ctx_range(struct bpf_sk_lookup, local_port):
+ 	case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
+ 		bpf_ctx_record_field_size(info, sizeof(__u32));
+ 		return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
+ 
++	case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
++		/* Allow 4-byte access to 2-byte field for backward compatibility */
++		if (size == sizeof(__u32))
++			return true;
++		bpf_ctx_record_field_size(info, sizeof(__be16));
++		return bpf_ctx_narrow_access_ok(off, size, sizeof(__be16));
++
++	case offsetofend(struct bpf_sk_lookup, remote_port) ...
++	     offsetof(struct bpf_sk_lookup, local_ip4) - 1:
++		/* Allow access to zero padding for backward compatibility */
++		bpf_ctx_record_field_size(info, sizeof(__u16));
++		return bpf_ctx_narrow_access_ok(off, size, sizeof(__u16));
++
+ 	default:
+ 		return false;
+ 	}
+@@ -10691,6 +10720,11 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
+ 						     sport, 2, target_size));
+ 		break;
+ 
++	case offsetofend(struct bpf_sk_lookup, remote_port):
++		*target_size = 2;
++		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
++		break;
++
+ 	case offsetof(struct bpf_sk_lookup, local_port):
+ 		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+ 				      bpf_target_off(struct bpf_sk_lookup_kern,
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
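
Both filter.c hunks deal with the same UAPI layout change: dst_port in
struct bpf_sock and remote_port in struct bpf_sk_lookup shrink from four
bytes to a __be16 followed by two bytes of zero padding. struct bpf_sock
rejects loads from the padding outright, while struct bpf_sk_lookup keeps
old 4-byte loads of remote_port working by letting padding reads through
and rewriting them to load the constant 0. A sketch of the layouts the
offsetof()/offsetofend() ranges above imply (the fragment names are
illustrative, not the real UAPI struct definitions):

#include <linux/types.h>

struct bpf_sock_fragment {
	__be16 dst_port;	/* 2 bytes, network byte order */
	__u16  :16;		/* padding: loads rejected (returns false) */
	__u32  dst_ip4;		/* hence the "offsetof(dst_ip4) - 1" range end */
};

struct bpf_sk_lookup_fragment {
	__be16 remote_port;	/* 2 bytes, network byte order */
	__u16  :16;		/* padding: readable, always loads as 0 */
	__u32  local_ip4;
};
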
+index a5b5bb99c6446..212e65add9512 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -44,13 +44,7 @@ EXPORT_SYMBOL_GPL(net_rwsem);
+ static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
+ #endif
+ 
+-struct net init_net = {
+-	.ns.count	= REFCOUNT_INIT(1),
+-	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
+-#ifdef CONFIG_KEYS
+-	.key_domain	= &init_net_key_domain,
+-#endif
+-};
++struct net init_net;
+ EXPORT_SYMBOL(init_net);
+ 
+ static bool init_net_initialized;
+@@ -1084,7 +1078,7 @@ out:
+ 	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
+ }
+ 
+-static int __init net_ns_init(void)
++void __init net_ns_init(void)
+ {
+ 	struct net_generic *ng;
+ 
+@@ -1105,6 +1099,9 @@ static int __init net_ns_init(void)
+ 
+ 	rcu_assign_pointer(init_net.gen, ng);
+ 
++#ifdef CONFIG_KEYS
++	init_net.key_domain = &init_net_key_domain;
++#endif
+ 	down_write(&pernet_ops_rwsem);
+ 	if (setup_net(&init_net, &init_user_ns))
+ 		panic("Could not setup the initial network namespace");
+@@ -1119,12 +1116,8 @@ static int __init net_ns_init(void)
+ 		      RTNL_FLAG_DOIT_UNLOCKED);
+ 	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
+ 		      RTNL_FLAG_DOIT_UNLOCKED);
+-
+-	return 0;
+ }
+ 
+-pure_initcall(net_ns_init);
+-
+ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
+ {
+ 	ops_pre_exit_list(ops, net_exit_list);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2fb8eb6791e8a..43b995e935cd6 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3652,13 +3652,24 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
+ 			   bool *changed, struct netlink_ext_ack *extack)
+ {
+ 	char *alt_ifname;
++	size_t size;
+ 	int err;
+ 
+ 	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
+ 	if (err)
+ 		return err;
+ 
+-	alt_ifname = nla_strdup(attr, GFP_KERNEL);
++	if (cmd == RTM_NEWLINKPROP) {
++		size = rtnl_prop_list_size(dev);
++		size += nla_total_size(ALTIFNAMSIZ);
++		if (size >= U16_MAX) {
++			NL_SET_ERR_MSG(extack,
++				       "effective property list too long");
++			return -EINVAL;
++		}
++	}
++
++	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
+ 	if (!alt_ifname)
+ 		return -ENOMEM;
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a8a2fb745274c..180fa6a26ad45 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5275,11 +5275,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ 	if (skb_cloned(to))
+ 		return false;
+ 
+-	/* The page pool signature of struct page will eventually figure out
+-	 * which pages can be recycled or not but for now let's prohibit slab
+-	 * allocated and page_pool allocated SKBs from being coalesced.
++	/* In general, avoid mixing slab allocated and page_pool allocated
++	 * pages within the same SKB. However when @to is not pp_recycle and
++	 * @from is cloned, we can transition frag pages from page_pool to
++	 * reference counted.
++	 *
++	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
++	 * @from is cloned, in case the SKB is using page_pool fragment
++	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
++	 * references for cloned SKBs at the moment that would result in
++	 * inconsistent reference counts.
+ 	 */
+-	if (to->pp_recycle != from->pp_recycle)
++	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+ 		return false;
+ 
+ 	if (len <= skb_tailroom(to)) {
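
The replacement condition is dense; expanding
"to->pp_recycle != (from->pp_recycle && !skb_cloned(from))" into its six
cases (derived purely from the expression, so this only restates the
code):

  to->pp_recycle  from->pp_recycle  skb_cloned(from)  coalesce?
        0                0                any          yes
        0                1                 0           no
        0                1                 1           yes (frags convert to page refs)
        1                0                any          no
        1                1                 0           yes
        1                1                 1           no  (PP_FLAG_PAGE_FRAG refs)
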
+diff --git a/net/dsa/master.c b/net/dsa/master.c
+index 880f910b23a99..10b51ffbb6f48 100644
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -337,11 +337,24 @@ static const struct attribute_group dsa_group = {
+ 
+ static struct lock_class_key dsa_master_addr_list_lock_key;
+ 
++static void dsa_master_reset_mtu(struct net_device *dev)
++{
++	int err;
++
++	err = dev_set_mtu(dev, ETH_DATA_LEN);
++	if (err)
++		netdev_dbg(dev,
++			   "Unable to reset MTU to exclude DSA overheads\n");
++}
++
+ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ {
++	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
+ 	struct dsa_switch *ds = cpu_dp->ds;
+ 	struct device_link *consumer_link;
+-	int ret;
++	int mtu, ret;
++
++	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
+ 
+ 	/* The DSA master must use SET_NETDEV_DEV for this to work. */
+ 	consumer_link = device_link_add(ds->dev, dev->dev.parent,
+@@ -351,6 +364,15 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ 			   "Failed to create a device link to DSA switch %s\n",
+ 			   dev_name(ds->dev));
+ 
++	/* The switch driver may not implement ->port_change_mtu(), in which
++	 * case dsa_slave_change_mtu() will not update the master MTU either,
++	 * so we need to do that here.
++	 */
++	ret = dev_set_mtu(dev, mtu);
++	if (ret)
++		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
++			    ret, mtu);
++
+ 	/* If we use a tagging format that doesn't have an ethertype
+ 	 * field, make sure that all packets from this point on get
+ 	 * sent to the tag format's receive function.
+@@ -388,6 +410,7 @@ void dsa_master_teardown(struct net_device *dev)
+ 	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
+ 	dsa_netdev_ops_set(dev, NULL);
+ 	dsa_master_ethtool_teardown(dev);
++	dsa_master_reset_mtu(dev);
+ 	dsa_master_set_promiscuity(dev, -1);
+ 
+ 	dev->dsa_ptr = NULL;
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 4db0325f6e1af..dc28f0588e540 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1116,13 +1116,18 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
+ 	return err;
+ }
+ 
+-static int arp_invalidate(struct net_device *dev, __be32 ip)
++int arp_invalidate(struct net_device *dev, __be32 ip, bool force)
+ {
+ 	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+ 	int err = -ENXIO;
+ 	struct neigh_table *tbl = &arp_tbl;
+ 
+ 	if (neigh) {
++		if ((neigh->nud_state & NUD_VALID) && !force) {
++			neigh_release(neigh);
++			return 0;
++		}
++
+ 		if (neigh->nud_state & ~NUD_NOARP)
+ 			err = neigh_update(neigh, NULL, NUD_FAILED,
+ 					   NEIGH_UPDATE_F_OVERRIDE|
+@@ -1169,7 +1174,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
+ 		if (!dev)
+ 			return -EINVAL;
+ 	}
+-	return arp_invalidate(dev, ip);
++	return arp_invalidate(dev, ip, true);
+ }
+ 
+ /*
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 85117b45216d4..89a5a48755950 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1115,9 +1115,11 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
+ 		return;
+ 
+ 	/* Add broadcast address, if it is explicitly assigned. */
+-	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
++	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) {
+ 		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32,
+ 			  prim, 0);
++		arp_invalidate(dev, ifa->ifa_broadcast, false);
++	}
+ 
+ 	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
+ 	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
+@@ -1131,6 +1133,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
+ 		if (ifa->ifa_prefixlen < 31) {
+ 			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
+ 				  32, prim, 0);
++			arp_invalidate(dev, prefix | ~mask, false);
+ 		}
+ 	}
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 2dd375f7407b6..0a0f497703450 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -888,8 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
+ 	}
+ 
+ 	if (cfg->fc_oif || cfg->fc_gw_family) {
+-		struct fib_nh *nh = fib_info_nh(fi, 0);
++		struct fib_nh *nh;
++
++		/* cannot match on nexthop object attributes */
++		if (fi->nh)
++			return 1;
+ 
++		nh = fib_info_nh(fi, 0);
+ 		if (cfg->fc_encap) {
+ 			if (fib_encap_match(net, cfg->fc_encap_type,
+ 					    cfg->fc_encap, nh, cfg, extack))
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 30ab717ff1b81..17440840a7914 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -637,7 +637,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ 	int err = 0;
+ 
+ 	if (sk->sk_state != TCP_LISTEN) {
++		local_bh_disable();
+ 		inet_ehash_nolisten(sk, osk, NULL);
++		local_bh_enable();
+ 		return 0;
+ 	}
+ 	WARN_ON(!sk_unhashed(sk));
+@@ -669,45 +671,54 @@ int inet_hash(struct sock *sk)
+ {
+ 	int err = 0;
+ 
+-	if (sk->sk_state != TCP_CLOSE) {
+-		local_bh_disable();
++	if (sk->sk_state != TCP_CLOSE)
+ 		err = __inet_hash(sk, NULL);
+-		local_bh_enable();
+-	}
+ 
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(inet_hash);
+ 
+-void inet_unhash(struct sock *sk)
++static void __inet_unhash(struct sock *sk, struct inet_listen_hashbucket *ilb)
+ {
+-	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+-	struct inet_listen_hashbucket *ilb = NULL;
+-	spinlock_t *lock;
+-
+ 	if (sk_unhashed(sk))
+ 		return;
+ 
+-	if (sk->sk_state == TCP_LISTEN) {
+-		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+-		lock = &ilb->lock;
+-	} else {
+-		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
+-	}
+-	spin_lock_bh(lock);
+-	if (sk_unhashed(sk))
+-		goto unlock;
+-
+ 	if (rcu_access_pointer(sk->sk_reuseport_cb))
+ 		reuseport_stop_listen_sock(sk);
+ 	if (ilb) {
++		struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
++
+ 		inet_unhash2(hashinfo, sk);
+ 		ilb->count--;
+ 	}
+ 	__sk_nulls_del_node_init_rcu(sk);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+-unlock:
+-	spin_unlock_bh(lock);
++}
++
++void inet_unhash(struct sock *sk)
++{
++	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
++
++	if (sk_unhashed(sk))
++		return;
++
++	if (sk->sk_state == TCP_LISTEN) {
++		struct inet_listen_hashbucket *ilb;
++
++		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
++		/* Don't disable bottom halves while acquiring the lock to
++		 * avoid circular locking dependency on PREEMPT_RT.
++		 */
++		spin_lock(&ilb->lock);
++		__inet_unhash(sk, ilb);
++		spin_unlock(&ilb->lock);
++	} else {
++		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
++
++		spin_lock_bh(lock);
++		__inet_unhash(sk, NULL);
++		spin_unlock_bh(lock);
++	}
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f908e2fd30b24..4df84013c4e6b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -554,7 +554,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
+ #ifdef CONFIG_IPV6_MROUTE
+ 	if ((all || type == NETCONFA_MC_FORWARDING) &&
+ 	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
+-			devconf->mc_forwarding) < 0)
++			atomic_read(&devconf->mc_forwarding)) < 0)
+ 		goto nla_put_failure;
+ #endif
+ 	if ((all || type == NETCONFA_PROXY_NEIGH) &&
+@@ -5539,7 +5539,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ 	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
+ #endif
+ #ifdef CONFIG_IPV6_MROUTE
+-	array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
++	array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
+ #endif
+ 	array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
+ 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 8fe7900f19499..7d7b7523d1265 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -441,11 +441,14 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+ 	u32 flags = BIND_WITH_LOCK;
++	const struct proto *prot;
+ 	int err = 0;
+ 
++	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
++	prot = READ_ONCE(sk->sk_prot);
+ 	/* If the socket has its own bind function then use it. */
+-	if (sk->sk_prot->bind)
+-		return sk->sk_prot->bind(sk, uaddr, addr_len);
++	if (prot->bind)
++		return prot->bind(sk, uaddr, addr_len);
+ 
+ 	if (addr_len < SIN6_LEN_RFC2133)
+ 		return -EINVAL;
+@@ -555,6 +558,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 	void __user *argp = (void __user *)arg;
+ 	struct sock *sk = sock->sk;
+ 	struct net *net = sock_net(sk);
++	const struct proto *prot;
+ 
+ 	switch (cmd) {
+ 	case SIOCADDRT:
+@@ -572,9 +576,11 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ 	case SIOCSIFDSTADDR:
+ 		return addrconf_set_dstaddr(net, argp);
+ 	default:
+-		if (!sk->sk_prot->ioctl)
++		/* IPV6_ADDRFORM can change sk->sk_prot under us. */
++		prot = READ_ONCE(sk->sk_prot);
++		if (!prot->ioctl)
+ 			return -ENOIOCTLCMD;
+-		return sk->sk_prot->ioctl(sk, cmd, arg);
++		return prot->ioctl(sk, cmd, arg);
+ 	}
+ 	/*NOTREACHED*/
+ 	return 0;
+@@ -636,11 +642,14 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_sendmsg(struct sock *, struct msghdr *,
+ int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ {
+ 	struct sock *sk = sock->sk;
++	const struct proto *prot;
+ 
+ 	if (unlikely(inet_send_prepare(sk)))
+ 		return -EAGAIN;
+ 
+-	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udpv6_sendmsg,
++	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
++	prot = READ_ONCE(sk->sk_prot);
++	return INDIRECT_CALL_2(prot->sendmsg, tcp_sendmsg, udpv6_sendmsg,
+ 			       sk, msg, size);
+ }
+ 
+@@ -650,13 +659,16 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 		  int flags)
+ {
+ 	struct sock *sk = sock->sk;
++	const struct proto *prot;
+ 	int addr_len = 0;
+ 	int err;
+ 
+ 	if (likely(!(flags & MSG_ERRQUEUE)))
+ 		sock_rps_record_flow(sk);
+ 
+-	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udpv6_recvmsg,
++	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
++	prot = READ_ONCE(sk->sk_prot);
++	err = INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udpv6_recvmsg,
+ 			      sk, msg, size, flags & MSG_DONTWAIT,
+ 			      flags & ~MSG_DONTWAIT, &addr_len);
+ 	if (err >= 0)
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 4514444e96c8d..4740afecf7c62 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -333,11 +333,8 @@ int inet6_hash(struct sock *sk)
+ {
+ 	int err = 0;
+ 
+-	if (sk->sk_state != TCP_CLOSE) {
+-		local_bh_disable();
++	if (sk->sk_state != TCP_CLOSE)
+ 		err = __inet_hash(sk, NULL);
+-		local_bh_enable();
+-	}
+ 
+ 	return err;
+ }
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 80256717868e6..d4b1e2c5aa76d 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -508,7 +508,7 @@ int ip6_mc_input(struct sk_buff *skb)
+ 	/*
+ 	 *      IPv6 multicast router mode is now supported ;)
+ 	 */
+-	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
++	if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
+ 	    !(ipv6_addr_type(&hdr->daddr) &
+ 	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
+ 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 8a2db926b5eb6..e3c884678dbe2 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -734,7 +734,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
+ 
+ 	in6_dev = __in6_dev_get(dev);
+ 	if (in6_dev) {
+-		in6_dev->cnf.mc_forwarding--;
++		atomic_dec(&in6_dev->cnf.mc_forwarding);
+ 		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
+ 					     NETCONFA_MC_FORWARDING,
+ 					     dev->ifindex, &in6_dev->cnf);
+@@ -902,7 +902,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt,
+ 
+ 	in6_dev = __in6_dev_get(dev);
+ 	if (in6_dev) {
+-		in6_dev->cnf.mc_forwarding++;
++		atomic_inc(&in6_dev->cnf.mc_forwarding);
+ 		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
+ 					     NETCONFA_MC_FORWARDING,
+ 					     dev->ifindex, &in6_dev->cnf);
+@@ -1553,7 +1553,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
+ 	} else {
+ 		rcu_assign_pointer(mrt->mroute_sk, sk);
+ 		sock_set_flag(sk, SOCK_RCU_FREE);
+-		net->ipv6.devconf_all->mc_forwarding++;
++		atomic_inc(&net->ipv6.devconf_all->mc_forwarding);
+ 	}
+ 	write_unlock_bh(&mrt_lock);
+ 
+@@ -1586,7 +1586,7 @@ int ip6mr_sk_done(struct sock *sk)
+ 			 * so the RCU grace period before sk freeing
+ 			 * is guaranteed by sk_destruct()
+ 			 */
+-			net->ipv6.devconf_all->mc_forwarding--;
++			atomic_dec(&net->ipv6.devconf_all->mc_forwarding);
+ 			write_unlock_bh(&mrt_lock);
+ 			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+ 						     NETCONFA_MC_FORWARDING,
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index a733803a710cf..222f6bf220ba0 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -475,7 +475,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 				sock_prot_inuse_add(net, sk->sk_prot, -1);
+ 				sock_prot_inuse_add(net, &tcp_prot, 1);
+ 
+-				sk->sk_prot = &tcp_prot;
++				/* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */
++				WRITE_ONCE(sk->sk_prot, &tcp_prot);
+ 				icsk->icsk_af_ops = &ipv4_specific;
+ 				sk->sk_socket->ops = &inet_stream_ops;
+ 				sk->sk_family = PF_INET;
+@@ -489,7 +490,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 				sock_prot_inuse_add(net, sk->sk_prot, -1);
+ 				sock_prot_inuse_add(net, prot, 1);
+ 
+-				sk->sk_prot = prot;
++				/* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */
++				WRITE_ONCE(sk->sk_prot, prot);
+ 				sk->sk_socket->ops = &inet_dgram_ops;
+ 				sk->sk_family = PF_INET;
+ 			}
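
All of the af_inet6.c hunks pair with the two ipv6_sockglue.c stores:
IPV6_ADDRFORM may swap sk->sk_prot while lockless readers dereference it,
so both sides are annotated. A generic sketch of the pattern (not tied to
these files):

/* Writer runs under lock_sock(); readers may not hold it.  The
 * WRITE_ONCE()/READ_ONCE() pair prevents store/load tearing and stops
 * the compiler from re-loading the pointer between uses.
 */

/* writer, socket lock held */
WRITE_ONCE(sk->sk_prot, &tcp_prot);

/* lockless reader: snapshot once, then use only the snapshot */
const struct proto *prot = READ_ONCE(sk->sk_prot);
if (prot->bind)
	return prot->bind(sk, uaddr, addr_len);
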
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ea1cf414a92e7..da1bf48e79370 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4495,7 +4495,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
+ 	struct inet6_dev *idev;
+ 	int type;
+ 
+-	if (netif_is_l3_master(skb->dev) &&
++	if (netif_is_l3_master(skb->dev) ||
+ 	    dst->dev == net->loopback_dev)
+ 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+ 	else
+diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
+index c921de63b494b..fc05351d3a82e 100644
+--- a/net/mctp/af_mctp.c
++++ b/net/mctp/af_mctp.c
+@@ -90,13 +90,13 @@ out_release:
+ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
+ 	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
+-	const int hlen = MCTP_HEADER_MAXLEN + sizeof(struct mctp_hdr);
+ 	int rc, addrlen = msg->msg_namelen;
+ 	struct sock *sk = sock->sk;
+ 	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ 	struct mctp_skb_cb *cb;
+ 	struct mctp_route *rt;
+-	struct sk_buff *skb;
++	struct sk_buff *skb = NULL;
++	int hlen;
+ 
+ 	if (addr) {
+ 		if (addrlen < sizeof(struct sockaddr_mctp))
+@@ -119,6 +119,34 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (addr->smctp_network == MCTP_NET_ANY)
+ 		addr->smctp_network = mctp_default_net(sock_net(sk));
+ 
++	/* direct addressing */
++	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
++		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
++				 extaddr, msg->msg_name);
++		struct net_device *dev;
++
++		rc = -EINVAL;
++		rcu_read_lock();
++		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
++		/* check for correct halen */
++		if (dev && extaddr->smctp_halen == dev->addr_len) {
++			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
++			rc = 0;
++		}
++		rcu_read_unlock();
++		if (rc)
++			goto err_free;
++		rt = NULL;
++	} else {
++		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
++				       addr->smctp_addr.s_addr);
++		if (!rt) {
++			rc = -EHOSTUNREACH;
++			goto err_free;
++		}
++		hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
++	}
++
+ 	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
+ 				  msg->msg_flags & MSG_DONTWAIT, &rc);
+ 	if (!skb)
+@@ -137,8 +165,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	cb = __mctp_cb(skb);
+ 	cb->net = addr->smctp_network;
+ 
+-	/* direct addressing */
+-	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
++	if (!rt) {
++		/* fill extended address in cb */
+ 		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
+ 				 extaddr, msg->msg_name);
+ 
+@@ -149,17 +177,9 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		}
+ 
+ 		cb->ifindex = extaddr->smctp_ifindex;
++		/* smctp_halen is checked above */
+ 		cb->halen = extaddr->smctp_halen;
+ 		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
+-
+-		rt = NULL;
+-	} else {
+-		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
+-				       addr->smctp_addr.s_addr);
+-		if (!rt) {
+-			rc = -EHOSTUNREACH;
+-			goto err_free;
+-		}
+ 	}
+ 
+ 	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
+diff --git a/net/mctp/device.c b/net/mctp/device.c
+index ef2755f82f87b..f86ef6d751bdc 100644
+--- a/net/mctp/device.c
++++ b/net/mctp/device.c
+@@ -24,12 +24,25 @@ struct mctp_dump_cb {
+ 	size_t a_idx;
+ };
+ 
+-/* unlocked: caller must hold rcu_read_lock */
++/* unlocked: caller must hold rcu_read_lock.
++ * Returns the mctp_dev with its refcount incremented, or NULL if unset.
++ */
+ struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
+ {
+-	return rcu_dereference(dev->mctp_ptr);
++	struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);
++
++	/* RCU guarantees that any mdev is still live.
++	 * Zero refcount implies a pending free, return NULL.
++	 */
++	if (mdev)
++		if (!refcount_inc_not_zero(&mdev->refs))
++			return NULL;
++	return mdev;
+ }
+ 
++/* Returned mctp_dev does not have refcount incremented. The returned pointer
++ * remains live while rtnl_lock is held, as holding rtnl_lock prevents
++ * mctp_unregister() from running concurrently.
++ */
+ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
+ {
+ 	return rtnl_dereference(dev->mctp_ptr);
+@@ -123,6 +136,7 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 				if (mdev) {
+ 					rc = mctp_dump_dev_addrinfo(mdev,
+ 								    skb, cb);
++					mctp_dev_put(mdev);
+ 					// Error indicates full buffer, this
+ 					// callback will get retried.
+ 					if (rc < 0)
+@@ -297,7 +311,7 @@ void mctp_dev_hold(struct mctp_dev *mdev)
+ 
+ void mctp_dev_put(struct mctp_dev *mdev)
+ {
+-	if (refcount_dec_and_test(&mdev->refs)) {
++	if (mdev && refcount_dec_and_test(&mdev->refs)) {
+ 		dev_put(mdev->dev);
+ 		kfree_rcu(mdev, rcu);
+ 	}
+@@ -369,6 +383,7 @@ static size_t mctp_get_link_af_size(const struct net_device *dev,
+ 	if (!mdev)
+ 		return 0;
+ 	ret = nla_total_size(4); /* IFLA_MCTP_NET */
++	mctp_dev_put(mdev);
+ 	return ret;
+ }
+ 
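
The new __mctp_dev_get() is an instance of the standard RCU-plus-refcount
lookup: RCU only guarantees the memory stays around for the read-side
critical section, while a successful refcount_inc_not_zero() proves the
object is not already being torn down, so the caller may keep the pointer
past rcu_read_unlock(). In generic form (a sketch, not MCTP-specific
code):

rcu_read_lock();
obj = rcu_dereference(ptr);
if (obj && !refcount_inc_not_zero(&obj->refs))
	obj = NULL;	/* zero refcount: concurrent free in flight */
rcu_read_unlock();
/* obj, if non-NULL, is safe to use until the matching _put() */
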
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index e52cef7505002..1a296e211a507 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -498,6 +498,11 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
+ 
+ 	if (cb->ifindex) {
+ 		/* direct route; use the hwaddr we stashed in sendmsg */
++		if (cb->halen != skb->dev->addr_len) {
++			/* sanity check, sendmsg should have already caught this */
++			kfree_skb(skb);
++			return -EMSGSIZE;
++		}
+ 		daddr = cb->haddr;
+ 	} else {
+ 		/* If lookup fails let the device handle daddr==NULL */
+@@ -507,7 +512,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
+ 
+ 	rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
+ 			     daddr, skb->dev->dev_addr, skb->len);
+-	if (rc) {
++	if (rc < 0) {
+ 		kfree_skb(skb);
+ 		return -EHOSTUNREACH;
+ 	}
+@@ -707,7 +712,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ {
+ 	const unsigned int hlen = sizeof(struct mctp_hdr);
+ 	struct mctp_hdr *hdr, *hdr2;
+-	unsigned int pos, size;
++	unsigned int pos, size, headroom;
+ 	struct sk_buff *skb2;
+ 	int rc;
+ 	u8 seq;
+@@ -721,6 +726,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ 		return -EMSGSIZE;
+ 	}
+ 
++	/* keep same headroom as the original skb */
++	headroom = skb_headroom(skb);
++
+ 	/* we've got the header */
+ 	skb_pull(skb, hlen);
+ 
+@@ -728,7 +736,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ 		/* size of message payload */
+ 		size = min(mtu - hlen, skb->len - pos);
+ 
+-		skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
++		skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
+ 		if (!skb2) {
+ 			rc = -ENOMEM;
+ 			break;
+@@ -744,7 +752,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+ 			skb_set_owner_w(skb2, skb->sk);
+ 
+ 		/* establish packet */
+-		skb_reserve(skb2, MCTP_HEADER_MAXLEN);
++		skb_reserve(skb2, headroom);
+ 		skb_reset_network_header(skb2);
+ 		skb_put(skb2, hlen + size);
+ 		skb2->transport_header = skb2->network_header + hlen;
+@@ -786,7 +794,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+ {
+ 	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ 	struct mctp_skb_cb *cb = mctp_cb(skb);
+-	struct mctp_route tmp_rt;
++	struct mctp_route tmp_rt = {0};
+ 	struct mctp_sk_key *key;
+ 	struct net_device *dev;
+ 	struct mctp_hdr *hdr;
+@@ -892,6 +900,7 @@ out_release:
+ 		mctp_route_release(rt);
+ 
+ 	dev_put(dev);
++	mctp_dev_put(tmp_rt.dev);
+ 
+ 	return rc;
+ 
+@@ -1057,11 +1066,13 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	rt->output(rt, skb);
+ 	mctp_route_release(rt);
++	mctp_dev_put(mdev);
+ 
+ 	return NET_RX_SUCCESS;
+ 
+ err_drop:
+ 	kfree_skb(skb);
++	mctp_dev_put(mdev);
+ 	return NET_RX_DROP;
+ }
+ 
+diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
+index 7b7918702592a..e03ba66bbe181 100644
+--- a/net/mctp/test/utils.c
++++ b/net/mctp/test/utils.c
+@@ -54,7 +54,6 @@ struct mctp_test_dev *mctp_test_create_dev(void)
+ 
+ 	rcu_read_lock();
+ 	dev->mdev = __mctp_dev_get(ndev);
+-	mctp_dev_hold(dev->mdev);
+ 	rcu_read_unlock();
+ 
+ 	return dev;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index bf1e17c678f13..7552e1e9fd629 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -67,6 +67,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+ struct conntrack_gc_work {
+ 	struct delayed_work	dwork;
+ 	u32			next_bucket;
++	u32			avg_timeout;
++	u32			start_time;
+ 	bool			exiting;
+ 	bool			early_drop;
+ };
+@@ -78,8 +80,19 @@ static __read_mostly bool nf_conntrack_locks_all;
+ /* serialize hash resizes and nf_ct_iterate_cleanup */
+ static DEFINE_MUTEX(nf_conntrack_mutex);
+ 
+-#define GC_SCAN_INTERVAL	(120u * HZ)
++#define GC_SCAN_INTERVAL_MAX	(60ul * HZ)
++#define GC_SCAN_INTERVAL_MIN	(1ul * HZ)
++
++/* clamp timeouts to this value (TCP unacked) */
++#define GC_SCAN_INTERVAL_CLAMP	(300ul * HZ)
++
++/* large initial bias so that we don't scan often just because we have
++ * three entries with a 1s timeout.
++ */
++#define GC_SCAN_INTERVAL_INIT	INT_MAX
++
+ #define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
++#define GC_SCAN_EXPIRED_MAX	(64000u / HZ)
+ 
+ #define MIN_CHAINLEN	8u
+ #define MAX_CHAINLEN	(32u - MIN_CHAINLEN)
+@@ -1421,16 +1434,28 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
+ 
+ static void gc_worker(struct work_struct *work)
+ {
+-	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+ 	unsigned int i, hashsz, nf_conntrack_max95 = 0;
+-	unsigned long next_run = GC_SCAN_INTERVAL;
++	u32 end_time, start_time = nfct_time_stamp;
+ 	struct conntrack_gc_work *gc_work;
++	unsigned int expired_count = 0;
++	unsigned long next_run;
++	s32 delta_time;
++
+ 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+ 
+ 	i = gc_work->next_bucket;
+ 	if (gc_work->early_drop)
+ 		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+ 
++	if (i == 0) {
++		gc_work->avg_timeout = GC_SCAN_INTERVAL_INIT;
++		gc_work->start_time = start_time;
++	}
++
++	next_run = gc_work->avg_timeout;
++
++	end_time = start_time + GC_SCAN_MAX_DURATION;
++
+ 	do {
+ 		struct nf_conntrack_tuple_hash *h;
+ 		struct hlist_nulls_head *ct_hash;
+@@ -1447,6 +1472,7 @@ static void gc_worker(struct work_struct *work)
+ 
+ 		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+ 			struct nf_conntrack_net *cnet;
++			unsigned long expires;
+ 			struct net *net;
+ 
+ 			tmp = nf_ct_tuplehash_to_ctrack(h);
+@@ -1456,11 +1482,29 @@ static void gc_worker(struct work_struct *work)
+ 				continue;
+ 			}
+ 
++			if (expired_count > GC_SCAN_EXPIRED_MAX) {
++				rcu_read_unlock();
++
++				gc_work->next_bucket = i;
++				gc_work->avg_timeout = next_run;
++
++				delta_time = nfct_time_stamp - gc_work->start_time;
++
++				/* re-sched immediately if total cycle time is exceeded */
++				next_run = delta_time < (s32)GC_SCAN_INTERVAL_MAX;
++				goto early_exit;
++			}
++
+ 			if (nf_ct_is_expired(tmp)) {
+ 				nf_ct_gc_expired(tmp);
++				expired_count++;
+ 				continue;
+ 			}
+ 
++			expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
++			next_run += expires;
++			next_run /= 2u;
++
+ 			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
+ 				continue;
+ 
+@@ -1478,8 +1522,10 @@ static void gc_worker(struct work_struct *work)
+ 				continue;
+ 			}
+ 
+-			if (gc_worker_can_early_drop(tmp))
++			if (gc_worker_can_early_drop(tmp)) {
+ 				nf_ct_kill(tmp);
++				expired_count++;
++			}
+ 
+ 			nf_ct_put(tmp);
+ 		}
+@@ -1492,33 +1538,38 @@ static void gc_worker(struct work_struct *work)
+ 		cond_resched();
+ 		i++;
+ 
+-		if (time_after(jiffies, end_time) && i < hashsz) {
++		delta_time = nfct_time_stamp - end_time;
++		if (delta_time > 0 && i < hashsz) {
++			gc_work->avg_timeout = next_run;
+ 			gc_work->next_bucket = i;
+ 			next_run = 0;
+-			break;
++			goto early_exit;
+ 		}
+ 	} while (i < hashsz);
+ 
++	gc_work->next_bucket = 0;
++
++	next_run = clamp(next_run, GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_MAX);
++
++	delta_time = max_t(s32, nfct_time_stamp - gc_work->start_time, 1);
++	if (next_run > (unsigned long)delta_time)
++		next_run -= delta_time;
++	else
++		next_run = 1;
++
++early_exit:
+ 	if (gc_work->exiting)
+ 		return;
+ 
+-	/*
+-	 * Eviction will normally happen from the packet path, and not
+-	 * from this gc worker.
+-	 *
+-	 * This worker is only here to reap expired entries when system went
+-	 * idle after a busy period.
+-	 */
+-	if (next_run) {
++	if (next_run)
+ 		gc_work->early_drop = false;
+-		gc_work->next_bucket = 0;
+-	}
++
+ 	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+ }
+ 
+ static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+ {
+-	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
++	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ 	gc_work->exiting = false;
+ }
+ 
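
The rewritten gc_worker drops the fixed rescan period in favour of an
adaptive interval: each scanned entry pulls a running average halfway
toward its own clamped timeout, and the large GC_SCAN_INTERVAL_INIT seed
keeps a handful of short-lived entries from forcing frequent scans on
their own. A standalone demo of that averaging step (plain C, seconds
instead of jiffies for readability):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long next_run = INT_MAX;	/* GC_SCAN_INTERVAL_INIT */
	/* pre-clamped per-entry timeouts, in seconds */
	unsigned long expires[] = { 300, 300, 5, 60, 5 };
	unsigned int i;

	for (i = 0; i < sizeof(expires) / sizeof(expires[0]); i++) {
		/* exponential moving average with weight 1/2 */
		next_run = (next_run + expires[i]) / 2;
		printf("after entry %u: next_run = %lu\n", i, next_run);
	}
	return 0;
}

The final value is then clamped to [GC_SCAN_INTERVAL_MIN,
GC_SCAN_INTERVAL_MAX], i.e. 1-60 seconds, so the oversized seed only
delays convergence rather than ever producing a huge sleep.
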
+diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
+index 7b727d3ebf9df..04bd2f89afe88 100644
+--- a/net/netfilter/nft_bitwise.c
++++ b/net/netfilter/nft_bitwise.c
+@@ -287,7 +287,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
+ 	if (!track->regs[priv->sreg].selector)
+ 		return false;
+ 
+-	bitwise = nft_expr_priv(expr);
++	bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
+ 	if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ 	    track->regs[priv->dreg].bitwise &&
+ 	    track->regs[priv->dreg].bitwise->ops == expr->ops &&
+@@ -434,7 +434,7 @@ static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
+ 	if (!track->regs[priv->sreg].selector)
+ 		return false;
+ 
+-	bitwise = nft_expr_priv(expr);
++	bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
+ 	if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ 	    track->regs[priv->dreg].bitwise &&
+ 	    track->regs[priv->dreg].bitwise->ops == expr->ops &&
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index beb0e573266d0..54c0830039470 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -885,6 +885,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
+ 	unsigned char bitmask;
+ 	unsigned char byte;
+ 
++	if (offset >= bitmap_len)
++		return -1;
+ 	byte_offset = offset / 8;
+ 	byte = bitmap[byte_offset];
+ 	bit_spot = offset;
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 780d9e2246f39..8955f31fa47e9 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -1051,7 +1051,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb,
+ 	int rem = nla_len(attr);
+ 	bool dont_clone_flow_key;
+ 
+-	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
++	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
+ 	clone_arg = nla_data(attr);
+ 	dont_clone_flow_key = nla_get_u32(clone_arg);
+ 	actions = nla_next(clone_arg, &rem);
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 0d677c9c2c805..c591b923016a6 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2288,6 +2288,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ 	return sfa;
+ }
+ 
++static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
++
++static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action)
++{
++	const struct nlattr *a;
++	int rem;
++
++	nla_for_each_nested(a, action, rem) {
++		switch (nla_type(a)) {
++		case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL:
++		case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER:
++			ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
++			break;
++		}
++	}
++}
++
++static void ovs_nla_free_clone_action(const struct nlattr *action)
++{
++	const struct nlattr *a = nla_data(action);
++	int rem = nla_len(action);
++
++	switch (nla_type(a)) {
++	case OVS_CLONE_ATTR_EXEC:
++		/* The real list of actions follows this attribute. */
++		a = nla_next(a, &rem);
++		ovs_nla_free_nested_actions(a, rem);
++		break;
++	}
++}
++
++static void ovs_nla_free_dec_ttl_action(const struct nlattr *action)
++{
++	const struct nlattr *a = nla_data(action);
++
++	switch (nla_type(a)) {
++	case OVS_DEC_TTL_ATTR_ACTION:
++		ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
++		break;
++	}
++}
++
++static void ovs_nla_free_sample_action(const struct nlattr *action)
++{
++	const struct nlattr *a = nla_data(action);
++	int rem = nla_len(action);
++
++	switch (nla_type(a)) {
++	case OVS_SAMPLE_ATTR_ARG:
++		/* The real list of actions follows this attribute. */
++		a = nla_next(a, &rem);
++		ovs_nla_free_nested_actions(a, rem);
++		break;
++	}
++}
++
+ static void ovs_nla_free_set_action(const struct nlattr *a)
+ {
+ 	const struct nlattr *ovs_key = nla_data(a);
+@@ -2301,25 +2357,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a)
+ 	}
+ }
+ 
+-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
++static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
+ {
+ 	const struct nlattr *a;
+ 	int rem;
+ 
+-	if (!sf_acts)
++	/* Whenever new actions are added, the need to update this
++	 * function should be considered.
++	 */
++	BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23);
++
++	if (!actions)
+ 		return;
+ 
+-	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
++	nla_for_each_attr(a, actions, len, rem) {
+ 		switch (nla_type(a)) {
+-		case OVS_ACTION_ATTR_SET:
+-			ovs_nla_free_set_action(a);
++		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
++			ovs_nla_free_check_pkt_len_action(a);
++			break;
++
++		case OVS_ACTION_ATTR_CLONE:
++			ovs_nla_free_clone_action(a);
+ 			break;
++
+ 		case OVS_ACTION_ATTR_CT:
+ 			ovs_ct_free_action(a);
+ 			break;
++
++		case OVS_ACTION_ATTR_DEC_TTL:
++			ovs_nla_free_dec_ttl_action(a);
++			break;
++
++		case OVS_ACTION_ATTR_SAMPLE:
++			ovs_nla_free_sample_action(a);
++			break;
++
++		case OVS_ACTION_ATTR_SET:
++			ovs_nla_free_set_action(a);
++			break;
+ 		}
+ 	}
++}
++
++void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
++{
++	if (!sf_acts)
++		return;
+ 
++	ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
+ 	kfree(sf_acts);
+ }
+ 
+@@ -3429,7 +3514,9 @@ static int clone_action_to_attr(const struct nlattr *attr,
+ 	if (!start)
+ 		return -EMSGSIZE;
+ 
+-	err = ovs_nla_put_actions(nla_data(attr), rem, skb);
++	/* Skip OVS_CLONE_ATTR_EXEC, which is always the first attribute. */
++	attr = nla_next(nla_data(attr), &rem);
++	err = ovs_nla_put_actions(attr, rem, skb);
+ 
+ 	if (err)
+ 		nla_nest_cancel(skb, start);
+diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
+index 25bbc4cc8b135..f15d6942da453 100644
+--- a/net/rxrpc/net_ns.c
++++ b/net/rxrpc/net_ns.c
+@@ -113,8 +113,8 @@ static __net_exit void rxrpc_exit_net(struct net *net)
+ 	struct rxrpc_net *rxnet = rxrpc_net(net);
+ 
+ 	rxnet->live = false;
+-	del_timer_sync(&rxnet->peer_keepalive_timer);
+ 	cancel_work_sync(&rxnet->peer_keepalive_work);
++	del_timer_sync(&rxnet->peer_keepalive_timer);
+ 	rxrpc_destroy_all_calls(rxnet);
+ 	rxrpc_destroy_all_connections(rxnet);
+ 	rxrpc_destroy_all_peers(rxnet);
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index a18609f608fb7..e213aaf45d67c 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -914,6 +914,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+ 				ctx->asoc->base.sk->sk_err = -error;
+ 				return;
+ 			}
++			ctx->asoc->stats.octrlchunks++;
+ 			break;
+ 
+ 		case SCTP_CID_ABORT:
+@@ -938,7 +939,10 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+ 
+ 		case SCTP_CID_HEARTBEAT:
+ 			if (chunk->pmtu_probe) {
+-				sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
++				error = sctp_packet_singleton(ctx->transport,
++							      chunk, ctx->gfp);
++				if (!error)
++					ctx->asoc->stats.octrlchunks++;
+ 				break;
+ 			}
+ 			fallthrough;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 284befa909676..303c5e56e4df4 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2625,8 +2625,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 		    sk->sk_state != SMC_CLOSED) {
+ 			if (val) {
+ 				SMC_STAT_INC(smc, ndly_cnt);
+-				mod_delayed_work(smc->conn.lgr->tx_wq,
+-						 &smc->conn.tx_work, 0);
++				smc_tx_pending(&smc->conn);
++				cancel_delayed_work(&smc->conn.tx_work);
+ 			}
+ 		}
+ 		break;
+@@ -2636,8 +2636,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 		    sk->sk_state != SMC_CLOSED) {
+ 			if (!val) {
+ 				SMC_STAT_INC(smc, cork_cnt);
+-				mod_delayed_work(smc->conn.lgr->tx_wq,
+-						 &smc->conn.tx_work, 0);
++				smc_tx_pending(&smc->conn);
++				cancel_delayed_work(&smc->conn.tx_work);
+ 			}
+ 		}
+ 		break;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index be7d704976ffb..f40f6ed0fbdb4 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1989,7 +1989,7 @@ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
+  */
+ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
+ {
+-	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
++	return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+ }
+ 
+ /* map an rmb buf to a link */
+diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
+index be241d53020f1..7b0b6e24582f9 100644
+--- a/net/smc/smc_tx.c
++++ b/net/smc/smc_tx.c
+@@ -597,27 +597,32 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+ 	return rc;
+ }
+ 
+-/* Wakeup sndbuf consumers from process context
+- * since there is more data to transmit
+- */
+-void smc_tx_work(struct work_struct *work)
++void smc_tx_pending(struct smc_connection *conn)
+ {
+-	struct smc_connection *conn = container_of(to_delayed_work(work),
+-						   struct smc_connection,
+-						   tx_work);
+ 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ 	int rc;
+ 
+-	lock_sock(&smc->sk);
+ 	if (smc->sk.sk_err)
+-		goto out;
++		return;
+ 
+ 	rc = smc_tx_sndbuf_nonempty(conn);
+ 	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
+ 	    !atomic_read(&conn->bytes_to_rcv))
+ 		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
++}
++
++/* Wakeup sndbuf consumers from process context
++ * since there is more data to transmit
++ */
++void smc_tx_work(struct work_struct *work)
++{
++	struct smc_connection *conn = container_of(to_delayed_work(work),
++						   struct smc_connection,
++						   tx_work);
++	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ 
+-out:
++	lock_sock(&smc->sk);
++	smc_tx_pending(conn);
+ 	release_sock(&smc->sk);
+ }
+ 
+diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h
+index 07e6ad76224a0..a59f370b8b432 100644
+--- a/net/smc/smc_tx.h
++++ b/net/smc/smc_tx.h
+@@ -27,6 +27,7 @@ static inline int smc_tx_prepared_sends(struct smc_connection *conn)
+ 	return smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
+ }
+ 
++void smc_tx_pending(struct smc_connection *conn);
+ void smc_tx_work(struct work_struct *work);
+ void smc_tx_init(struct smc_sock *smc);
+ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b36d235d2d6d9..0222ad4523a9d 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2197,6 +2197,7 @@ call_transmit_status(struct rpc_task *task)
+ 		 * socket just returned a connection error,
+ 		 * then hold onto the transport lock.
+ 		 */
++	case -ENOMEM:
+ 	case -ENOBUFS:
+ 		rpc_delay(task, HZ>>2);
+ 		fallthrough;
+@@ -2280,6 +2281,7 @@ call_bc_transmit_status(struct rpc_task *task)
+ 	case -ENOTCONN:
+ 	case -EPIPE:
+ 		break;
++	case -ENOMEM:
+ 	case -ENOBUFS:
+ 		rpc_delay(task, HZ>>2);
+ 		fallthrough;
+@@ -2362,6 +2364,11 @@ call_status(struct rpc_task *task)
+ 	case -EPIPE:
+ 	case -EAGAIN:
+ 		break;
++	case -ENFILE:
++	case -ENOBUFS:
++	case -ENOMEM:
++		rpc_delay(task, HZ>>2);
++		break;
+ 	case -EIO:
+ 		/* shutdown or soft timeout */
+ 		goto out_exit;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index ae295844ac55a..9020cedb7c95a 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+ 
+ /*
+  * Add new request to wait queue.
+- *
+- * Swapper tasks always get inserted at the head of the queue.
+- * This should avoid many nasty memory deadlocks and hopefully
+- * improve overall performance.
+- * Everyone else gets appended to the queue to ensure proper FIFO behavior.
+  */
+ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ 		struct rpc_task *task,
+@@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ 	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
+ 	if (RPC_IS_PRIORITY(queue))
+ 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
+-	else if (RPC_IS_SWAPPER(task))
+-		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
+ 	else
+ 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
+ 	task->tk_waitqueue = queue;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 478f857cdaed4..6ea3d87e11475 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1096,7 +1096,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+ 	int ret;
+ 
+ 	*sentp = 0;
+-	xdr_alloc_bvec(xdr, GFP_KERNEL);
++	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
++	if (ret < 0)
++		return ret;
+ 
+ 	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
+ 	if (ret < 0)
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 396a74974f60f..d557a3cb2ad4a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -929,12 +929,7 @@ void xprt_connect(struct rpc_task *task)
+ 	if (!xprt_lock_write(xprt, task))
+ 		return;
+ 
+-	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
+-		trace_xprt_disconnect_cleanup(xprt);
+-		xprt->ops->close(xprt);
+-	}
+-
+-	if (!xprt_connected(xprt)) {
++	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
+ 		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
+ 		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
+ 				xprt_request_timeout(task->tk_rqstp));
+@@ -1354,17 +1349,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
+ 				INIT_LIST_HEAD(&req->rq_xmit2);
+ 				goto out;
+ 			}
+-		} else if (RPC_IS_SWAPPER(task)) {
+-			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+-				if (pos->rq_cong || pos->rq_bytes_sent)
+-					continue;
+-				if (RPC_IS_SWAPPER(pos->rq_task))
+-					continue;
+-				/* Note: req is added _before_ pos */
+-				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
+-				INIT_LIST_HEAD(&req->rq_xmit2);
+-				goto out;
+-			}
+ 		} else if (!req->rq_seqno) {
+ 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+ 				if (pos->rq_task->tk_owner != task->tk_owner)
+@@ -1690,12 +1674,15 @@ out:
+ static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
+ {
+ 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
++	gfp_t gfp_mask = GFP_KERNEL;
+ 
+ 	if (xprt->num_reqs >= xprt->max_reqs)
+ 		goto out;
+ 	++xprt->num_reqs;
+ 	spin_unlock(&xprt->reserve_lock);
+-	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
++	if (current->flags & PF_WQ_WORKER)
++		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
++	req = kzalloc(sizeof(*req), gfp_mask);
+ 	spin_lock(&xprt->reserve_lock);
+ 	if (req != NULL)
+ 		goto out;
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index ff78a296fa810..6b7e10e5a141d 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -521,7 +521,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+ 	return;
+ 
+ out_sleep:
+-	task->tk_status = -EAGAIN;
++	task->tk_status = -ENOMEM;
+ 	xprt_add_backlog(xprt, task);
+ }
+ 
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 11eab0f0333b0..7aef2876beb38 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -763,12 +763,12 @@ xs_stream_start_connect(struct sock_xprt *transport)
+ /**
+  * xs_nospace - handle transmit was incomplete
+  * @req: pointer to RPC request
++ * @transport: pointer to struct sock_xprt
+  *
+  */
+-static int xs_nospace(struct rpc_rqst *req)
++static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport)
+ {
+-	struct rpc_xprt *xprt = req->rq_xprt;
+-	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++	struct rpc_xprt *xprt = &transport->xprt;
+ 	struct sock *sk = transport->inet;
+ 	int ret = -EAGAIN;
+ 
+@@ -779,25 +779,49 @@ static int xs_nospace(struct rpc_rqst *req)
+ 
+ 	/* Don't race with disconnect */
+ 	if (xprt_connected(xprt)) {
++		struct socket_wq *wq;
++
++		rcu_read_lock();
++		wq = rcu_dereference(sk->sk_wq);
++		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
++		rcu_read_unlock();
++
+ 		/* wait for more buffer space */
++		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ 		sk->sk_write_pending++;
+ 		xprt_wait_for_buffer_space(xprt);
+ 	} else
+ 		ret = -ENOTCONN;
+ 
+ 	spin_unlock(&xprt->transport_lock);
++	return ret;
++}
+ 
+-	/* Race breaker in case memory is freed before above code is called */
+-	if (ret == -EAGAIN) {
+-		struct socket_wq *wq;
++static int xs_sock_nospace(struct rpc_rqst *req)
++{
++	struct sock_xprt *transport =
++		container_of(req->rq_xprt, struct sock_xprt, xprt);
++	struct sock *sk = transport->inet;
++	int ret = -EAGAIN;
+ 
+-		rcu_read_lock();
+-		wq = rcu_dereference(sk->sk_wq);
+-		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
+-		rcu_read_unlock();
++	lock_sock(sk);
++	if (!sock_writeable(sk))
++		ret = xs_nospace(req, transport);
++	release_sock(sk);
++	return ret;
++}
+ 
+-		sk->sk_write_space(sk);
+-	}
++static int xs_stream_nospace(struct rpc_rqst *req)
++{
++	struct sock_xprt *transport =
++		container_of(req->rq_xprt, struct sock_xprt, xprt);
++	struct sock *sk = transport->inet;
++	int ret = -EAGAIN;
++
++	lock_sock(sk);
++	if (!sk_stream_memory_free(sk))
++		ret = xs_nospace(req, transport);
++	release_sock(sk);
+ 	return ret;
+ }
+ 
+@@ -856,7 +880,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
+ 
+ 	/* Close the stream if the previous transmission was incomplete */
+ 	if (xs_send_request_was_aborted(transport, req)) {
+-		xs_close(xprt);
++		xprt_force_disconnect(xprt);
+ 		return -ENOTCONN;
+ 	}
+ 
+@@ -887,14 +911,14 @@ static int xs_local_send_request(struct rpc_rqst *req)
+ 	case -ENOBUFS:
+ 		break;
+ 	case -EAGAIN:
+-		status = xs_nospace(req);
++		status = xs_stream_nospace(req);
+ 		break;
+ 	default:
+ 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
+ 			-status);
+ 		fallthrough;
+ 	case -EPIPE:
+-		xs_close(xprt);
++		xprt_force_disconnect(xprt);
+ 		status = -ENOTCONN;
+ 	}
+ 
+@@ -963,7 +987,7 @@ process_status:
+ 		/* Should we call xs_close() here? */
+ 		break;
+ 	case -EAGAIN:
+-		status = xs_nospace(req);
++		status = xs_sock_nospace(req);
+ 		break;
+ 	case -ENETUNREACH:
+ 	case -ENOBUFS:
+@@ -1083,7 +1107,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
+ 		/* Should we call xs_close() here? */
+ 		break;
+ 	case -EAGAIN:
+-		status = xs_nospace(req);
++		status = xs_stream_nospace(req);
+ 		break;
+ 	case -ECONNRESET:
+ 	case -ECONNREFUSED:
+@@ -1179,6 +1203,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
+ 
+ 	if (sk == NULL)
+ 		return;
++	/*
++	 * Make sure we're calling this in a context from which it is safe
++	 * to call __fput_sync(). In practice that means rpciod and the
++	 * system workqueue.
++	 */
++	if (!(current->flags & PF_WQ_WORKER)) {
++		WARN_ON_ONCE(1);
++		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
++		return;
++	}
+ 
+ 	if (atomic_read(&transport->xprt.swapper))
+ 		sk_clear_memalloc(sk);
+@@ -1202,7 +1236,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
+ 	mutex_unlock(&transport->recv_mutex);
+ 
+ 	trace_rpc_socket_close(xprt, sock);
+-	fput(filp);
++	__fput_sync(filp);
+ 
+ 	xprt_disconnect_done(xprt);
+ }
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index efc84845bb6b0..75a6995913839 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1495,7 +1495,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
+ 	if (prot->version == TLS_1_3_VERSION ||
+ 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
+ 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
+-		       crypto_aead_ivsize(ctx->aead_recv));
++		       prot->iv_size + prot->salt_size);
+ 	else
+ 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
+ 
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index b888522f133b3..b2fdac96bab07 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -700,8 +700,12 @@ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
+ 
+ 	for (i = 0; i < request->n_ssids; i++) {
+ 		/* wildcard ssid in the scan request */
+-		if (!request->ssids[i].ssid_len)
++		if (!request->ssids[i].ssid_len) {
++			if (ap->multi_bss && !ap->transmitted_bssid)
++				continue;
++
+ 			return true;
++		}
+ 
+ 		if (ap->ssid_len &&
+ 		    ap->ssid_len == request->ssids[i].ssid_len) {
+@@ -827,6 +831,9 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 		    !cfg80211_find_ssid_match(ap, request))
+ 			continue;
+ 
++		if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
++			continue;
++
+ 		cfg80211_scan_req_add_chan(request, chan, true);
+ 		memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN);
+ 		scan_6ghz_params->short_ssid = ap->short_ssid;
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 1480910c792e2..de66e1cc07348 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -217,9 +217,16 @@ strip-libs = $(filter-out -l%,$(1))
+ PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+ PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+ PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
+-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
++PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
+ FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+ 
++ifeq ($(CC_NO_CLANG), 0)
++  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
++  PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
++  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
++  FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
++endif
++
+ $(OUTPUT)test-libperl.bin:
+ 	$(BUILD) $(FLAGS_PERL_EMBED)
+ 
+diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
+index f947b61b21071..b8b37fe760069 100644
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -131,7 +131,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
+ 			   sort -u | wc -l)
+ VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+ 			      sed 's/\[.*\]//' | \
+-			      awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
++			      awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
+ 			      grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
+ 
+ CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
+@@ -194,7 +194,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
+ 		    sort -u > $(OUTPUT)libbpf_global_syms.tmp;		 \
+ 		readelf --dyn-syms --wide $(OUTPUT)libbpf.so |		 \
+ 		    sed 's/\[.*\]//' |					 \
+-		    awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'|  \
++		    awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'|  \
+ 		    grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 |		 \
+ 		    sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; 	 \
+ 		diff -u $(OUTPUT)libbpf_global_syms.tmp			 \
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index e1b5056068828..122d79c8f4b42 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -112,6 +112,10 @@
+ 
+ #elif defined(bpf_target_s390)
+ 
++struct pt_regs___s390 {
++	unsigned long orig_gpr2;
++};
++
+ /* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+ #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
+ #define __PT_PARM1_REG gprs[2]
+@@ -124,6 +128,8 @@
+ #define __PT_RC_REG gprs[2]
+ #define __PT_SP_REG gprs[15]
+ #define __PT_IP_REG psw.addr
++#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
++#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
+ 
+ #elif defined(bpf_target_arm)
+ 
+@@ -140,6 +146,10 @@
+ 
+ #elif defined(bpf_target_arm64)
+ 
++struct pt_regs___arm64 {
++	unsigned long orig_x0;
++};
++
+ /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+ #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
+ #define __PT_PARM1_REG regs[0]
+@@ -152,6 +162,8 @@
+ #define __PT_RC_REG regs[0]
+ #define __PT_SP_REG sp
+ #define __PT_IP_REG pc
++#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
++#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
+ 
+ #elif defined(bpf_target_mips)
+ 
+@@ -178,6 +190,8 @@
+ #define __PT_RC_REG gpr[3]
+ #define __PT_SP_REG sp
+ #define __PT_IP_REG nip
++/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
++#define PT_REGS_SYSCALL_REGS(ctx) ctx
+ 
+ #elif defined(bpf_target_sparc)
+ 
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 7c33ec67c4a95..3470813daf4aa 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1090,6 +1090,17 @@ static void annotate_call_site(struct objtool_file *file,
+ 			               : arch_nop_insn(insn->len));
+ 
+ 		insn->type = sibling ? INSN_RETURN : INSN_NOP;
++
++		if (sibling) {
++			/*
++			 * We've replaced the tail-call JMP insn by two new
++			 * insn: RET; INT3, except we only have a single struct
++			 * insn here. Mark it retpoline_safe to avoid the SLS
++			 * warning, instead of adding another insn.
++			 */
++			insn->retpoline_safe = true;
++		}
++
+ 		return;
+ 	}
+ 
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 96ad944ca6a88..f3bf9297bcc03 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -272,6 +272,9 @@ ifdef PYTHON_CONFIG
+   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
+   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
+   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
++  ifeq ($(CC_NO_CLANG), 0)
++    PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
++  endif
+ endif
+ 
+ FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+@@ -790,6 +793,9 @@ else
+     LDFLAGS += $(PERL_EMBED_LDFLAGS)
+     EXTLIBS += $(PERL_EMBED_LIBADD)
+     CFLAGS += -DHAVE_LIBPERL_SUPPORT
++    ifeq ($(CC_NO_CLANG), 0)
++      CFLAGS += -Wno-compound-token-split-by-macro
++    endif
+     $(call detected,CONFIG_LIBPERL)
+   endif
+ endif
+diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
+index 2100d46ccf5e6..bb4ab99afa7f8 100644
+--- a/tools/perf/arch/arm64/util/arm-spe.c
++++ b/tools/perf/arch/arm64/util/arm-spe.c
+@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
+ 		arm_spe_set_timestamp(itr, arm_spe_evsel);
+ 	}
+ 
++	/*
++	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
++	 * on the opening of the event or the SPE data produced.
++	 */
++	evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
++
+ 	/* Add dummy event to keep tracking */
+ 	err = parse_events(evlist, "dummy:u", NULL);
+ 	if (err)
+diff --git a/tools/perf/perf.c b/tools/perf/perf.c
+index 2f6b67189b426..6aae7b6c376b4 100644
+--- a/tools/perf/perf.c
++++ b/tools/perf/perf.c
+@@ -434,7 +434,7 @@ void pthread__unblock_sigwinch(void)
+ static int libperf_print(enum libperf_print_level level,
+ 			 const char *fmt, va_list ap)
+ {
+-	return eprintf(level, verbose, fmt, ap);
++	return veprintf(level, verbose, fmt, ap);
+ }
+ 
+ int main(int argc, const char **argv)
+diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
+index 2dab2d2620608..afdca7f2959f0 100644
+--- a/tools/perf/tests/dwarf-unwind.c
++++ b/tools/perf/tests/dwarf-unwind.c
+@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
+ 	}
+ 
+ 	err = unwind__get_entries(unwind_entry, &cnt, thread,
+-				  &sample, MAX_STACK);
++				  &sample, MAX_STACK, false);
+ 	if (err)
+ 		pr_debug("unwind failed\n");
+ 	else if (cnt != MAX_STACK) {
+diff --git a/tools/perf/util/arm64-frame-pointer-unwind-support.c b/tools/perf/util/arm64-frame-pointer-unwind-support.c
+index 2242a885fbd73..4940be4a0569c 100644
+--- a/tools/perf/util/arm64-frame-pointer-unwind-support.c
++++ b/tools/perf/util/arm64-frame-pointer-unwind-support.c
+@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
+ 		sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
+ 	}
+ 
+-	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
++	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
+ 	sample->user_regs = old_regs;
+ 
+ 	if (ret || entries.length != 2)
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 3945500036937..564abe17a0bd2 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2983,7 +2983,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
+ 		return 0;
+ 
+ 	return unwind__get_entries(unwind_entry, cursor,
+-				   thread, sample, max_stack);
++				   thread, sample, max_stack, false);
+ }
+ 
+ int thread__resolve_callchain(struct thread *thread,
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 498b05708db57..245dc70d1882a 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -2084,6 +2084,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
+ 	       bool needs_swap, union perf_event *error)
+ {
+ 	union perf_event *event;
++	u16 event_size;
+ 
+ 	/*
+ 	 * Ensure we have enough space remaining to read
+@@ -2096,15 +2097,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
+ 	if (needs_swap)
+ 		perf_event_header__bswap(&event->header);
+ 
+-	if (head + event->header.size <= mmap_size)
++	event_size = event->header.size;
++	if (head + event_size <= mmap_size)
+ 		return event;
+ 
+ 	/* We're not fetching the event so swap back again */
+ 	if (needs_swap)
+ 		perf_event_header__bswap(&event->header);
+ 
+-	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
+-		 " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
++	/* Check if the event fits into the next mmapped buf. */
++	if (event_size <= mmap_size - head % page_size) {
++		/* Remap buf and fetch again. */
++		return NULL;
++	}
++
++	/* Invalid input. Event size should never exceed mmap_size. */
++	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
++		 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
+ 
+ 	return error;
+ }
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 483f05004e682..c255a2c90cd67 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -1,12 +1,14 @@
+-from os import getenv
++from os import getenv, path
+ from subprocess import Popen, PIPE
+ from re import sub
+ 
+ cc = getenv("CC")
+ cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
++src_feature_tests  = getenv('srctree') + '/tools/build/feature'
+ 
+ def clang_has_option(option):
+-    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
++    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
++    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
+ 
+ if cc_is_clang:
+     from distutils.sysconfig import get_config_vars
+@@ -23,6 +25,8 @@ if cc_is_clang:
+             vars[var] = sub("-fstack-protector-strong", "", vars[var])
+         if not clang_has_option("-fno-semantic-interposition"):
+             vars[var] = sub("-fno-semantic-interposition", "", vars[var])
++        if not clang_has_option("-ffat-lto-objects"):
++            vars[var] = sub("-ffat-lto-objects", "", vars[var])
+ 
+ from distutils.core import setup, Extension
+ 
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index a74b517f74974..94aa40f6e3482 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -200,7 +200,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ 	bool isactivation;
+ 
+ 	if (!dwfl_frame_pc(state, &pc, NULL)) {
+-		pr_err("%s", dwfl_errmsg(-1));
++		if (!ui->best_effort)
++			pr_err("%s", dwfl_errmsg(-1));
+ 		return DWARF_CB_ABORT;
+ 	}
+ 
+@@ -208,7 +209,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ 	report_module(pc, ui);
+ 
+ 	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
+-		pr_err("%s", dwfl_errmsg(-1));
++		if (!ui->best_effort)
++			pr_err("%s", dwfl_errmsg(-1));
+ 		return DWARF_CB_ABORT;
+ 	}
+ 
+@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ 			struct thread *thread,
+ 			struct perf_sample *data,
+-			int max_stack)
++			int max_stack,
++			bool best_effort)
+ {
+ 	struct unwind_info *ui, ui_buf = {
+ 		.sample		= data,
+@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ 		.cb		= cb,
+ 		.arg		= arg,
+ 		.max_stack	= max_stack,
++		.best_effort    = best_effort
+ 	};
+ 	Dwarf_Word ip;
+ 	int err = -EINVAL, i;
+diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
+index 0cbd2650e280e..8c88bc4f2304b 100644
+--- a/tools/perf/util/unwind-libdw.h
++++ b/tools/perf/util/unwind-libdw.h
+@@ -20,6 +20,7 @@ struct unwind_info {
+ 	void			*arg;
+ 	int			max_stack;
+ 	int			idx;
++	bool			best_effort;
+ 	struct unwind_entry	entries[];
+ };
+ 
+diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
+index 71a3533491815..41e29fc7648ae 100644
+--- a/tools/perf/util/unwind-libunwind-local.c
++++ b/tools/perf/util/unwind-libunwind-local.c
+@@ -96,6 +96,7 @@ struct unwind_info {
+ 	struct perf_sample	*sample;
+ 	struct machine		*machine;
+ 	struct thread		*thread;
++	bool			 best_effort;
+ };
+ 
+ #define dw_read(ptr, type, end) ({	\
+@@ -553,7 +554,8 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
+ 
+ 	ret = perf_reg_value(&val, &ui->sample->user_regs, id);
+ 	if (ret) {
+-		pr_err("unwind: can't read reg %d\n", regnum);
++		if (!ui->best_effort)
++			pr_err("unwind: can't read reg %d\n", regnum);
+ 		return ret;
+ 	}
+ 
+@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ 			return -1;
+ 
+ 		ret = unw_init_remote(&c, addr_space, ui);
+-		if (ret)
++		if (ret && !ui->best_effort)
+ 			display_error(ret);
+ 
+ 		while (!ret && (unw_step(&c) > 0) && i < max_stack) {
+@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ 
+ static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ 			struct thread *thread,
+-			struct perf_sample *data, int max_stack)
++			struct perf_sample *data, int max_stack,
++			bool best_effort)
+ {
+ 	struct unwind_info ui = {
+ 		.sample       = data,
+ 		.thread       = thread,
+ 		.machine      = thread->maps->machine,
++		.best_effort  = best_effort
+ 	};
+ 
+ 	if (!data->user_regs.regs)
+diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
+index e89a5479b3613..509c287ee7628 100644
+--- a/tools/perf/util/unwind-libunwind.c
++++ b/tools/perf/util/unwind-libunwind.c
+@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
+ 
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ 			 struct thread *thread,
+-			 struct perf_sample *data, int max_stack)
++			 struct perf_sample *data, int max_stack,
++			 bool best_effort)
+ {
+ 	if (thread->maps->unwind_libunwind_ops)
+-		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
++		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
++								       max_stack, best_effort);
+ 	return 0;
+ }
+diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
+index ab8ad469c8de5..b2a03fa5289b3 100644
+--- a/tools/perf/util/unwind.h
++++ b/tools/perf/util/unwind.h
+@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
+ 	void (*finish_access)(struct maps *maps);
+ 	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
+ 			   struct thread *thread,
+-			   struct perf_sample *data, int max_stack);
++			   struct perf_sample *data, int max_stack, bool best_effort);
+ };
+ 
+ #ifdef HAVE_DWARF_UNWIND_SUPPORT
++/*
++ * When best_effort is set, don't report errors and fail silently. This could
++ * be expanded in the future to be more permissive about things other than
++ * error messages.
++ */
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ 			struct thread *thread,
+-			struct perf_sample *data, int max_stack);
++			struct perf_sample *data, int max_stack,
++			bool best_effort);
+ /* libunwind specific */
+ #ifdef HAVE_LIBUNWIND_SUPPORT
+ #ifndef LIBUNWIND__ARCH_REG_ID
+@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ 		    void *arg __maybe_unused,
+ 		    struct thread *thread __maybe_unused,
+ 		    struct perf_sample *data __maybe_unused,
+-		    int max_stack __maybe_unused)
++		    int max_stack __maybe_unused,
++		    bool best_effort __maybe_unused)
+ {
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+index 83b0aaa52ef77..a0e0d85e29da7 100644
+--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
++++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+@@ -412,8 +412,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
+ 
+ 	/* Narrow loads from remote_port field. Expect SRC_PORT. */
+ 	if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
+-	    LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) ||
+-	    LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0)
++	    LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
+ 		return SK_DROP;
+ 	if (LSW(ctx->remote_port, 0) != SRC_PORT)
+ 		return SK_DROP;
+diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
+index ffa5502ad95ed..5f8296d29e778 100644
+--- a/tools/testing/selftests/bpf/xdpxceiver.c
++++ b/tools/testing/selftests/bpf/xdpxceiver.c
+@@ -266,22 +266,24 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size
+ }
+ 
+ static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
+-				struct ifobject *ifobject, u32 qid)
++				struct ifobject *ifobject, bool shared)
+ {
+-	struct xsk_socket_config cfg;
++	struct xsk_socket_config cfg = {};
+ 	struct xsk_ring_cons *rxr;
+ 	struct xsk_ring_prod *txr;
+ 
+ 	xsk->umem = umem;
+ 	cfg.rx_size = xsk->rxqsize;
+ 	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+-	cfg.libbpf_flags = 0;
++	cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
+ 	cfg.xdp_flags = ifobject->xdp_flags;
+ 	cfg.bind_flags = ifobject->bind_flags;
++	if (shared)
++		cfg.bind_flags |= XDP_SHARED_UMEM;
+ 
+ 	txr = ifobject->tx_on ? &xsk->tx : NULL;
+ 	rxr = ifobject->rx_on ? &xsk->rx : NULL;
+-	return xsk_socket__create(&xsk->xsk, ifobject->ifname, qid, umem->umem, rxr, txr, &cfg);
++	return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);
+ }
+ 
+ static struct option long_options[] = {
+@@ -387,7 +389,6 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ 	for (i = 0; i < MAX_INTERFACES; i++) {
+ 		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
+ 
+-		ifobj->umem = &ifobj->umem_arr[0];
+ 		ifobj->xsk = &ifobj->xsk_arr[0];
+ 		ifobj->use_poll = false;
+ 		ifobj->pacing_on = true;
+@@ -401,11 +402,12 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
+ 			ifobj->tx_on = false;
+ 		}
+ 
++		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
++		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
++		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
++
+ 		for (j = 0; j < MAX_SOCKETS; j++) {
+-			memset(&ifobj->umem_arr[j], 0, sizeof(ifobj->umem_arr[j]));
+ 			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
+-			ifobj->umem_arr[j].num_frames = DEFAULT_UMEM_BUFFERS;
+-			ifobj->umem_arr[j].frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ 			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ 		}
+ 	}
+@@ -950,7 +952,10 @@ static void tx_stats_validate(struct ifobject *ifobject)
+ 
+ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+ {
++	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
+ 	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
++	int ret, ifindex;
++	void *bufs;
+ 	u32 i;
+ 
+ 	ifobject->ns_fd = switch_namespace(ifobject->nsname);
+@@ -958,23 +963,20 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+ 	if (ifobject->umem->unaligned_mode)
+ 		mmap_flags |= MAP_HUGETLB;
+ 
+-	for (i = 0; i < test->nb_sockets; i++) {
+-		u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
+-		u32 ctr = 0;
+-		void *bufs;
+-		int ret;
++	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
++	if (bufs == MAP_FAILED)
++		exit_with_error(errno);
+ 
+-		bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+-		if (bufs == MAP_FAILED)
+-			exit_with_error(errno);
++	ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
++	if (ret)
++		exit_with_error(-ret);
+ 
+-		ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz);
+-		if (ret)
+-			exit_with_error(-ret);
++	for (i = 0; i < test->nb_sockets; i++) {
++		u32 ctr = 0;
+ 
+ 		while (ctr++ < SOCK_RECONF_CTR) {
+-			ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i],
+-						   ifobject, i);
++			ret = xsk_configure_socket(&ifobject->xsk_arr[i], ifobject->umem,
++						   ifobject, !!i);
+ 			if (!ret)
+ 				break;
+ 
+@@ -985,8 +987,22 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+ 		}
+ 	}
+ 
+-	ifobject->umem = &ifobject->umem_arr[0];
+ 	ifobject->xsk = &ifobject->xsk_arr[0];
++
++	if (!ifobject->rx_on)
++		return;
++
++	ifindex = if_nametoindex(ifobject->ifname);
++	if (!ifindex)
++		exit_with_error(errno);
++
++	ret = xsk_setup_xdp_prog(ifindex, &ifobject->xsk_map_fd);
++	if (ret)
++		exit_with_error(-ret);
++
++	ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
++	if (ret)
++		exit_with_error(-ret);
+ }
+ 
+ static void testapp_cleanup_xsk_res(struct ifobject *ifobj)
+@@ -1142,14 +1158,16 @@ static void testapp_bidi(struct test_spec *test)
+ 
+ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
+ {
++	int ret;
++
+ 	xsk_socket__delete(ifobj_tx->xsk->xsk);
+-	xsk_umem__delete(ifobj_tx->umem->umem);
+ 	xsk_socket__delete(ifobj_rx->xsk->xsk);
+-	xsk_umem__delete(ifobj_rx->umem->umem);
+-	ifobj_tx->umem = &ifobj_tx->umem_arr[1];
+ 	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
+-	ifobj_rx->umem = &ifobj_rx->umem_arr[1];
+ 	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
++
++	ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
++	if (ret)
++		exit_with_error(-ret);
+ }
+ 
+ static void testapp_bpf_res(struct test_spec *test)
+@@ -1408,13 +1426,13 @@ static struct ifobject *ifobject_create(void)
+ 	if (!ifobj->xsk_arr)
+ 		goto out_xsk_arr;
+ 
+-	ifobj->umem_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->umem_arr));
+-	if (!ifobj->umem_arr)
+-		goto out_umem_arr;
++	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
++	if (!ifobj->umem)
++		goto out_umem;
+ 
+ 	return ifobj;
+ 
+-out_umem_arr:
++out_umem:
+ 	free(ifobj->xsk_arr);
+ out_xsk_arr:
+ 	free(ifobj);
+@@ -1423,7 +1441,7 @@ out_xsk_arr:
+ 
+ static void ifobject_delete(struct ifobject *ifobj)
+ {
+-	free(ifobj->umem_arr);
++	free(ifobj->umem);
+ 	free(ifobj->xsk_arr);
+ 	free(ifobj);
+ }
+diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xdpxceiver.h
+index 2f705f44b7483..62a3e63886325 100644
+--- a/tools/testing/selftests/bpf/xdpxceiver.h
++++ b/tools/testing/selftests/bpf/xdpxceiver.h
+@@ -125,10 +125,10 @@ struct ifobject {
+ 	struct xsk_socket_info *xsk;
+ 	struct xsk_socket_info *xsk_arr;
+ 	struct xsk_umem_info *umem;
+-	struct xsk_umem_info *umem_arr;
+ 	thread_func_t func_ptr;
+ 	struct pkt_stream *pkt_stream;
+ 	int ns_fd;
++	int xsk_map_fd;
+ 	u32 dst_ip;
+ 	u32 src_ip;
+ 	u32 xdp_flags;
+diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+index 7eca977999170..554ca649d4701 100644
+--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
++++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+@@ -306,7 +306,8 @@ static void guest_restore_active(struct test_args *args,
+ 	uint32_t prio, intid, ap1r;
+ 	int i;
+ 
+-	/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
++	/*
++	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
+ 	 * in descending order, so intid+1 can preempt intid.
+ 	 */
+ 	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
+@@ -315,7 +316,8 @@ static void guest_restore_active(struct test_args *args,
+ 		gic_set_priority(intid, prio);
+ 	}
+ 
+-	/* In a real migration, KVM would restore all GIC state before running
++	/*
++	 * In a real migration, KVM would restore all GIC state before running
+ 	 * guest code.
+ 	 */
+ 	for (i = 0; i < num; i++) {
+@@ -472,10 +474,10 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc *
+ 		guest_restore_active(args, MIN_SPI, 4, f->cmd);
+ }
+ 
+-static void guest_code(struct test_args args)
++static void guest_code(struct test_args *args)
+ {
+-	uint32_t i, nr_irqs = args.nr_irqs;
+-	bool level_sensitive = args.level_sensitive;
++	uint32_t i, nr_irqs = args->nr_irqs;
++	bool level_sensitive = args->level_sensitive;
+ 	struct kvm_inject_desc *f, *inject_fns;
+ 
+ 	gic_init(GIC_V3, 1, dist, redist);
+@@ -484,11 +486,11 @@ static void guest_code(struct test_args args)
+ 		gic_irq_enable(i);
+ 
+ 	for (i = MIN_SPI; i < nr_irqs; i++)
+-		gic_irq_set_config(i, !args.level_sensitive);
++		gic_irq_set_config(i, !level_sensitive);
+ 
+-	gic_set_eoi_split(args.eoi_split);
++	gic_set_eoi_split(args->eoi_split);
+ 
+-	reset_priorities(&args);
++	reset_priorities(args);
+ 	gic_set_priority_mask(CPU_PRIO_MASK);
+ 
+ 	inject_fns  = level_sensitive ? inject_level_fns
+@@ -497,17 +499,18 @@ static void guest_code(struct test_args args)
+ 	local_irq_enable();
+ 
+ 	/* Start the tests. */
+-	for_each_supported_inject_fn(&args, inject_fns, f) {
+-		test_injection(&args, f);
+-		test_preemption(&args, f);
+-		test_injection_failure(&args, f);
++	for_each_supported_inject_fn(args, inject_fns, f) {
++		test_injection(args, f);
++		test_preemption(args, f);
++		test_injection_failure(args, f);
+ 	}
+ 
+-	/* Restore the active state of IRQs. This would happen when live
++	/*
++	 * Restore the active state of IRQs. This would happen when live
+ 	 * migrating IRQs in the middle of being handled.
+ 	 */
+-	for_each_supported_activate_fn(&args, set_active_fns, f)
+-		test_restore_active(&args, f);
++	for_each_supported_activate_fn(args, set_active_fns, f)
++		test_restore_active(args, f);
+ 
+ 	GUEST_DONE();
+ }
+@@ -573,8 +576,8 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
+ 		kvm_gsi_routing_write(vm, routing);
+ 	} else {
+ 		ret = _kvm_gsi_routing_write(vm, routing);
+-		/* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */
+-		if (intid >= KVM_IRQCHIP_NUM_PINS)
++		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
++		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
+ 			TEST_ASSERT(ret != 0 && errno == EINVAL,
+ 				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
+ 				"error: rc: %i errno: %i", intid, ret, errno);
+@@ -739,6 +742,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
+ 	int gic_fd;
+ 	struct kvm_vm *vm;
+ 	struct kvm_inject_args inject_args;
++	vm_vaddr_t args_gva;
+ 
+ 	struct test_args args = {
+ 		.nr_irqs = nr_irqs,
+@@ -757,7 +761,9 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
+ 	vcpu_init_descriptor_tables(vm, VCPU_ID);
+ 
+ 	/* Setup the guest args page (so it gets the args). */
+-	vcpu_args_set(vm, 0, 1, args);
++	args_gva = vm_vaddr_alloc_page(vm);
++	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
++	vcpu_args_set(vm, 0, 1, args_gva);
+ 
+ 	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
+ 			GICD_BASE_GPA, GICR_BASE_GPA);
+@@ -841,7 +847,8 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	/* If the user just specified nr_irqs and/or gic_version, then run all
++	/*
++	 * If the user just specified nr_irqs and/or gic_version, then run all
+ 	 * combinations.
+ 	 */
+ 	if (default_args) {
+diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
+index 00f613c0583cd..263bf3ed8fd55 100644
+--- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
++++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
+@@ -19,7 +19,7 @@ struct gicv3_data {
+ 	unsigned int nr_spis;
+ };
+ 
+-#define sgi_base_from_redist(redist_base) 	(redist_base + SZ_64K)
++#define sgi_base_from_redist(redist_base)	(redist_base + SZ_64K)
+ #define DIST_BIT				(1U << 31)
+ 
+ enum gicv3_intid_range {
+@@ -105,7 +105,8 @@ static void gicv3_set_eoi_split(bool split)
+ {
+ 	uint32_t val;
+ 
+-	/* All other fields are read-only, so no need to read CTLR first. In
++	/*
++	 * All other fields are read-only, so no need to read CTLR first. In
+ 	 * fact, the kernel does the same.
+ 	 */
+ 	val = split ? (1U << 1) : 0;
+@@ -159,9 +160,10 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset,
+ 	uint32_t cpu_or_dist;
+ 
+ 	GUEST_ASSERT(bits_per_field <= reg_bits);
+-	GUEST_ASSERT(*val < (1U << bits_per_field));
+-	/* Some registers like IROUTER are 64 bit long. Those are currently not
+-	 * supported by readl nor writel, so just asserting here until then.
++	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
++	/*
++	 * This function does not support 64 bit accesses. Just asserting here
++	 * until we implement readq/writeq.
+ 	 */
+ 	GUEST_ASSERT(reg_bits == 32);
+ 
+diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+index f5cd0c536d85c..5d45046c1b805 100644
+--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
++++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+@@ -140,9 +140,6 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
+ 	uint64_t val;
+ 	bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
+ 
+-	/* Check that the addr part of the attr is within 32 bits. */
+-	assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
+-
+ 	uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+ 					  : KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
+ 
+@@ -152,7 +149,11 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
+ 		attr += SZ_64K;
+ 	}
+ 
+-	/* All calls will succeed, even with invalid intid's, as long as the
++	/* Check that the addr part of the attr is within 32 bits. */
++	assert((attr & ~KVM_DEV_ARM_VGIC_OFFSET_MASK) == 0);
++
++	/*
++	 * All calls will succeed, even with invalid intid's, as long as the
+ 	 * addr part of the attr is within 32 bits (checked above). An invalid
+ 	 * intid will just make the read/writes point to above the intended
+ 	 * register space (i.e., ICPENDR after ISPENDR).
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 246168310a754..610cc7920c8a2 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -439,8 +439,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+ 
+ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+-	kvm_dirty_ring_free(&vcpu->dirty_ring);
+ 	kvm_arch_vcpu_destroy(vcpu);
++	kvm_dirty_ring_free(&vcpu->dirty_ring);
+ 
+ 	/*
+ 	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes

