From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.9 commit in: /
Date: Thu, 11 Jul 2024 11:47:57 +0000 (UTC)
Message-ID: <1720698465.3507dfb3bd809fee9977bd10ae9a03a35dff682c.mpagano@gentoo>

commit:     3507dfb3bd809fee9977bd10ae9a03a35dff682c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 11 11:47:45 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 11 11:47:45 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3507dfb3

Linux patch 6.9.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-6.9.9.patch | 7414 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7418 insertions(+)

diff --git a/0000_README b/0000_README
index 8008738f..7f674d82 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.9.8.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.8
 
+Patch:  1008_linux-6.9.9.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.9
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1008_linux-6.9.9.patch b/1008_linux-6.9.9.patch
new file mode 100644
index 00000000..aabc3078
--- /dev/null
+++ b/1008_linux-6.9.9.patch
@@ -0,0 +1,7414 @@
+diff --git a/Makefile b/Makefile
+index 060e20dba35e5..cbe3a580ff480 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index 2d92713be2a09..6195937aa6dc5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -289,7 +289,7 @@ vdd_gpu: DCDC_REG2 {
+ 				regulator-name = "vdd_gpu";
+ 				regulator-always-on;
+ 				regulator-boot-on;
+-				regulator-min-microvolt = <900000>;
++				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <1350000>;
+ 				regulator-ramp-delay = <6001>;
+ 
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 7b610864b3645..2d6c886b40f44 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ 	if (IS_ENABLED(CONFIG_KASAN))
+ 		return;
+ 
++	/*
++	 * Likewise, do not use it in real mode if the percpu first chunk is not
++	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, there is
++	 * a chance that the percpu allocation comes from the vmalloc area.
++	 */
++	if (percpu_first_chunk_is_paged)
++		return;
++
+ 	/* Otherwise, it should be safe to call it */
+ 	nmi_enter();
+ }
+@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+ 		// no nmi_exit for a pseries hash guest taking a real mode exception
+ 	} else if (IS_ENABLED(CONFIG_KASAN)) {
+ 		// no nmi_exit for KASAN in real mode
++	} else if (percpu_first_chunk_is_paged) {
++		// no nmi_exit if percpu first chunk is not embedded
+ 	} else {
+ 		nmi_exit();
+ 	}
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index ba2e13bb879dc..048e3705af20b 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -37,7 +37,7 @@ extern struct pci_dev *isa_bridge_pcidev;
+  * define properly based on the platform
+  */
+ #ifndef CONFIG_PCI
+-#define _IO_BASE	0
++#define _IO_BASE	POISON_POINTER_DELTA
+ #define _ISA_MEM_BASE	0
+ #define PCI_DRAM_OFFSET 0
+ #elif defined(CONFIG_PPC32)
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index 8e5b7d0b851c6..634970ce13c6b 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -15,6 +15,16 @@
+ #endif /* CONFIG_SMP */
+ #endif /* __powerpc64__ */
+ 
++#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
++#include <linux/jump_label.h>
++DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
++
++#define percpu_first_chunk_is_paged	\
++		(static_key_enabled(&__percpu_first_chunk_is_paged.key))
++#else
++#define percpu_first_chunk_is_paged	false
++#endif /* CONFIG_PPC64 && CONFIG_SMP */
++
+ #include <asm-generic/percpu.h>
+ 
+ #include <asm/paca.h>
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 4690c219bfa4d..63432a33ec49a 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -647,8 +647,9 @@ __after_prom_start:
+  * Note: This process overwrites the OF exception vectors.
+  */
+ 	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
+-	mr.	r4,r26			/* In some cases the loader may  */
+-	beq	9f			/* have already put us at zero */
++	mr	r4,r26			/* Load the virtual source address into r4 */
++	cmpld	r3,r4			/* Check if source == dest */
++	beq	9f			/* If so skip the copy  */
+ 	li	r6,0x100		/* Start offset, the first 0x100 */
+ 					/* bytes were copied earlier.	 */
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 2f19d5e944852..ae36a129789ff 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
+ 
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
++DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+ 
+ void __init setup_per_cpu_areas(void)
+ {
+@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
+ 	if (rc < 0)
+ 		panic("cannot initialize percpu area (err=%d)", rc);
+ 
++	static_key_enable(&__percpu_first_chunk_is_paged.key);
+ 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ 	for_each_possible_cpu(cpu) {
+                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index 762e4d09aacfa..27254624f6389 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -26,6 +26,7 @@
+ #include <asm/paca.h>
+ #include <asm/mmu.h>
+ #include <asm/sections.h>	/* _end */
++#include <asm/setup.h>
+ #include <asm/smp.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+@@ -315,6 +316,16 @@ void default_machine_kexec(struct kimage *image)
+ 	if (!kdump_in_progress())
+ 		kexec_prepare_cpus();
+ 
++#ifdef CONFIG_PPC_PSERIES
++	/*
++	 * This must be done after other CPUs have shut down, otherwise they
++	 * could execute the 'scv' instruction, which is not supported with
++	 * reloc disabled (see configure_exceptions()).
++	 */
++	if (firmware_has_feature(FW_FEATURE_SET_MODE))
++		pseries_disable_reloc_on_exc();
++#endif
++
+ 	printk("kexec: Starting switchover sequence.\n");
+ 
+ 	/* switch to a staticly allocated stack.  Based on irq stack code.
+diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
+index 096d09ed89f67..431be156ca9bb 100644
+--- a/arch/powerpc/platforms/pseries/kexec.c
++++ b/arch/powerpc/platforms/pseries/kexec.c
+@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+ 	} else
+ 		xics_kexec_teardown_cpu(secondary);
+ }
+-
+-void pseries_machine_kexec(struct kimage *image)
+-{
+-	if (firmware_has_feature(FW_FEATURE_SET_MODE))
+-		pseries_disable_reloc_on_exc();
+-
+-	default_machine_kexec(image);
+-}
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index bba4ad192b0fe..3968a6970fa81 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
+ #endif
+ 
+ extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+-void pseries_machine_kexec(struct kimage *image);
+ 
+ extern void pSeries_final_fixup(void);
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 284a6fa04b0c2..b44de0f0822f0 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -1159,7 +1159,6 @@ define_machine(pseries) {
+ 	.machine_check_exception = pSeries_machine_check_exception,
+ 	.machine_check_log_err	= pSeries_machine_check_log_err,
+ #ifdef CONFIG_KEXEC_CORE
+-	.machine_kexec          = pseries_machine_kexec,
+ 	.kexec_cpu_down         = pseries_kexec_cpu_down,
+ #endif
+ #ifdef CONFIG_MEMORY_HOTPLUG
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index d79d6633f3336..bd4813bad317e 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1350,7 +1350,7 @@ static int cpu_cmd(void)
+ 	}
+ 	termch = cpu;
+ 
+-	if (!scanhex(&cpu)) {
++	if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
+ 		/* print cpus waiting or in xmon */
+ 		printf("cpus stopped:");
+ 		last_cpu = first_cpu = NR_CPUS;
+@@ -2772,7 +2772,7 @@ static void dump_pacas(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_paca(num);
+ 	else
+ 		dump_one_paca(xmon_owner);
+@@ -2845,7 +2845,7 @@ static void dump_xives(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_xive(num);
+ 	else
+ 		dump_one_xive(xmon_owner);
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
+index efd851e1b4832..7c8a71a526a30 100644
+--- a/arch/riscv/include/asm/errata_list.h
++++ b/arch/riscv/include/asm/errata_list.h
+@@ -43,11 +43,21 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault),			\
+ 	    CONFIG_ERRATA_SIFIVE_CIP_453)
+ #else /* !__ASSEMBLY__ */
+ 
+-#define ALT_FLUSH_TLB_PAGE(x)						\
++#define ALT_SFENCE_VMA_ASID(asid)					\
++asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
++		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
++		: : "r" (asid) : "memory")
++
++#define ALT_SFENCE_VMA_ADDR(addr)					\
+ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
+ 		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
+ 		: : "r" (addr) : "memory")
+ 
++#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid)				\
++asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID,	\
++		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
++		: : "r" (addr), "r" (asid) : "memory")
++
+ /*
+  * _val is marked as "will be overwritten", so need to set it to 0
+  * in the default case.
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 4112cc8d1d69f..79dada53d7eb5 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,10 +22,27 @@ static inline void local_flush_tlb_all(void)
+ 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
+ }
+ 
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++	if (asid != FLUSH_TLB_NO_ASID)
++		ALT_SFENCE_VMA_ASID(asid);
++	else
++		local_flush_tlb_all();
++}
++
+ /* Flush one page from local TLB */
+ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+-	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
++	ALT_SFENCE_VMA_ADDR(addr);
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++					     unsigned long asid)
++{
++	if (asid != FLUSH_TLB_NO_ASID)
++		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
++	else
++		local_flush_tlb_page(addr);
+ }
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all()			do { } while (0)
+diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
+index ed9cad20c039d..3c830a6f7ef46 100644
+--- a/arch/riscv/kernel/machine_kexec.c
++++ b/arch/riscv/kernel/machine_kexec.c
+@@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
+ 
+ 	for_each_irq_desc(i, desc) {
+ 		struct irq_chip *chip;
+-		int ret;
+ 
+ 		chip = irq_desc_get_chip(desc);
+ 		if (!chip)
+ 			continue;
+ 
+-		/*
+-		 * First try to remove the active state. If this
+-		 * fails, try to EOI the interrupt.
+-		 */
+-		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+-
+-		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+-		    chip->irq_eoi)
++		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ 			chip->irq_eoi(&desc->irq_data);
+ 
+ 		if (chip->irq_mask)
+diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
+index 86391a5061dda..cee1b9ca4ec48 100644
+--- a/arch/riscv/kvm/vcpu_pmu.c
++++ b/arch/riscv/kvm/vcpu_pmu.c
+@@ -39,7 +39,7 @@ static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
+ 	u64 sample_period;
+ 
+ 	if (!pmc->counter_val)
+-		sample_period = counter_val_mask + 1;
++		sample_period = counter_val_mask;
+ 	else
+ 		sample_period = (-pmc->counter_val) & counter_val_mask;
+ 
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 07d743f87b3f6..a6f788774856b 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -7,29 +7,6 @@
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+ 
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+-	if (asid != FLUSH_TLB_NO_ASID)
+-		__asm__ __volatile__ ("sfence.vma x0, %0"
+-				:
+-				: "r" (asid)
+-				: "memory");
+-	else
+-		local_flush_tlb_all();
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+-		unsigned long asid)
+-{
+-	if (asid != FLUSH_TLB_NO_ASID)
+-		__asm__ __volatile__ ("sfence.vma %0, %1"
+-				:
+-				: "r" (addr), "r" (asid)
+-				: "memory");
+-	else
+-		local_flush_tlb_page(addr);
+-}
+-
+ /*
+  * Flush entire TLB if number of entries to be flushed is greater
+  * than the threshold below.
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 95990461888fc..9281063636a73 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
+ 	u64 instruction_io_other;
+ 	u64 instruction_lpsw;
+ 	u64 instruction_lpswe;
++	u64 instruction_lpswey;
+ 	u64 instruction_pfmf;
+ 	u64 instruction_ptff;
+ 	u64 instruction_sck;
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index bbbdc5abe2b2c..a589547ee0200 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -305,8 +305,8 @@ static inline void __load_psw(psw_t psw)
+  */
+ static __always_inline void __load_psw_mask(unsigned long mask)
+ {
++	psw_t psw __uninitialized;
+ 	unsigned long addr;
+-	psw_t psw;
+ 
+ 	psw.mask = mask;
+ 
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 82e9631cd9efb..54b5b2565df8d 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
++	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
+ 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ 	STATS_DESC_COUNTER(VCPU, instruction_sck),
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 111eb5c747840..bf8534218af3d 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
+ 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+ }
+ 
++static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
++{
++	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
++	s64 disp1;
++
++	/* The displacement is a 20bit _SIGNED_ value */
++	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
++			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
++
++	if (ar)
++		*ar = base1;
++
++	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
++}
++
+ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ 					      u64 *address1, u64 *address2,
+ 					      u8 *ar_b1, u8 *ar_b2)
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 1be19cc9d73c1..1a49b89706f86 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static int handle_lpswey(struct kvm_vcpu *vcpu)
++{
++	psw_t new_psw;
++	u64 addr;
++	int rc;
++	u8 ar;
++
++	vcpu->stat.instruction_lpswey++;
++
++	if (!test_kvm_facility(vcpu->kvm, 193))
++		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
++
++	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
++		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
++
++	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
++	if (addr & 7)
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
++	if (rc)
++		return kvm_s390_inject_prog_cond(vcpu, rc);
++
++	vcpu->arch.sie_block->gpsw = new_psw;
++	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	return 0;
++}
++
+ static int handle_stidp(struct kvm_vcpu *vcpu)
+ {
+ 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
+@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
+ 	case 0x61:
+ 	case 0x62:
+ 		return handle_ri(vcpu);
++	case 0x71:
++		return handle_lpswey(vcpu);
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 15319b217bf3f..4bd7cbab4c241 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -104,6 +104,7 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
+ static int blk_validate_limits(struct queue_limits *lim)
+ {
+ 	unsigned int max_hw_sectors;
++	unsigned int logical_block_sectors;
+ 
+ 	/*
+ 	 * Unless otherwise specified, default to 512 byte logical blocks and a
+@@ -134,8 +135,11 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
+ 		return -EINVAL;
++	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
++	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
++		return -EINVAL;
+ 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
+-			lim->logical_block_size >> SECTOR_SHIFT);
++			logical_block_sectors);
+ 
+ 	/*
+ 	 * The actual max_sectors value is a complex beast and also takes the
+@@ -153,7 +157,7 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
+ 	}
+ 	lim->max_sectors = round_down(lim->max_sectors,
+-			lim->logical_block_size >> SECTOR_SHIFT);
++			logical_block_sectors);
+ 
+ 	/*
+ 	 * Random default for the maximum number of segments.  Driver should not
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 54906633566a2..5f3c1954d8e5d 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -45,8 +45,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ }
+ 
+diff --git a/crypto/cipher.c b/crypto/cipher.c
+index 47c77a3e59783..40cae908788ec 100644
+--- a/crypto/cipher.c
++++ b/crypto/cipher.c
+@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ 
+ }
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 3ec611dc0c09f..a905e955bbfc7 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ 
+ 		if (quirks->max_write_len &&
+ 		    (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+-			max_write = quirks->max_write_len;
++			max_write = quirks->max_write_len -
++				(config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 
+ 		if (max_read || max_write) {
+ 			ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index 27928deccc643..74d0418ddac78 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -84,6 +84,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * If a smaller zone capacity was requested, do not allow a smaller last
++	 * zone as well, since such a zone configuration does not correspond to
++	 * any real zoned device.
++	 */
++	if (dev->zone_capacity != dev->zone_size &&
++	    dev->size & (dev->zone_size - 1)) {
++		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
++		return -EINVAL;
++	}
++
+ 	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ 	dev_capacity_sects = mb_to_sects(dev->size);
+ 	dev->zone_size_sects = mb_to_sects(dev->zone_size);
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 0c2f15235b4cd..d90858ea2fe59 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -495,6 +495,10 @@ struct bcm4377_data;
+  *                  extended scanning
+  * broken_mws_transport_config: Set to true if the chip erroneously claims to
+  *                              support MWS Transport Configuration
++ * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside
++ *                               reserved bits of Primary/Secondary_PHY inside
++ *                               LE Extended Advertising Report events which
++ *                               have to be ignored
+  * send_calibration: Optional callback to send calibration data
+  * send_ptb: Callback to send "PTB" regulatory/calibration data
+  */
+@@ -513,6 +517,7 @@ struct bcm4377_hw {
+ 	unsigned long broken_ext_scan : 1;
+ 	unsigned long broken_mws_transport_config : 1;
+ 	unsigned long broken_le_coded : 1;
++	unsigned long broken_le_ext_adv_report_phy : 1;
+ 
+ 	int (*send_calibration)(struct bcm4377_data *bcm4377);
+ 	int (*send_ptb)(struct bcm4377_data *bcm4377,
+@@ -716,7 +721,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
+ 		ring->events[msgid] = NULL;
+ 	}
+ 
+-	bitmap_release_region(ring->msgids, msgid, ring->n_entries);
++	bitmap_release_region(ring->msgids, msgid, 0);
+ 
+ unlock:
+ 	spin_unlock_irqrestore(&ring->lock, flags);
+@@ -2373,6 +2378,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ 	if (bcm4377->hw->broken_le_coded)
+ 		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
++	if (bcm4377->hw->broken_le_ext_adv_report_phy)
++		set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
+ 
+ 	pci_set_drvdata(pdev, bcm4377);
+ 	hci_set_drvdata(hdev, bcm4377);
+@@ -2477,6 +2484,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ 		.clear_pciecfg_subsystem_ctrl_bit19 = true,
+ 		.broken_mws_transport_config = true,
+ 		.broken_le_coded = true,
++		.broken_le_ext_adv_report_phy = true,
+ 		.send_calibration = bcm4387_send_calibration,
+ 		.send_ptb = bcm4378_send_ptb,
+ 	},
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 0c9c9ee56592d..9a0bc86f9aace 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2450,15 +2450,27 @@ static void qca_serdev_shutdown(struct device *dev)
+ 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ 	struct hci_uart *hu = &qcadev->serdev_hu;
+ 	struct hci_dev *hdev = hu->hdev;
+-	struct qca_data *qca = hu->priv;
+ 	const u8 ibs_wake_cmd[] = { 0xFD };
+ 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+ 
+ 	if (qcadev->btsoc_type == QCA_QCA6390) {
+-		if (test_bit(QCA_BT_OFF, &qca->flags) ||
+-		    !test_bit(HCI_RUNNING, &hdev->flags))
++		/* The purpose of sending the VSC is to reset the SoC into an
++		 * initial state that ensures the next hdev->setup() succeeds.
++		 * If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup() can
++		 * do its job regardless of SoC state, so there is no need to
++		 * send the VSC.
++		 * If HCI_SETUP is set, hdev->setup() was never invoked and the
++		 * SoC is already in its initial state, so there is no need to
++		 * send the VSC either.
++		 */
++		if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
++		    hci_dev_test_flag(hdev, HCI_SETUP))
+ 			return;
+ 
++		/* The serdev must be in the open state when control logic arrives
++		 * here; this also fixes the use-after-free issue caused by the
++		 * serdev being flushed or written to after it is closed.
++		 */
+ 		serdev_device_write_flush(serdev);
+ 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ 					      sizeof(ibs_wake_cmd));
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index a5e07270e0d41..20c90ebb3a3f6 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ 		return -EFAULT;
+ 
+ 	tmp_info.media_flags = 0;
+-	if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
++	if (cdi->last_media_change_ms > tmp_info.last_media_change)
+ 		tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+ 
+ 	tmp_info.last_media_change = cdi->last_media_change_ms;
+diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+index ba504e19d4203..62d876e150e11 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
+ static const struct mtk_clk_desc mfg_desc = {
+ 	.clks = mfg_clks,
+ 	.num_clks = ARRAY_SIZE(mfg_clks),
++	.need_runtime_pm = true,
+ };
+ 
+ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index bd37ab4d1a9bb..ba1d1c495bc2b 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 	}
+ 
+ 
+-	devm_pm_runtime_enable(&pdev->dev);
+-	/*
+-	 * Do a pm_runtime_resume_and_get() to workaround a possible
+-	 * deadlock between clk_register() and the genpd framework.
+-	 */
+-	r = pm_runtime_resume_and_get(&pdev->dev);
+-	if (r)
+-		return r;
++	if (mcd->need_runtime_pm) {
++		devm_pm_runtime_enable(&pdev->dev);
++		/*
++		 * Do a pm_runtime_resume_and_get() to workaround a possible
++		 * deadlock between clk_register() and the genpd framework.
++		 */
++		r = pm_runtime_resume_and_get(&pdev->dev);
++		if (r)
++			return r;
++	}
+ 
+ 	/* Calculate how many clk_hw_onecell_data entries to allocate */
+ 	num_clks = mcd->num_clks + mcd->num_composite_clks;
+@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 			goto unregister_clks;
+ 	}
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 
+ 	return r;
+ 
+@@ -618,7 +621,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 	if (mcd->shared_io && base)
+ 		iounmap(base);
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 	return r;
+ }
+ 
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 22096501a60a7..c17fe1c2d732d 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -237,6 +237,8 @@ struct mtk_clk_desc {
+ 
+ 	int (*clk_notifier_func)(struct device *dev, struct clk *clk);
+ 	unsigned int mfg_clk_idx;
++
++	bool need_runtime_pm;
+ };
+ 
+ int mtk_clk_pdev_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index be18ff983d35c..003308a288968 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -2555,6 +2555,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
+ 	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ 					a >> ALPHA_BITWIDTH);
+ 
++	regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
++			   PLL_ALPHA_EN, PLL_ALPHA_EN);
++
+ 	regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
+ 
+ 	/* Wait five micro seconds or more */
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 0a3f846695b80..f8b9a1e93bef2 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ 
+ static struct clk_branch gcc_crypto_axi_clk = {
+ 	.halt_reg = 0x16010,
++	.halt_check = BRANCH_HALT_VOTED,
+ 	.clkr = {
+-		.enable_reg = 0x16010,
+-		.enable_mask = BIT(0),
++		.enable_reg = 0xb004,
++		.enable_mask = BIT(15),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_crypto_axi_clk",
+ 			.parent_hws = (const struct clk_hw *[]) {
+@@ -2156,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = {
+ 
+ static struct clk_branch gcc_crypto_ahb_clk = {
+ 	.halt_reg = 0x16014,
++	.halt_check = BRANCH_HALT_VOTED,
+ 	.clkr = {
+-		.enable_reg = 0x16014,
+-		.enable_mask = BIT(0),
++		.enable_reg = 0xb004,
++		.enable_mask = BIT(16),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_crypto_ahb_clk",
+ 			.parent_hws = (const struct clk_hw *[]) {
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23a..0559a33faf00e 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
+ 		.enable_mask = BIT(6),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll6",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll6_out_even",
+ 		.parent_hws = (const struct clk_hw*[]){
+-			&gpll0.clkr.hw,
++			&gpll6.clkr.hw,
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll7",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
+index ac0091b4ce242..be375ce0149c8 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.c
++++ b/drivers/clk/sunxi-ng/ccu_common.c
+@@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ 
+ 	for (i = 0; i < desc->hw_clks->num ; i++) {
+ 		struct clk_hw *hw = desc->hw_clks->hws[i];
+-		struct ccu_common *common = hw_to_ccu_common(hw);
+ 		const char *name;
+ 
+ 		if (!hw)
+@@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ 			pr_err("Couldn't register clock %d - %s\n", i, name);
+ 			goto err_clk_unreg;
+ 		}
++	}
++
++	for (i = 0; i < desc->num_ccu_clks; i++) {
++		struct ccu_common *cclk = desc->ccu_clks[i];
++
++		if (!cclk)
++			continue;
+ 
+-		if (common->max_rate)
+-			clk_hw_set_rate_range(hw, common->min_rate,
+-					      common->max_rate);
++		if (cclk->max_rate)
++			clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
++					      cclk->max_rate);
+ 		else
+-			WARN(common->min_rate,
++			WARN(cclk->min_rate,
+ 			     "No max_rate, ignoring min_rate of clock %d - %s\n",
+-			     i, name);
++			     i, clk_hw_get_name(&cclk->hw));
+ 	}
+ 
+ 	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index cd67fa348ca72..6351a452878dd 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -809,8 +809,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
+ {
+ 	int i;
+ 
++	if (!dregs)
++		return;
++
+ 	/* Setting the pointer is NULL to prevent double free */
+ 	for (i = 0; i < reg_len; i++) {
++		if (!dregs[i].regs)
++			continue;
++
+ 		kfree(dregs[i].regs);
+ 		dregs[i].regs = NULL;
+ 	}
+@@ -860,14 +866,21 @@ static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ static int qm_diff_regs_init(struct hisi_qm *qm,
+ 		struct dfx_diff_registers *dregs, u32 reg_len)
+ {
++	int ret;
++
+ 	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-	if (IS_ERR(qm->debug.qm_diff_regs))
+-		return PTR_ERR(qm->debug.qm_diff_regs);
++	if (IS_ERR(qm->debug.qm_diff_regs)) {
++		ret = PTR_ERR(qm->debug.qm_diff_regs);
++		qm->debug.qm_diff_regs = NULL;
++		return ret;
++	}
+ 
+ 	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ 	if (IS_ERR(qm->debug.acc_diff_regs)) {
+ 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-		return PTR_ERR(qm->debug.acc_diff_regs);
++		ret = PTR_ERR(qm->debug.acc_diff_regs);
++		qm->debug.acc_diff_regs = NULL;
++		return ret;
+ 	}
+ 
+ 	return 0;
+@@ -908,7 +921,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
+ static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+ {
+ 	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++	qm->debug.acc_diff_regs = NULL;
+ 	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++	qm->debug.qm_diff_regs = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index c290d8937b19c..fabea0d650297 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -152,7 +152,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
+ 	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ 	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ 	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+-	{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
++	{SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ 	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
+ 	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ 	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 015c95a825d31..ac2a5d2d47463 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
+ 	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ 		const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
++		/*
++		 * If a short entry is found (less than 4 bytes), not only is it
++		 * invalid, but we cannot reliably locate the next entry.
++		 */
++		if (dm->length < sizeof(struct dmi_header)) {
++			pr_warn(FW_BUG
++				"Corrupted DMI table, offset %zd (only %d entries processed)\n",
++				data - buf, i);
++			break;
++		}
++
+ 		/*
+ 		 *  We want to know the total length (formatted area and
+ 		 *  strings) before decoding to make sure we won't run off the
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 880ffcb500887..921f61507ae83 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+ 	if (IS_ERR(pdev)) {
+ 		return ERR_CAST(pdev);
+ 	} else if (pdev) {
+-		if (!sysfb_pci_dev_is_enabled(pdev))
++		if (!sysfb_pci_dev_is_enabled(pdev)) {
++			pci_dev_put(pdev);
+ 			return ERR_PTR(-ENODEV);
++		}
+ 		return &pdev->dev;
+ 	}
+ 
+@@ -137,7 +139,7 @@ static __init int sysfb_init(void)
+ 	if (compatible) {
+ 		pd = sysfb_create_simplefb(si, &mode, parent);
+ 		if (!IS_ERR(pd))
+-			goto unlock_mutex;
++			goto put_device;
+ 	}
+ 
+ 	/* if the FB is incompatible, create a legacy framebuffer device */
+@@ -155,7 +157,7 @@ static __init int sysfb_init(void)
+ 	pd = platform_device_alloc(name, 0);
+ 	if (!pd) {
+ 		ret = -ENOMEM;
+-		goto unlock_mutex;
++		goto put_device;
+ 	}
+ 
+ 	pd->dev.parent = parent;
+@@ -170,9 +172,11 @@ static __init int sysfb_init(void)
+ 	if (ret)
+ 		goto err;
+ 
+-	goto unlock_mutex;
++	goto put_device;
+ err:
+ 	platform_device_put(pd);
++put_device:
++	put_device(parent);
+ unlock_mutex:
+ 	mutex_unlock(&disable_lock);
+ 	return ret;
+diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
+index 71e1af7c21847..d89e78f0ead31 100644
+--- a/drivers/gpio/gpio-mmio.c
++++ b/drivers/gpio/gpio-mmio.c
+@@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
+ 	ret = gpiochip_get_ngpios(gc, dev);
+ 	if (ret)
+ 		gc->ngpio = gc->bgpio_bits;
+-	else
+-		gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
+ 
+ 	ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ 	if (ret)
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index cb0cefaec37e8..5c4442200118a 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -202,6 +202,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
+ 		 * helper, and be consistent with what other drivers do.
+ 		 */
+ 		{ "qi,lb60",		"rb-gpios",	true },
++#endif
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
++		/*
++		 * According to the PCI specification, the RST# pin is an
++		 * active-low signal. However, most of the device trees that
++		 * have been widely used for a long time incorrectly describe
++		 * the reset GPIO as active-high, and also used the wrong name
++		 * for the property.
++		 */
++		{ "lantiq,pci-xway",	"gpio-reset",	false },
++#endif
++#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
++		/*
++		 * DTS for Nokia N900 incorrectly specified "active high"
++		 * polarity for the reset line, while the chip actually
++		 * treats it as "active low".
++		 */
++		{ "ti,tsc2005",		"reset-gpios",	false },
+ #endif
+ 	};
+ 	unsigned int i;
+@@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ 		{ "reset",	"reset-n-io",	"marvell,nfc-uart" },
+ 		{ "reset",	"reset-n-io",	"mrvl,nfc-uart" },
+ #endif
+-#if !IS_ENABLED(CONFIG_PCI_LANTIQ)
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
+ 		/* MIPS Lantiq PCI */
+-		{ "reset",	"gpios-reset",	"lantiq,pci-xway" },
++		{ "reset",	"gpio-reset",	"lantiq,pci-xway" },
+ #endif
+ 
+ 		/*
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index 576067d66bb9a..d0a8da67dc2a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -97,7 +97,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 35dd6effa9a34..7291c3fd8cf70 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -455,6 +455,9 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
+ 		else
+ 			mem_info->local_mem_size_private =
+ 					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
++	} else if (adev->flags & AMD_IS_APU) {
++		mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
++		mem_info->local_mem_size_private = 0;
+ 	} else {
+ 		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+ 		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+@@ -809,6 +812,8 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
+ 		}
+ 		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+ 		return ALIGN_DOWN(tmp, PAGE_SIZE);
++	} else if (adev->flags & AMD_IS_APU) {
++		return (ttm_tt_pages_limit() << PAGE_SHIFT);
+ 	} else {
+ 		return adev->gmc.real_vram_size;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 0535b07987d9d..8975cf41a91ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ 			return -EINVAL;
+ 
+ 		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
+-		if (adev->gmc.is_app_apu) {
++		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 			system_mem_needed = size;
+ 			ttm_mem_needed = size;
+ 		}
+@@ -232,7 +232,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ 		  "adev reference can't be null when vram is used");
+ 	if (adev && xcp_id >= 0) {
+ 		adev->kfd.vram_used[xcp_id] += vram_needed;
+-		adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
++		adev->kfd.vram_used_aligned[xcp_id] +=
++				(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ 				vram_needed :
+ 				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ 	}
+@@ -260,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
+ 
+ 		if (adev) {
+ 			adev->kfd.vram_used[xcp_id] -= size;
+-			if (adev->gmc.is_app_apu) {
++			if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 				adev->kfd.vram_used_aligned[xcp_id] -= size;
+ 				kfd_mem_limit.system_mem_used -= size;
+ 				kfd_mem_limit.ttm_mem_used -= size;
+@@ -889,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+ 	 * if peer device has large BAR. In contrast, access over xGMI is
+ 	 * allowed for both small and large BAR configurations of peer device
+ 	 */
+-	if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
++	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+ 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
+ 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
+ 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
+@@ -1657,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ 		- atomic64_read(&adev->vram_pin_size)
+ 		- reserved_for_pt;
+ 
+-	if (adev->gmc.is_app_apu) {
++	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 		system_mem_available = no_system_mem_limit ?
+ 					kfd_mem_limit.max_system_mem_limit :
+ 					kfd_mem_limit.max_system_mem_limit -
+@@ -1705,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+ 
+-		if (adev->gmc.is_app_apu) {
++		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 			domain = AMDGPU_GEM_DOMAIN_GTT;
+ 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ 			alloc_flags = 0;
+@@ -1952,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ 	if (size) {
+ 		if (!is_imported &&
+ 		   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
+-		   (adev->gmc.is_app_apu &&
++		   ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+ 		    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ 			*size = bo_size;
+ 		else
+@@ -2374,8 +2375,9 @@ static int import_obj_create(struct amdgpu_device *adev,
+ 	(*mem)->dmabuf = dma_buf;
+ 	(*mem)->bo = bo;
+ 	(*mem)->va = va;
+-	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
+-		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
++	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
++			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
++			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ 
+ 	(*mem)->mapped_to_gpu_memory = 0;
+ 	(*mem)->process_info = avm->process_info;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index f5d0fa207a88b..b62ae3c91a9db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -2065,12 +2065,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ 	char reg_offset[11];
+ 	uint32_t *new = NULL, *tmp = NULL;
+-	int ret, i = 0, len = 0;
++	unsigned int len = 0;
++	int ret, i = 0;
+ 
+ 	do {
+ 		memset(reg_offset, 0, 11);
+ 		if (copy_from_user(reg_offset, buf + len,
+-					min(10, ((int)size-len)))) {
++					min(10, (size-len)))) {
+ 			ret = -EFAULT;
+ 			goto error_free;
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 55d5508987ffe..1d955652f3ba6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1206,7 +1206,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ 		break;
+ 	default:
+-		break;
++		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
++		return;
+ 	}
+ 
+ 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 7e6d09730e6d3..665c63f552787 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -445,6 +445,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ 
+ 	entry.ih = ih;
+ 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++
++	/*
++	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
++	 * si and tonga), so initialize timestamp and timestamp_src to 0
++	 */
++	entry.timestamp = 0;
++	entry.timestamp_src = 0;
++
+ 	amdgpu_ih_decode_iv(adev, &entry);
+ 
+ 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+index e0f8ce9d84406..db9cb2b4e9823 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+@@ -43,7 +43,7 @@ struct amdgpu_iv_entry;
+ #define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x)		AMDGPU_GET_REG_FIELD(x, 7, 7)
+ #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x)			AMDGPU_GET_REG_FIELD(x, 10, 8)
+ #define AMDGPU_RAS_GPU_ERR_AID_ID(x)			AMDGPU_GET_REG_FIELD(x, 12, 11)
+-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)			AMDGPU_GET_REG_FIELD(x, 13, 13)
++#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)			AMDGPU_GET_REG_FIELD(x, 14, 13)
+ #define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x)		AMDGPU_GET_REG_FIELD(x, 31, 31)
+ 
+ #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT	1000
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+index 20436f81856ad..6f7451e3ee87e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+@@ -170,6 +170,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ 	}
+ 
+ 	kfree(err_data->err_addr);
++	err_data->err_addr = NULL;
+ 
+ 	mutex_unlock(&con->page_retirement_lock);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 59acf424a078f..968ca2c84ef7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -743,7 +743,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ 	uint32_t created = 0;
+ 	uint32_t allocated = 0;
+ 	uint32_t tmp, handle = 0;
+-	uint32_t *size = &tmp;
++	uint32_t dummy = 0xffffffff;
++	uint32_t *size = &dummy;
+ 	unsigned int idx;
+ 	int i, r = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+index 93f6772d1b241..481217c32d853 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
++++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+@@ -92,7 +92,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 5c8d81bfce7ab..ba651d12f1fa0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+ 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
+ 		return -EINVAL;
+ 
+-	if (adev->gmc.is_app_apu)
++	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+ 		return 0;
+ 
+ 	pgmap = &kfddev->pgmap;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 386875e6eb96b..069b81eeea03c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -2619,7 +2619,8 @@ svm_range_best_restore_location(struct svm_range *prange,
+ 		return -1;
+ 	}
+ 
+-	if (node->adev->gmc.is_app_apu)
++	if (node->adev->gmc.is_app_apu ||
++	    node->adev->flags & AMD_IS_APU)
+ 		return 0;
+ 
+ 	if (prange->preferred_loc == gpuid ||
+@@ -3337,7 +3338,8 @@ svm_range_best_prefetch_location(struct svm_range *prange)
+ 		goto out;
+ 	}
+ 
+-	if (bo_node->adev->gmc.is_app_apu) {
++	if (bo_node->adev->gmc.is_app_apu ||
++	    bo_node->adev->flags & AMD_IS_APU) {
+ 		best_loc = 0;
+ 		goto out;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+index 026863a0abcd3..9c37bd0567efa 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+@@ -201,7 +201,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
+  * is initialized to not 0 when page migration register device memory.
+  */
+ #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
+-					(adev)->gmc.is_app_apu)
++					(adev)->gmc.is_app_apu ||\
++					((adev)->flags & AMD_IS_APU))
+ 
+ void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f866a02f4f489..2152e40ee1c27 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -274,7 +274,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ 				  u32 *vbl, u32 *position)
+ {
+-	u32 v_blank_start, v_blank_end, h_position, v_position;
++	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
+ 	struct amdgpu_crtc *acrtc = NULL;
+ 	struct dc *dc = adev->dm.dc;
+ 
+@@ -848,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
+  */
+ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+ {
+-	struct dmub_notification notify;
++	struct dmub_notification notify = {0};
+ 	struct common_irq_params *irq_params = interrupt_params;
+ 	struct amdgpu_device *adev = irq_params->adev;
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+@@ -7192,7 +7192,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct dm_connector_state *dm_conn_state;
+ 	int i, j, ret;
+-	int vcpi, pbn_div, pbn, slot_num = 0;
++	int vcpi, pbn_div, pbn = 0, slot_num = 0;
+ 
+ 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ 
+@@ -10595,7 +10595,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ 	struct drm_dp_mst_topology_mgr *mgr;
+ 	struct drm_dp_mst_topology_state *mst_state;
+-	struct dsc_mst_fairness_vars vars[MAX_PIPES];
++	struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
+ 
+ 	trace_amdgpu_dm_atomic_check_begin(state);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index c7715a17f388b..4d7a5d470b1ea 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
+ 				 size_t size, loff_t *pos)
+ {
+ 	int r;
+-	uint8_t data[36];
++	uint8_t data[36] = {0};
+ 	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ 	struct dm_crtc_state *acrtc_state;
+ 	uint32_t write_size = 36;
+@@ -2960,7 +2960,7 @@ static int psr_read_residency(void *data, u64 *val)
+ {
+ 	struct amdgpu_dm_connector *connector = data;
+ 	struct dc_link *link = connector->dc_link;
+-	u32 residency;
++	u32 residency = 0;
+ 
+ 	link->dc->link_srv->edp_get_psr_residency(link, &residency);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+index 3271c8c7905dd..4e036356b6a89 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+@@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
+ 	dce_clock_read_ss_info(clk_mgr);
+ 
+ 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
++	if (!clk_mgr->base.bw_params) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ 
+ 	/* need physical address of table to give to PMFW */
+ 	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
+ 			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
+ 			&clk_mgr->wm_range_table_addr);
++	if (!clk_mgr->wm_range_table) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ }
+ 
+ void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index e506e4f969ca9..dda1173be35ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -1208,11 +1208,19 @@ void dcn32_clk_mgr_construct(
+ 	clk_mgr->smu_present = false;
+ 
+ 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
++	if (!clk_mgr->base.bw_params) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ 
+ 	/* need physical address of table to give to PMFW */
+ 	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
+ 			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
+ 			&clk_mgr->wm_range_table_addr);
++	if (!clk_mgr->wm_range_table) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ }
+ 
+ void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ec4bf9432bdb1..ab598e1f088cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2168,50 +2168,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
+ 	}
+ }
+ 
+-void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
++static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
++		struct pipe_ctx *otg_master, int stream_idx)
+ {
+-	struct pipe_ctx *otg_master;
+ 	struct pipe_ctx *opp_heads[MAX_PIPES];
+ 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ 
+-	int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
++	int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
+ 	bool is_primary;
+ 	DC_LOGGER_INIT(dc->ctx->logger);
+ 
++	slice_count = resource_get_opp_heads_for_otg_master(otg_master,
++			&state->res_ctx, opp_heads);
++	for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
++		plane_idx = -1;
++		if (opp_heads[slice_idx]->plane_state) {
++			dpp_count = resource_get_dpp_pipes_for_opp_head(
++					opp_heads[slice_idx],
++					&state->res_ctx,
++					dpp_pipes);
++			for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
++				is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
++						dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
++				if (is_primary)
++					plane_idx++;
++				resource_log_pipe(dc, dpp_pipes[dpp_idx],
++						stream_idx, slice_idx,
++						plane_idx, slice_count,
++						is_primary);
++			}
++		} else {
++			resource_log_pipe(dc, opp_heads[slice_idx],
++					stream_idx, slice_idx, plane_idx,
++					slice_count, true);
++		}
++
++	}
++}
++
++static int resource_stream_to_stream_idx(struct dc_state *state,
++		struct dc_stream_state *stream)
++{
++	int i, stream_idx = -1;
++
++	for (i = 0; i < state->stream_count; i++)
++		if (state->streams[i] == stream) {
++			stream_idx = i;
++			break;
++		}
++
++	/* never return negative array index */
++	if (stream_idx == -1) {
++		ASSERT(0);
++		return 0;
++	}
++
++	return stream_idx;
++}
++
++void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
++{
++	struct pipe_ctx *otg_master;
++	int stream_idx, phantom_stream_idx;
++	DC_LOGGER_INIT(dc->ctx->logger);
++
+ 	DC_LOG_DC("    pipe topology update");
+ 	DC_LOG_DC("  ________________________");
+ 	for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
++		if (state->streams[stream_idx]->is_phantom)
++			continue;
++
+ 		otg_master = resource_get_otg_master_for_stream(
+ 				&state->res_ctx, state->streams[stream_idx]);
+-		if (!otg_master	|| otg_master->stream_res.tg == NULL) {
+-			DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+-			return;
+-		}
+-		slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+-				&state->res_ctx, opp_heads);
+-		for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+-			plane_idx = -1;
+-			if (opp_heads[slice_idx]->plane_state) {
+-				dpp_count = resource_get_dpp_pipes_for_opp_head(
+-						opp_heads[slice_idx],
+-						&state->res_ctx,
+-						dpp_pipes);
+-				for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+-					is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
+-							dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
+-					if (is_primary)
+-						plane_idx++;
+-					resource_log_pipe(dc, dpp_pipes[dpp_idx],
+-							stream_idx, slice_idx,
+-							plane_idx, slice_count,
+-							is_primary);
+-				}
+-			} else {
+-				resource_log_pipe(dc, opp_heads[slice_idx],
+-						stream_idx, slice_idx, plane_idx,
+-						slice_count, true);
+-			}
++		resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
++	}
++	if (state->phantom_stream_count > 0) {
++		DC_LOG_DC(" |    (phantom pipes)     |");
++		for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
++			if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
++				continue;
+ 
++			phantom_stream_idx = resource_stream_to_stream_idx(state,
++					state->stream_status[stream_idx].mall_stream_config.paired_stream);
++			otg_master = resource_get_otg_master_for_stream(
++					&state->res_ctx, state->streams[phantom_stream_idx]);
++			resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ 		}
+ 	}
+ 	DC_LOG_DC(" |________________________|\n");
+@@ -3117,6 +3158,9 @@ static struct audio *find_first_free_audio(
+ {
+ 	int i, available_audio_count;
+ 
++	if (id == ENGINE_ID_UNKNOWN)
++		return NULL;
++
+ 	available_audio_count = pool->audio_count;
+ 
+ 	for (i = 0; i < available_audio_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index 9be5ebf3a8c0b..79cd4c4790439 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 
+ 		/* Copy the calculated watermarks to mp.Watermark as the getter functions are
+ 		 * implemented by the DML team to copy the calculated values from the mp.Watermark interface.
++		 * &mode_lib->mp.Watermark and &locals->Watermark are the same address, memcpy may lead to
++		 * unexpected behavior. memmove should be used.
+ 		 */
+-		memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
++		memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
+ 
+ 		for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ 			if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) {
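The comment added above captures the whole bug: CalculateWatermarks_params->Watermark can point at &mode_lib->mp.Watermark itself, and memcpy() on overlapping (here, identical) regions is undefined behavior, while memmove() is specified to copy as if through a temporary buffer. A minimal, runnable userspace illustration of the difference (not DML code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "watermarks";

	/* Overlapping copy: shift the string right by two bytes.
	 * memmove() is defined for this; memcpy() would be undefined
	 * behavior.
	 */
	memmove(buf + 2, buf, strlen(buf) + 1);
	printf("%s\n", buf + 2);	/* prints "watermarks" */

	/* src == dst, as in the Watermark case: memmove() is a
	 * well-defined copy; memcpy() is not guaranteed to be.
	 */
	memmove(buf + 2, buf + 2, sizeof(buf) - 2);
	return 0;
}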
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+index a52c594e1ba4b..e1f1b5dd13203 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+@@ -88,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map
+ 			return  i;
+ 	}
+ 
+-	return -1;
++	ASSERT(false);
++	return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
+ }
+ 
+ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
+@@ -100,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma
+ 			return  i;
+ 	}
+ 
+-	return -1;
++	ASSERT(false);
++	return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
+ }
+ 
+ // The master pipe of a stream is defined as the top pipe in odm slice 0
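The two lookup helpers above used to return -1 on a miss, which callers fed straight into array subscripts. The fix asserts loudly and returns a fixed non-negative sentinel (__DML2_WRAPPER_MAX_STREAMS_PLANES__) instead; resource_stream_to_stream_idx() in the dc_resource.c hunk earlier applies the same idea with 0 as the fallback. A small runnable sketch of that variant (names hypothetical):

#include <assert.h>
#include <stddef.h>

/* Hypothetical lookup: asserts on a miss and returns a safe in-range
 * index instead of -1, so callers can never build a negative subscript.
 */
static size_t find_idx(const int *ids, size_t n, int id)
{
	for (size_t i = 0; i < n; i++)
		if (ids[i] == id)
			return i;

	assert(!"id not found");	/* loud in debug builds */
	return 0;			/* fixed non-negative fallback */
}

int main(void)
{
	int ids[] = { 4, 7, 9 };

	return (int)find_idx(ids, 3, 9);	/* exits with 2 */
}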
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+index 1c0d89e675da5..bb576a9c5fdbd 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
+ 						   info->ext_id);
+ 	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+ 
+-	struct timing_generator *tg =
+-			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
++	struct timing_generator *tg;
++
++	if (pipe_offset >= MAX_PIPES)
++		return false;
++
++	tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ 
+ 	if (enable) {
+ 		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
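Here pipe_offset is derived from the interrupt source number (dal_irq_src - IRQ_TYPE_VBLANK) rather than from anything the driver allocated, so it must be range-checked before indexing the fixed-size pipe_ctx[] array. The shape of the guard, as a runnable sketch with hypothetical names:

#include <stdbool.h>

#define MAX_PIPES 6

struct pipe_slot { int armed; };

/* idx comes from an external event number, not from our own
 * bookkeeping, so validate it before it touches the array.
 */
static bool arm_vblank(struct pipe_slot pipes[MAX_PIPES], unsigned int idx)
{
	if (idx >= MAX_PIPES)
		return false;	/* reject instead of indexing out of bounds */

	pipes[idx].armed = 1;
	return true;
}

int main(void)
{
	struct pipe_slot pipes[MAX_PIPES] = { 0 };

	return arm_vblank(pipes, 7) ? 1 : 0;	/* rejected: exits 0 */
}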
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+index ecc477ef8e3b7..b427a98066c11 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+@@ -2050,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+index 2fb1d00ff9654..f38de5391176f 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+@@ -1311,6 +1311,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+@@ -1767,6 +1769,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+index c97391edb5ff7..2791fc45bb8c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+@@ -1384,6 +1384,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+@@ -1744,6 +1746,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	if (filter_modes_for_single_channel_workaround(dc, context))
+ 		goto validate_fail;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+index 515ba435f759c..4ce0f4bf1d9bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+@@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+index b9753d4606f89..efa5627b0c50a 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+@@ -1306,6 +1306,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+index ce1754cc1f463..1f5a91b764828 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+@@ -1304,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+@@ -1751,6 +1753,9 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+index 296a0a8e71459..e83d340ed6260 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+@@ -1288,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+index 5d52853cac96a..cf0cb5cf4b5b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+@@ -1368,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+index 909e14261f9b4..116b59123199f 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+@@ -1348,6 +1348,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index f7b5583ee609a..8e9caae7c9559 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 1acb2d2c5597b..09cbc3afd6d89 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1
+ {
+   struct  atom_common_table_header  table_header;
+   /*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut  */
+-  struct  atom_gpio_pin_assignment  gpio_pin[8];
++  struct  atom_gpio_pin_assignment  gpio_pin[];
+ };
+ 
+ 
+@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
+    uint8_t  phase_delay_us;                      // phase delay in unit of micro second
+    uint8_t  reserved;   
+    uint32_t gpio_mask_val;                         // GPIO Mask value
+-   struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
++   struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
+ };
+ 
+ struct  atom_svid2_voltage_object_v4
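Both atomfirmware.h changes move trailing arrays to C99 flexible array members: gpio_pin[8] understated the element count that the header itself says is computed from the table size, and voltage_gpio_lut[1] was the old one-element idiom. The added __counted_by(gpio_entry_num) annotation additionally tells bounds-checking compilers which field holds the runtime length. A runnable userspace sketch of the flexible-array allocation pattern (the struct is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct lut {
	unsigned char entry_num;	/* runtime length of entries[] */
	int entries[];			/* flexible array member */
};

int main(void)
{
	unsigned char n = 5;
	/* One allocation covers the header plus n trailing elements;
	 * sizeof(struct lut) deliberately excludes entries[].
	 */
	struct lut *l = malloc(sizeof(*l) + n * sizeof(l->entries[0]));

	if (!l)
		return 1;
	l->entry_num = n;
	for (unsigned char i = 0; i < n; i++)
		l->entries[i] = i * i;
	printf("%d\n", l->entries[4]);	/* 16 */
	free(l);
	return 0;
}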
+diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
+index b4659cd6285ab..cbb7418b789f8 100644
+--- a/drivers/gpu/drm/drm_fbdev_generic.c
++++ b/drivers/gpu/drm/drm_fbdev_generic.c
+@@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 		    sizes->surface_width, sizes->surface_height,
+ 		    sizes->surface_bpp);
+ 
+-	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
++	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
++					     sizes->surface_depth);
+ 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ 					       sizes->surface_height, format);
+ 	if (IS_ERR(buffer))
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index aa93129c3397e..426bbee2d9f5e 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -421,6 +421,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* Valve Steam Deck */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/* VIOS LTH17 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index e15295071533b..3282997a0358d 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -345,7 +345,9 @@ int lima_gp_init(struct lima_ip *ip)
+ 
+ void lima_gp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_gp_pipe_init(struct lima_device *dev)
+diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
+index e18317c5ca8c1..6611e2836bf0d 100644
+--- a/drivers/gpu/drm/lima/lima_mmu.c
++++ b/drivers/gpu/drm/lima/lima_mmu.c
+@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
+ 
+ void lima_mmu_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
++
++	if (ip->id == lima_ip_ppmmu_bcast)
++		return;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ void lima_mmu_flush_tlb(struct lima_ip *ip)
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index a4a2ffe6527c2..eaab4788dff49 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -286,7 +286,9 @@ int lima_pp_init(struct lima_ip *ip)
+ 
+ void lima_pp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_pp_bcast_resume(struct lima_ip *ip)
+@@ -319,7 +321,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
+ 
+ void lima_pp_bcast_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
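The lima fini callbacks gain explicit devm_free_irq() calls. An IRQ taken with devm_request_irq() is normally released only when the device detaches, which is after these blocks have been shut down; on a shared interrupt line the handler could therefore still fire against powered-down hardware. Freeing in *_fini() closes that window (the broadcast PP MMU is skipped because it has no IRQ of its own). A hedged sketch of the shape, with struct my_ip standing in for the lima IP block:

#include <linux/device.h>
#include <linux/interrupt.h>

struct my_ip {
	struct device *dev;
	unsigned int irq;
};

/* Sketch only: explicit teardown ordering for a devres-managed IRQ. */
static void my_ip_fini(struct my_ip *ip)
{
	/* Drop the IRQ now, while the block is still a valid target,
	 * rather than waiting for automatic devres cleanup at detach.
	 */
	devm_free_irq(ip->dev, ip->irq, ip);
}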
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 856b3ef5edb89..0c71d761d3785 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 		struct drm_display_mode *mode;
+ 
+ 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++		if (!mode)
++			return 0;
++
+ 		drm_mode_probed_add(connector, mode);
+ 		ret = 1;
+ 	}
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 96a724e8f3ffe..c4feaacf17ac2 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref)
+ 		if (!dma_resv_test_signaled(bo->base.resv,
+ 					    DMA_RESV_USAGE_BOOKKEEP) ||
+ 		    (want_init_on_free() && (bo->ttm != NULL)) ||
++		    bo->type == ttm_bo_type_sg ||
+ 		    !dma_resv_trylock(bo->base.resv)) {
+ 			/* The BO is not idle, resurrect it for delayed destroy */
+ 			ttm_bo_flush_all_fences(bo);
+diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+index 9f6d571d7fa9c..a3d2dd42adf96 100644
+--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+@@ -12,6 +12,7 @@
+ #include "tests/xe_pci_test.h"
+ 
+ #include "xe_pci.h"
++#include "xe_pm.h"
+ 
+ static bool p2p_enabled(struct dma_buf_test_params *params)
+ {
+@@ -259,6 +260,7 @@ static int dma_buf_run_device(struct xe_device *xe)
+ 	const struct dma_buf_test_params *params;
+ 	struct kunit *test = xe_cur_kunit();
+ 
++	xe_pm_runtime_get(xe);
+ 	for (params = test_params; params->mem_mask; ++params) {
+ 		struct dma_buf_test_params p = *params;
+ 
+@@ -266,6 +268,7 @@ static int dma_buf_run_device(struct xe_device *xe)
+ 		test->priv = &p;
+ 		xe_test_dmabuf_import_same_driver(xe);
+ 	}
++	xe_pm_runtime_put(xe);
+ 
+ 	/* A non-zero return would halt iteration over driver devices */
+ 	return 0;
+diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
+index a7ab9ba645f99..c78fbb9bc5fc7 100644
+--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
++++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
+@@ -315,7 +315,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
+ 	else
+ 		gt->steering[OADDRM].group_target = 1;
+ 
+-	gt->steering[DSS].instance_target = 0;		/* unused */
++	gt->steering[OADDRM].instance_target = 0;	/* unused */
+ }
+ 
+ static void init_steering_sqidi_psmi(struct xe_gt *gt)
+@@ -330,8 +330,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
+ 
+ static void init_steering_inst0(struct xe_gt *gt)
+ {
+-	gt->steering[DSS].group_target = 0;		/* unused */
+-	gt->steering[DSS].instance_target = 0;		/* unused */
++	gt->steering[INSTANCE0].group_target = 0;	/* unused */
++	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
+ }
+ 
+ static const struct {
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index aca519f5b85d9..bebfdc8897813 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -1336,7 +1336,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 						 GFP_KERNEL, true, 0);
+ 			if (IS_ERR(sa_bo)) {
+ 				err = PTR_ERR(sa_bo);
+-				goto err;
++				goto err_bb;
+ 			}
+ 
+ 			ppgtt_ofs = NUM_KERNEL_PDE +
+@@ -1387,7 +1387,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 					 update_idx);
+ 	if (IS_ERR(job)) {
+ 		err = PTR_ERR(job);
+-		goto err_bb;
++		goto err_sa;
+ 	}
+ 
+ 	/* Wait on BO move */
+@@ -1436,12 +1436,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 
+ err_job:
+ 	xe_sched_job_put(job);
++err_sa:
++	drm_suballoc_free(sa_bo, NULL);
+ err_bb:
+ 	if (!q)
+ 		mutex_unlock(&m->job_mutex);
+ 	xe_bb_free(bb, NULL);
+-err:
+-	drm_suballoc_free(sa_bo, NULL);
+ 	return ERR_PTR(err);
+ }
+ 
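The xe_migrate fix is purely about unwind ordering: the sub-allocation (sa_bo) is acquired after the batch buffer (bb), so a failure acquiring it must unwind only bb, and a later failure must release sa_bo before bb. The relabeled goto chain now runs in reverse order of acquisition, the canonical kernel error-path pattern. A runnable userspace analogue with malloc() standing in for the GPU allocations:

#include <stdlib.h>

static int setup(void)
{
	int err = 0;
	void *bb, *sa, *job;

	bb = malloc(64);
	if (!bb)
		return -1;

	sa = malloc(64);
	if (!sa) {
		err = -1;
		goto err_bb;	/* nothing but bb exists yet */
	}

	job = malloc(64);
	if (!job) {
		err = -1;
		goto err_sa;	/* unwind sa, then bb */
	}

	free(job);
	free(sa);
	free(bb);
	return 0;

err_sa:
	free(sa);	/* labels run in reverse order of acquisition */
err_bb:
	free(bb);
	return err;
}

int main(void) { return setup(); }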
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index efcf78673e747..b6a995c852ab4 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -1530,6 +1530,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
+ 		},
+ 		.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ 	},
++	{
++		.ident = "Dell G15 5511",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
++		},
++		.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 79870dd7a0146..fafd999b4bcb2 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1059,7 +1059,7 @@ static const struct pci_device_id i801_ids[] = {
+ MODULE_DEVICE_TABLE(pci, i801_ids);
+ 
+ #if defined CONFIG_X86 && defined CONFIG_DMI
+-static unsigned char apanel_addr;
++static unsigned char apanel_addr __ro_after_init;
+ 
+ /* Scan the system ROM for the signature "FJKEYINF" */
+ static __init const void __iomem *bios_signature(const void __iomem *bios)
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index a12525b3186bc..f448505d54682 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -15,7 +15,6 @@
+ #include <linux/ioport.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+-#include <linux/timer.h>
+ #include <linux/completion.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
+ 	int			ret;		/* Return value */
+ 	int			mode;		/* Interface mode */
+ 	struct completion	complete;	/* I/O completion */
+-	struct timer_list	timer;		/* Timeout */
+ 	u8 *			buf;		/* Data buffer */
+ 	int			len;		/* Length of data buffer */
+ 	int			order;		/* RX Bytes to order via TX */
+@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+ 	return (timeout <= 0);
+ }
+ 
+-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
+-{
+-	struct timer_list *timer = &alg_data->mif.timer;
+-	unsigned long expires = msecs_to_jiffies(alg_data->timeout);
+-
+-	if (expires <= 1)
+-		expires = 2;
+-
+-	del_timer_sync(timer);
+-
+-	dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
+-		jiffies, expires);
+-
+-	timer->expires = jiffies + expires;
+-
+-	add_timer(timer);
+-}
+-
+ /**
+  * i2c_pnx_start - start a device
+  * @slave_addr:		slave address
+@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 				  I2C_REG_CTL(alg_data));
+ 
+-			del_timer_sync(&alg_data->mif.timer);
+-
+ 			dev_dbg(&alg_data->adapter.dev,
+ 				"%s(): Waking up xfer routine.\n",
+ 				__func__);
+@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 			  I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		dev_dbg(&alg_data->adapter.dev,
+ 			"%s(): Waking up xfer routine after zero-xfer.\n",
+ 			__func__);
+@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
+ 				 mcntrl_drmie | mcntrl_daie);
+ 			iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-			/* Kill timer. */
+-			del_timer_sync(&alg_data->mif.timer);
+ 			complete(&alg_data->mif.complete);
+ 		}
+ 	}
+@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 			 mcntrl_drmie);
+ 		iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else if (stat & mstatus_nai) {
+ 		/* Slave did not acknowledge, generate a STOP */
+@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 		/* Our return value. */
+ 		alg_data->mif.ret = -EIO;
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else {
+ 		/*
+@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void i2c_pnx_timeout(struct timer_list *t)
++static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
+ {
+-	struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
+ 	u32 ctl;
+ 
+ 	dev_err(&alg_data->adapter.dev,
+@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
+ 	iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 	wait_reset(alg_data);
+ 	alg_data->mif.ret = -EIO;
+-	complete(&alg_data->mif.complete);
+ }
+ 
+ static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
+@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	struct i2c_msg *pmsg;
+ 	int rc = 0, completed = 0, i;
+ 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
++	unsigned long time_left;
+ 	u32 stat;
+ 
+ 	dev_dbg(&alg_data->adapter.dev,
+@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+ 			__func__, alg_data->mif.mode, alg_data->mif.len);
+ 
+-		i2c_pnx_arm_timer(alg_data);
+ 
+ 		/* initialize the completion var */
+ 		init_completion(&alg_data->mif.complete);
+@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			break;
+ 
+ 		/* Wait for completion */
+-		wait_for_completion(&alg_data->mif.complete);
++		time_left = wait_for_completion_timeout(&alg_data->mif.complete,
++							alg_data->timeout);
++		if (time_left == 0)
++			i2c_pnx_timeout(alg_data);
+ 
+ 		if (!(rc = alg_data->mif.ret))
+ 			completed++;
+@@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	alg_data->adapter.algo_data = alg_data;
+ 	alg_data->adapter.nr = pdev->id;
+ 
+-	alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
++	alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
++	if (alg_data->timeout <= 1)
++		alg_data->timeout = 2;
++
+ #ifdef CONFIG_OF
+ 	alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
+ 	if (pdev->dev.of_node) {
+@@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	if (IS_ERR(alg_data->clk))
+ 		return PTR_ERR(alg_data->clk);
+ 
+-	timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
+-
+ 	snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
+ 		 "%s", pdev->name);
+ 
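The whole i2c-pnx timer apparatus disappears because wait_for_completion_timeout() expresses the same thing inline: the timeout is converted to jiffies once at probe (clamped to at least 2 jiffies, exactly as the old arm_timer helper did), and the old timer callback's recovery code now runs directly when the wait returns 0. The resulting pattern, sketched for a hypothetical driver (my_adap, start_transfer() and reset_controller() are placeholders, not this driver's symbols):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_adap {
	struct completion done;
	unsigned long timeout_jiffies;	/* precomputed at probe, >= 2 */
	int xfer_ret;			/* set by the IRQ handler */
};

static void start_transfer(struct my_adap *adap);	/* hypothetical */
static void reset_controller(struct my_adap *adap);	/* hypothetical */

/* Sketch: the timeout folded into the wait, no private timer needed. */
static int my_xfer(struct my_adap *adap)
{
	unsigned long time_left;

	init_completion(&adap->done);
	start_transfer(adap);		/* IRQ handler calls complete() */

	time_left = wait_for_completion_timeout(&adap->done,
						adap->timeout_jiffies);
	if (time_left == 0) {
		reset_controller(adap);	/* what the timer callback did */
		return -EIO;
	}

	return adap->xfer_ret;
}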
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index f5feca7fa9b9c..2ed749f50a29f 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++#define MAX_UMAD_RECV_LIST_SIZE 200000
++
+ enum {
+ 	IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
+ 	IB_UMAD_MAX_AGENTS = 32,
+@@ -113,6 +115,7 @@ struct ib_umad_file {
+ 	struct mutex		mutex;
+ 	struct ib_umad_port    *port;
+ 	struct list_head	recv_list;
++	atomic_t		recv_list_size;
+ 	struct list_head	send_list;
+ 	struct list_head	port_list;
+ 	spinlock_t		send_lock;
+@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+ 	return file->agents_dead ? NULL : file->agent[id];
+ }
+ 
+-static int queue_packet(struct ib_umad_file *file,
+-			struct ib_mad_agent *agent,
+-			struct ib_umad_packet *packet)
++static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
++			struct ib_umad_packet *packet, bool is_recv_mad)
+ {
+ 	int ret = 1;
+ 
+ 	mutex_lock(&file->mutex);
+ 
++	if (is_recv_mad &&
++	    atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
++		goto unlock;
++
+ 	for (packet->mad.hdr.id = 0;
+ 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+ 	     packet->mad.hdr.id++)
+ 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
+ 			list_add_tail(&packet->list, &file->recv_list);
++			atomic_inc(&file->recv_list_size);
+ 			wake_up_interruptible(&file->recv_wait);
+ 			ret = 0;
+ 			break;
+ 		}
+-
++unlock:
+ 	mutex_unlock(&file->mutex);
+ 
+ 	return ret;
+@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
+ 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
+ 		packet->length = IB_MGMT_MAD_HDR;
+ 		packet->mad.hdr.status = ETIMEDOUT;
+-		if (!queue_packet(file, agent, packet))
++		if (!queue_packet(file, agent, packet, false))
+ 			return;
+ 	}
+ 	kfree(packet);
+@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
+ 		rdma_destroy_ah_attr(&ah_attr);
+ 	}
+ 
+-	if (queue_packet(file, agent, packet))
++	if (queue_packet(file, agent, packet, true))
+ 		goto err2;
+ 	return;
+ 
+@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+ 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ 	list_del(&packet->list);
++	atomic_dec(&file->recv_list_size);
+ 
+ 	mutex_unlock(&file->mutex);
+ 
+@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 		/* Requeue packet */
+ 		mutex_lock(&file->mutex);
+ 		list_add(&packet->list, &file->recv_list);
++		atomic_inc(&file->recv_list_size);
+ 		mutex_unlock(&file->mutex);
+ 	} else {
+ 		if (packet->recv_wc)
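The user_mad change bounds the receive queue: an atomic_t mirrors the list length so a flood of unread MADs cannot grow kernel memory without limit. Receive packets are dropped once the count passes MAX_UMAD_RECV_LIST_SIZE (200000), while send-timeout notifications (is_recv_mad == false) still bypass the cap. A compact sketch of the producer side, with hypothetical types:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>

#define MAX_LIST_SIZE 200000

struct my_file {
	struct mutex mutex;
	struct list_head list;
	atomic_t list_size;
	wait_queue_head_t wait;
};

struct my_pkt { struct list_head list; };

/* Sketch: the producer refuses new entries once the cap is reached. */
static int enqueue(struct my_file *f, struct my_pkt *pkt, bool enforce_cap)
{
	int ret = -ENOSPC;

	mutex_lock(&f->mutex);
	if (enforce_cap && atomic_read(&f->list_size) > MAX_LIST_SIZE)
		goto unlock;

	list_add_tail(&pkt->list, &f->list);
	atomic_inc(&f->list_size);	/* readers atomic_dec() on removal */
	wake_up_interruptible(&f->wait);
	ret = 0;
unlock:
	mutex_unlock(&f->mutex);
	return ret;
}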
+diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
+index 16231fe080b00..609a5f01761bd 100644
+--- a/drivers/input/ff-core.c
++++ b/drivers/input/ff-core.c
+@@ -9,8 +9,10 @@
+ /* #define DEBUG */
+ 
+ #include <linux/input.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/overflow.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
+@@ -315,9 +317,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
+ 		return -EINVAL;
+ 	}
+ 
+-	ff_dev_size = sizeof(struct ff_device) +
+-				max_effects * sizeof(struct file *);
+-	if (ff_dev_size < max_effects) /* overflow */
++	ff_dev_size = struct_size(ff, effect_owners, max_effects);
++	if (ff_dev_size == SIZE_MAX) /* overflow */
+ 		return -EINVAL;
+ 
+ 	ff = kzalloc(ff_dev_size, GFP_KERNEL);
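The open-coded size computation above had an unreliable overflow test: ff_dev_size < max_effects only catches some wrap-arounds of the addition, not every overflow of the multiplication. struct_size() from <linux/overflow.h> computes sizeof(*ff) + count * sizeof(ff->effect_owners[0]) with checked arithmetic and saturates to SIZE_MAX on overflow, making the single comparison exact. A sketch mirroring the fixed allocation:

#include <linux/input.h>
#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch of the fix: checked size math for a trailing array. */
static struct ff_device *alloc_ff(unsigned int max_effects)
{
	struct ff_device *ff;
	size_t size;

	/* sizeof(*ff) + max_effects * sizeof(ff->effect_owners[0]),
	 * saturating to SIZE_MAX if the arithmetic overflows.
	 */
	size = struct_size(ff, effect_owners, max_effects);
	if (size == SIZE_MAX)
		return NULL;

	return kzalloc(size, GFP_KERNEL);
}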
+diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
+index 0216afed3b6e7..decfca447d8a7 100644
+--- a/drivers/leds/leds-an30259a.c
++++ b/drivers/leds/leds-an30259a.c
+@@ -283,7 +283,10 @@ static int an30259a_probe(struct i2c_client *client)
+ 	if (err < 0)
+ 		return err;
+ 
+-	mutex_init(&chip->mutex);
++	err = devm_mutex_init(&client->dev, &chip->mutex);
++	if (err)
++		return err;
++
+ 	chip->client = client;
+ 	i2c_set_clientdata(client, chip);
+ 
+@@ -317,17 +320,9 @@ static int an30259a_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ exit:
+-	mutex_destroy(&chip->mutex);
+ 	return err;
+ }
+ 
+-static void an30259a_remove(struct i2c_client *client)
+-{
+-	struct an30259a *chip = i2c_get_clientdata(client);
+-
+-	mutex_destroy(&chip->mutex);
+-}
+-
+ static const struct of_device_id an30259a_match_table[] = {
+ 	{ .compatible = "panasonic,an30259a", },
+ 	{ /* sentinel */ },
+@@ -347,7 +342,6 @@ static struct i2c_driver an30259a_driver = {
+ 		.of_match_table = an30259a_match_table,
+ 	},
+ 	.probe = an30259a_probe,
+-	.remove = an30259a_remove,
+ 	.id_table = an30259a_id,
+ };
+ 
+diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
+index 5595788d98d20..1b70de72376cc 100644
+--- a/drivers/leds/leds-mlxreg.c
++++ b/drivers/leds/leds-mlxreg.c
+@@ -256,6 +256,7 @@ static int mlxreg_led_probe(struct platform_device *pdev)
+ {
+ 	struct mlxreg_core_platform_data *led_pdata;
+ 	struct mlxreg_led_priv_data *priv;
++	int err;
+ 
+ 	led_pdata = dev_get_platdata(&pdev->dev);
+ 	if (!led_pdata) {
+@@ -267,26 +268,21 @@ static int mlxreg_led_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	mutex_init(&priv->access_lock);
++	err = devm_mutex_init(&pdev->dev, &priv->access_lock);
++	if (err)
++		return err;
++
+ 	priv->pdev = pdev;
+ 	priv->pdata = led_pdata;
+ 
+ 	return mlxreg_led_config(priv);
+ }
+ 
+-static void mlxreg_led_remove(struct platform_device *pdev)
+-{
+-	struct mlxreg_led_priv_data *priv = dev_get_drvdata(&pdev->dev);
+-
+-	mutex_destroy(&priv->access_lock);
+-}
+-
+ static struct platform_driver mlxreg_led_driver = {
+ 	.driver = {
+ 	    .name = "leds-mlxreg",
+ 	},
+ 	.probe = mlxreg_led_probe,
+-	.remove_new = mlxreg_led_remove,
+ };
+ 
+ module_platform_driver(mlxreg_led_driver);
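Both LED drivers switch to devm_mutex_init(), which registers the mutex teardown with devres; the only job of their .remove callbacks was mutex_destroy(), so the callbacks can be deleted outright, along with the error-path cleanup in an30259a_probe(). The resulting probe-only shape, as a hedged sketch (my_priv is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

struct my_priv { struct mutex lock; };

/* Sketch: devres tears the mutex down, so no .remove hook is needed. */
static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	err = devm_mutex_init(&pdev->dev, &priv->lock);
	if (err)
		return err;

	platform_set_drvdata(pdev, priv);
	return 0;
}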
+diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
+index 297f9520ebf9d..8a4e392c88965 100644
+--- a/drivers/media/dvb-frontends/as102_fe_types.h
++++ b/drivers/media/dvb-frontends/as102_fe_types.h
+@@ -174,6 +174,6 @@ struct as10x_register_addr {
+ 	uint32_t addr;
+ 	/* register mode access */
+ 	uint8_t mode;
+-};
++} __packed;
+ 
+ #endif
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index 5d5e4e9e4422e..3e725cdcc66bd 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	struct tda10048_config *config = &state->config;
+ 	int i;
+ 	u32 if_freq_khz;
++	u64 sample_freq;
+ 
+ 	dprintk(1, "%s(bw = %d)\n", __func__, bw);
+ 
+@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+ 
+ 	/* Calculate the sample frequency */
+-	state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+-	state->sample_freq /= (state->pll_nfactor + 1);
+-	state->sample_freq /= (state->pll_pfactor + 4);
++	sample_freq = state->xtal_hz;
++	sample_freq *= state->pll_mfactor + 45;
++	do_div(sample_freq, state->pll_nfactor + 1);
++	do_div(sample_freq, state->pll_pfactor + 4);
++	state->sample_freq = sample_freq;
+ 	dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+ 
+ 	/* Update the I/F */
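state->sample_freq is a u32, so the old code multiplied and divided entirely in 32 bits, and xtal_hz * (pll_mfactor + 45) could wrap before the divisions were applied. The fix widens the intermediate to u64 and divides with do_div(), the kernel's in-place 64-by-32 helper. The overflow itself, demonstrated in runnable userspace C (the PLL factors below are made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up factors, chosen so the product exceeds 2^32. */
	uint32_t xtal_hz = 16000000, m = 300, n = 15, p = 4;

	/* All-32-bit arithmetic: 16e6 * 345 wraps past 2^32. */
	uint32_t narrow = xtal_hz * (m + 45) / (n + 1) / (p + 4);

	/* 64-bit intermediate, truncated to u32 only at the end; in the
	 * kernel the divisions would be do_div(sample_freq, ...) calls.
	 */
	uint64_t wide = (uint64_t)xtal_hz * (m + 45);
	wide /= n + 1;
	wide /= p + 4;

	printf("wrapped=%" PRIu32 " correct=%" PRIu32 "\n",
	       narrow, (uint32_t)wide);
	return 0;
}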
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index a348344879433..fd928787207ed 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
+ 
+ 	OscFreq = (u64) freq * (u64) Div;
+ 	OscFreq *= (u64) 16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	MainDiv = OscFreq;
+ 
+ 	state->m_Regs[MPD] = PostDiv & 0x77;
+@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
+ 	OscFreq = (u64)freq * (u64)Div;
+ 	/* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ 	OscFreq *= (u64)16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	CalDiv = OscFreq;
+ 
+ 	state->m_Regs[CPD] = PostDiv;
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index f250640729ca4..b947a55281f0e 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -326,7 +326,7 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
+ 	}
+ 
+ 	dev_dbg(&client->dev, "detect link_freq = %lld Hz", link_freq);
+-	do_div(ui_4, link_freq);
++	ui_4 = div64_u64(ui_4, link_freq);
+ 	bridge->r.clk_lane_reg1 |= ui_4 << 2;
+ 
+ 	return 0;
+diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
+index d676adc4401bb..edf79107adc51 100644
+--- a/drivers/media/i2c/tc358746.c
++++ b/drivers/media/i2c/tc358746.c
+@@ -844,8 +844,7 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
+ 			continue;
+ 
+ 		tmp = fout * postdiv;
+-		do_div(tmp, fin);
+-		mul = tmp;
++		mul = div64_ul(tmp, fin);
+ 		if (mul > 511)
+ 			continue;
+ 
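These two fixes are about choosing the right 64-bit division helper: do_div() assumes a 32-bit divisor, so handing it the 64-bit link_freq silently truncated the divisor, while div64_u64() and div64_ul() are the variants for u64 and unsigned long divisors. A hedged kernel-style sketch of the three (scale() and its parameters are hypothetical):

#include <linux/math64.h>

/* Sketch: pick the helper that matches the divisor's width. */
static u64 scale(u64 num, u64 freq64, unsigned long freq_ul, u32 freq32)
{
	u64 a = div64_u64(num, freq64);	/* u64 / u64 */
	u64 b = div64_ul(num, freq_ul);	/* u64 / unsigned long */

	do_div(num, freq32);		/* u64 / u32; divides num in place
					 * and evaluates to the remainder */
	return a + b + num;
}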
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+index 2b6a5adbc4199..b0e2e59f61b5d 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+@@ -1023,18 +1023,26 @@ static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *i
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->mv[i]);
++		if (instance->mv[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->seg); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->seg[i]);
++		if (instance->seg[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->cdf); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++		if (instance->cdf[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++
+ 
+-	mtk_vcodec_mem_free(ctx, &instance->tile);
+-	mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
+-	mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+-	mtk_vcodec_mem_free(ctx, &instance->iq_table);
++	if (instance->tile.va)
++		mtk_vcodec_mem_free(ctx, &instance->tile);
++	if (instance->cdf_temp.va)
++		mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
++	if (instance->cdf_table.va)
++		mtk_vcodec_mem_free(ctx, &instance->cdf_table);
++	if (instance->iq_table.va)
++		mtk_vcodec_mem_free(ctx, &instance->iq_table);
+ 
+ 	instance->level = AV1_RES_NONE;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+index a68dac72c4e42..f8145998fcaf7 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+@@ -301,11 +301,12 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
+ 	 * other buffers need to be freed by AP.
+ 	 */
+ 	for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+-		if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
++		if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME && inst->work_bufs[i].va)
+ 			mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ 	}
+ 
+-	mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
++	if (inst->pps_buf.va)
++		mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+ }
+ 
+ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 3af594134a6de..6ddc205133939 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2412,7 +2412,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+ 
+-	return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+@@ -2485,8 +2490,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+ 
+-	if (adap->fe_adap[0].fe == NULL)
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
+ 		return -ENODEV;
++	}
+ 
+ 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ 	dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+@@ -2494,7 +2501,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+ 	dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
+ 
+-	return fe_slave == NULL ?  -ENODEV : 0;
++	if (!fe_slave) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index b3bb1805829ad..f31d3835430e7 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+ 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ 	struct dw2102_state *state;
++	int j;
+ 
+ 	if (!d)
+ 		return -ENODEV;
+@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 		return -EAGAIN;
+ 	}
+ 
+-	switch (num) {
+-	case 1:
+-		switch (msg[0].addr) {
++	j = 0;
++	while (j < num) {
++		switch (msg[j].addr) {
+ 		case SU3000_STREAM_CTRL:
+-			state->data[0] = msg[0].buf[0] + 0x36;
++			state->data[0] = msg[j].buf[0] + 0x36;
+ 			state->data[1] = 3;
+ 			state->data[2] = 0;
+ 			if (dvb_usb_generic_rw(d, state->data, 3,
+@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			if (dvb_usb_generic_rw(d, state->data, 1,
+ 					state->data, 2, 0) < 0)
+ 				err("i2c transfer failed.");
+-			msg[0].buf[1] = state->data[0];
+-			msg[0].buf[0] = state->data[1];
++			msg[j].buf[1] = state->data[0];
++			msg[j].buf[0] = state->data[1];
+ 			break;
+ 		default:
+-			if (3 + msg[0].len > sizeof(state->data)) {
+-				warn("i2c wr: len=%d is too big!\n",
+-				     msg[0].len);
++			/* if the current write msg is followed by another
++			 * read msg to/from the same address
++			 */
++			if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
++			    (msg[j].addr == msg[j+1].addr)) {
++				/* join both i2c msgs to one usb read command */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: write len=%d is too big!\n",
++					    msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++				if (1 + msg[j+1].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: read len=%d is too big!\n",
++					    msg[j+1].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = msg[j].len;
++				state->data[2] = msg[j+1].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
++					state->data, msg[j+1].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
++				j++;
++				break;
++			}
++
++			if (msg[j].flags & I2C_M_RD) {
++				/* single read */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c rd: len=%d is too big!\n", msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = 0;
++				state->data[2] = msg[j].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, 4,
++					state->data, msg[j].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j].buf, &state->data[1], msg[j].len);
++				break;
++			}
++
++			/* single write */
++			if (3 + msg[j].len > sizeof(state->data)) {
++				warn("i2c wr: len=%d is too big!\n", msg[j].len);
+ 				num = -EOPNOTSUPP;
+ 				break;
+ 			}
+ 
+-			/* always i2c write*/
+ 			state->data[0] = 0x08;
+-			state->data[1] = msg[0].addr;
+-			state->data[2] = msg[0].len;
++			state->data[1] = msg[j].addr;
++			state->data[2] = msg[j].len;
+ 
+-			memcpy(&state->data[3], msg[0].buf, msg[0].len);
++			memcpy(&state->data[3], msg[j].buf, msg[j].len);
+ 
+-			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
++			if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+ 						state->data, 1, 0) < 0)
+ 				err("i2c transfer failed.");
++		} // switch
++		j++;
+ 
+-		}
+-		break;
+-	case 2:
+-		/* always i2c read */
+-		if (4 + msg[0].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[0].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-		if (1 + msg[1].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[1].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-
+-		state->data[0] = 0x09;
+-		state->data[1] = msg[0].len;
+-		state->data[2] = msg[1].len;
+-		state->data[3] = msg[0].addr;
+-		memcpy(&state->data[4], msg[0].buf, msg[0].len);
+-
+-		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+-					state->data, msg[1].len + 1, 0) < 0)
+-			err("i2c transfer failed.");
+-
+-		memcpy(msg[1].buf, &state->data[1], msg[1].len);
+-		break;
+-	default:
+-		warn("more than 2 i2c messages at a time is not handled yet.");
+-		break;
+-	}
++	} // while
+ 	mutex_unlock(&d->data_mutex);
+ 	mutex_unlock(&d->i2c_mutex);
+ 	return num;
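su3000_i2c_transfer() used to switch on num and only understood "a single write" or "exactly one write followed by one read". The rewrite walks the whole message array, fusing any write immediately followed by a read to the same address into one combined USB command and handling lone reads and writes individually. Its control flow, reduced to a runnable skeleton (the do_* helpers are stubs standing in for the USB transactions):

#include <stdbool.h>
#include <stdio.h>

#define I2C_M_RD 0x0001

struct i2c_msg {
	unsigned short addr;
	unsigned short flags;
	unsigned short len;
	unsigned char *buf;
};

/* Stubs for the three USB command shapes. */
static void do_write_read(struct i2c_msg *w, struct i2c_msg *r)
{ printf("wr+rd @%#x\n", w->addr); (void)r; }
static void do_read(struct i2c_msg *m) { printf("rd    @%#x\n", m->addr); }
static void do_write(struct i2c_msg *m) { printf("wr    @%#x\n", m->addr); }

static int xfer(struct i2c_msg *msgs, int num)
{
	for (int j = 0; j < num; j++) {
		bool rd = msgs[j].flags & I2C_M_RD;

		if (!rd && j + 1 < num && (msgs[j + 1].flags & I2C_M_RD) &&
		    msgs[j].addr == msgs[j + 1].addr) {
			do_write_read(&msgs[j], &msgs[j + 1]);
			j++;		/* consumed two messages */
		} else if (rd) {
			do_read(&msgs[j]);
		} else {
			do_write(&msgs[j]);
		}
	}
	return num;
}

int main(void)
{
	struct i2c_msg msgs[3] = {
		{ .addr = 0x68, .flags = 0 },		/* write ...      */
		{ .addr = 0x68, .flags = I2C_M_RD },	/* ... fused read */
		{ .addr = 0x51, .flags = I2C_M_RD },	/* lone read      */
	};

	return xfer(msgs, 3) == 3 ? 0 : 1;
}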
+diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
+index 8e1de1e8bd127..a6e450181fd01 100644
+--- a/drivers/media/usb/s2255/s2255drv.c
++++ b/drivers/media/usb/s2255/s2255drv.c
+@@ -247,7 +247,7 @@ struct s2255_vc {
+ struct s2255_dev {
+ 	struct s2255_vc         vc[MAX_CHANNELS];
+ 	struct v4l2_device      v4l2_dev;
+-	atomic_t                num_channels;
++	refcount_t		num_channels;
+ 	int			frames;
+ 	struct mutex		lock;	/* channels[].vdev.lock */
+ 	struct mutex		cmdlock; /* protects cmdbuf */
+@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
+ 		container_of(vdev, struct s2255_vc, vdev);
+ 
+ 	dprintk(dev, 4, "%s, chnls: %d\n", __func__,
+-		atomic_read(&dev->num_channels));
++		refcount_read(&dev->num_channels));
+ 
+ 	v4l2_ctrl_handler_free(&vc->hdl);
+ 
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	return;
+ }
+@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 				"failed to register video device!\n");
+ 			break;
+ 		}
+-		atomic_inc(&dev->num_channels);
++		refcount_inc(&dev->num_channels);
+ 		v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ 			  video_device_node_name(&vc->vdev));
+ 
+@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 	pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+ 		S2255_VERSION);
+ 	/* if no channels registered, return error and probe will fail*/
+-	if (atomic_read(&dev->num_channels) == 0) {
++	if (refcount_read(&dev->num_channels) == 0) {
+ 		v4l2_device_unregister(&dev->v4l2_dev);
+ 		return ret;
+ 	}
+-	if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
++	if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
+ 		pr_warn("s2255: Not all channels available.\n");
+ 	return 0;
+ }
+@@ -2221,7 +2221,7 @@ static int s2255_probe(struct usb_interface *interface,
+ 		goto errorFWDATA1;
+ 	}
+ 
+-	atomic_set(&dev->num_channels, 0);
++	refcount_set(&dev->num_channels, 0);
+ 	dev->pid = id->idProduct;
+ 	dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
+ 	if (!dev->fw_data)
+@@ -2341,12 +2341,12 @@ static void s2255_disconnect(struct usb_interface *interface)
+ {
+ 	struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
+ 	int i;
+-	int channels = atomic_read(&dev->num_channels);
++	int channels = refcount_read(&dev->num_channels);
+ 	mutex_lock(&dev->lock);
+ 	v4l2_device_disconnect(&dev->v4l2_dev);
+ 	mutex_unlock(&dev->lock);
+ 	/*see comments in the uvc_driver.c usb disconnect function */
+-	atomic_inc(&dev->num_channels);
++	refcount_inc(&dev->num_channels);
+ 	/* unregister each video device. */
+ 	for (i = 0; i < channels; i++)
+ 		video_unregister_device(&dev->vc[i].vdev);
+@@ -2359,7 +2359,7 @@ static void s2255_disconnect(struct usb_interface *interface)
+ 		dev->vc[i].vidstatus_ready = 1;
+ 		wake_up(&dev->vc[i].wait_vidstatus);
+ 	}
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	dev_info(&interface->dev, "%s\n", __func__);
+ }
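The s2255 channel counter is really an object lifetime refcount, so it moves from atomic_t to refcount_t. The operations map one-to-one (atomic_inc -> refcount_inc, atomic_dec_and_test -> refcount_dec_and_test), but refcount_t saturates and WARNs on overflow or underflow instead of silently wrapping, turning a refcounting bug into a leak rather than a potential use-after-free. The canonical get/put shape, as a sketch (release() is a hypothetical destructor):

#include <linux/refcount.h>

struct obj { refcount_t users; };

static void release(struct obj *o);	/* hypothetical destructor */

static void obj_get(struct obj *o)
{
	refcount_inc(&o->users);	/* WARNs and saturates on overflow */
}

static void obj_put(struct obj *o)
{
	/* WARNs on underflow; frees exactly once, on the last put. */
	if (refcount_dec_and_test(&o->users))
		release(o);
}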
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index d7dbbd469b892..53e16d39af4bf 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1093,28 +1093,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 				   unsigned int offset_in_page)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+-	/* Make sure the offset is less than the actual page size. */
+-	if (offset_in_page > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	/* Bypass all checks during NAND identification */
++	if (likely(!ident_stage)) {
++		/* Make sure the offset is less than the actual page size. */
++		if (offset_in_page > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/*
+-	 * On small page NANDs, there's a dedicated command to access the OOB
+-	 * area, and the column address is relative to the start of the OOB
+-	 * area, not the start of the page. Adjust the address accordingly.
+-	 */
+-	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+-		offset_in_page -= mtd->writesize;
++		/*
++		 * On small page NANDs, there's a dedicated command to access the OOB
++		 * area, and the column address is relative to the start of the OOB
++		 * area, not the start of the page. Adjust the address accordingly.
++		 */
++		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
++			offset_in_page -= mtd->writesize;
+ 
+-	/*
+-	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+-	 * wide, then it must be divided by 2.
+-	 */
+-	if (chip->options & NAND_BUSWIDTH_16) {
+-		if (WARN_ON(offset_in_page % 2))
+-			return -EINVAL;
++		/*
++		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
++		 * wide, then it must be divided by 2.
++		 */
++		if (chip->options & NAND_BUSWIDTH_16) {
++			if (WARN_ON(offset_in_page % 2))
++				return -EINVAL;
+ 
+-		offset_in_page /= 2;
++			offset_in_page /= 2;
++		}
+ 	}
+ 
+ 	addrs[0] = offset_in_page;
+@@ -1123,7 +1127,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ 	 * need 2
+ 	 */
+-	if (mtd->writesize <= 512)
++	if (!ident_stage && mtd->writesize <= 512)
+ 		return 1;
+ 
+ 	addrs[1] = offset_in_page >> 8;
+@@ -1436,16 +1440,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
+ 			       unsigned int len, bool force_8bit)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+ 	if (len && !buf)
+ 		return -EINVAL;
+ 
+-	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	if (!ident_stage) {
++		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/* Small page NANDs do not support column change. */
+-	if (mtd->writesize <= 512)
+-		return -ENOTSUPP;
++		/* Small page NANDs do not support column change. */
++		if (mtd->writesize <= 512)
++			return -ENOTSUPP;
++	}
+ 
+ 	if (nand_has_exec_op(chip)) {
+ 		const struct nand_interface_config *conf =
+@@ -2173,7 +2180,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
+ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ 		      bool force_8bit, bool check_only)
+ {
+-	if (!len || !buf)
++	if (!len || (!check_only && !buf))
+ 		return -EINVAL;
+ 
+ 	if (nand_has_exec_op(chip)) {
+@@ -6301,6 +6308,7 @@ static const struct nand_ops rawnand_ops = {
+ static int nand_scan_tail(struct nand_chip *chip)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	struct nand_device *base = &chip->base;
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 	int ret, i;
+ 
+@@ -6445,9 +6453,13 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	if (!ecc->write_oob_raw)
+ 		ecc->write_oob_raw = ecc->write_oob;
+ 
+-	/* propagate ecc info to mtd_info */
++	/* Propagate ECC info to the generic NAND and MTD layers */
+ 	mtd->ecc_strength = ecc->strength;
++	if (!base->ecc.ctx.conf.strength)
++		base->ecc.ctx.conf.strength = ecc->strength;
+ 	mtd->ecc_step_size = ecc->size;
++	if (!base->ecc.ctx.conf.step_size)
++		base->ecc.ctx.conf.step_size = ecc->size;
+ 
+ 	/*
+ 	 * Set the number of read / write steps for one page depending on ECC
+@@ -6455,6 +6467,8 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	 */
+ 	if (!ecc->steps)
+ 		ecc->steps = mtd->writesize / ecc->size;
++	if (!base->ecc.ctx.nsteps)
++		base->ecc.ctx.nsteps = ecc->steps;
+ 	if (ecc->steps * ecc->size != mtd->writesize) {
+ 		WARN(1, "Invalid ECC parameters\n");
+ 		ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index 7baaef69d70ad..55580447633be 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -420,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ 	u32 rate, tc2rw, trwpw, trw2c;
+ 	u32 temp;
+ 
+-	if (target < 0)
+-		return 0;
+-
+ 	timings = nand_get_sdr_timings(conf);
+ 	if (IS_ERR(timings))
+ 		return -EOPNOTSUPP;
+ 
++	if (target < 0)
++		return 0;
++
+ 	if (IS_ERR(nfc->nfc_clk))
+ 		rate = clk_get_rate(nfc->ahb_clk);
+ 	else
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 4cdbc7e084f4b..fea1d87a97539 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1214,9 +1214,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ 	__be32 target;
+ 
+ 	if (newval->string) {
+-		if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+-			netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+-				   &target);
++		if (strlen(newval->string) < 1 ||
++		    !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
++			netdev_err(bond->dev, "invalid ARP target specified\n");
+ 			return ret;
+ 		}
+ 		if (newval->string[0] == '+')
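The bonding option value carries a leading '+' or '-' prefix, so parsing starts at newval->string + 1; with an empty string that pointer is one past the terminating NUL, and the old code also printed the never-initialized target in its error message. Checking the length first makes the prefix skip safe. A runnable illustration of the hazard (payload() is hypothetical):

#include <stdio.h>
#include <string.h>

/* The payload begins after a one-character prefix, so an empty input
 * must be rejected before computing s + 1.
 */
static const char *payload(const char *s)
{
	if (strlen(s) < 1)
		return NULL;	/* s + 1 would point past the NUL */
	return s + 1;
}

int main(void)
{
	printf("%s\n", payload("+192.0.2.1"));	/* 192.0.2.1 */
	printf("%s\n", payload("") ? payload("") : "(rejected)");
	return 0;
}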
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 8faf8a462c055..ffc3e93292501 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -125,6 +125,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
+ 
+ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ 	.quirks = 0,
++	.family = KVASER_LEAF,
+ 	.ops = &kvaser_usb_leaf_dev_ops,
+ };
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5a202edfec371..80741e506f422 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+ {
+ 	struct mv88e6xxx_mdio_bus *mdio_bus;
+ 
+-	mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+-				    list);
++	mdio_bus = list_first_entry_or_null(&chip->mdios,
++					    struct mv88e6xxx_mdio_bus, list);
+ 	if (!mdio_bus)
+ 		return NULL;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index e2a4e1088b7f4..9580ab83d387c 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1262,7 +1262,7 @@ enum {
+ 
+ struct bnx2x_fw_stats_req {
+ 	struct stats_query_header hdr;
+-	struct stats_query_entry query[FP_SB_MAX_E1x+
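++	/* Size the array for E2 chips, which support more fastpath
++	 * status blocks than E1x.
++	 */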
++	struct stats_query_entry query[FP_SB_MAX_E2 +
+ 		BNX2X_FIRST_QUEUE_QUERY_IDX];
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 0fab62a56f3b3..2b7936b3fb3ef 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -12436,7 +12436,11 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
+ 	if (!BNXT_NEW_RM(bp))
+ 		return true;
+ 
+-	if (hwr.vnic == bp->hw_resc.resv_vnics &&
++	/* Do not reduce VNIC and RSS ctx reservations.  There is a FW
++	 * issue that will mess up the default VNIC if we reduce the
++	 * reservations.
++	 */
++	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
+ 		return true;
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index 9aebfb843d9d1..ae90c09c56a89 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -8,6 +8,7 @@
+ #include "gve.h"
+ #include "gve_adminq.h"
+ #include "gve_dqo.h"
++#include "gve_utils.h"
+ 
+ static void gve_get_drvinfo(struct net_device *netdev,
+ 			    struct ethtool_drvinfo *info)
+@@ -165,6 +166,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 	struct stats *report_stats;
+ 	int *rx_qid_to_stats_idx;
+ 	int *tx_qid_to_stats_idx;
++	int num_stopped_rxqs = 0;
++	int num_stopped_txqs = 0;
+ 	struct gve_priv *priv;
+ 	bool skip_nic_stats;
+ 	unsigned int start;
+@@ -181,12 +184,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 					    sizeof(int), GFP_KERNEL);
+ 	if (!rx_qid_to_stats_idx)
+ 		return;
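++	/* Entries left at -1 below mean the NIC report carries no
++	 * stats for that queue because it was stopped.
++	 */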
++	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
++		rx_qid_to_stats_idx[ring] = -1;
++		if (!gve_rx_was_added_to_block(priv, ring))
++			num_stopped_rxqs++;
++	}
+ 	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
+ 					    sizeof(int), GFP_KERNEL);
+ 	if (!tx_qid_to_stats_idx) {
+ 		kfree(rx_qid_to_stats_idx);
+ 		return;
+ 	}
++	for (ring = 0; ring < num_tx_queues; ring++) {
++		tx_qid_to_stats_idx[ring] = -1;
++		if (!gve_tx_was_added_to_block(priv, ring))
++			num_stopped_txqs++;
++	}
++
+ 	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ 	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ 	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+@@ -260,7 +274,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 	/* For rx cross-reporting stats, start from nic rx stats in report */
+ 	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
+ 		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+-	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
++	/* The boundary between driver stats and NIC stats shifts if there are
++	 * stopped queues.
++	 */
++	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
++		NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
++	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
++		(priv->rx_cfg.num_queues - num_stopped_rxqs) +
+ 		base_stats_idx;
+ 	/* Preprocess the stats report for rx, map queue id to start index */
+ 	skip_nic_stats = false;
+@@ -274,6 +294,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			skip_nic_stats = true;
+ 			break;
+ 		}
++		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
++			net_err_ratelimited("Invalid rxq id in NIC stats\n");
++			continue;
++		}
+ 		rx_qid_to_stats_idx[queue_id] = stats_idx;
+ 	}
+ 	/* walk RX rings */
+@@ -308,11 +332,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			data[i++] = rx->rx_copybreak_pkt;
+ 			data[i++] = rx->rx_copied_pkt;
+ 			/* stats from NIC */
+-			if (skip_nic_stats) {
++			stats_idx = rx_qid_to_stats_idx[ring];
++			if (skip_nic_stats || stats_idx < 0) {
+ 				/* skip NIC rx stats */
+ 				i += NIC_RX_STATS_REPORT_NUM;
+ 			} else {
+-				stats_idx = rx_qid_to_stats_idx[ring];
+ 				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ 					u64 value =
+ 						be64_to_cpu(report_stats[stats_idx + j].value);
+@@ -338,7 +362,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 
+ 	/* For tx cross-reporting stats, start from nic tx stats in report */
+ 	base_stats_idx = max_stats_idx;
+-	max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
++	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
++		(num_tx_queues - num_stopped_txqs) +
+ 		max_stats_idx;
+ 	/* Preprocess the stats report for tx, map queue id to start index */
+ 	skip_nic_stats = false;
+@@ -352,6 +377,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			skip_nic_stats = true;
+ 			break;
+ 		}
++		if (queue_id < 0 || queue_id >= num_tx_queues) {
++			net_err_ratelimited("Invalid txq id in NIC stats\n");
++			continue;
++		}
+ 		tx_qid_to_stats_idx[queue_id] = stats_idx;
+ 	}
+ 	/* walk TX rings */
+@@ -383,11 +412,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			data[i++] = gve_tx_load_event_counter(priv, tx);
+ 			data[i++] = tx->dma_mapping_error;
+ 			/* stats from NIC */
+-			if (skip_nic_stats) {
++			stats_idx = tx_qid_to_stats_idx[ring];
++			if (skip_nic_stats || stats_idx < 0) {
+ 				/* skip NIC tx stats */
+ 				i += NIC_TX_STATS_REPORT_NUM;
+ 			} else {
+-				stats_idx = tx_qid_to_stats_idx[ring];
+ 				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ 					u64 value =
+ 						be64_to_cpu(report_stats[stats_idx + j].value);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 3692fce201959..334f652c60601 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6363,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ 		mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ 		ew32(EXTCNF_CTRL, mac_data);
+ 
+-		/* Enable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data |= BIT(22);
+-		ew32(FEXTNVM7, mac_data);
+-
+ 		/* Disable disconnected cable conditioning for Power Gating */
+ 		mac_data = er32(DPGFR);
+ 		mac_data |= BIT(2);
+ 		ew32(DPGFR, mac_data);
+ 
+-		/* Don't wake from dynamic Power Gating with clock request */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data |= BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+-		/* Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data &= ~BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Enable K1 off to enable mPHY Power Gating */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data |= BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Enable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data |= BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+ 		/* Enable the Dynamic Clock Gating in the DMA and MAC */
+ 		mac_data = er32(CTRL_EXT);
+ 		mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ 		ew32(CTRL_EXT, mac_data);
+-
+-		/* No MAC DPG gating SLP_S0 in modern standby
+-		 * Switch the logic of the lanphypc to use PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data |= BIT(7);
+-		ew32(FEXTNVM5, mac_data);
+ 	}
+ 
++	/* Enable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data |= BIT(22);
++	ew32(FEXTNVM7, mac_data);
++
++	/* Don't wake from dynamic Power Gating with clock request */
++	mac_data = er32(FEXTNVM12);
++	mac_data |= BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data &= ~BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Enable K1 off to enable mPHY Power Gating */
++	mac_data = er32(FEXTNVM6);
++	mac_data |= BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Enable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data |= BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* No MAC DPG gating SLP_S0 in modern standby
++	 * Switch the logic of the lanphypc to use PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data |= BIT(7);
++	ew32(FEXTNVM5, mac_data);
++
+ 	/* Disable the time synchronization clock */
+ 	mac_data = er32(FEXTNVM7);
+ 	mac_data |= BIT(31);
+@@ -6498,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	} else {
+ 		/* Request driver unconfigure the device from S0ix */
+ 
+-		/* Disable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data &= 0xFFBFFFFF;
+-		ew32(FEXTNVM7, mac_data);
+-
+-		/* Disable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data &= ~BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+-		/* Disable K1 off */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data &= ~BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Disable Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data |= BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Cancel not waking from dynamic
+-		 * Power Gating with clock request
+-		 */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data &= ~BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+ 		/* Cancel disable disconnected cable conditioning
+ 		 * for Power Gating
+ 		 */
+@@ -6537,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 		mac_data &= 0xFFF7FFFF;
+ 		ew32(CTRL_EXT, mac_data);
+ 
+-		/* Revert the lanphypc logic to use the internal Gbe counter
+-		 * and not the PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data &= 0xFFFFFF7F;
+-		ew32(FEXTNVM5, mac_data);
+-
+ 		/* Enable the periodic inband message,
+ 		 * Request PCIe clock in K1 page770_17[10:9] =01b
+ 		 */
+@@ -6581,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	mac_data &= ~BIT(31);
+ 	mac_data |= BIT(0);
+ 	ew32(FEXTNVM7, mac_data);
++
++	/* Disable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data &= 0xFFBFFFFF;
++	ew32(FEXTNVM7, mac_data);
++
++	/* Disable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data &= ~BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* Disable K1 off */
++	mac_data = er32(FEXTNVM6);
++	mac_data &= ~BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Disable Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data |= BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Cancel not waking from dynamic
++	 * Power Gating with clock request
++	 */
++	mac_data = er32(FEXTNVM12);
++	mac_data &= ~BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Revert the lanphypc logic to use the internal Gbe counter
++	 * and not the PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data &= 0xFFFFFF7F;
++	ew32(FEXTNVM5, mac_data);
+ }
+ 
+ static int e1000e_pm_freeze(struct device *dev)
+diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
+index e4c2c1bff6c08..b7aa6812510a4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hwmon.c
++++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
+@@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+ 
+ 	unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+ 
+-	return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
++	return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+ };
+ 
+ void ice_hwmon_init(struct ice_pf *pf)
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index c11eba07283c6..f46d879c62d26 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1578,6 +1578,10 @@ void ice_ptp_extts_event(struct ice_pf *pf)
+ 	u8 chan, tmr_idx;
+ 	u32 hi, lo;
+ 
++	/* Don't process timestamp events if PTP is not ready */
++	if (pf->ptp.state != ICE_PTP_READY)
++		return;
++
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 	/* Event time is captured by one of the two matched registers
+ 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
+@@ -1603,27 +1607,33 @@ void ice_ptp_extts_event(struct ice_pf *pf)
+ /**
+  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
+  * @pf: Board private structure
+- * @ena: true to enable; false to disable
+  * @chan: GPIO channel (0-3)
+- * @gpio_pin: GPIO pin
+- * @extts_flags: request flags from the ptp_extts_request.flags
++ * @config: desired EXTTS configuration
++ * @store: if true, store the configuration so it can be restored after reset
++ *
++ * Configure an external timestamp event on the requested channel.
++ *
++ * Return: 0 on success, -EOPNOTSUPP on unsupported flags
+  */
+-static int
+-ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+-		  unsigned int extts_flags)
++static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
++			     struct ice_extts_channel *config, bool store)
+ {
+ 	u32 func, aux_reg, gpio_reg, irq_reg;
+ 	struct ice_hw *hw = &pf->hw;
+ 	u8 tmr_idx;
+ 
+-	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
+-		return -EINVAL;
++	/* Reject requests with unsupported flags */
++	if (config->flags & ~(PTP_ENABLE_FEATURE |
++			      PTP_RISING_EDGE |
++			      PTP_FALLING_EDGE |
++			      PTP_STRICT_FLAGS))
++		return -EOPNOTSUPP;
+ 
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 
+ 	irq_reg = rd32(hw, PFINT_OICR_ENA);
+ 
+-	if (ena) {
++	if (config->ena) {
+ 		/* Enable the interrupt */
+ 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
+ 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
+@@ -1632,9 +1642,9 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+ #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
+ 
+ 		/* set event level to requested edge */
+-		if (extts_flags & PTP_FALLING_EDGE)
++		if (config->flags & PTP_FALLING_EDGE)
+ 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
+-		if (extts_flags & PTP_RISING_EDGE)
++		if (config->flags & PTP_RISING_EDGE)
+ 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
+ 
+ 		/* Write GPIO CTL reg.
+@@ -1655,11 +1665,51 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+ 
+ 	wr32(hw, PFINT_OICR_ENA, irq_reg);
+ 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
+-	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
++	wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
++
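++	/* Remember the configuration so it can be replayed after a reset */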
++	if (store)
++		memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
+ 
+ 	return 0;
+ }
+ 
++/**
++ * ice_ptp_disable_all_extts - Disable all EXTTS channels
++ * @pf: Board private structure
++ */
++static void ice_ptp_disable_all_extts(struct ice_pf *pf)
++{
++	struct ice_extts_channel extts_cfg = {};
++	int i;
++
++	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
++		if (pf->ptp.extts_channels[i].ena) {
++			extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
++			extts_cfg.ena = false;
++			ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
++		}
++	}
++
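++	/* Wait for any in-flight EXTTS interrupt handler to finish */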
++	synchronize_irq(pf->oicr_irq.virq);
++}
++
++/**
++ * ice_ptp_enable_all_extts - Enable all EXTTS channels
++ * @pf: Board private structure
++ *
++ * Called during reset to restore user configuration.
++ */
++static void ice_ptp_enable_all_extts(struct ice_pf *pf)
++{
++	int i;
++
++	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
++		if (pf->ptp.extts_channels[i].ena)
++			ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
++					  false);
++	}
++}
++
+ /**
+  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+  * @pf: Board private structure
+@@ -1678,6 +1728,9 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
+ 	u32 func, val, gpio_pin;
+ 	u8 tmr_idx;
+ 
++	if (config && config->flags & ~PTP_PEROUT_PHASE)
++		return -EOPNOTSUPP;
++
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 
+ 	/* 0. Reset mode & out_en in AUX_OUT */
+@@ -1814,17 +1867,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			 struct ptp_clock_request *rq, int on)
+ {
+ 	struct ice_pf *pf = ptp_info_to_pf(info);
+-	struct ice_perout_channel clk_cfg = {0};
+ 	bool sma_pres = false;
+ 	unsigned int chan;
+ 	u32 gpio_pin;
+-	int err;
+ 
+ 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ 		sma_pres = true;
+ 
+ 	switch (rq->type) {
+ 	case PTP_CLK_REQ_PEROUT:
++	{
++		struct ice_perout_channel clk_cfg = {};
++
+ 		chan = rq->perout.index;
+ 		if (sma_pres) {
+ 			if (chan == ice_pin_desc_e810t[SMA1].chan)
+@@ -1844,15 +1898,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			clk_cfg.gpio_pin = chan;
+ 		}
+ 
++		clk_cfg.flags = rq->perout.flags;
+ 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
+ 				   rq->perout.period.nsec);
+ 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
+ 				       rq->perout.start.nsec);
+ 		clk_cfg.ena = !!on;
+ 
+-		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+-		break;
++		return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
++	}
+ 	case PTP_CLK_REQ_EXTTS:
++	{
++		struct ice_extts_channel extts_cfg = {};
++
+ 		chan = rq->extts.index;
+ 		if (sma_pres) {
+ 			if (chan < ice_pin_desc_e810t[SMA2].chan)
+@@ -1868,14 +1926,15 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			gpio_pin = chan;
+ 		}
+ 
+-		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
+-					rq->extts.flags);
+-		break;
++		extts_cfg.flags = rq->extts.flags;
++		extts_cfg.gpio_pin = gpio_pin;
++		extts_cfg.ena = !!on;
++
++		return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
++	}
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+-
+-	return err;
+ }
+ 
+ /**
+@@ -1888,26 +1947,32 @@ static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
+ 				    struct ptp_clock_request *rq, int on)
+ {
+ 	struct ice_pf *pf = ptp_info_to_pf(info);
+-	struct ice_perout_channel clk_cfg = {0};
+-	int err;
+ 
+ 	switch (rq->type) {
+ 	case PTP_CLK_REQ_PPS:
++	{
++		struct ice_perout_channel clk_cfg = {};
++
++		clk_cfg.flags = rq->perout.flags;
+ 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ 		clk_cfg.period = NSEC_PER_SEC;
+ 		clk_cfg.ena = !!on;
+ 
+-		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+-		break;
++		return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
++	}
+ 	case PTP_CLK_REQ_EXTTS:
+-		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
+-					TIME_SYNC_PIN_INDEX, rq->extts.flags);
+-		break;
++	{
++		struct ice_extts_channel extts_cfg = {};
++
++		extts_cfg.flags = rq->extts.flags;
++		extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
++		extts_cfg.ena = !!on;
++
++		return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
++	}
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+-
+-	return err;
+ }
+ 
+ /**
+@@ -2745,6 +2810,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
+ 		ice_ptp_restart_all_phy(pf);
+ 	}
+ 
++	/* Re-enable all periodic outputs and external timestamp events */
++	ice_ptp_enable_all_clkout(pf);
++	ice_ptp_enable_all_extts(pf);
++
+ 	return 0;
+ }
+ 
+@@ -3300,6 +3369,8 @@ void ice_ptp_release(struct ice_pf *pf)
+ 
+ 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ 
++	ice_ptp_disable_all_extts(pf);
++
+ 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ 
+ 	ice_ptp_port_phy_stop(&pf->ptp.port);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
+index 3af20025043a6..e2af9749061ca 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -29,10 +29,17 @@ enum ice_ptp_pin_e810t {
+ struct ice_perout_channel {
+ 	bool ena;
+ 	u32 gpio_pin;
++	u32 flags;
+ 	u64 period;
+ 	u64 start_time;
+ };
+ 
++struct ice_extts_channel {
++	bool ena;
++	u32 gpio_pin;
++	u32 flags;
++};
++
+ /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
+  * is stored in a buffer of registers. Depending on the specific hardware,
+  * this buffer might be shared across multiple PHY ports.
+@@ -226,6 +233,7 @@ enum ice_ptp_state {
+  * @ext_ts_irq: the external timestamp IRQ in use
+  * @kworker: kwork thread for handling periodic work
+  * @perout_channels: periodic output data
++ * @extts_channels: channels for external timestamps
+  * @info: structure defining PTP hardware capabilities
+  * @clock: pointer to registered PTP clock device
+  * @tstamp_config: hardware timestamping configuration
+@@ -249,6 +257,7 @@ struct ice_ptp {
+ 	u8 ext_ts_irq;
+ 	struct kthread_worker *kworker;
+ 	struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
++	struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX];
+ 	struct ptp_clock_info info;
+ 	struct ptp_clock *clock;
+ 	struct hwtstamp_config tstamp_config;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index c54fd01ea635a..3d274599015be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
+ 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ 	struct net *net = dev_net(x->xso.dev);
++	u64 trailer_packets = 0, trailer_bytes = 0;
++	u64 replay_packets = 0, replay_bytes = 0;
++	u64 auth_packets = 0, auth_bytes = 0;
++	u64 success_packets, success_bytes;
+ 	u64 packets, bytes, lastuse;
++	size_t headers;
+ 
+ 	lockdep_assert(lockdep_is_held(&x->lock) ||
+ 		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+@@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
+ 		return;
+ 
+ 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+-		mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+-		x->stats.integrity_failed += packets;
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+-
+-		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
++		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
++				     &auth_packets, &lastuse);
++		x->stats.integrity_failed += auth_packets;
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
++
++		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
++				     &trailer_packets, &lastuse);
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
+ 	}
+ 
+ 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ 		return;
+ 
+-	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+-	x->curlft.packets += packets;
+-	x->curlft.bytes += bytes;
+-
+ 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+-		mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+-		x->stats.replay += packets;
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
++		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
++				     &replay_packets, &lastuse);
++		x->stats.replay += replay_packets;
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
+ 	}
++
++	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
++	success_packets = packets - auth_packets - trailer_packets - replay_packets;
++	x->curlft.packets += success_packets;
++	/* The NIC counts all bytes passed through flow steering and has no
++	 * ability to count only the payload size, which is what the SA
++	 * accounting needs.
++	 *
++	 * To overcome this HW limitation, approximate the payload size by
++	 * subtracting the always-present headers.
++	 */
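++	/* For example, an IPv4 flow subtracts 14 (ethhdr) + 20 (iphdr) =
++	 * 34 bytes per successfully processed packet.
++	 */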
++	headers = sizeof(struct ethhdr);
++	if (sa_entry->attrs.family == AF_INET)
++		headers += sizeof(struct iphdr);
++	else
++		headers += sizeof(struct ipv6hdr);
++
++	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
++	x->curlft.bytes += success_bytes - headers * success_packets;
+ }
+ 
+ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 981a3e058840d..cab1770aa476c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5732,6 +5732,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ 		kfree(priv->htb_qos_sq_stats[i]);
+ 	kvfree(priv->htb_qos_sq_stats);
+ 
++	if (priv->mqprio_rl) {
++		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
++		mlx5e_mqprio_rl_free(priv->mqprio_rl);
++	}
++
+ 	memset(priv, 0, sizeof(*priv));
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index 50d2ea3239798..a436ce895e45a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -6,6 +6,9 @@
+ #include "helper.h"
+ #include "ofld.h"
+ 
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
++
+ static bool
+ esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
+ 				 const struct mlx5_vport *vport)
+@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ {
+ 	struct mlx5_flow_act flow_act = {};
+ 	struct mlx5_flow_handle *flow_rule;
++	bool created = false;
+ 	int err = 0;
+ 
++	if (!vport->ingress.acl) {
++		err = acl_ingress_ofld_setup(esw, vport);
++		if (err)
++			return err;
++		created = true;
++	}
++
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ 	flow_act.fg = vport->ingress.offloads.drop_grp;
+ 	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ 	if (IS_ERR(flow_rule)) {
+ 		err = PTR_ERR(flow_rule);
+-		goto out;
++		goto err_out;
+ 	}
+ 
+ 	vport->ingress.offloads.drop_rule = flow_rule;
+-out:
++
++	return 0;
++err_out:
++	/* Only destroy the ingress ACL that was created in this function. */
++	if (created)
++		esw_acl_ingress_ofld_cleanup(esw, vport);
+ 	return err;
+ }
+ 
+@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+ 	}
+ }
+ 
+-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+-			       struct mlx5_vport *vport)
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
+ 	int num_ftes = 0;
+ 	int err;
+ 
+-	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+-	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
+-		return 0;
+-
+ 	esw_acl_ingress_allow_rule_destroy(vport);
+ 
+ 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ 	return err;
+ }
+ 
++int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
++{
++	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
++	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
++		return 0;
++
++	return acl_ingress_ofld_setup(esw, vport);
++}
++
+ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ 				  struct mlx5_vport *vport)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+index 025e0db983feb..b032d5a4b3b84 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+@@ -1484,6 +1484,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ 	vfree(types_info->data);
+ err_data_alloc:
+ 	kfree(types_info);
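++	/* Clear the stale pointer so the cleanup path cannot free it again */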
++	linecards->types_info = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index dcab638c57fe8..24c90d8f5a442 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -871,13 +871,13 @@ static void rswitch_tx_free(struct net_device *ndev)
+ 		dma_rmb();
+ 		skb = gq->skbs[gq->dirty];
+ 		if (skb) {
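++			/* Update the stats before the skb is unmapped and
++			 * freed; skb->len must not be read afterwards.
++			 */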
++			rdev->ndev->stats.tx_packets++;
++			rdev->ndev->stats.tx_bytes += skb->len;
+ 			dma_unmap_single(ndev->dev.parent,
+ 					 gq->unmap_addrs[gq->dirty],
+ 					 skb->len, DMA_TO_DEVICE);
+ 			dev_kfree_skb_any(gq->skbs[gq->dirty]);
+ 			gq->skbs[gq->dirty] = NULL;
+-			rdev->ndev->stats.tx_packets++;
+-			rdev->ndev->stats.tx_bytes += skb->len;
+ 		}
+ 		desc->desc.die_dt = DT_EEMPTY;
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index 65d7370b47d57..466c4002f00d4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -272,7 +272,7 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
+ 
+ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
+ 	.por = emac_v4_0_0_por,
+-	.num_por = ARRAY_SIZE(emac_v3_0_0_por),
++	.num_por = ARRAY_SIZE(emac_v4_0_0_por),
+ 	.rgmii_config_loopback_en = false,
+ 	.has_emac_ge_3 = true,
+ 	.link_clk_name = "phyaux",
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7c6fb14b55550..39e8340446c71 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7662,9 +7662,10 @@ int stmmac_dvr_probe(struct device *device,
+ #ifdef STMMAC_VLAN_TAG_USED
+ 	/* Both mac100 and gmac support receive VLAN tag detection */
+ 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+-	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+-	priv->hw->hw_vlan_en = true;
+-
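++	/* Hardware VLAN stripping is only implemented on dwmac4 cores */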
++	if (priv->plat->has_gmac4) {
++		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
++		priv->hw->hw_vlan_en = true;
++	}
+ 	if (priv->dma_cap.vlhash) {
+ 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index c09a6f7445754..db640ea63f034 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1959,6 +1959,7 @@ int wx_sw_init(struct wx *wx)
+ 	}
+ 
+ 	bitmap_zero(wx->state, WX_STATE_NBITS);
++	wx->misc_irq_domain = false;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 07ba3a270a14f..88e5e390770b5 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1686,6 +1686,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
+ 	}
+ 
+ 	pdev->irq = pci_irq_vector(pdev, 0);
++	wx->num_q_vectors = 1;
+ 
+ 	return 0;
+ }
+@@ -1996,7 +1997,8 @@ void wx_free_irq(struct wx *wx)
+ 	int vector;
+ 
+ 	if (!(pdev->msix_enabled)) {
+-		free_irq(pdev->irq, wx);
++		if (!wx->misc_irq_domain)
++			free_irq(pdev->irq, wx);
+ 		return;
+ 	}
+ 
+@@ -2011,7 +2013,7 @@ void wx_free_irq(struct wx *wx)
+ 		free_irq(entry->vector, q_vector);
+ 	}
+ 
+-	if (wx->mac.type == wx_mac_em)
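++	/* When the misc IRQ is handled through an IRQ domain (txgbe), it
++	 * is freed by txgbe_free_misc_irq() instead.
++	 */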
++	if (!wx->misc_irq_domain)
+ 		free_irq(wx->msix_entry->vector, wx);
+ }
+ EXPORT_SYMBOL(wx_free_irq);
+@@ -2026,6 +2028,9 @@ int wx_setup_isb_resources(struct wx *wx)
+ {
+ 	struct pci_dev *pdev = wx->pdev;
+ 
++	if (wx->isb_mem)
++		return 0;
++
+ 	wx->isb_mem = dma_alloc_coherent(&pdev->dev,
+ 					 sizeof(u32) * 4,
+ 					 &wx->isb_dma,
+@@ -2385,7 +2390,6 @@ static void wx_free_all_tx_resources(struct wx *wx)
+ 
+ void wx_free_resources(struct wx *wx)
+ {
+-	wx_free_isb_resources(wx);
+ 	wx_free_all_rx_resources(wx);
+ 	wx_free_all_tx_resources(wx);
+ }
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index 5aaf7b1fa2db9..0df7f5712b6f7 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -1058,6 +1058,7 @@ struct wx {
+ 	dma_addr_t isb_dma;
+ 	u32 *isb_mem;
+ 	u32 isb_tag[WX_ISB_MAX];
++	bool misc_irq_domain;
+ 
+ #define WX_MAX_RETA_ENTRIES 128
+ #define WX_RSS_INDIR_TBL_MAX 64
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index e894e01d030d1..af30ca0312b81 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -387,6 +387,7 @@ static int ngbe_open(struct net_device *netdev)
+ err_free_irq:
+ 	wx_free_irq(wx);
+ err_free_resources:
++	wx_free_isb_resources(wx);
+ 	wx_free_resources(wx);
+ 	return err;
+ }
+@@ -408,6 +409,7 @@ static int ngbe_close(struct net_device *netdev)
+ 
+ 	ngbe_down(wx);
+ 	wx_free_irq(wx);
++	wx_free_isb_resources(wx);
+ 	wx_free_resources(wx);
+ 	phylink_disconnect_phy(wx->phylink);
+ 	wx_control_hw(wx, false);
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+index b3e3605d1edb3..a4cf682dca650 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+@@ -27,57 +27,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
+ }
+ 
+ /**
+- * txgbe_intr - msi/legacy mode Interrupt Handler
+- * @irq: interrupt number
+- * @data: pointer to a network interface device structure
+- **/
+-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+-{
+-	struct wx_q_vector *q_vector;
+-	struct wx *wx  = data;
+-	struct pci_dev *pdev;
+-	u32 eicr;
+-
+-	q_vector = wx->q_vector[0];
+-	pdev = wx->pdev;
+-
+-	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+-	if (!eicr) {
+-		/* shared interrupt alert!
+-		 * the interrupt that we masked before the ICR read.
+-		 */
+-		if (netif_running(wx->netdev))
+-			txgbe_irq_enable(wx, true);
+-		return IRQ_NONE;        /* Not our interrupt */
+-	}
+-	wx->isb_mem[WX_ISB_VEC0] = 0;
+-	if (!(pdev->msi_enabled))
+-		wr32(wx, WX_PX_INTA, 1);
+-
+-	wx->isb_mem[WX_ISB_MISC] = 0;
+-	/* would disable interrupts here but it is auto disabled */
+-	napi_schedule_irqoff(&q_vector->napi);
+-
+-	/* re-enable link(maybe) and non-queue interrupts, no flush.
+-	 * txgbe_poll will re-enable the queue interrupts
+-	 */
+-	if (netif_running(wx->netdev))
+-		txgbe_irq_enable(wx, false);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
++ * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
+  * @wx: board private structure
+  *
+- * Allocate MSI-X vectors and request interrupts from the kernel.
++ * Allocate MSI-X queue vectors and request interrupts from the kernel.
+  **/
+-static int txgbe_request_msix_irqs(struct wx *wx)
++int txgbe_request_queue_irqs(struct wx *wx)
+ {
+ 	struct net_device *netdev = wx->netdev;
+ 	int vector, err;
+ 
++	if (!wx->pdev->msix_enabled)
++		return 0;
++
+ 	for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ 		struct wx_q_vector *q_vector = wx->q_vector[vector];
+ 		struct msix_entry *entry = &wx->msix_q_entries[vector];
+@@ -110,34 +72,6 @@ static int txgbe_request_msix_irqs(struct wx *wx)
+ 	return err;
+ }
+ 
+-/**
+- * txgbe_request_irq - initialize interrupts
+- * @wx: board private structure
+- *
+- * Attempt to configure interrupts using the best available
+- * capabilities of the hardware and kernel.
+- **/
+-int txgbe_request_irq(struct wx *wx)
+-{
+-	struct net_device *netdev = wx->netdev;
+-	struct pci_dev *pdev = wx->pdev;
+-	int err;
+-
+-	if (pdev->msix_enabled)
+-		err = txgbe_request_msix_irqs(wx);
+-	else if (pdev->msi_enabled)
+-		err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+-				  netdev->name, wx);
+-	else
+-		err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+-				  netdev->name, wx);
+-
+-	if (err)
+-		wx_err(wx, "request_irq failed, Error %d\n", err);
+-
+-	return err;
+-}
+-
+ static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+ {
+ 	txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+@@ -177,6 +111,36 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ };
+ 
+ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
++{
++	struct wx_q_vector *q_vector;
++	struct txgbe *txgbe = data;
++	struct wx *wx = txgbe->wx;
++	u32 eicr;
++
++	if (wx->pdev->msix_enabled)
++		return IRQ_WAKE_THREAD;
++
++	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
++	if (!eicr) {
++		/* Shared interrupt alert! This is not our interrupt;
++		 * re-enable the interrupts we masked before the ICR read.
++		 */
++		if (netif_running(wx->netdev))
++			txgbe_irq_enable(wx, true);
++		return IRQ_NONE;        /* Not our interrupt */
++	}
++	wx->isb_mem[WX_ISB_VEC0] = 0;
++	if (!(wx->pdev->msi_enabled))
++		wr32(wx, WX_PX_INTA, 1);
++
++	/* would disable interrupts here but it is auto disabled */
++	q_vector = wx->q_vector[0];
++	napi_schedule_irqoff(&q_vector->napi);
++
++	return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+ {
+ 	struct txgbe *txgbe = data;
+ 	struct wx *wx = txgbe->wx;
+@@ -223,6 +187,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
+ 
+ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ {
++	unsigned long flags = IRQF_ONESHOT;
+ 	struct wx *wx = txgbe->wx;
+ 	int hwirq, err;
+ 
+@@ -236,14 +201,17 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 		irq_create_mapping(txgbe->misc.domain, hwirq);
+ 
+ 	txgbe->misc.chip = txgbe_irq_chip;
+-	if (wx->pdev->msix_enabled)
++	if (wx->pdev->msix_enabled) {
+ 		txgbe->misc.irq = wx->msix_entry->vector;
+-	else
++	} else {
+ 		txgbe->misc.irq = wx->pdev->irq;
++		if (!wx->pdev->msi_enabled)
++			flags |= IRQF_SHARED;
++	}
+ 
+-	err = request_threaded_irq(txgbe->misc.irq, NULL,
+-				   txgbe_misc_irq_handle,
+-				   IRQF_ONESHOT,
++	err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
++				   txgbe_misc_irq_thread_fn,
++				   flags,
+ 				   wx->netdev->name, txgbe);
+ 	if (err)
+ 		goto del_misc_irq;
+@@ -256,6 +224,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 	if (err)
+ 		goto free_gpio_irq;
+ 
++	wx->misc_irq_domain = true;
++
+ 	return 0;
+ 
+ free_gpio_irq:
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+index b77945e7a0f26..e6285b94625ea 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+@@ -2,6 +2,6 @@
+ /* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+ 
+ void txgbe_irq_enable(struct wx *wx, bool queues);
+-int txgbe_request_irq(struct wx *wx);
++int txgbe_request_queue_irqs(struct wx *wx);
+ void txgbe_free_misc_irq(struct txgbe *txgbe);
+ int txgbe_setup_misc_irq(struct txgbe *txgbe);
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 8c7a74981b907..ca74d9422065a 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -294,9 +294,9 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ 	wx_configure(wx);
+ 
+-	err = txgbe_request_irq(wx);
++	err = txgbe_request_queue_irqs(wx);
+ 	if (err)
+-		goto err_free_isb;
++		goto err_free_resources;
+ 
+ 	/* Notify the stack of the actual queue counts. */
+ 	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+@@ -313,8 +313,8 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ err_free_irq:
+ 	wx_free_irq(wx);
+-err_free_isb:
+-	wx_free_isb_resources(wx);
++err_free_resources:
++	wx_free_resources(wx);
+ err_reset:
+ 	txgbe_reset(wx);
+ 
+@@ -729,6 +729,7 @@ static void txgbe_remove(struct pci_dev *pdev)
+ 
+ 	txgbe_remove_phy(txgbe);
+ 	txgbe_free_misc_irq(txgbe);
++	wx_free_isb_resources(wx);
+ 
+ 	pci_release_selected_regions(pdev,
+ 				     pci_select_bars(pdev, IORESOURCE_MEM));
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 536bd6564f8b8..dade51cf599c6 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ 	skb->protocol = eth_type_trans(skb, ndev);
+ 	skb->ip_summed = CHECKSUM_NONE;
+ 
+-	if (__netif_rx(skb) == NET_RX_DROP) {
++	if (netif_rx(skb) == NET_RX_DROP) {
+ 		ndev->stats.rx_errors++;
+ 		ndev->stats.rx_dropped++;
+ 	} else {
+diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
+index 1c19ae74ad2b4..4830b25e6c7d3 100644
+--- a/drivers/net/phy/aquantia/aquantia.h
++++ b/drivers/net/phy/aquantia/aquantia.h
+@@ -6,6 +6,9 @@
+  * Author: Heiner Kallweit <hkallweit1@gmail.com>
+  */
+ 
++#ifndef AQUANTIA_H
++#define AQUANTIA_H
++
+ #include <linux/device.h>
+ #include <linux/phy.h>
+ 
+@@ -120,3 +123,5 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+ #endif
+ 
+ int aqr_firmware_load(struct phy_device *phydev);
++
++#endif /* AQUANTIA_H */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index fb8bd50eb7de8..5521c0ea5b261 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -257,7 +257,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ 	};
+ 	u16 ntlv;
+ 
+-	ptlv = skb_put(skb, len);
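++	/* Zero the whole TLV so no uninitialized bytes reach the firmware */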
++	ptlv = skb_put_zero(skb, len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+@@ -1670,7 +1670,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	set_bit(MT76_HW_SCANNING, &phy->state);
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 	req->bss_idx = mvif->idx;
+@@ -1798,7 +1798,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ 
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 	req->version = 1;
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 
+@@ -2321,7 +2321,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
++	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
+ 							 sizeof(*gtk_tlv));
+ 	gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ 	gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+@@ -2446,7 +2446,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
++	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
+ 	ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ 	ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ 	ptlv->data_len = pattern->pattern_len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index d90f98c500399..b7157bdb3103f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -424,7 +424,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ 		.len = cpu_to_le16(sub_len),
+ 	};
+ 
+-	ptlv = skb_put(skb, sub_len);
++	ptlv = skb_put_zero(skb, sub_len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	le16_add_cpu(sub_ntlv, 1);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+index 9bd953586b041..62c03d088925c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+@@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val)
+ 	if (val > MT_RX_SEL2)
+ 		return -EINVAL;
+ 
++	if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
++		dev_err(dev->mt76.dev, "Background radar is not enabled\n");
++		return -EINVAL;
++	}
++
+ 	return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
+ 				  val, 0, 0);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index e86c05d0eecc9..bc3b2babb094d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -355,7 +355,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+ 	if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ 		return;
+ 
+-	if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
++	if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
++		return;
++
++	if (r->band_idx == MT_RX_SEL2)
+ 		mphy = dev->rdd2_phy->mt76;
+ 	else
+ 		mphy = dev->mt76.phys[r->band_idx];
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index f1085ccb7eedc..7719e4f3e2a23 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -382,7 +382,8 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 	struct ieee80211_p2p_noa_attr noa_attr;
+ 	const struct cfg80211_bss_ies *ies;
+ 	struct wilc_join_bss_param *param;
+-	u8 rates_len = 0, ies_len;
++	u8 rates_len = 0;
++	int ies_len;
+ 	int ret;
+ 
+ 	param = kzalloc(sizeof(*param), GFP_KERNEL);
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 6c75ebbb21caa..ef86389545ffb 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -4646,6 +4646,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ 	u8 i, idx;
+ 
+ 	sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
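++	/* Without 6 GHz support, mark every 6 GHz channel as prohibited */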
++	if (!sband) {
++		option->prohib_chan = U64_MAX;
++		return;
++	}
+ 
+ 	for (i = 0; i < sband->n_channels; i++) {
+ 		chan = &sband->channels[i];
+diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
+index 590b038e449e5..6b89d596ba9af 100644
+--- a/drivers/nfc/virtual_ncidev.c
++++ b/drivers/nfc/virtual_ncidev.c
+@@ -125,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
+ 		kfree_skb(skb);
+ 		return -EFAULT;
+ 	}
++	if (strnlen(skb->data, count) != count) {
++		kfree_skb(skb);
++		return -EINVAL;
++	}
+ 
+ 	nci_recv_frame(vdev->ndev, skb);
+ 	return count;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index a4e46eb20be63..1bee176fd850e 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -596,7 +596,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ 		int node, srcu_idx;
+ 
+ 		srcu_idx = srcu_read_lock(&head->srcu);
+-		for_each_node(node)
++		for_each_online_node(node)
+ 			__nvme_find_path(head, node);
+ 		srcu_read_unlock(&head->srcu, srcu_idx);
+ 	}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 710043086dffa..102a9fb0c65ff 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 		struct bio_vec bv = req_bvec(req);
+ 
+ 		if (!is_pci_p2pdma_page(bv.bv_page)) {
+-			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
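++			/* Use the offset within the NVMe controller page,
++			 * not within the (possibly larger) system page, to
++			 * decide whether two PRP entries are enough.
++			 */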
++			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++			     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+ 
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 2fde22323622e..06f0c587f3437 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -818,6 +818,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+ 	percpu_ref_exit(&sq->ref);
+ 	nvmet_auth_sq_free(sq);
+ 
++	/*
++	 * We must re-read sq->ctrl after waiting for inflight IO to
++	 * complete, because an admin connect may have sneaked in after we
++	 * stored sq->ctrl locally but before we killed the percpu_ref. The
++	 * admin connect allocates and assigns sq->ctrl, which now needs a
++	 * final ref put, as this ctrl is going away.
++	 */
++	ctrl = sq->ctrl;
++
+ 	if (ctrl) {
+ 		/*
+ 		 * The teardown flow may take some time, and the host may not
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 16e941449b144..7d345009c3270 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3276,7 +3276,7 @@ static const char *find_hci_method(acpi_handle handle)
+  */
+ #define QUIRK_HCI_HOTKEY_QUICKSTART		BIT(1)
+ 
+-static const struct dmi_system_id toshiba_dmi_quirks[] = {
++static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ 	{
+ 	 /* Toshiba Portégé R700 */
+ 	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3311,8 +3311,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	struct toshiba_acpi_dev *dev;
+ 	const char *hci_method;
+ 	u32 dummy;
+-	const struct dmi_system_id *dmi_id;
+-	long quirks = 0;
+ 	int ret = 0;
+ 
+ 	if (toshiba_acpi)
+@@ -3465,16 +3463,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	}
+ #endif
+ 
+-	dmi_id = dmi_first_match(toshiba_dmi_quirks);
+-	if (dmi_id)
+-		quirks = (long)dmi_id->driver_data;
+-
+-	if (turn_on_panel_on_resume == -1)
+-		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+-
+-	if (hci_hotkey_quickstart == -1)
+-		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+-
+ 	toshiba_wwan_available(dev);
+ 	if (dev->wwan_supported)
+ 		toshiba_acpi_setup_wwan_rfkill(dev);
+@@ -3624,10 +3612,27 @@ static struct acpi_driver toshiba_acpi_driver = {
+ 	.drv.pm	= &toshiba_acpi_pm,
+ };
+ 
++static void __init toshiba_dmi_init(void)
++{
++	const struct dmi_system_id *dmi_id;
++	long quirks = 0;
++
++	dmi_id = dmi_first_match(toshiba_dmi_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
++	if (turn_on_panel_on_resume == -1)
++		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++	if (hci_hotkey_quickstart == -1)
++		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
++}
++
+ static int __init toshiba_acpi_init(void)
+ {
+ 	int ret;
+ 
++	toshiba_dmi_init();
+ 	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ 	if (!toshiba_proc_dir) {
+ 		pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index c6a10ec2c83f6..89e1be0815b52 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -897,6 +897,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
+ 	.properties	= schneider_sct101ctm_props,
+ };
+ 
++static const struct property_entry globalspace_solt_ivw116_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data globalspace_solt_ivw116_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= globalspace_solt_ivw116_props,
++};
++
+ static const struct property_entry techbite_arc_11_6_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+@@ -1385,6 +1401,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ 		},
+ 	},
++	{
++		/* Jumper EZpad 6s Pro */
++		.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
++			/* Above matches are too generic, add bios match */
++			DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
++			DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
++		},
++	},
+ 	{
+ 		/* Jumper EZpad 6 m4 */
+ 		.driver_data = (void *)&jumper_ezpad_6_m4_data,
+@@ -1624,6 +1651,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ 		},
+ 	},
++	{
++		/* GlobalSpace SoLT IVW 11.6" */
++		.driver_data = (void *)&globalspace_solt_ivw116_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
++		},
++	},
+ 	{
+ 		/* Techbite Arc 11.6 */
+ 		.driver_data = (void *)&techbite_arc_11_6_data,
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 180a008d38eaa..4118b64781cb5 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -4906,7 +4906,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 				ccw++;
+ 			if (dst) {
+ 				if (ccw->flags & CCW_FLAG_IDA)
+-					cda = *((char **)dma32_to_virt(ccw->cda));
++					cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
+ 				else
+ 					cda = dma32_to_virt(ccw->cda);
+ 				if (dst != cda) {
+@@ -5525,7 +5525,7 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
+ 
+ 		/* get pointer to data (consider IDALs) */
+ 		if (from->flags & CCW_FLAG_IDA)
+-			datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
++			datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
+ 		else
+ 			datap = dma32_to_virt(from->cda);
+ 
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
+index 361e9bd752570..9f2023a077c20 100644
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -585,7 +585,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 				ccw++;
+ 			if (dst) {
+ 				if (ccw->flags & CCW_FLAG_IDA)
+-					cda = *((char **)dma32_to_virt(ccw->cda));
++					cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
+ 				else
+ 					cda = dma32_to_virt(ccw->cda);
+ 				if (dst != cda) {
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 6e5c508b1e07c..5f6e102256276 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -490,13 +490,14 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
+ 			      struct channel_program *cp)
+ {
+ 	struct ccwchain *iter;
+-	u32 cda, ccw_head;
++	u32 offset, ccw_head;
+ 
+ 	list_for_each_entry(iter, &cp->ccwchain_list, next) {
+ 		ccw_head = iter->ch_iova;
+ 		if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
+-			cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+-			ccw->cda = u32_to_dma32(cda);
++			/* Calculate offset of TIC target */
++			offset = dma32_to_u32(ccw->cda) - ccw_head;
++			ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset);
+ 			return 0;
+ 		}
+ 	}
+@@ -914,7 +915,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
+ 	 * in the ioctl directly. Path status changes etc.
+ 	 */
+ 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+-		ccw_head = (u32)(u64)chain->ch_ccw;
++		ccw_head = dma32_to_u32(virt_to_dma32(chain->ch_ccw));
+ 		/*
+ 		 * On successful execution, cpa points just beyond the end
+ 		 * of the chain.
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index dccf664a3d957..ffc0b5db55c29 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1359,10 +1359,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = cca_genseckey(kgs.cardnr, kgs.domain,
+ 				   kgs.keytype, kgs.seckey.seckey);
+ 		pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
++			rc = -EFAULT;
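++		/* Always wipe key material from stack memory */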
++		memzero_explicit(&kgs, sizeof(kgs));
+ 		break;
+ 	}
+ 	case PKEY_CLR2SECK: {
+@@ -1374,10 +1373,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ 				    kcs.clrkey.clrkey, kcs.seckey.seckey);
+ 		pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
++			rc = -EFAULT;
+ 		memzero_explicit(&kcs, sizeof(kcs));
+ 		break;
+ 	}
+@@ -1392,10 +1389,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				     ksp.seckey.seckey, ksp.protkey.protkey,
+ 				     &ksp.protkey.len, &ksp.protkey.type);
+ 		pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
++			rc = -EFAULT;
++		memzero_explicit(&ksp, sizeof(ksp));
+ 		break;
+ 	}
+ 	case PKEY_CLR2PROTK: {
+@@ -1409,10 +1405,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				      kcp.protkey.protkey,
+ 				      &kcp.protkey.len, &kcp.protkey.type);
+ 		pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
++			rc = -EFAULT;
+ 		memzero_explicit(&kcp, sizeof(kcp));
+ 		break;
+ 	}
+@@ -1441,10 +1435,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
+ 				    &ksp.protkey.len, &ksp.protkey.type);
+ 		pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
++			rc = -EFAULT;
++		memzero_explicit(&ksp, sizeof(ksp));
+ 		break;
+ 	}
+ 	case PKEY_VERIFYKEY: {
+@@ -1456,10 +1449,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
+ 				    &kvk.keysize, &kvk.attributes);
+ 		pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
++			rc = -EFAULT;
++		memzero_explicit(&kvk, sizeof(kvk));
+ 		break;
+ 	}
+ 	case PKEY_GENPROTK: {
+@@ -1472,10 +1464,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
+ 				     &kgp.protkey.len, &kgp.protkey.type);
+ 		pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ugp, &kgp, sizeof(kgp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
++			rc = -EFAULT;
++		memzero_explicit(&kgp, sizeof(kgp));
+ 		break;
+ 	}
+ 	case PKEY_VERIFYPROTK: {
+@@ -1487,6 +1478,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_verifyprotkey(kvp.protkey.protkey,
+ 					kvp.protkey.len, kvp.protkey.type);
+ 		pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
++		memzero_explicit(&kvp, sizeof(kvp));
+ 		break;
+ 	}
+ 	case PKEY_KBLOB2PROTK: {
+@@ -1503,12 +1495,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
+ 				       &ktp.protkey.len, &ktp.protkey.type);
+ 		pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
+-		if (rc)
+-			break;
+-		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+-			return -EFAULT;
++		kfree_sensitive(kkey);
++		if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
++			rc = -EFAULT;
++		memzero_explicit(&ktp, sizeof(ktp));
+ 		break;
+ 	}
+ 	case PKEY_GENSECK2: {
+@@ -1534,23 +1524,23 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+ 		if (rc) {
+-			kfree(kkey);
++			kfree_sensitive(kkey);
+ 			break;
+ 		}
+ 		if (kgs.key) {
+ 			if (kgs.keylen < klen) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(kgs.key, kkey, klen)) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
+ 				return -EFAULT;
+ 			}
+ 		}
+ 		kgs.keylen = klen;
+ 		if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ 			rc = -EFAULT;
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		break;
+ 	}
+ 	case PKEY_CLR2SECK2: {
+@@ -1563,11 +1553,14 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ 			return -EFAULT;
+ 		apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+-		if (IS_ERR(apqns))
++		if (IS_ERR(apqns)) {
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			return PTR_ERR(apqns);
++		}
+ 		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			return -ENOMEM;
+ 		}
+ 		rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
+@@ -1576,16 +1569,19 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+ 		if (rc) {
+-			kfree(kkey);
++			kfree_sensitive(kkey);
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			break;
+ 		}
+ 		if (kcs.key) {
+ 			if (kcs.keylen < klen) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
++				memzero_explicit(&kcs, sizeof(kcs));
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(kcs.key, kkey, klen)) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
++				memzero_explicit(&kcs, sizeof(kcs));
+ 				return -EFAULT;
+ 			}
+ 		}
+@@ -1593,7 +1589,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ 			rc = -EFAULT;
+ 		memzero_explicit(&kcs, sizeof(kcs));
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		break;
+ 	}
+ 	case PKEY_VERIFYKEY2: {
+@@ -1610,7 +1606,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				     &kvk.cardnr, &kvk.domain,
+ 				     &kvk.type, &kvk.size, &kvk.flags);
+ 		pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc)
+ 			break;
+ 		if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+@@ -1640,12 +1636,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					&ktp.protkey.type);
+ 		pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
+-		if (rc)
+-			break;
+-		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+-			return -EFAULT;
++		kfree_sensitive(kkey);
++		if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
++			rc = -EFAULT;
++		memzero_explicit(&ktp, sizeof(ktp));
+ 		break;
+ 	}
+ 	case PKEY_APQNS4K: {
+@@ -1673,7 +1667,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
+ 				    apqns, &nr_apqns);
+ 		pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc && rc != -ENOSPC) {
+ 			kfree(apqns);
+ 			break;
+@@ -1759,7 +1753,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		protkey = kmalloc(protkeylen, GFP_KERNEL);
+ 		if (!protkey) {
+ 			kfree(apqns);
+-			kfree(kkey);
++			kfree_sensitive(kkey);
+ 			return -ENOMEM;
+ 		}
+ 		rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
+@@ -1767,23 +1761,22 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					protkey, &protkeylen, &ktp.pkeytype);
+ 		pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ 		kfree(apqns);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc) {
+-			kfree(protkey);
++			kfree_sensitive(protkey);
+ 			break;
+ 		}
+ 		if (ktp.pkey && ktp.pkeylen) {
+ 			if (protkeylen > ktp.pkeylen) {
+-				kfree(protkey);
++				kfree_sensitive(protkey);
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+-				kfree(protkey);
++				kfree_sensitive(protkey);
+ 				return -EFAULT;
+ 			}
+ 		}
+-		kfree(protkey);
++		kfree_sensitive(protkey);
+ 		ktp.pkeylen = protkeylen;
+ 		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ 			return -EFAULT;
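The pkey hunks above repeatedly replace the open-coded memzero_explicit()-plus-kfree() pair with kfree_sensitive(), and wipe the on-stack ioctl structures on every exit path. A minimal sketch of the idiom follows; the helper name copy_key_to_user() is invented for illustration:

#include <linux/slab.h>
#include <linux/uaccess.h>

/*
 * Sketch only: kfree_sensitive() zeroes the allocation before freeing
 * it, so key material never lingers in freed slab memory. It is
 * equivalent to the open-coded pair it replaces, with the clearing
 * guaranteed (a plain memset() before kfree() could be elided by the
 * compiler as a dead store).
 */
static long copy_key_to_user(void __user *ukey, void *kkey, size_t len)
{
	long rc = 0;

	if (copy_to_user(ukey, kkey, len))
		rc = -EFAULT;
	kfree_sensitive(kkey);	/* zero + free in one call */
	return rc;
}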
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index d32ad46318cb0..5d261c2f2d200 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1355,11 +1355,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ 	    mr_sas_port->remote_identify.sas_address, hba_port);
+ 
++	if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
++		ioc_info(mrioc, "max port count %u could be too high\n",
++		    mr_sas_node->num_phys);
++
+ 	for (i = 0; i < mr_sas_node->num_phys; i++) {
+ 		if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ 		    mr_sas_port->remote_identify.sas_address) ||
+ 		    (mr_sas_node->phy[i].hba_port != hba_port))
+ 			continue;
++
++		if (i > sizeof(mr_sas_port->phy_mask) * 8) {
++			ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
++			    i, sizeof(mr_sas_port->phy_mask) * 8);
++			goto out_fail;
++		}
+ 		list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ 		    &mr_sas_port->phy_list);
+ 		mr_sas_port->num_phys++;
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index bf921caaf6aea..054a51713d556 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -2324,9 +2324,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
+ 	io_req->fcport = fcport;
+ 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+ 
+-	/* Record which cpu this request is associated with */
+-	io_req->cpu = smp_processor_id();
+-
+ 	/* Set TM flags */
+ 	io_req->io_req_flags = QEDF_READ;
+ 	io_req->data_xfer_len = 0;
+@@ -2349,6 +2346,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
+ 
+ 	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 
++	/* Record which cpu this request is associated with */
++	io_req->cpu = smp_processor_id();
++
+ 	sqe_idx = qedf_get_sqe_idx(fcport);
+ 	sqe = &fcport->sq[sqe_idx];
+ 	memset(sqe, 0, sizeof(struct fcoe_wqe));
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index 8648b8eb080dc..cdce2e280f663 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -145,6 +145,9 @@
+ #define CDNS_XSPI_STIG_DONE_FLAG		BIT(0)
+ #define CDNS_XSPI_TRD_STATUS			0x0104
+ 
++#define MODE_NO_OF_BYTES			GENMASK(25, 24)
++#define MODEBYTES_COUNT			1
++
+ /* Helper macros for filling command registers */
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+@@ -157,9 +160,10 @@
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+ 
+-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
++	FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+ 
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+@@ -173,12 +177,12 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+ 
+-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ 		((op)->data.nbytes >> 16) & 0xffff) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ 		  (op)->dummy.buswidth != 0 ? \
+-		  (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++		  (((dummybytes) * 8) / (op)->dummy.buswidth) : \
+ 		  0))
+ 
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	u32 cmd_regs[6];
+ 	u32 cmd_status;
+ 	int ret;
++	int dummybytes = op->dummy.nbytes;
+ 
+ 	ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ 	if (ret < 0)
+@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	memset(cmd_regs, 0, sizeof(cmd_regs));
+ 	cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ 	cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+-	cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
++	if (dummybytes != 0) {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
++		dummybytes--;
++	} else {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
++	}
+ 	cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ 						       cdns_xspi->cur_cs);
+ 
+@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 		cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ 		cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ 		cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+-		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
+ 		cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ 							   cdns_xspi->cur_cs);
+ 
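The xspi fix defines the new two-bit MODE_NO_OF_BYTES field with GENMASK(25, 24) and fills it via FIELD_PREP(), consuming one former dummy byte as a mode byte. A quick reminder of how those bitfield helpers compose, using a made-up register layout purely for illustration:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_MODE_BYTES	GENMASK(25, 24)	/* 2-bit field at bits 25:24 */
#define DEMO_OPCODE	GENMASK(23, 16)	/* 8-bit field just below it */

static u32 demo_pack_cmd(u8 opcode, u8 modebytes)
{
	/* FIELD_PREP() masks the value and shifts it into position. */
	return FIELD_PREP(DEMO_OPCODE, opcode) |
	       FIELD_PREP(DEMO_MODE_BYTES, modebytes);
}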
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 6b9422bd8795d..25f836c00e226 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -1250,6 +1250,8 @@ static int lvts_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	lvts_data = of_device_get_match_data(dev);
++	if (!lvts_data)
++		return -ENODEV;
+ 
+ 	lvts_td->clk = devm_clk_get_enabled(dev, NULL);
+ 	if (IS_ERR(lvts_td->clk))
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 9552228d21614..f63cdd6794419 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1314,7 +1314,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+ 
+ }
+ 
+-#define TXTL_DEFAULT 2 /* reset default */
++#define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 48d745e9f9730..48028bab57e34 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2658,16 +2658,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 			else
+ 				xhci_handle_halted_endpoint(xhci, ep, NULL,
+ 							    EP_SOFT_RESET);
+-			goto cleanup;
++			break;
+ 		case COMP_RING_UNDERRUN:
+ 		case COMP_RING_OVERRUN:
+ 		case COMP_STOPPED_LENGTH_INVALID:
+-			goto cleanup;
++			break;
+ 		default:
+ 			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+ 				 slot_id, ep_index);
+ 			goto err_out;
+ 		}
++		return 0;
+ 	}
+ 
+ 	/* Count current td numbers if ep->skip is set */
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 282aac45c6909..f34f9895b8984 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -497,10 +497,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
+ 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+ }
+ 
+-static void vhost_scsi_evt_work(struct vhost_work *work)
++static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
+ {
+-	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+-					vs_event_work);
+ 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ 	struct vhost_scsi_evt *evt, *t;
+ 	struct llist_node *llnode;
+@@ -508,12 +506,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
+ 	mutex_lock(&vq->mutex);
+ 	llnode = llist_del_all(&vs->vs_event_list);
+ 	llist_for_each_entry_safe(evt, t, llnode, list) {
+-		vhost_scsi_do_evt_work(vs, evt);
++		if (!drop)
++			vhost_scsi_do_evt_work(vs, evt);
+ 		vhost_scsi_free_evt(vs, evt);
+ 	}
+ 	mutex_unlock(&vq->mutex);
+ }
+ 
++static void vhost_scsi_evt_work(struct vhost_work *work)
++{
++	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
++					     vs_event_work);
++	vhost_scsi_complete_events(vs, false);
++}
++
+ static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
+ {
+ 	struct iov_iter *iter = &cmd->saved_iter;
+@@ -1509,7 +1515,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ 	}
+ 
+ 	llist_add(&evt->list, &vs->vs_event_list);
+-	vhost_vq_work_queue(vq, &vs->vs_event_work);
++	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
++		vhost_scsi_complete_events(vs, true);
+ }
+ 
+ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 8995730ce0bfc..1740a5f1f35e7 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -276,21 +276,36 @@ void vhost_vq_flush(struct vhost_virtqueue *vq)
+ EXPORT_SYMBOL_GPL(vhost_vq_flush);
+ 
+ /**
+- * vhost_worker_flush - flush a worker
++ * __vhost_worker_flush - flush a worker
+  * @worker: worker to flush
+  *
+- * This does not use RCU to protect the worker, so the device or worker
+- * mutex must be held.
++ * The worker's flush_mutex must be held.
+  */
+-static void vhost_worker_flush(struct vhost_worker *worker)
++static void __vhost_worker_flush(struct vhost_worker *worker)
+ {
+ 	struct vhost_flush_struct flush;
+ 
++	if (!worker->attachment_cnt || worker->killed)
++		return;
++
+ 	init_completion(&flush.wait_event);
+ 	vhost_work_init(&flush.work, vhost_flush_work);
+ 
+ 	vhost_worker_queue(worker, &flush.work);
++	/*
++	 * Drop mutex in case our worker is killed and it needs to take the
++	 * mutex to force cleanup.
++	 */
++	mutex_unlock(&worker->mutex);
+ 	wait_for_completion(&flush.wait_event);
++	mutex_lock(&worker->mutex);
++}
++
++static void vhost_worker_flush(struct vhost_worker *worker)
++{
++	mutex_lock(&worker->mutex);
++	__vhost_worker_flush(worker);
++	mutex_unlock(&worker->mutex);
+ }
+ 
+ void vhost_dev_flush(struct vhost_dev *dev)
+@@ -298,15 +313,8 @@ void vhost_dev_flush(struct vhost_dev *dev)
+ 	struct vhost_worker *worker;
+ 	unsigned long i;
+ 
+-	xa_for_each(&dev->worker_xa, i, worker) {
+-		mutex_lock(&worker->mutex);
+-		if (!worker->attachment_cnt) {
+-			mutex_unlock(&worker->mutex);
+-			continue;
+-		}
++	xa_for_each(&dev->worker_xa, i, worker)
+ 		vhost_worker_flush(worker);
+-		mutex_unlock(&worker->mutex);
+-	}
+ }
+ EXPORT_SYMBOL_GPL(vhost_dev_flush);
+ 
+@@ -392,7 +400,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ 	__vhost_vq_meta_reset(vq);
+ }
+ 
+-static bool vhost_worker(void *data)
++static bool vhost_run_work_list(void *data)
+ {
+ 	struct vhost_worker *worker = data;
+ 	struct vhost_work *work, *work_next;
+@@ -417,6 +425,40 @@ static bool vhost_worker(void *data)
+ 	return !!node;
+ }
+ 
++static void vhost_worker_killed(void *data)
++{
++	struct vhost_worker *worker = data;
++	struct vhost_dev *dev = worker->dev;
++	struct vhost_virtqueue *vq;
++	int i, attach_cnt = 0;
++
++	mutex_lock(&worker->mutex);
++	worker->killed = true;
++
++	for (i = 0; i < dev->nvqs; i++) {
++		vq = dev->vqs[i];
++
++		mutex_lock(&vq->mutex);
++		if (worker ==
++		    rcu_dereference_check(vq->worker,
++					  lockdep_is_held(&vq->mutex))) {
++			rcu_assign_pointer(vq->worker, NULL);
++			attach_cnt++;
++		}
++		mutex_unlock(&vq->mutex);
++	}
++
++	worker->attachment_cnt -= attach_cnt;
++	if (attach_cnt)
++		synchronize_rcu();
++	/*
++	 * Finish vhost_worker_flush calls and any other works that snuck in
++	 * before the synchronize_rcu.
++	 */
++	vhost_run_work_list(worker);
++	mutex_unlock(&worker->mutex);
++}
++
+ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
+ {
+ 	kfree(vq->indirect);
+@@ -631,9 +673,11 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+ 	if (!worker)
+ 		return NULL;
+ 
++	worker->dev = dev;
+ 	snprintf(name, sizeof(name), "vhost-%d", current->pid);
+ 
+-	vtsk = vhost_task_create(vhost_worker, worker, name);
++	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
++				 worker, name);
+ 	if (!vtsk)
+ 		goto free_worker;
+ 
+@@ -664,22 +708,37 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ {
+ 	struct vhost_worker *old_worker;
+ 
+-	old_worker = rcu_dereference_check(vq->worker,
+-					   lockdep_is_held(&vq->dev->mutex));
+-
+ 	mutex_lock(&worker->mutex);
+-	worker->attachment_cnt++;
+-	mutex_unlock(&worker->mutex);
++	if (worker->killed) {
++		mutex_unlock(&worker->mutex);
++		return;
++	}
++
++	mutex_lock(&vq->mutex);
++
++	old_worker = rcu_dereference_check(vq->worker,
++					   lockdep_is_held(&vq->mutex));
+ 	rcu_assign_pointer(vq->worker, worker);
++	worker->attachment_cnt++;
+ 
+-	if (!old_worker)
++	if (!old_worker) {
++		mutex_unlock(&vq->mutex);
++		mutex_unlock(&worker->mutex);
+ 		return;
++	}
++	mutex_unlock(&vq->mutex);
++	mutex_unlock(&worker->mutex);
++
+ 	/*
+ 	 * Take the worker mutex to make sure we see the work queued from
+ 	 * device wide flushes which doesn't use RCU for execution.
+ 	 */
+ 	mutex_lock(&old_worker->mutex);
+-	old_worker->attachment_cnt--;
++	if (old_worker->killed) {
++		mutex_unlock(&old_worker->mutex);
++		return;
++	}
++
+ 	/*
+ 	 * We don't want to call synchronize_rcu for every vq during setup
+ 	 * because it will slow down VM startup. If we haven't done
+@@ -690,6 +749,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ 	mutex_lock(&vq->mutex);
+ 	if (!vhost_vq_get_backend(vq) && !vq->kick) {
+ 		mutex_unlock(&vq->mutex);
++
++		old_worker->attachment_cnt--;
+ 		mutex_unlock(&old_worker->mutex);
+ 		/*
+ 		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
+@@ -705,7 +766,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ 	/* Make sure new vq queue/flush/poll calls see the new worker */
+ 	synchronize_rcu();
+ 	/* Make sure whatever was queued gets run */
+-	vhost_worker_flush(old_worker);
++	__vhost_worker_flush(old_worker);
++	old_worker->attachment_cnt--;
+ 	mutex_unlock(&old_worker->mutex);
+ }
+ 
+@@ -754,10 +816,16 @@ static int vhost_free_worker(struct vhost_dev *dev,
+ 		return -ENODEV;
+ 
+ 	mutex_lock(&worker->mutex);
+-	if (worker->attachment_cnt) {
++	if (worker->attachment_cnt || worker->killed) {
+ 		mutex_unlock(&worker->mutex);
+ 		return -EBUSY;
+ 	}
++	/*
++	 * A flush might have raced and snuck in before attachment_cnt was set
++	 * to zero. Make sure any queued flush work has completed before
++	 * freeing.
++	 */
++	__vhost_worker_flush(worker);
+ 	mutex_unlock(&worker->mutex);
+ 
+ 	vhost_worker_destroy(dev, worker);
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 9e942fcda5c3f..dc94e6a7d3c22 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -28,12 +28,14 @@ struct vhost_work {
+ 
+ struct vhost_worker {
+ 	struct vhost_task	*vtsk;
++	struct vhost_dev	*dev;
+ 	/* Used to serialize device wide flushing with worker swapping. */
+ 	struct mutex		mutex;
+ 	struct llist_head	work_list;
+ 	u64			kcov_handle;
+ 	u32			id;
+ 	int			attachment_cnt;
++	bool			killed;
+ };
+ 
+ /* Poll a file (eventfd or socket) */
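The reworked __vhost_worker_flush() queues a completion work item and, crucially, drops worker->mutex while sleeping so that a killed worker can take the mutex for cleanup. The bare flush idiom, transplanted onto an ordinary workqueue for illustration (all names invented):

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_flush {
	struct work_struct work;
	struct completion done;
};

static void demo_flush_fn(struct work_struct *w)
{
	struct demo_flush *f = container_of(w, struct demo_flush, work);

	complete(&f->done);	/* runs after all earlier queued work */
}

static void demo_flush_worker(struct workqueue_struct *wq, struct mutex *lock)
{
	struct demo_flush f;

	INIT_WORK_ONSTACK(&f.work, demo_flush_fn);
	init_completion(&f.done);
	queue_work(wq, &f.work);

	/* Drop the lock across the sleep, exactly as the patch does. */
	mutex_unlock(lock);
	wait_for_completion(&f.done);
	mutex_lock(lock);
	destroy_work_on_stack(&f.work);
}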
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 584af7816532b..f6b0b00e4599f 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -236,7 +236,7 @@ void vp_del_vqs(struct virtio_device *vdev)
+ 	int i;
+ 
+ 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+-		if (vp_dev->is_avq(vdev, vq->index))
++		if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
+ 			continue;
+ 
+ 		if (vp_dev->per_vq_vectors) {
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 1a66be33bb048..60066822b5329 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1924,8 +1924,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ next:
+ 		if (ret) {
+ 			/* Refcount held by the reclaim_bgs list after splice. */
+-			btrfs_get_block_group(bg);
+-			list_add_tail(&bg->bg_list, &retry_list);
++			spin_lock(&fs_info->unused_bgs_lock);
++			/*
++			 * This block group might have been added to the
++			 * unused list during the above process. If it was
++			 * not, move it back to the reclaim list.
++			 */
++			if (list_empty(&bg->bg_list)) {
++				btrfs_get_block_group(bg);
++				list_add_tail(&bg->bg_list, &retry_list);
++			}
++			spin_unlock(&fs_info->unused_bgs_lock);
+ 		}
+ 		btrfs_put_block_group(bg);
+ 
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 41173701f1bef..1e020620748d6 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3526,7 +3526,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+ 	for (int i = 0; i < num_folios; i++) {
+ 		if (eb->folios[i]) {
+ 			detach_extent_buffer_folio(eb, eb->folios[i]);
+-			__folio_put(eb->folios[i]);
++			folio_put(eb->folios[i]);
+ 		}
+ 	}
+ 	__free_extent_buffer(eb);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 1167899a16d05..4caa078d972a3 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3065,8 +3065,6 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ 			       struct btrfs_qgroup_inherit *inherit,
+ 			       size_t size)
+ {
+-	if (!btrfs_qgroup_enabled(fs_info))
+-		return 0;
+ 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
+ 		return -EOPNOTSUPP;
+ 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
+@@ -3090,6 +3088,14 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
+ 		return -EINVAL;
+ 
++	/*
++	 * Skip the inherit source qgroups check if qgroup is not enabled.
++	 * Qgroups can still be enabled later, causing problems, but in that
++	 * case btrfs_qgroup_inherit() would just ignore the invalid ones.
++	 */
++	if (!btrfs_qgroup_enabled(fs_info))
++		return 0;
++
+ 	/*
+ 	 * Now check all the remaining qgroups, they should all:
+ 	 *
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 4b22cfe9a98cb..afd6932f5e895 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2100,7 +2100,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 	struct btrfs_fs_info *fs_info = sctx->fs_info;
+ 	const u64 logical_end = logical_start + logical_length;
+ 	u64 cur_logical = logical_start;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* The range must be inside the bg */
+ 	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index d620323d08eae..ae8c56442549c 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -373,11 +373,18 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
+ 	 * "optimal" chunk size based on the fs size.  However when we actually
+ 	 * allocate the chunk we will strip this down further, making it no more
+ 	 * than 10% of the disk or 1G, whichever is smaller.
++	 *
++	 * On the zoned mode, we need to use zone_size (=
++	 * data_sinfo->chunk_size) as it is.
+ 	 */
+ 	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+-	data_chunk_size = min(data_sinfo->chunk_size,
+-			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+-	data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
++	if (!btrfs_is_zoned(fs_info)) {
++		data_chunk_size = min(data_sinfo->chunk_size,
++				      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
++		data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
++	} else {
++		data_chunk_size = data_sinfo->chunk_size;
++	}
+ 
+ 	/*
+ 	 * Since data allocations immediately use block groups as part of the
+@@ -405,6 +412,17 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
+ 		avail >>= 3;
+ 	else
+ 		avail >>= 1;
++
++	/*
++	 * On the zoned mode, we always allocate one zone as one chunk.
++	 * Returning non-zone-size aligned bytes here will result in
++	 * less pressure for the async metadata reclaim process, and it
++	 * will over-commit too much leading to ENOSPC. Align down to the
++	 * zone size to avoid that.
++	 */
++	if (btrfs_is_zoned(fs_info))
++		avail = ALIGN_DOWN(avail, fs_info->zone_size);
++
+ 	return avail;
+ }
+ 
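The zoned-mode hunk rounds the over-commit estimate down to a zone boundary with ALIGN_DOWN(), since zoned btrfs can only allocate whole zones. The macro is plain power-of-two masking; a sketch with an assumed 256 MiB zone size:

#include <linux/align.h>
#include <linux/sizes.h>
#include <linux/types.h>

static u64 demo_zoned_usable(u64 avail)
{
	/* ALIGN_DOWN(x, a) == x & ~(a - 1) for power-of-two a. */
	return ALIGN_DOWN(avail, SZ_256M);	/* 768M + 12345 -> 768M */
}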
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 07b3675ea1694..6c60e43ec8e58 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -72,7 +72,7 @@ enum {
+ 
+ struct f2fs_fault_info {
+ 	atomic_t inject_ops;
+-	unsigned int inject_rate;
++	int inject_rate;
+ 	unsigned int inject_type;
+ };
+ 
+@@ -4597,10 +4597,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+ }
+ 
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type);
++extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type);
+ #else
+-#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
++static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
++					unsigned long rate, unsigned long type)
++{
++	return 0;
++}
+ #endif
+ 
+ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 2f75a7dfc311d..0c3ebe4d9026d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -66,21 +66,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
+ 	[FAULT_NO_SEGMENT]		= "no free segment",
+ };
+ 
+-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type)
++int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type)
+ {
+ 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ 
+ 	if (rate) {
++		if (rate > INT_MAX)
++			return -EINVAL;
+ 		atomic_set(&ffi->inject_ops, 0);
+-		ffi->inject_rate = rate;
++		ffi->inject_rate = (int)rate;
+ 	}
+ 
+-	if (type)
+-		ffi->inject_type = type;
++	if (type) {
++		if (type >= BIT(FAULT_MAX))
++			return -EINVAL;
++		ffi->inject_type = (unsigned int)type;
++	}
+ 
+ 	if (!rate && !type)
+ 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
++	else
++		f2fs_info(sbi,
++			"build fault injection attr: rate: %lu, type: 0x%lx",
++								rate, type);
++	return 0;
+ }
+ #endif
+ 
+@@ -886,14 +896,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 		case Opt_fault_injection:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
++			if (f2fs_build_fault_attr(sbi, arg,
++					F2FS_ALL_FAULT_TYPE))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ 
+ 		case Opt_fault_type:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, 0, arg);
++			if (f2fs_build_fault_attr(sbi, 0, arg))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ #else
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index a568ce96cf563..7aa3844e7a808 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -484,10 +484,16 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ 	if (ret < 0)
+ 		return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-	if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+-		return -EINVAL;
+-	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+-		return -EINVAL;
++	if (a->struct_type == FAULT_INFO_TYPE) {
++		if (f2fs_build_fault_attr(sbi, 0, t))
++			return -EINVAL;
++		return count;
++	}
++	if (a->struct_type == FAULT_INFO_RATE) {
++		if (f2fs_build_fault_attr(sbi, t, 0))
++			return -EINVAL;
++		return count;
++	}
+ #endif
+ 	if (a->struct_type == RESERVED_BLOCKS) {
+ 		spin_lock(&sbi->stat_lock);
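After this change, all three f2fs entry points (the fault_injection and fault_type mount options plus the sysfs knobs) funnel through one validating f2fs_build_fault_attr() instead of duplicating the range checks. The shape of the refactor, with invented names and a simplified policy:

#include <linux/errno.h>
#include <linux/limits.h>

struct demo_fault_info {
	int rate;
	unsigned int type_mask;
};

#define DEMO_FAULT_MAX	24	/* one bit per fault type; sketch value */

/* Single validator: every setter propagates its verdict. */
static int demo_build_fault_attr(struct demo_fault_info *ffi,
				 unsigned long rate, unsigned long type)
{
	if (rate > INT_MAX)
		return -EINVAL;
	if (type >= (1UL << DEMO_FAULT_MAX))
		return -EINVAL;
	if (rate)
		ffi->rate = rate;
	if (type)
		ffi->type_mask = type;
	return 0;
}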
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index aede1be4dc0cd..4545f885c41ef 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
+ 	struct jffs2_inode_info *f = foo;
+ 
+ 	mutex_init(&f->sem);
++	f->target = NULL;
+ 	inode_init_once(&f->vfs_inode);
+ }
+ 
+diff --git a/fs/locks.c b/fs/locks.c
+index 90c8746874ded..c360d1992d21f 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2448,8 +2448,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by releasing the
+-	 * lock that was just acquired. There is no need to do that when we're
++	 * Detect close/fcntl races and recover by zapping all POSIX locks
++	 * associated with this file and our files_struct, just like on
++	 * filp_flush(). There is no need to do that when we're
+ 	 * unlocking though, or for OFD locks.
+ 	 */
+ 	if (!error && file_lock->c.flc_type != F_UNLCK &&
+@@ -2464,9 +2465,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = files_lookup_fd_locked(files, fd);
+ 		spin_unlock(&files->file_lock);
+ 		if (f != filp) {
+-			file_lock->c.flc_type = F_UNLCK;
+-			error = do_lock_file_wait(filp, cmd, file_lock);
+-			WARN_ON_ONCE(error);
++			locks_remove_posix(filp, files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 89caef7513db3..ba50388ee4bf1 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
+  * @target: offset number of an entry in the group (start point)
+  * @bsize: size in bits
+  * @lock: spin lock protecting @bitmap
++ * @wrap: whether to wrap around
+  */
+ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 					    unsigned long target,
+ 					    unsigned int bsize,
+-					    spinlock_t *lock)
++					    spinlock_t *lock, bool wrap)
+ {
+ 	int pos, end = bsize;
+ 
+@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 
+ 		end = target;
+ 	}
++	if (!wrap)
++		return -ENOSPC;
+ 
+ 	/* wrap around */
+ 	for (pos = 0; pos < end; pos++) {
+@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
+  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+  * @inode: inode of metadata file using this allocator
+  * @req: nilfs_palloc_req structure exchanged for the allocation
++ * @wrap: whether to wrap around
+  */
+ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+-				     struct nilfs_palloc_req *req)
++				     struct nilfs_palloc_req *req, bool wrap)
+ {
+ 	struct buffer_head *desc_bh, *bitmap_bh;
+ 	struct nilfs_palloc_group_desc *desc;
+@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 	entries_per_group = nilfs_palloc_entries_per_group(inode);
+ 
+ 	for (i = 0; i < ngroups; i += n) {
+-		if (group >= ngroups) {
++		if (group >= ngroups && wrap) {
+ 			/* wrap around */
+ 			group = 0;
+ 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
+@@ -550,7 +554,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 			bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
+ 			bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ 			pos = nilfs_palloc_find_available_slot(
+-				bitmap, group_offset, entries_per_group, lock);
++				bitmap, group_offset, entries_per_group, lock,
++				wrap);
++			/*
++			 * Since the search for a free slot in the second and
++			 * subsequent bitmap blocks always starts from the
++			 * beginning, the wrap flag only has an effect on the
++			 * first search.
++			 */
+ 			kunmap_local(bitmap_kaddr);
+ 			if (pos >= 0)
+ 				goto found;
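The new wrap flag lets the inode allocator refuse to wrap the bitmap search back below its starting point, which is how reserved inode numbers below NILFS_FIRST_INO stay out of reach. A generic wrap-controlled scan might look like this (toy code, not the nilfs implementation):

#include <linux/bitmap.h>
#include <linux/errno.h>

/*
 * Find a clear bit at or after @start; wrap around to bit 0 only
 * when @wrap is true. Returns the bit index or -ENOSPC.
 */
static int demo_find_slot(unsigned long *map, unsigned int size,
			  unsigned int start, bool wrap)
{
	unsigned int pos = find_next_zero_bit(map, size, start);

	if (pos < size)
		return pos;
	if (!wrap)
		return -ENOSPC;
	pos = find_next_zero_bit(map, start, 0);  /* second pass: [0, start) */
	return pos < start ? (int)pos : -ENOSPC;
}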
+diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
+index b667e869ac076..d825a9faca6d9 100644
+--- a/fs/nilfs2/alloc.h
++++ b/fs/nilfs2/alloc.h
+@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
+ 	struct buffer_head *pr_entry_bh;
+ };
+ 
+-int nilfs_palloc_prepare_alloc_entry(struct inode *,
+-				     struct nilfs_palloc_req *);
++int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
++				     struct nilfs_palloc_req *req, bool wrap);
+ void nilfs_palloc_commit_alloc_entry(struct inode *,
+ 				     struct nilfs_palloc_req *);
+ void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 180fc8d36213d..fc1caf63a42ae 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+ 	int ret;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
++	ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 35e6c55a0d231..d748d9dce74e4 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -135,6 +135,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
+ 			goto Enamelen;
+ 		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ 			goto Espan;
++		if (unlikely(p->inode &&
++			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
++			goto Einumber;
+ 	}
+ 	if (offs != limit)
+ 		goto Eend;
+@@ -160,6 +163,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
+ 	goto bad_entry;
+ Espan:
+ 	error = "directory entry across blocks";
++	goto bad_entry;
++Einumber:
++	error = "disallowed inode number";
+ bad_entry:
+ 	nilfs_error(sb,
+ 		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
+diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
+index 612e609158b52..1e86b9303b7ca 100644
+--- a/fs/nilfs2/ifile.c
++++ b/fs/nilfs2/ifile.c
+@@ -56,13 +56,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
+ 	struct nilfs_palloc_req req;
+ 	int ret;
+ 
+-	req.pr_entry_nr = 0;  /*
+-			       * 0 says find free inode from beginning
+-			       * of a group. dull code!!
+-			       */
++	req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
+ 	req.pr_entry_bh = NULL;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
++	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
+ 	if (!ret) {
+ 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
+ 						   &req.pr_entry_bh);
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 2e29b98ba8bab..fe982c3e08770 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -116,9 +116,15 @@ enum {
+ #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
+ 
+ #define NILFS_MDT_INODE(sb, ino) \
+-	((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
++	((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
+ #define NILFS_VALID_INODE(sb, ino) \
+-	((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
++	((ino) >= NILFS_FIRST_INO(sb) ||				\
++	 ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
++
++#define NILFS_PRIVATE_INODE(ino) ({					\
++	ino_t __ino = (ino);						\
++	((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO &&	\
++	 (__ino) != NILFS_SKETCH_INO); })
+ 
+ /**
+  * struct nilfs_transaction_info: context information for synchronization
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 2ae2c1bbf6d17..5aac4f9118fd9 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 	}
+ 
+ 	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
++	if (nilfs->ns_first_ino < NILFS_USER_INO) {
++		nilfs_err(nilfs->ns_sb,
++			  "too small lower limit for non-reserved inode numbers: %u",
++			  nilfs->ns_first_ino);
++		return -EINVAL;
++	}
+ 
+ 	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+ 	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index cd4ae1b8ae165..17fee562ee503 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -182,7 +182,7 @@ struct the_nilfs {
+ 	unsigned long		ns_nrsvsegs;
+ 	unsigned long		ns_first_data_block;
+ 	int			ns_inode_size;
+-	int			ns_first_ino;
++	unsigned int		ns_first_ino;
+ 	u32			ns_crc_seed;
+ 
+ 	/* /sys/fs/<nilfs>/<device> */
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 53e7d1fa036aa..73785dece7a7f 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -219,8 +219,11 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 		if (!ea->name_len)
+ 			break;
+ 
+-		if (ea->name_len > ea_size)
++		if (ea->name_len > ea_size) {
++			ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++			err = -EINVAL; /* corrupted fs */
+ 			break;
++		}
+ 
+ 		if (buffer) {
+ 			/* Check if we can use field ea->name */
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 34849b4a3243c..907765673765c 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 		     (long)new_op->downcall.resp.statfs.files_avail);
+ 
+ 	buf->f_type = sb->s_magic;
+-	memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
++	buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
++	buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
+ 	buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+ 	buf->f_namelen = ORANGEFS_NAME_MAX;
+ 
+diff --git a/fs/super.c b/fs/super.c
+index 69ce6c6009684..4e52aba2fbea4 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1501,8 +1501,17 @@ static int fs_bdev_thaw(struct block_device *bdev)
+ 
+ 	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
+ 
++	/*
++	 * The block device may have been frozen before it was claimed by a
++	 * filesystem. Concurrently another process might try to mount that
++	 * frozen block device and has temporarily claimed the block device for
++	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
++	 * mounter is already about to abort mounting because they still saw an
++	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
++	 * NULL in that case.
++	 */
+ 	sb = get_bdev_super(bdev);
+-	if (WARN_ON_ONCE(!sb))
++	if (!sb)
+ 		return -EINVAL;
+ 
+ 	if (sb->s_op->thaw_super)
+diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
+index 5693a4be0d9a9..ff9c65841ae8d 100644
+--- a/include/linux/dynamic_queue_limits.h
++++ b/include/linux/dynamic_queue_limits.h
+@@ -91,7 +91,8 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
+ {
+ 	unsigned long map, now, now_hi, i;
+ 
+-	BUG_ON(count > DQL_MAX_OBJECT);
++	if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
++		return;
+ 
+ 	dql->last_obj_cnt = count;
+ 
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 1a9de119a0f73..f79853d2778b7 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -99,7 +99,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
+ {
+ 	const struct path *path;
+ 
+-	if (file->f_mode & FMODE_NONOTIFY)
++	/*
++	 * FMODE_NONOTIFY fds are generated by fanotify itself and should not
++	 * generate new events. We also don't want to generate events for
++	 * FMODE_PATH fds (only open & close events are involved) as they are
++	 * just handle creation / destruction events and not "real" file events.
++	 */
++	if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+ 		return 0;
+ 
+ 	path = &file->f_path;
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 67edc4ca2beeb..a561c629d89f0 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -22,6 +22,8 @@
+ #include <linux/cleanup.h>
+ #include <linux/mutex_types.h>
+ 
++struct device;
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+ 		, .dep_map = {					\
+@@ -117,6 +119,31 @@ do {							\
+ } while (0)
+ #endif /* CONFIG_PREEMPT_RT */
+ 
++#ifdef CONFIG_DEBUG_MUTEXES
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock);
++
++#else
++
++static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	/*
++	 * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a nop,
++	 * so there is no real need to register it with the devm subsystem.
++	 */
++	return 0;
++}
++
++#endif
++
++#define devm_mutex_init(dev, mutex)			\
++({							\
++	typeof(mutex) mutex_ = (mutex);			\
++							\
++	mutex_init(mutex_);				\
++	__devm_mutex_init(dev, mutex_);			\
++})
++
+ /*
+  * See kernel/locking/mutex.c for detailed documentation of these APIs.
+  * Also see Documentation/locking/mutex-design.rst.
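A typical call site for the new devm_mutex_init() would be a probe function where the mutex lives in devm-allocated driver data. A hypothetical driver, for illustration only:

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
	struct mutex lock;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Destroyed automatically on unbind when mutex debugging is on. */
	return devm_mutex_init(&pdev->dev, &priv->lock);
}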
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 3f68b8239bb11..a62d86bce1b63 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1121,7 +1121,7 @@ struct phy_driver {
+ 				  u8 index, enum led_brightness value);
+ 
+ 	/**
+-	 * @led_blink_set: Set a PHY LED brightness.  Index indicates
++	 * @led_blink_set: Set a PHY LED blinking.  Index indicates
+ 	 * which of the PHYs led should be configured to blink. Delays
+ 	 * are in milliseconds and if both are zero then a sensible
+ 	 * default should be chosen.  The call should adjust the
+diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
+index bc60243d43b36..25446c5d35081 100644
+--- a/include/linux/sched/vhost_task.h
++++ b/include/linux/sched/vhost_task.h
+@@ -4,7 +4,8 @@
+ 
+ struct vhost_task;
+ 
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++				     void (*handle_kill)(void *), void *arg,
+ 				     const char *name);
+ void vhost_task_start(struct vhost_task *vtsk);
+ void vhost_task_stop(struct vhost_task *vtsk);
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index f187510428ca6..d46320f7fd685 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -324,6 +324,17 @@ enum {
+ 	 * claim to support it.
+ 	 */
+ 	HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
++
++	/*
++	 * When this quirk is set, the reserved bits of Primary/Secondary_PHY
++	 * inside the LE Extended Advertising Report events are discarded.
++	 * This is required for some Apple/Broadcom controllers which
++	 * abuse these reserved bits for unrelated flags.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 2d7f87bc5324b..baaff7bc09119 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -395,7 +395,7 @@ enum ieee80211_bss_change {
+ 	BSS_CHANGED_HE_OBSS_PD		= 1<<28,
+ 	BSS_CHANGED_HE_BSS_COLOR	= 1<<29,
+ 	BSS_CHANGED_FILS_DISCOVERY      = 1<<30,
+-	BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
++	BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31),
+ 	BSS_CHANGED_MLD_VALID_LINKS	= BIT_ULL(33),
+ 	BSS_CHANGED_MLD_TTLM		= BIT_ULL(34),
+ 
+diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
+index f2afb7cc4926c..18e3745b86cd4 100644
+--- a/include/uapi/linux/cn_proc.h
++++ b/include/uapi/linux/cn_proc.h
+@@ -69,8 +69,7 @@ struct proc_input {
+ 
+ static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
+ {
+-	ev_type &= PROC_EVENT_ALL;
+-	return ev_type;
++	return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
+ }
+ 
+ /*
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index f7f3d14fa69a7..4950e0b622b1f 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -256,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ 		 * dma_mask changed by benchmark
+ 		 */
+ 		dma_set_mask(map->dev, old_dma_mask);
++
++		if (ret)
++			return ret;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 41a12630cbbc9..2b9ef8abff79d 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -488,6 +488,8 @@ void mm_update_next_owner(struct mm_struct *mm)
+ 	 * Search through everything else, we should not get here often.
+ 	 */
+ 	for_each_process(g) {
++		if (atomic_read(&mm->mm_users) <= 1)
++			break;
+ 		if (g->flags & PF_KTHREAD)
+ 			continue;
+ 		for_each_thread(g, c) {
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index bc8abb8549d20..6e6f6071cfa27 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -12,6 +12,7 @@
+  */
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/poison.h>
+ #include <linux/sched.h>
+@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
+ 	lock->magic = lock;
+ }
+ 
++static void devm_mutex_release(void *res)
++{
++	mutex_destroy(res);
++}
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	return devm_add_action_or_reset(dev, devm_mutex_release, lock);
++}
++EXPORT_SYMBOL_GPL(__devm_mutex_init);
++
+ /***
+  * mutex_destroy - mark a mutex unusable
+  * @lock: the mutex to be destroyed
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 5bc04bfe2db1d..c6f24d17866d8 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -1600,7 +1600,7 @@ int swsusp_check(bool exclusive)
+ 
+ put:
+ 		if (error)
+-			fput(hib_resume_bdev_file);
++			bdev_fput(hib_resume_bdev_file);
+ 		else
+ 			pr_debug("Image signature found, resuming\n");
+ 	} else {
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index da35e5b7f0473..8800f5acc0071 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -10,38 +10,32 @@
+ 
+ enum vhost_task_flags {
+ 	VHOST_TASK_FLAGS_STOP,
++	VHOST_TASK_FLAGS_KILLED,
+ };
+ 
+ struct vhost_task {
+ 	bool (*fn)(void *data);
++	void (*handle_sigkill)(void *data);
+ 	void *data;
+ 	struct completion exited;
+ 	unsigned long flags;
+ 	struct task_struct *task;
++	/* serialize SIGKILL and vhost_task_stop calls */
++	struct mutex exit_mutex;
+ };
+ 
+ static int vhost_task_fn(void *data)
+ {
+ 	struct vhost_task *vtsk = data;
+-	bool dead = false;
+ 
+ 	for (;;) {
+ 		bool did_work;
+ 
+-		if (!dead && signal_pending(current)) {
++		if (signal_pending(current)) {
+ 			struct ksignal ksig;
+-			/*
+-			 * Calling get_signal will block in SIGSTOP,
+-			 * or clear fatal_signal_pending, but remember
+-			 * what was set.
+-			 *
+-			 * This thread won't actually exit until all
+-			 * of the file descriptors are closed, and
+-			 * the release function is called.
+-			 */
+-			dead = get_signal(&ksig);
+-			if (dead)
+-				clear_thread_flag(TIF_SIGPENDING);
++
++			if (get_signal(&ksig))
++				break;
+ 		}
+ 
+ 		/* mb paired w/ vhost_task_stop */
+@@ -57,7 +51,19 @@ static int vhost_task_fn(void *data)
+ 			schedule();
+ 	}
+ 
++	mutex_lock(&vtsk->exit_mutex);
++	/*
++	 * If vhost_task_stop() and SIGKILL race, we can ignore the SIGKILL.
++	 * By the time the vhost layer has called vhost_task_stop() it has
++	 * already stopped new work and flushed.
++	 */
++	if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
++		set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
++		vtsk->handle_sigkill(vtsk->data);
++	}
++	mutex_unlock(&vtsk->exit_mutex);
+ 	complete(&vtsk->exited);
++
+ 	do_exit(0);
+ }
+ 
+@@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake);
+  * @vtsk: vhost_task to stop
+  *
+  * vhost_task_fn ensures the worker thread exits after
+- * VHOST_TASK_FLAGS_SOP becomes true.
++ * VHOST_TASK_FLAGS_STOP becomes true.
+  */
+ void vhost_task_stop(struct vhost_task *vtsk)
+ {
+-	set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
+-	vhost_task_wake(vtsk);
++	mutex_lock(&vtsk->exit_mutex);
++	if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
++		set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
++		vhost_task_wake(vtsk);
++	}
++	mutex_unlock(&vtsk->exit_mutex);
++
+ 	/*
+ 	 * Make sure vhost_task_fn is no longer accessing the vhost_task before
+ 	 * freeing it below.
+@@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
+ /**
+  * vhost_task_create - create a copy of a task to be used by the kernel
+  * @fn: vhost worker function
+- * @arg: data to be passed to fn
++ * @handle_sigkill: vhost function to run when the task is killed
++ * @arg: data to be passed to fn and handle_sigkill
+  * @name: the thread's name
+  *
+  * This returns a specialized task for use by the vhost layer or NULL on
+  * failure. The returned task is inactive, and the caller must fire it up
+  * through vhost_task_start().
+  */
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++				     void (*handle_sigkill)(void *), void *arg,
+ 				     const char *name)
+ {
+ 	struct kernel_clone_args args = {
+@@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
+ 	if (!vtsk)
+ 		return NULL;
+ 	init_completion(&vtsk->exited);
++	mutex_init(&vtsk->exit_mutex);
+ 	vtsk->data = arg;
+ 	vtsk->fn = fn;
++	vtsk->handle_sigkill = handle_sigkill;
+ 
+ 	args.fn_arg = vtsk;
+ 
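vhost_task_stop() and the new SIGKILL path now serialize on exit_mutex, and whichever sets its flag first wins; the loser becomes a no-op. That is the classic first-transition-wins handshake, sketched here with invented names:

#include <linux/mutex.h>

enum demo_state { ST_RUNNING, ST_STOPPED, ST_KILLED };

struct demo_task {
	struct mutex exit_mutex;
	enum demo_state state;
};

/* Returns true if this caller performed the transition. */
static bool demo_transition(struct demo_task *t, enum demo_state to)
{
	bool won = false;

	mutex_lock(&t->exit_mutex);
	if (t->state == ST_RUNNING) {
		t->state = to;
		won = true;
	}
	mutex_unlock(&t->exit_mutex);
	return won;
}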
+diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
+index fdba0eaf19a59..ad29721b956bc 100644
+--- a/lib/fortify_kunit.c
++++ b/lib/fortify_kunit.c
+@@ -15,10 +15,17 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++/* We don't need to fill dmesg with the fortify WARNs during testing. */
++#ifdef DEBUG
++# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
++#else
++# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
++#endif
++
+ /* Redefine fortify_panic() to track failures. */
+ void fortify_add_kunit_error(int write);
+ #define fortify_panic(func, write, avail, size, retfail) do {		\
+-	__fortify_report(FORTIFY_REASON(func, write), avail, size);	\
++	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
+ 	fortify_add_kunit_error(write);					\
+ 	return (retfail);						\
+ } while (0)
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index d9d1df28cc52e..9c9e4dcf06d96 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -78,7 +78,6 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 	time_remaining = wait_for_completion_timeout(&try_completion,
+ 						     kunit_test_timeout());
+ 	if (time_remaining == 0) {
+-		kunit_err(test, "try timed out\n");
+ 		try_catch->try_result = -ETIMEDOUT;
+ 		kthread_stop(task_struct);
+ 	}
+@@ -93,6 +92,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 		try_catch->try_result = 0;
+ 	else if (exit_code == -EINTR)
+ 		kunit_err(test, "wake_up_process() was never called\n");
++	else if (exit_code == -ETIMEDOUT)
++		kunit_err(test, "try timed out\n");
+ 	else if (exit_code)
+ 		kunit_err(test, "Unknown error: %d\n", exit_code);
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 3e19b87049db1..c9af72f292a8b 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ 	else
+ 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+ 
+-	if (bg_thresh >= thresh)
+-		bg_thresh = thresh / 2;
+ 	tsk = current;
+ 	if (rt_task(tsk)) {
+ 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 	}
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	if (thresh > UINT_MAX)
++		thresh = UINT_MAX;
++	/* This makes sure bg_thresh is within 32-bits as well */
++	if (bg_thresh >= thresh)
++		bg_thresh = thresh / 2;
+ 	dtc->thresh = thresh;
+ 	dtc->bg_thresh = bg_thresh;
+ 
+@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
+ 	if (rt_task(tsk))
+ 		dirty += dirty / 4;
+ 
+-	return dirty;
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	return min_t(unsigned long, dirty, UINT_MAX);
+ }
+ 
+ /**
+@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+ 		void *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	int ret;
++	unsigned long old_bytes = dirty_background_bytes;
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+-	if (ret == 0 && write)
++	if (ret == 0 && write) {
++		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
++								UINT_MAX) {
++			dirty_background_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		dirty_background_ratio = 0;
++	}
+ 	return ret;
+ }
+ 
+@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
++		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
++			vm_dirty_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		writeback_set_ratelimit();
+ 		vm_dirty_ratio = 0;
+ 	}
+@@ -1638,7 +1660,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
+ 	 */
+ 	dtc->wb_thresh = __wb_calc_thresh(dtc);
+ 	dtc->wb_bg_thresh = dtc->thresh ?
+-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
++		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ 
+ 	/*
+ 	 * In order to avoid the stacked BDI deadlock we need
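Both byte-based writeback knobs are now rejected when, converted to pages, they would overflow 32 bits; the throttling math multiplies two of these values inside a u64, so each factor must stay within UINT_MAX. The guard reduces to this sketch:

#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/mm.h>

/* Reject a byte limit whose page count would not fit in 32 bits. */
static int demo_check_dirty_bytes(unsigned long bytes)
{
	if (DIV_ROUND_UP(bytes, PAGE_SIZE) > UINT_MAX)
		return -ERANGE;		/* >16TB with 4K pages */
	return 0;
}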
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 08ae30fd31551..baca48ce8d0c6 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -904,8 +904,8 @@ static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+ 			       U16_MAX, GFP_ATOMIC);
+ }
+ 
+-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+-			      u8 role, u16 handle)
++static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++				       u8 role, u16 handle)
+ {
+ 	struct hci_conn *conn;
+ 
+@@ -1046,7 +1046,16 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ 	if (unlikely(handle < 0))
+ 		return ERR_PTR(-ECONNREFUSED);
+ 
+-	return hci_conn_add(hdev, type, dst, role, handle);
++	return __hci_conn_add(hdev, type, dst, role, handle);
++}
++
++struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++			      u8 role, u16 handle)
++{
++	if (handle > HCI_CONN_HANDLE_MAX)
++		return ERR_PTR(-EINVAL);
++
++	return __hci_conn_add(hdev, type, dst, role, handle);
+ }
+ 
+ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
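
The split keeps validation at the boundary where handles arrive from the
controller, while hci_conn_add_unset(), which allocates its own handle, goes
straight to the unchecked internal helper. The shape of the pattern as a
self-contained sketch (my_conn and MY_HANDLE_MAX are invented for
illustration; the real code compares against HCI_CONN_HANDLE_MAX and returns
ERR_PTR(-EINVAL)):

	#include <stdlib.h>

	#define MY_HANDLE_MAX 0x0eff	/* plays the role of HCI_CONN_HANDLE_MAX */

	struct my_conn { unsigned int handle; };

	/* Internal path: callers are trusted to pass a sane handle. */
	static struct my_conn *__conn_add(unsigned int handle)
	{
		struct my_conn *c = calloc(1, sizeof(*c));

		if (c)
			c->handle = handle;
		return c;
	}

	/* Public path: handles come from hardware, so validate first. */
	struct my_conn *conn_add(unsigned int handle)
	{
		if (handle > MY_HANDLE_MAX)
			return NULL;
		return __conn_add(handle);
	}
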
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 1ed734a7fb313..069f109d973b2 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6312,6 +6312,13 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
+ 
+ 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
+ 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
++
++		if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
++			     &hdev->quirks)) {
++			info->primary_phy &= 0x1f;
++			info->secondary_phy &= 0x1f;
++		}
++
+ 		if (legacy_evt_type != LE_ADV_INVALID) {
+ 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
+ 					   info->bdaddr_type, NULL, 0,
+@@ -6661,6 +6668,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	struct bt_iso_qos *qos;
+ 	bool pending = false;
+ 	u16 handle = __le16_to_cpu(ev->handle);
++	u32 c_sdu_interval, p_sdu_interval;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+@@ -6685,12 +6693,25 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 
+ 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+ 
+-	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
+-	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
+-	qos->ucast.out.interval = qos->ucast.in.interval;
++	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
++	 * page 3075:
++	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
++	 * ISO_Interval + SDU_Interval_C_To_P
++	 * ...
++	 * SDU_Interval = (CIG_Sync_Delay + (FT) x ISO_Interval) -
++	 *					Transport_Latency
++	 */
++	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
++			get_unaligned_le24(ev->c_latency);
++	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
++			get_unaligned_le24(ev->p_latency);
+ 
+ 	switch (conn->role) {
+ 	case HCI_ROLE_SLAVE:
++		qos->ucast.in.interval = c_sdu_interval;
++		qos->ucast.out.interval = p_sdu_interval;
+ 		/* Convert Transport Latency (us) to Latency (msec) */
+ 		qos->ucast.in.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+@@ -6704,6 +6725,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 		qos->ucast.out.phy = ev->p_phy;
+ 		break;
+ 	case HCI_ROLE_MASTER:
++		qos->ucast.in.interval = p_sdu_interval;
++		qos->ucast.out.interval = c_sdu_interval;
+ 		/* Convert Transport Latency (us) to Latency (msec) */
+ 		qos->ucast.out.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+@@ -6894,6 +6917,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 
+ 		bis = hci_conn_hash_lookup_handle(hdev, handle);
+ 		if (!bis) {
++			if (handle > HCI_CONN_HANDLE_MAX) {
++				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
++				continue;
++			}
+ 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+ 					   HCI_ROLE_SLAVE, handle);
+ 			if (IS_ERR(bis))
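
Worked through with sample numbers, the rearranged formula is concrete: all
quantities are in microseconds except ISO_Interval, which the event reports
in 1.25 ms slots (hence the multiplication by 1250). Assuming
CIG_Sync_Delay = 400 us, FT = 1, ISO_Interval = 16 slots (20 ms) and
Transport_Latency = 10400 us:

	SDU_Interval = CIG_Sync_Delay + FT * (ISO_Interval * 1250) - Transport_Latency
	             = 400 + 1 * (16 * 1250) - 10400
	             = 10000 us

The replaced code would have reported ISO_Interval * 1250 = 20000 us here,
twice the actual 10 ms SDU interval.
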
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 00c0d8413c638..dd33400c21822 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1356,8 +1356,7 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		lock_sock(sk);
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
+-			if (pi->conn->hcon &&
+-			    test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
++			if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+ 				iso_conn_big_sync(sk);
+ 				sk->sk_state = BT_LISTEN;
+ 			} else {
+diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
+index de33dc1b0daad..7236349cf0598 100644
+--- a/net/bpf/bpf_dummy_struct_ops.c
++++ b/net/bpf/bpf_dummy_struct_ops.c
+@@ -79,6 +79,51 @@ static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
+ 		    args->args[3], args->args[4]);
+ }
+ 
++static const struct bpf_ctx_arg_aux *find_ctx_arg_info(struct bpf_prog_aux *aux, int offset)
++{
++	int i;
++
++	for (i = 0; i < aux->ctx_arg_info_size; i++)
++		if (aux->ctx_arg_info[i].offset == offset)
++			return &aux->ctx_arg_info[i];
++
++	return NULL;
++}
++
++/* There is only one check at the moment:
++ * - zero should not be passed for pointer parameters not marked as nullable.
++ */
++static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_args *args)
++{
++	const struct btf_type *func_proto = prog->aux->attach_func_proto;
++
++	for (u32 arg_no = 0; arg_no < btf_type_vlen(func_proto) ; ++arg_no) {
++		const struct btf_param *param = &btf_params(func_proto)[arg_no];
++		const struct bpf_ctx_arg_aux *info;
++		const struct btf_type *t;
++		int offset;
++
++		if (args->args[arg_no] != 0)
++			continue;
++
++		/* Program is validated already, so there is no need
++		 * to check if t is NULL.
++		 */
++		t = btf_type_skip_modifiers(bpf_dummy_ops_btf, param->type, NULL);
++		if (!btf_type_is_ptr(t))
++			continue;
++
++		offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
++		info = find_ctx_arg_info(prog->aux, offset);
++		if (info && (info->reg_type & PTR_MAYBE_NULL))
++			continue;
++
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ extern const struct bpf_link_ops bpf_struct_ops_link_lops;
+ 
+ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+@@ -87,7 +132,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ 	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
+ 	const struct btf_type *func_proto;
+ 	struct bpf_dummy_ops_test_args *args;
+-	struct bpf_tramp_links *tlinks;
++	struct bpf_tramp_links *tlinks = NULL;
+ 	struct bpf_tramp_link *link = NULL;
+ 	void *image = NULL;
+ 	unsigned int op_idx;
+@@ -109,6 +154,10 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ 	if (IS_ERR(args))
+ 		return PTR_ERR(args);
+ 
++	err = check_test_run_args(prog, args);
++	if (err)
++		goto out;
++
+ 	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+ 	if (!tlinks) {
+ 		err = -ENOMEM;
+@@ -230,7 +279,7 @@ static void bpf_dummy_unreg(void *kdata)
+ {
+ }
+ 
+-static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
++static int bpf_dummy_ops__test_1(struct bpf_dummy_ops_state *cb__nullable)
+ {
+ 	return 0;
+ }
+@@ -247,7 +296,7 @@ static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
+ }
+ 
+ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
+-	.test_1 = bpf_dummy_test_1,
++	.test_1 = bpf_dummy_ops__test_1,
+ 	.test_2 = bpf_dummy_test_2,
+ 	.test_sleepable = bpf_dummy_test_sleepable,
+ };
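
The cb__nullable suffix is the struct_ops stub convention that records the
argument as PTR_MAYBE_NULL, which is what find_ctx_arg_info() looks up above;
a BPF program implementing such a member must then null-check the pointer
before dereferencing it or the verifier rejects the load. A minimal
program-side sketch (assuming the selftests' usual vmlinux.h and libbpf
headers):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("struct_ops/test_1")
	int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
	{
		if (!state)	/* required: state may legitimately be NULL */
			return 0;
		return state->val;
	}
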
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index a8b625abe242c..cb72923acc21c 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -435,15 +435,22 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 
+ 		end = start + skb_frag_size(frag);
+ 		if ((copy = end - offset) > 0) {
+-			struct page *page = skb_frag_page(frag);
+-			u8 *vaddr = kmap(page);
++			u32 p_off, p_len, copied;
++			struct page *p;
++			u8 *vaddr;
+ 
+ 			if (copy > len)
+ 				copy = len;
+-			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+-					vaddr + skb_frag_off(frag) + offset - start,
+-					copy, data, to);
+-			kunmap(page);
++
++			skb_frag_foreach_page(frag,
++					      skb_frag_off(frag) + offset - start,
++					      copy, p, p_off, p_len, copied) {
++				vaddr = kmap_local_page(p);
++				n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++					vaddr + p_off, p_len, data, to);
++				kunmap_local(vaddr);
++			}
++
+ 			offset += n;
+ 			if (n != copy)
+ 				goto short_copy;
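
The replaced code kmap()ed only the fragment's first page and then indexed
past it, which breaks once a fragment spans page boundaries (compound pages,
highmem). In isolation, the per-page walk looks like this (a userspace model
with invented names; in the kernel the splitting is what
skb_frag_foreach_page() provides):

	#include <stddef.h>
	#include <string.h>

	#define PG 4096UL	/* illustrative page size */

	/* Copy len bytes starting at byte offset off from an array of pages,
	 * splitting at every page boundary so no chunk is addressed across
	 * two pages, the analogue of one kmap_local_page() per chunk. */
	static void copy_paged(char *dst, char *const pages[], size_t off,
			       size_t len)
	{
		while (len) {
			size_t po = off % PG;
			size_t chunk = PG - po < len ? PG - po : len;

			memcpy(dst, pages[off / PG] + po, chunk);
			dst += chunk;
			off += chunk;
			len -= chunk;
		}
	}

	int main(void)
	{
		char a[4096] = { 0 }, b[4096] = { 0 }, dst[100];
		char *pages[] = { a, b };

		copy_paged(dst, pages, 4000, sizeof(dst));	/* spans a..b */
		return 0;
	}
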
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 7adace541fe29..9712cdb8087c2 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -1383,6 +1383,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
+ 	req.sdiag_family = AF_UNSPEC; /* compatibility */
+ 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+@@ -1398,6 +1399,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+ 	req.sdiag_family = rc->idiag_family;
+ 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0953c915bb4de..7b692bcb61d4a 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3074,7 +3074,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 			return;
+ 
+ 		if (tcp_try_undo_dsack(sk))
+-			tcp_try_keep_open(sk);
++			tcp_try_to_open(sk, flag);
+ 
+ 		tcp_identify_packet_loss(sk, ack_flag);
+ 		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+@@ -4220,6 +4220,13 @@ void tcp_parse_options(const struct net *net,
+ 				 * checked (see tcp_v{4,6}_rcv()).
+ 				 */
+ 				break;
++#endif
++#ifdef CONFIG_TCP_AO
++			case TCPOPT_AO:
++				/* TCP AO has already been checked
++				 * (see tcp_inbound_ao_hash()).
++				 */
++				break;
+ #endif
+ 			case TCPOPT_FASTOPEN:
+ 				tcp_parse_fastopen_option(
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index c2a925538542b..e0883ba709b0b 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -619,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
+ 	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
+ 	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
+ 					    .len = sizeof(struct in6_addr), },
++	[TCP_METRICS_ATTR_SADDR_IPV4]	= { .type = NLA_U32, },
+ 	/* Following attributes are not received for GET/DEL,
+ 	 * we keep them for reference
+ 	 */
+diff --git a/net/mac802154/main.c b/net/mac802154/main.c
+index 9ab7396668d22..21b7c3b280b45 100644
+--- a/net/mac802154/main.c
++++ b/net/mac802154/main.c
+@@ -161,8 +161,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy,
+ 	}
+ 
+ 	phy->symbol_duration = duration;
+-	phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+-	phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
++	phy->lifs_period =
++		(IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
++	phy->sifs_period =
++		(IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ EXPORT_SYMBOL(ieee802154_configure_durations);
+ 
+@@ -184,10 +186,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+ 	 * Should be done when all drivers sets this value.
+ 	 */
+ 
+-	wpan_phy->lifs_period =
+-		(IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+-	wpan_phy->sifs_period =
+-		(IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
++	wpan_phy->lifs_period =	(IEEE802154_LIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
++	wpan_phy->sifs_period =	(IEEE802154_SIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ 
+ int ieee802154_register_hw(struct ieee802154_hw *hw)
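
The underlying bug is a unit mismatch: symbol_duration is stored in
nanoseconds while lifs_period and sifs_period are kept in microseconds, so
the correct divisor is NSEC_PER_USEC (1000), not NSEC_PER_SEC (10^9).
Checking with 2.4 GHz O-QPSK numbers (16 us symbols, i.e. 16000 ns;
IEEE 802.15.4 sets LIFS to 40 symbols and SIFS to 12):

	lifs_period = 40 * 16000 / 1000 = 640 us
	sifs_period = 12 * 16000 / 1000 = 192 us

Dividing by NSEC_PER_SEC instead truncates both periods to 0, effectively
disabling the interframe spacing.
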
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index faa77b031d1f3..0f77ba3306c23 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -11479,8 +11479,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-	if (!list_empty(&nf_tables_destroy_list))
+-		nf_tables_trans_destroy_flush_work();
++	nf_tables_trans_destroy_flush_work();
+ again:
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (nft_table_has_owner(table) &&
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c67679a41044f..13b3998c6177f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7119,6 +7119,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 	struct sctp_sock *sp = sctp_sk(sk);
+ 	struct sctp_association *asoc;
+ 	struct sctp_assoc_ids *ids;
++	size_t ids_size;
+ 	u32 num = 0;
+ 
+ 	if (sctp_style(sk, TCP))
+@@ -7131,11 +7132,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 		num++;
+ 	}
+ 
+-	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
++	ids_size = struct_size(ids, gaids_assoc_id, num);
++	if (len < ids_size)
+ 		return -EINVAL;
+ 
+-	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+-
++	len = ids_size;
+ 	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+ 	if (unlikely(!ids))
+ 		return -ENOMEM;
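
struct_size(ids, gaids_assoc_id, num) computes the same
sizeof-header-plus-array total as the open-coded expression it replaces, but
saturates instead of wrapping if the multiplication overflows, so a later
allocation fails cleanly rather than being undersized. A small model of the
idiom (simplified; the kernel helper builds on check_mul_overflow()):

	#include <stdint.h>
	#include <stdio.h>

	struct ids {
		uint32_t num;
		int32_t assoc_id[];	/* flexible array member */
	};

	/* sizeof(struct ids) + n * sizeof(int32_t), saturating on overflow. */
	static size_t ids_size(size_t n)
	{
		if (n > (SIZE_MAX - sizeof(struct ids)) / sizeof(int32_t))
			return SIZE_MAX;
		return sizeof(struct ids) + n * sizeof(int32_t);
	}

	int main(void)
	{
		printf("%zu %zu\n", ids_size(3), ids_size(SIZE_MAX));
		return 0;
	}
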
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 65c416e8d25eb..c9866db2ea468 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -468,6 +468,10 @@ static const struct netlink_range_validation nl80211_punct_bitmap_range = {
+ 	.max = 0xffff,
+ };
+ 
++static const struct netlink_range_validation q_range = {
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
+ 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
+@@ -754,7 +758,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 
+ 	[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
+-	[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
++	[NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
+ 	[NL80211_ATTR_HE_CAPABILITY] =
+ 		NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
+ 				       NL80211_HE_MAX_CAPABILITY_LEN),
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 7862a81017477..a9434a72cac4f 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -182,7 +182,7 @@ kallsyms_step()
+ 	mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+ 	kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+ 
+-	info AS ${kallsyms_S}
++	info AS ${kallsymso}
+ 	${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+ 	      ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ 	      -c -o ${kallsymso} ${kallsyms_S}
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 117c7ecc48563..3f61220c23b4e 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -967,6 +967,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
+ 	if (err < 0)
+ 		ump_dbg(ump, "Unable to get UMP EP stream config\n");
+ 
++	/* If no protocol is set for some reason, assume a valid one */
++	if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
++		if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++			ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
++		else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
++			ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
++	}
++
+ 	/* Query and create blocks from Function Blocks */
+ 	for (blk = 0; blk < ump->info.num_blocks; blk++) {
+ 		err = create_block_from_fb_info(ump, blk);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3a56434c86bd9..c0530d4aa3fc3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -12029,6 +12029,7 @@ enum {
+ 	ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ 	ALC897_FIXUP_HEADSET_MIC_PIN2,
+ 	ALC897_FIXUP_UNIS_H3C_X500S,
++	ALC897_FIXUP_HEADSET_MIC_PIN3,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -12475,10 +12476,18 @@ static const struct hda_fixup alc662_fixups[] = {
+ 			{}
+ 		},
+ 	},
++	[ALC897_FIXUP_HEADSET_MIC_PIN3] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 }, /* use as headset mic */
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
++	SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 1ce738d91685a..670726353aa50 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -104,6 +104,7 @@ enum bpf_enum_value_kind {
+ 	case 2: val = *(const unsigned short *)p; break;		      \
+ 	case 4: val = *(const unsigned int *)p; break;			      \
+ 	case 8: val = *(const unsigned long long *)p; break;		      \
++	default: val = 0; break;					      \
+ 	}								      \
+ 	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+ 	if (__CORE_RELO(s, field, SIGNED))				      \
+diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
+index a336786a22a38..50befe125ddc5 100644
+--- a/tools/lib/bpf/features.c
++++ b/tools/lib/bpf/features.c
+@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
+ 	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ 	err = -errno; /* close() can clobber errno */
+ 
++	if (link_fd >= 0 || err != -EBADF) {
++		if (link_fd >= 0)
++			close(link_fd);
++		close(prog_fd);
++		return 0;
++	}
++
++	/* Initial multi-uprobe support in kernel didn't handle PID filtering
++	 * correctly (it was doing thread filtering, not process filtering).
++	 * So now we'll detect if PID filtering logic was fixed, and, if not,
++	 * we'll pretend multi-uprobes are not supported.
++	 * Multi-uprobes are used in USDT attachment logic, and we need to be
++	 * conservative here, because multi-uprobe selection happens early at
++	 * load time, while the use of PID filtering is known late at
++	 * attachment time, at which point it's too late to undo multi-uprobe
++	 * selection.
++	 *
++	 * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
++	 * early with -EINVAL on kernels with fixed PID filtering logic;
++	 * otherwise -ESRCH would be returned if passed correct binary path
++	 * (but we'll just get -EBADF, of course).
++	 */
++	link_opts.uprobe_multi.pid = -1; /* invalid PID */
++	link_opts.uprobe_multi.path = "/"; /* invalid path */
++	link_opts.uprobe_multi.offsets = &offset;
++	link_opts.uprobe_multi.cnt = 1;
++
++	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
++	err = -errno; /* close() can clobber errno */
++
+ 	if (link_fd >= 0)
+ 		close(link_fd);
+ 	close(prog_fd);
+ 
+-	return link_fd < 0 && err == -EBADF;
++	return link_fd < 0 && err == -EINVAL;
+ }
+ 
+ static int probe_kern_bpf_cookie(int token_fd)
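
Both steps rely on the same classify-by-errno idiom: hand the kernel
deliberately invalid arguments and let the precise error code reveal which
code path it took. The idiom in isolation, with pidfd_open() standing in
purely for illustration (reasonably recent kernel headers assumed):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Invalid flags: kernels that implement pidfd_open() answer
		 * EINVAL, kernels that lack it answer ENOSYS. */
		long fd = syscall(SYS_pidfd_open, (long)getpid(), ~0U);

		if (fd >= 0) {
			close(fd);
			puts("supported");
		} else {
			puts(errno == ENOSYS ? "unsupported" : "supported");
		}
		return 0;
	}
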
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 8071a3ef2a2e8..5d80d193e5bee 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -1022,6 +1022,7 @@ struct rapl_counter_info_t {
+ 
+ /* struct rapl_counter_info_t for each RAPL domain */
+ struct rapl_counter_info_t *rapl_counter_info_perdomain;
++unsigned int rapl_counter_info_perdomain_size;
+ 
+ #define RAPL_COUNTER_FLAG_USE_MSR_SUM (1u << 1)
+ 
+@@ -1415,6 +1416,9 @@ struct topo_params {
+ 	int allowed_cpus;
+ 	int allowed_cores;
+ 	int max_cpu_num;
++	int max_core_id;
++	int max_package_id;
++	int max_die_id;
+ 	int max_node_num;
+ 	int nodes_per_pkg;
+ 	int cores_per_node;
+@@ -3368,15 +3372,18 @@ void write_rapl_counter(struct rapl_counter *rc, struct rapl_counter_info_t *rci
+ 	rc->scale = rci->scale[idx];
+ }
+ 
+-int get_rapl_counters(int cpu, int domain, struct core_data *c, struct pkg_data *p)
++int get_rapl_counters(int cpu, unsigned int domain, struct core_data *c, struct pkg_data *p)
+ {
+ 	unsigned long long perf_data[NUM_RAPL_COUNTERS + 1];
+-	struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain];
++	struct rapl_counter_info_t *rci;
+ 
+ 	if (debug)
+ 		fprintf(stderr, "%s: cpu%d domain%d\n", __func__, cpu, domain);
+ 
+ 	assert(rapl_counter_info_perdomain);
++	assert(domain < rapl_counter_info_perdomain_size);
++
++	rci = &rapl_counter_info_perdomain[domain];
+ 
+ 	/*
+ 	 * If we have any perf counters to read, read them all now, in bulk
+@@ -4180,7 +4187,7 @@ void free_fd_rapl_percpu(void)
+ 	if (!rapl_counter_info_perdomain)
+ 		return;
+ 
+-	const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
++	const int num_domains = rapl_counter_info_perdomain_size;
+ 
+ 	for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ 		if (rapl_counter_info_perdomain[domain_id].fd_perf != -1)
+@@ -4188,6 +4195,8 @@ void free_fd_rapl_percpu(void)
+ 	}
+ 
+ 	free(rapl_counter_info_perdomain);
++	rapl_counter_info_perdomain = NULL;
++	rapl_counter_info_perdomain_size = 0;
+ }
+ 
+ void free_all_buffers(void)
+@@ -6478,17 +6487,18 @@ void linux_perf_init(void)
+ 
+ void rapl_perf_init(void)
+ {
+-	const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
++	const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1;
+ 	bool *domain_visited = calloc(num_domains, sizeof(bool));
+ 
+ 	rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
+ 	if (rapl_counter_info_perdomain == NULL)
+ 		err(-1, "calloc rapl_counter_info_percpu");
++	rapl_counter_info_perdomain_size = num_domains;
+ 
+ 	/*
+ 	 * Initialize rapl_counter_info_percpu
+ 	 */
+-	for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
++	for (unsigned int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ 		struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain_id];
+ 
+ 		rci->fd_perf = -1;
+@@ -6508,7 +6518,7 @@ void rapl_perf_init(void)
+ 		bool has_counter = 0;
+ 		double scale;
+ 		enum rapl_unit unit;
+-		int next_domain;
++		unsigned int next_domain;
+ 
+ 		memset(domain_visited, 0, num_domains * sizeof(*domain_visited));
+ 
+@@ -6521,6 +6531,8 @@ void rapl_perf_init(void)
+ 			next_domain =
+ 			    platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
+ 
++			assert(next_domain < num_domains);
++
+ 			if (domain_visited[next_domain])
+ 				continue;
+ 
+@@ -6967,7 +6979,6 @@ void topology_probe(bool startup)
+ 	int i;
+ 	int max_core_id = 0;
+ 	int max_package_id = 0;
+-	int max_die_id = 0;
+ 	int max_siblings = 0;
+ 
+ 	/* Initialize num_cpus, max_cpu_num */
+@@ -7084,8 +7095,8 @@ void topology_probe(bool startup)
+ 
+ 		/* get die information */
+ 		cpus[i].die_id = get_die_id(i);
+-		if (cpus[i].die_id > max_die_id)
+-			max_die_id = cpus[i].die_id;
++		if (cpus[i].die_id > topo.max_die_id)
++			topo.max_die_id = cpus[i].die_id;
+ 
+ 		/* get numa node information */
+ 		cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
+@@ -7104,6 +7115,8 @@ void topology_probe(bool startup)
+ 		if (cpus[i].thread_id == 0)
+ 			topo.num_cores++;
+ 	}
++	topo.max_core_id = max_core_id;
++	topo.max_package_id = max_package_id;
+ 
+ 	topo.cores_per_node = max_core_id + 1;
+ 	if (debug > 1)
+@@ -7111,9 +7124,9 @@ void topology_probe(bool startup)
+ 	if (!summary_only && topo.cores_per_node > 1)
+ 		BIC_PRESENT(BIC_Core);
+ 
+-	topo.num_die = max_die_id + 1;
++	topo.num_die = topo.max_die_id + 1;
+ 	if (debug > 1)
+-		fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die);
++		fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
+ 	if (!summary_only && topo.num_die > 1)
+ 		BIC_PRESENT(BIC_Die);
+ 
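
Sizing the per-domain array by topo.num_cores or topo.num_packages underruns
as soon as IDs are sparse: a machine can expose core IDs {0, 8, 16} with only
three cores online, so valid indices reach 16. Sizing by the maximum ID plus
one, as the patch now does, keeps every lookup in bounds:

	#include <assert.h>
	#include <stdlib.h>

	int main(void)
	{
		int ids[] = { 0, 8, 16 };	/* 3 domains, sparse IDs */
		int max_id = 16;
		int *per_domain = calloc(max_id + 1, sizeof(*per_domain));

		assert(per_domain);
		for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
			per_domain[ids[i]]++;	/* in bounds only with max_id + 1 */
		free(per_domain);
		return 0;
	}
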
+diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+index f43fcb13d2c46..d3d94596ab79c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
++++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+@@ -98,7 +98,8 @@ static void test_dummy_init_ptr_arg(void)
+ 
+ static void test_dummy_multiple_args(void)
+ {
+-	__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
++	struct bpf_dummy_ops_state st = { 7 };
++	__u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
+ 	LIBBPF_OPTS(bpf_test_run_opts, attr,
+ 		.ctx_in = args,
+ 		.ctx_size_in = sizeof(args),
+@@ -115,6 +116,7 @@ static void test_dummy_multiple_args(void)
+ 	fd = bpf_program__fd(skel->progs.test_2);
+ 	err = bpf_prog_test_run_opts(fd, &attr);
+ 	ASSERT_OK(err, "test_run");
++	args[0] = 7;
+ 	for (i = 0; i < ARRAY_SIZE(args); i++) {
+ 		snprintf(name, sizeof(name), "arg %zu", i);
+ 		ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
+@@ -125,7 +127,8 @@ static void test_dummy_multiple_args(void)
+ 
+ static void test_dummy_sleepable(void)
+ {
+-	__u64 args[1] = {0};
++	struct bpf_dummy_ops_state st;
++	__u64 args[1] = {(__u64)&st};
+ 	LIBBPF_OPTS(bpf_test_run_opts, attr,
+ 		.ctx_in = args,
+ 		.ctx_size_in = sizeof(args),
+@@ -144,6 +147,31 @@ static void test_dummy_sleepable(void)
+ 	dummy_st_ops_success__destroy(skel);
+ }
+ 
++/* dummy_st_ops.test_sleepable() parameter is not marked as nullable,
++ * thus bpf_prog_test_run_opts() below should be rejected as it tries
++ * to pass NULL for this parameter.
++ */
++static void test_dummy_sleepable_reject_null(void)
++{
++	__u64 args[1] = {0};
++	LIBBPF_OPTS(bpf_test_run_opts, attr,
++		.ctx_in = args,
++		.ctx_size_in = sizeof(args),
++	);
++	struct dummy_st_ops_success *skel;
++	int fd, err;
++
++	skel = dummy_st_ops_success__open_and_load();
++	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
++		return;
++
++	fd = bpf_program__fd(skel->progs.test_sleepable);
++	err = bpf_prog_test_run_opts(fd, &attr);
++	ASSERT_EQ(err, -EINVAL, "test_run");
++
++	dummy_st_ops_success__destroy(skel);
++}
++
+ void test_dummy_st_ops(void)
+ {
+ 	if (test__start_subtest("dummy_st_ops_attach"))
+@@ -156,6 +184,8 @@ void test_dummy_st_ops(void)
+ 		test_dummy_multiple_args();
+ 	if (test__start_subtest("dummy_sleepable"))
+ 		test_dummy_sleepable();
++	if (test__start_subtest("dummy_sleepable_reject_null"))
++		test_dummy_sleepable_reject_null();
+ 
+ 	RUN_TESTS(dummy_st_ops_fail);
+ }
+diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+index 1efa746c25dc7..ec0c595d47af8 100644
+--- a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
++++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+@@ -11,8 +11,17 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+ {
+ 	int ret;
+ 
+-	if (!state)
+-		return 0xf2f3f4f5;
++	/* Check that 'state' nullable status is detected correctly.
++	 * If the verifier assumed the 'state' argument to be non-null,
++	 * the code below would be deleted as dead (which it shouldn't be).
++	 * Hide it from the compiler behind 'asm' block to avoid
++	 * unnecessary optimizations.
++	 */
++	asm volatile (
++		"if %[state] != 0 goto +2;"
++		"r0 = 0xf2f3f4f5;"
++		"exit;"
++	::[state]"p"(state));
+ 
+ 	ret = state->val;
+ 	state->val = 0x5a;
+@@ -25,7 +34,7 @@ SEC("struct_ops/test_2")
+ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ 	     char a3, unsigned long a4)
+ {
+-	test_2_args[0] = (unsigned long)state;
++	test_2_args[0] = state->val;
+ 	test_2_args[1] = a1;
+ 	test_2_args[2] = a2;
+ 	test_2_args[3] = a3;
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index b634969cbb6f1..40723a6a083f4 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -66,8 +66,6 @@
+ #include <sys/wait.h>
+ #include <unistd.h>
+ #include <setjmp.h>
+-#include <syscall.h>
+-#include <linux/sched.h>
+ 
+ #include "kselftest.h"
+ 
+@@ -82,17 +80,6 @@
+ #  define TH_LOG_ENABLED 1
+ #endif
+ 
+-/* Wait for the child process to end but without sharing memory mapping. */
+-static inline pid_t clone3_vfork(void)
+-{
+-	struct clone_args args = {
+-		.flags = CLONE_VFORK,
+-		.exit_signal = SIGCHLD,
+-	};
+-
+-	return syscall(__NR_clone3, &args, sizeof(args));
+-}
+-
+ /**
+  * TH_LOG()
+  *
+@@ -437,7 +424,7 @@ static inline pid_t clone3_vfork(void)
+ 		} \
+ 		if (setjmp(_metadata->env) == 0) { \
+ 			/* _metadata and potentially self are shared with all forks. */ \
+-			child = clone3_vfork(); \
++			child = fork(); \
+ 			if (child == 0) { \
+ 				fixture_name##_setup(_metadata, self, variant->data); \
+ 				/* Let setup failure terminate early. */ \
+@@ -1016,7 +1003,14 @@ void __wait_for_test(struct __test_metadata *t)
+ 		.sa_flags = SA_SIGINFO,
+ 	};
+ 	struct sigaction saved_action;
+-	int status;
++	/*
++	 * Sets status so that WIFEXITED(status) returns true and
++	 * WEXITSTATUS(status) returns KSFT_FAIL.  This safe default value
++	 * should never be evaluated because of the waitpid(2) check and
++	 * SIGALRM handling.
++	 */
++	int status = KSFT_FAIL << 8;
++	int child;
+ 
+ 	if (sigaction(SIGALRM, &action, &saved_action)) {
+ 		t->exit_code = KSFT_FAIL;
+@@ -1028,7 +1022,15 @@ void __wait_for_test(struct __test_metadata *t)
+ 	__active_test = t;
+ 	t->timed_out = false;
+ 	alarm(t->timeout);
+-	waitpid(t->pid, &status, 0);
++	child = waitpid(t->pid, &status, 0);
++	if (child == -1 && errno != EINTR) {
++		t->exit_code = KSFT_FAIL;
++		fprintf(TH_LOG_STREAM,
++			"# %s: Failed to wait for PID %d (errno: %d)\n",
++			t->name, t->pid, errno);
++		return;
++	}
++
+ 	alarm(0);
+ 	if (sigaction(SIGALRM, &saved_action, NULL)) {
+ 		t->exit_code = KSFT_FAIL;
+@@ -1083,6 +1085,7 @@ void __wait_for_test(struct __test_metadata *t)
+ 				WTERMSIG(status));
+ 		}
+ 	} else {
++		t->exit_code = KSFT_FAIL;
+ 		fprintf(TH_LOG_STREAM,
+ 			"# %s: Test ended in some other way [%u]\n",
+ 			t->name,
+@@ -1218,6 +1221,7 @@ void __run_test(struct __fixture_metadata *f,
+ 	struct __test_xfail *xfail;
+ 	char test_name[1024];
+ 	const char *diagnostic;
++	int child;
+ 
+ 	/* reset test struct */
+ 	t->exit_code = KSFT_PASS;
+@@ -1236,15 +1240,16 @@ void __run_test(struct __fixture_metadata *f,
+ 	fflush(stdout);
+ 	fflush(stderr);
+ 
+-	t->pid = clone3_vfork();
+-	if (t->pid < 0) {
++	child = fork();
++	if (child < 0) {
+ 		ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
+ 		t->exit_code = KSFT_FAIL;
+-	} else if (t->pid == 0) {
++	} else if (child == 0) {
+ 		setpgrp();
+ 		t->fn(t, variant);
+ 		_exit(t->exit_code);
+ 	} else {
++		t->pid = child;
+ 		__wait_for_test(t);
+ 	}
+ 	ksft_print_msg("         %4s  %s\n",
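
Besides reverting to plain fork(), the harness now separates the expected
interruption (SIGALRM firing on timeout, so waitpid() fails with EINTR) from
genuine wait failures. A standalone demonstration of that distinction; the
handler is installed without SA_RESTART so waitpid() really does return
EINTR:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void on_alarm(int sig) { (void)sig; }	/* just interrupt waitpid() */

	int main(void)
	{
		struct sigaction sa = { 0 };
		int status = 1 << 8;	/* safe default: WEXITSTATUS() == 1 */
		pid_t pid, ret;

		sa.sa_handler = on_alarm;	/* no SA_RESTART on purpose */
		sigaction(SIGALRM, &sa, NULL);

		pid = fork();
		if (pid == 0) {		/* child overruns the timeout */
			sleep(3);
			_exit(0);
		}

		alarm(1);
		ret = waitpid(pid, &status, 0);
		if (ret == -1 && errno == EINTR) {
			printf("timed out waiting for %d\n", (int)pid);
			kill(pid, SIGKILL);
		} else if (ret == -1) {
			perror("waitpid");	/* hard failure, unlike EINTR */
		} else {
			printf("exited with %d\n", WEXITSTATUS(status));
		}
		return 0;
	}
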
+diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c
+index 353e1e867fbb2..6038b96ecee88 100644
+--- a/tools/testing/selftests/net/gro.c
++++ b/tools/testing/selftests/net/gro.c
+@@ -119,6 +119,9 @@ static void setup_sock_filter(int fd)
+ 		next_off = offsetof(struct ipv6hdr, nexthdr);
+ 	ipproto_off = ETH_HLEN + next_off;
+ 
++	/* Overridden later if exthdrs are used: */
++	opt_ipproto_off = ipproto_off;
++
+ 	if (strcmp(testname, "ip") == 0) {
+ 		if (proto == PF_INET)
+ 			optlen = sizeof(struct ip_timestamp);
+diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
+index 193b82745fd87..29451d2244b75 100644
+--- a/tools/testing/selftests/net/ip_local_port_range.c
++++ b/tools/testing/selftests/net/ip_local_port_range.c
+@@ -359,7 +359,7 @@ TEST_F(ip_local_port_range, late_bind)
+ 		struct sockaddr_in v4;
+ 		struct sockaddr_in6 v6;
+ 	} addr;
+-	socklen_t addr_len;
++	socklen_t addr_len = 0;
+ 	const int one = 1;
+ 	int fd, err;
+ 	__u32 range;
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 7426a2cbd4a03..7ad5a59adff2b 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -1276,7 +1276,7 @@ int add_listener(int argc, char *argv[])
+ 	struct sockaddr_storage addr;
+ 	struct sockaddr_in6 *a6;
+ 	struct sockaddr_in *a4;
+-	u_int16_t family;
++	u_int16_t family = AF_UNSPEC;
+ 	int enable = 1;
+ 	int sock;
+ 	int err;
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index bdc03a2097e85..7ea5fb28c93db 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -85,6 +85,7 @@ static bool cfg_rx;
+ static int  cfg_runtime_ms	= 4200;
+ static int  cfg_verbose;
+ static int  cfg_waittime_ms	= 500;
++static int  cfg_notification_limit = 32;
+ static bool cfg_zerocopy;
+ 
+ static socklen_t cfg_alen;
+@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
+ static long packets, bytes, completions, expected_completions;
+ static int  zerocopied = -1;
+ static uint32_t next_completion;
++static uint32_t sends_since_notify;
+ 
+ static unsigned long gettimeofday_ms(void)
+ {
+@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
+ 		error(1, errno, "send");
+ 	if (cfg_verbose && ret != len)
+ 		fprintf(stderr, "send: ret=%u != %u\n", ret, len);
++	sends_since_notify++;
+ 
+ 	if (len) {
+ 		packets++;
+@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
+ 	/* Detect notification gaps. These should not happen often, if at all.
+ 	 * Gaps can occur due to drops, reordering and retransmissions.
+ 	 */
+-	if (lo != next_completion)
++	if (cfg_verbose && lo != next_completion)
+ 		fprintf(stderr, "gap: %u..%u does not append to %u\n",
+ 			lo, hi, next_completion);
+ 	next_completion = hi + 1;
+@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
+ static void do_recv_completions(int fd, int domain)
+ {
+ 	while (do_recv_completion(fd, domain)) {}
++	sends_since_notify = 0;
+ }
+ 
+ /* Wait for all remaining completions on the errqueue */
+@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
+ 		else
+ 			do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+ 
++		if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
++			do_recv_completions(fd, domain);
++
+ 		while (!do_poll(fd, POLLOUT)) {
+ 			if (cfg_zerocopy)
+ 				do_recv_completions(fd, domain);
+@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
+ 
+ 	cfg_payload_len = max_payload_len;
+ 
+-	while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
++	while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+ 		switch (c) {
+ 		case '4':
+ 			if (cfg_family != PF_UNSPEC)
+@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
+ 			if (cfg_ifindex == 0)
+ 				error(1, errno, "invalid iface: %s", optarg);
+ 			break;
++		case 'l':
++			cfg_notification_limit = strtoul(optarg, NULL, 0);
++			break;
+ 		case 'm':
+ 			cfg_cork_mixed = true;
+ 			break;
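
The new -l option bounds how many sends may accumulate before completion
notifications are reaped, instead of reaping only once the socket would
block. Reduced to stubs (do_send() and reap_completions() are placeholders
for the test's real send and errqueue-draining functions):

	#include <stdio.h>

	#define NOTIFY_LIMIT 32		/* mirrors cfg_notification_limit */

	static unsigned int sends_since_notify;

	static void do_send(void) { sends_since_notify++; }
	static void reap_completions(void) { sends_since_notify = 0; }

	int main(void)
	{
		for (int i = 0; i < 100; i++) {
			do_send();
			if (sends_since_notify >= NOTIFY_LIMIT)
				reap_completions();	/* bound errqueue growth */
		}
		printf("pending completions: %u\n", sends_since_notify);
		return 0;
	}
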
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 4cb991be8e31b..178a41d4bb1be 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -294,11 +294,30 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
+ 	return ret;
+ }
+ 
++static bool arch_supports_noncont_cat(const struct resctrl_test *test)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	/* AMD always supports non-contiguous CBM. */
++	if (get_vendor() == ARCH_AMD)
++		return true;
++
++	/* Intel support for non-contiguous CBM needs to be discovered. */
++	if (!strcmp(test->resource, "L3"))
++		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
++	else if (!strcmp(test->resource, "L2"))
++		__cpuid_count(0x10, 2, eax, ebx, ecx, edx);
++	else
++		return false;
++
++	return ((ecx >> 3) & 1);
++}
++
+ static int noncont_cat_run_test(const struct resctrl_test *test,
+ 				const struct user_params *uparams)
+ {
+ 	unsigned long full_cache_mask, cont_mask, noncont_mask;
+-	unsigned int eax, ebx, ecx, edx, sparse_masks;
++	unsigned int sparse_masks;
+ 	int bit_center, ret;
+ 	char schemata[64];
+ 
+@@ -307,15 +326,8 @@ static int noncont_cat_run_test(const struct resctrl_test *test,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!strcmp(test->resource, "L3"))
+-		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
+-	else if (!strcmp(test->resource, "L2"))
+-		__cpuid_count(0x10, 2, eax, ebx, ecx, edx);
+-	else
+-		return -EINVAL;
+-
+-	if (sparse_masks != ((ecx >> 3) & 1)) {
+-		ksft_print_msg("CPUID output doesn't match 'sparse_masks' file content!\n");
++	if (arch_supports_noncont_cat(test) != sparse_masks) {
++		ksft_print_msg("Hardware and kernel differ on non-contiguous CBM support!\n");
+ 		return 1;
+ 	}
+ 
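
The helper queries CPUID leaf 0x10 (CAT enumeration), subleaf 1 for L3 or 2
for L2, where ECX bit 3 advertises non-contiguous CBM support on Intel. The
same probe as a standalone program (x86 only; __cpuid_count comes from the
compiler's cpuid.h):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);	/* subleaf 1: L3 */
		printf("L3 non-contiguous CBM: %s\n",
		       (ecx >> 3) & 1 ? "supported" : "not supported");
		return 0;
	}
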


