From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
Date: Wed, 27 Apr 2022 13:10:27 +0000 (UTC)
Message-ID: <1651065012.b08b8be59e2765bb2d36321ab9f026eeb6c43e87.mpagano@gentoo>

commit:     b08b8be59e2765bb2d36321ab9f026eeb6c43e87
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 27 13:10:12 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 27 13:10:12 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b08b8be5

Linux patch 5.17.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    6 +-
 1004_linux-5.17.5.patch | 5187 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5192 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 1f4d8246..d851c5b7 100644
--- a/0000_README
+++ b/0000_README
@@ -55,10 +55,14 @@ Patch:  1002_linux-5.17.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.17.3
 
-Patch:  1003_linux-5.17.3.patch
+Patch:  1003_linux-5.17.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.17.4
 
+Patch:  1004_linux-5.17.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.17.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.17.5.patch b/1004_linux-5.17.5.patch
new file mode 100644
index 00000000..024660ad
--- /dev/null
+++ b/1004_linux-5.17.5.patch
@@ -0,0 +1,5187 @@
+diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
+index 54386a010a8d7..871d2da7a0a91 100644
+--- a/Documentation/filesystems/ext4/attributes.rst
++++ b/Documentation/filesystems/ext4/attributes.rst
+@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
+      - Checksum of the extended attribute block.
+    * - 0x14
+      - \_\_u32
+-     - h\_reserved[2]
++     - h\_reserved[3]
+      - Zero.
+ 
+ The checksum is calculated against the FS UUID, the 64-bit block number
+diff --git a/Makefile b/Makefile
+index d7747e4c216e4..3ad5dc6be3930 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 17
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index dd77a0c8f740b..66ba549b520fc 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -196,6 +196,7 @@ tracesys_exit:
+ 	st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
+ 
+ 	;POST Sys Call Ptrace Hook
++	mov r0, sp		; pt_regs needed
+ 	bl  @syscall_trace_exit
+ 	b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
+ 	; we'd done before calling post hook above
+diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
+index 1da11bdb1dfbd..1c6500c4e6a17 100644
+--- a/arch/arm/mach-vexpress/spc.c
++++ b/arch/arm/mach-vexpress/spc.c
+@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
+ 		}
+ 
+ 		cluster = topology_physical_package_id(cpu_dev->id);
+-		if (init_opp_table[cluster])
++		if (cluster < 0 || init_opp_table[cluster])
+ 			continue;
+ 
+ 		if (ve_init_opp_table(cpu_dev))
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index ec5b082f3de6e..07eb69f9e7df3 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
+ 
+ 	if (!nr_reg) {
+ 		pr_err("No extended regions are found\n");
++		of_node_put(np);
+ 		return -EINVAL;
+ 	}
+ 
+ 	regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+-	if (!regs)
++	if (!regs) {
++		of_node_put(np);
+ 		return -ENOMEM;
++	}
+ 
+ 	/*
+ 	 * Create resource from extended regions provided by the hypervisor to be
+@@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
+ 	*res = &xen_resource;
+ 
+ err:
++	of_node_put(np);
+ 	kfree(regs);
+-
+ 	return rc;
+ }
+ #endif
+@@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
+ 
+ 	if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+ 		pr_err("Xen grant table region is not found\n");
++		of_node_put(xen_node);
+ 		return;
+ 	}
++	of_node_put(xen_node);
+ 	xen_grant_frames = res.start;
+ }
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+index 1dc9d187601c5..a0bd540f27d3d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+@@ -89,12 +89,12 @@
+ 		pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <125>;
+-		touchscreen-size-x = /bits/ 16 <4008>;
++		touchscreen-size-x = <4008>;
+ 		ti,y-min = /bits/ 16 <282>;
+-		touchscreen-size-y = /bits/ 16 <3864>;
++		touchscreen-size-y = <3864>;
+ 		ti,x-plate-ohms = /bits/ 16 <180>;
+-		touchscreen-max-pressure = /bits/ 16 <255>;
+-		touchscreen-average-samples = /bits/ 16 <10>;
++		touchscreen-max-pressure = <255>;
++		touchscreen-average-samples = <10>;
+ 		ti,debounce-tol = /bits/ 16 <3>;
+ 		ti,debounce-rep = /bits/ 16 <1>;
+ 		ti,settle-delay-usec = /bits/ 16 <150>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+index b16c7caf34c11..87b5e23c766f7 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+@@ -70,12 +70,12 @@
+ 		pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ 
+ 		ti,x-min = /bits/ 16 <125>;
+-		touchscreen-size-x = /bits/ 16 <4008>;
++		touchscreen-size-x = <4008>;
+ 		ti,y-min = /bits/ 16 <282>;
+-		touchscreen-size-y = /bits/ 16 <3864>;
++		touchscreen-size-y = <3864>;
+ 		ti,x-plate-ohms = /bits/ 16 <180>;
+-		touchscreen-max-pressure = /bits/ 16 <255>;
+-		touchscreen-average-samples = /bits/ 16 <10>;
++		touchscreen-max-pressure = <255>;
++		touchscreen-average-samples = <10>;
+ 		ti,debounce-tol = /bits/ 16 <3>;
+ 		ti,debounce-rep = /bits/ 16 <1>;
+ 		ti,settle-delay-usec = /bits/ 16 <150>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 2151cd8c8c7ab..e1c46b80f14a0 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -1459,6 +1459,8 @@
+ 					     "imem",
+ 					     "config";
+ 
++			qcom,qmp = <&aoss_qmp>;
++
+ 			qcom,smem-states = <&ipa_smp2p_out 0>,
+ 					   <&ipa_smp2p_out 1>;
+ 			qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index eab7a85050531..d66865131ef90 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -1714,6 +1714,8 @@
+ 			interconnect-names = "memory",
+ 					     "config";
+ 
++			qcom,qmp = <&aoss_qmp>;
++
+ 			qcom,smem-states = <&ipa_smp2p_out 0>,
+ 					   <&ipa_smp2p_out 1>;
+ 			qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 765d018e6306c..0bde6bbb3bc74 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1443,6 +1443,8 @@
+ 			interconnect-names = "memory",
+ 					     "config";
+ 
++			qcom,qmp = <&aoss_qmp>;
++
+ 			qcom,smem-states = <&ipa_smp2p_out 0>,
+ 					   <&ipa_smp2p_out 1>;
+ 			qcom,smem-state-names = "ipa-clock-enabled-valid",
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 94e147e5456ca..dff2b483ea509 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ 				 PMD_TYPE_TABLE)
+ #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ 				 PMD_TYPE_SECT)
+-#define pmd_leaf(pmd)		pmd_sect(pmd)
++#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
+ #define pmd_bad(pmd)		(!pmd_table(pmd))
+ 
+ #define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
+@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+ #define pud_none(pud)		(!pud_val(pud))
+ #define pud_bad(pud)		(!pud_table(pud))
+ #define pud_present(pud)	pte_present(pud_pte(pud))
+-#define pud_leaf(pud)		pud_sect(pud)
++#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
+ #define pud_valid(pud)		pte_valid(pud_pte(pud))
+ 
+ static inline void set_pud(pud_t *pudp, pud_t pud)
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 384f58a3f373f..5f8933aec75ce 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -610,23 +610,22 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
+ 		return;
+ 	}
+ 
+-	/* Conditionally hard-enable interrupts. */
+-	if (should_hard_irq_enable()) {
+-		/*
+-		 * Ensure a positive value is written to the decrementer, or
+-		 * else some CPUs will continue to take decrementer exceptions.
+-		 * When the PPC_WATCHDOG (decrementer based) is configured,
+-		 * keep this at most 31 bits, which is about 4 seconds on most
+-		 * systems, which gives the watchdog a chance of catching timer
+-		 * interrupt hard lockups.
+-		 */
+-		if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+-			set_dec(0x7fffffff);
+-		else
+-			set_dec(decrementer_max);
++	/*
++	 * Ensure a positive value is written to the decrementer, or
++	 * else some CPUs will continue to take decrementer exceptions.
++	 * When the PPC_WATCHDOG (decrementer based) is configured,
++	 * keep this at most 31 bits, which is about 4 seconds on most
++	 * systems, which gives the watchdog a chance of catching timer
++	 * interrupt hard lockups.
++	 */
++	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
++		set_dec(0x7fffffff);
++	else
++		set_dec(decrementer_max);
+ 
++	/* Conditionally hard-enable interrupts. */
++	if (should_hard_irq_enable())
+ 		do_hard_irq_enable();
+-	}
+ 
+ #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+ 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index d42b4b6d4a791..85cfa6328222b 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ 	tbl[idx % TCES_PER_PAGE] = tce;
+ }
+ 
+-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
+-		unsigned long entry)
++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
++		struct iommu_table *tbl, unsigned long entry)
+ {
+-	unsigned long hpa = 0;
+-	enum dma_data_direction dir = DMA_NONE;
++	unsigned long i;
++	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++	for (i = 0; i < subpages; ++i) {
++		unsigned long hpa = 0;
++		enum dma_data_direction dir = DMA_NONE;
+ 
+-	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
++		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
++	}
+ }
+ 
+ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ 			break;
+ 	}
+ 
++	iommu_tce_kill(tbl, io_entry, subpages);
++
+ 	return ret;
+ }
+ 
+@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
+ 			break;
+ 	}
+ 
++	iommu_tce_kill(tbl, io_entry, subpages);
++
+ 	return ret;
+ }
+ 
+@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
+ 					entry, ua, dir);
+ 
+-		iommu_tce_kill(stit->tbl, entry, 1);
+ 
+ 		if (ret != H_SUCCESS) {
+-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
+ 			goto unlock_exit;
+ 		}
+ 	}
+@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ 		 */
+ 		if (get_user(tce, tces + i)) {
+ 			ret = H_TOO_HARD;
+-			goto invalidate_exit;
++			goto unlock_exit;
+ 		}
+ 		tce = be64_to_cpu(tce);
+ 
+ 		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ 			ret = H_PARAMETER;
+-			goto invalidate_exit;
++			goto unlock_exit;
+ 		}
+ 
+ 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ 					iommu_tce_direction(tce));
+ 
+ 			if (ret != H_SUCCESS) {
+-				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
+-						entry);
+-				goto invalidate_exit;
++				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
++						 entry + i);
++				goto unlock_exit;
+ 			}
+ 		}
+ 
+ 		kvmppc_tce_put(stt, entry + i, tce);
+ 	}
+ 
+-invalidate_exit:
+-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+-		iommu_tce_kill(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ 
+@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ 				continue;
+ 
+ 			if (ret == H_TOO_HARD)
+-				goto invalidate_exit;
++				return ret;
+ 
+ 			WARN_ON_ONCE(1);
+-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
+ 		}
+ 	}
+ 
+ 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ 		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ 
+-invalidate_exit:
+-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+-		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index 870b7f0c7ea56..fdeda6a9cff44 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
+ 		tbl->it_ops->tce_kill(tbl, entry, pages, true);
+ }
+ 
+-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+-		unsigned long entry)
++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
++		struct iommu_table *tbl, unsigned long entry)
+ {
+-	unsigned long hpa = 0;
+-	enum dma_data_direction dir = DMA_NONE;
++	unsigned long i;
++	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++	for (i = 0; i < subpages; ++i) {
++		unsigned long hpa = 0;
++		enum dma_data_direction dir = DMA_NONE;
+ 
+-	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
++		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
++	}
+ }
+ 
+ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ 			break;
+ 	}
+ 
++	iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ 	return ret;
+ }
+ 
+@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+ 			break;
+ 	}
+ 
++	iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ 	return ret;
+ }
+ 
+@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+ 					stit->tbl, entry, ua, dir);
+ 
+-		iommu_tce_kill_rm(stit->tbl, entry, 1);
+-
+ 		if (ret != H_SUCCESS) {
+-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
+ 			return ret;
+ 		}
+ 	}
+@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ 		ua = 0;
+ 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ 			ret = H_PARAMETER;
+-			goto invalidate_exit;
++			goto unlock_exit;
+ 		}
+ 
+ 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ 					iommu_tce_direction(tce));
+ 
+ 			if (ret != H_SUCCESS) {
+-				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+-						entry);
+-				goto invalidate_exit;
++				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
++						entry + i);
++				goto unlock_exit;
+ 			}
+ 		}
+ 
+ 		kvmppc_rm_tce_put(stt, entry + i, tce);
+ 	}
+ 
+-invalidate_exit:
+-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+-		iommu_tce_kill_rm(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ 	if (!prereg)
+ 		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ 				continue;
+ 
+ 			if (ret == H_TOO_HARD)
+-				goto invalidate_exit;
++				return ret;
+ 
+ 			WARN_ON_ONCE_RM(1);
+-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
+ 		}
+ 	}
+ 
+ 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ 		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ 
+-invalidate_exit:
+-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+-		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ 	return ret;
+ }
+ 
+diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
+index 0975ad0b42c42..69b4565d1a8f0 100644
+--- a/arch/powerpc/perf/power10-pmu.c
++++ b/arch/powerpc/perf/power10-pmu.c
+@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
+ 
+ /* Table of alternatives, sorted by column 0 */
+ static const unsigned int power10_event_alternatives[][MAX_ALT] = {
+-	{ PM_CYC_ALT,			PM_CYC },
+ 	{ PM_INST_CMPL_ALT,		PM_INST_CMPL },
++	{ PM_CYC_ALT,			PM_CYC },
+ };
+ 
+ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
+index 4b7c17e361003..37b2860db4833 100644
+--- a/arch/powerpc/perf/power9-pmu.c
++++ b/arch/powerpc/perf/power9-pmu.c
+@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
+ 
+ /* Table of alternatives, sorted by column 0 */
+ static const unsigned int power9_event_alternatives[][MAX_ALT] = {
+-	{ PM_INST_DISP,			PM_INST_DISP_ALT },
+-	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
+-	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
+-	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
+ 	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
++	{ PM_INST_DISP,			PM_INST_DISP_ALT },
++	{ PM_RUN_CYC_ALT,               PM_RUN_CYC },
++	{ PM_LD_MISS_L1,                PM_LD_MISS_L1_ALT },
++	{ PM_RUN_INST_CMPL_ALT,         PM_RUN_INST_CMPL },
+ };
+ 
+ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
+index 6785aef4cbd46..aad430668bb4d 100644
+--- a/arch/riscv/kvm/vcpu.c
++++ b/arch/riscv/kvm/vcpu.c
+@@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
+ 		       sizeof(kvm_vcpu_stats_desc),
+ };
+ 
+-#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
+-				 riscv_isa_extension_mask(c) | \
+-				 riscv_isa_extension_mask(d) | \
+-				 riscv_isa_extension_mask(f) | \
+-				 riscv_isa_extension_mask(i) | \
+-				 riscv_isa_extension_mask(m) | \
+-				 riscv_isa_extension_mask(s) | \
+-				 riscv_isa_extension_mask(u))
++#define KVM_RISCV_ISA_DISABLE_ALLOWED	(riscv_isa_extension_mask(d) | \
++					riscv_isa_extension_mask(f))
++
++#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED	(riscv_isa_extension_mask(a) | \
++						riscv_isa_extension_mask(c) | \
++						riscv_isa_extension_mask(i) | \
++						riscv_isa_extension_mask(m))
++
++#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
++			       KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
+ 
+ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
+@@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+ 	switch (reg_num) {
+ 	case KVM_REG_RISCV_CONFIG_REG(isa):
+ 		if (!vcpu->arch.ran_atleast_once) {
+-			vcpu->arch.isa = reg_val;
++			/* Ignore the disable request for these extensions */
++			vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
+ 			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
+ 			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
+ 			kvm_riscv_vcpu_fp_reset(vcpu);
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index 7516e4199b3c6..20fd0acd7d800 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -28,15 +28,13 @@ typedef u16		compat_ipc_pid_t;
+ typedef __kernel_fsid_t	compat_fsid_t;
+ 
+ struct compat_stat {
+-	compat_dev_t	st_dev;
+-	u16		__pad1;
++	u32		st_dev;
+ 	compat_ino_t	st_ino;
+ 	compat_mode_t	st_mode;
+ 	compat_nlink_t	st_nlink;
+ 	__compat_uid_t	st_uid;
+ 	__compat_gid_t	st_gid;
+-	compat_dev_t	st_rdev;
+-	u16		__pad2;
++	u32		st_rdev;
+ 	u32		st_size;
+ 	u32		st_blksize;
+ 	u32		st_blocks;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 85ee96abba806..c4b4c0839dbdb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -969,12 +969,10 @@ enum hv_tsc_page_status {
+ 	HV_TSC_PAGE_UNSET = 0,
+ 	/* TSC page MSR was written by the guest, update pending */
+ 	HV_TSC_PAGE_GUEST_CHANGED,
+-	/* TSC page MSR was written by KVM userspace, update pending */
++	/* TSC page update was triggered from the host side */
+ 	HV_TSC_PAGE_HOST_CHANGED,
+ 	/* TSC page was properly set up and is currently active  */
+ 	HV_TSC_PAGE_SET,
+-	/* TSC page is currently being updated and therefore is inactive */
+-	HV_TSC_PAGE_UPDATING,
+ 	/* TSC page was set up with an inaccessible GPA */
+ 	HV_TSC_PAGE_BROKEN,
+ };
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 10bc257d3803b..247ac71b7a10f 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1128,11 +1128,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ 	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
+ 	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
+ 
++	mutex_lock(&hv->hv_lock);
++
+ 	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
++	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
+ 	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
+-		return;
++		goto out_unlock;
+ 
+-	mutex_lock(&hv->hv_lock);
+ 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ 		goto out_unlock;
+ 
+@@ -1194,45 +1196,19 @@ out_unlock:
+ 	mutex_unlock(&hv->hv_lock);
+ }
+ 
+-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
++void kvm_hv_request_tsc_page_update(struct kvm *kvm)
+ {
+ 	struct kvm_hv *hv = to_kvm_hv(kvm);
+-	u64 gfn;
+-	int idx;
+-
+-	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+-	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+-	    tsc_page_update_unsafe(hv))
+-		return;
+ 
+ 	mutex_lock(&hv->hv_lock);
+ 
+-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+-		goto out_unlock;
+-
+-	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+-	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+-		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
++	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
++	    !tsc_page_update_unsafe(hv))
++		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
+ 
+-	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+-
+-	hv->tsc_ref.tsc_sequence = 0;
+-
+-	/*
+-	 * Take the srcu lock as memslots will be accessed to check the gfn
+-	 * cache generation against the memslots generation.
+-	 */
+-	idx = srcu_read_lock(&kvm->srcu);
+-	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+-			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+-		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+-	srcu_read_unlock(&kvm->srcu, idx);
+-
+-out_unlock:
+ 	mutex_unlock(&hv->hv_lock);
+ }
+ 
+-
+ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+ {
+ 	if (!hv_vcpu->enforce_cpuid)
+diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
+index ed1c4e546d049..3e79b4a9ed4ef 100644
+--- a/arch/x86/kvm/hyperv.h
++++ b/arch/x86/kvm/hyperv.h
+@@ -133,7 +133,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
+ 
+ void kvm_hv_setup_tsc_page(struct kvm *kvm,
+ 			   struct pvclock_vcpu_time_info *hv_clock);
+-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
++void kvm_hv_request_tsc_page_update(struct kvm *kvm);
+ 
+ void kvm_hv_init_vm(struct kvm *kvm);
+ void kvm_hv_destroy_vm(struct kvm *kvm);
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 7a7b8d5b775e9..5e7e8d163b985 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -140,6 +140,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+ 	return sample_period;
+ }
+ 
++static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
++{
++	if (!pmc->perf_event || pmc->is_paused)
++		return;
++
++	perf_event_period(pmc->perf_event,
++			  get_sample_period(pmc, pmc->counter));
++}
++
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
+ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index ba40b7fced5ae..b5b0837df0d11 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
+ 	if (pmc) {
+ 		pmc->counter += data - pmc_read_counter(pmc);
++		pmc_update_sample_period(pmc);
+ 		return 0;
+ 	}
+ 	/* MSR_EVNTSELn */
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index fef9758525826..e5cecd4ad2d44 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2204,51 +2204,39 @@ int sev_cpu_init(struct svm_cpu_data *sd)
+  * Pages used by hardware to hold guest encrypted state must be flushed before
+  * returning them to the system.
+  */
+-static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
+-				   unsigned long len)
++static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
+ {
++	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
++
+ 	/*
+-	 * If hardware enforced cache coherency for encrypted mappings of the
+-	 * same physical page is supported, nothing to do.
++	 * Note!  The address must be a kernel address, as regular page walk
++	 * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
++	 * address is non-deterministic and unsafe.  This function deliberately
++	 * takes a pointer to deter passing in a user address.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
+-		return;
++	unsigned long addr = (unsigned long)va;
+ 
+ 	/*
+-	 * If the VM Page Flush MSR is supported, use it to flush the page
+-	 * (using the page virtual address and the guest ASID).
++	 * If CPU enforced cache coherency for encrypted mappings of the
++	 * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
++	 * flush is still needed in order to work properly with DMA devices.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
+-		struct kvm_sev_info *sev;
+-		unsigned long va_start;
+-		u64 start, stop;
+-
+-		/* Align start and stop to page boundaries. */
+-		va_start = (unsigned long)va;
+-		start = (u64)va_start & PAGE_MASK;
+-		stop = PAGE_ALIGN((u64)va_start + len);
+-
+-		if (start < stop) {
+-			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+-
+-			while (start < stop) {
+-				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
+-				       start | sev->asid);
+-
+-				start += PAGE_SIZE;
+-			}
+-
+-			return;
+-		}
+-
+-		WARN(1, "Address overflow, using WBINVD\n");
++	if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
++		clflush_cache_range(va, PAGE_SIZE);
++		return;
+ 	}
+ 
+ 	/*
+-	 * Hardware should always have one of the above features,
+-	 * but if not, use WBINVD and issue a warning.
++	 * VM Page Flush takes a host virtual address and a guest ASID.  Fall
++	 * back to WBINVD if this faults so as not to make any problems worse
++	 * by leaving stale encrypted data in the cache.
+ 	 */
+-	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
++	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
++		goto do_wbinvd;
++
++	return;
++
++do_wbinvd:
+ 	wbinvd_on_all_cpus();
+ }
+ 
+@@ -2262,7 +2250,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
+ 	svm = to_svm(vcpu);
+ 
+ 	if (vcpu->arch.guest_state_protected)
+-		sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
++		sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
++
+ 	__free_page(virt_to_page(svm->sev_es.vmsa));
+ 
+ 	if (svm->sev_es.ghcb_sa_free)
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index dc822a1d403d3..896ddf7392365 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+ 	}
+ 
++	if (vmx->nested.update_vmcs01_apicv_status) {
++		vmx->nested.update_vmcs01_apicv_status = false;
++		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
++	}
++
+ 	if ((vm_exit_reason != -1) &&
+ 	    (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
+ 		vmx->nested.need_vmcs12_to_shadow_sync = true;
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 5fa3870b89881..a0c84761c9382 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
+ 				data = (s64)(s32)data;
+ 			pmc->counter += data - pmc_read_counter(pmc);
+-			if (pmc->perf_event && !pmc->is_paused)
+-				perf_event_period(pmc->perf_event,
+-						  get_sample_period(pmc, data));
++			pmc_update_sample_period(pmc);
+ 			return 0;
+ 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
+ 			pmc->counter += data - pmc_read_counter(pmc);
+-			if (pmc->perf_event && !pmc->is_paused)
+-				perf_event_period(pmc->perf_event,
+-						  get_sample_period(pmc, data));
++			pmc_update_sample_period(pmc);
+ 			return 0;
+ 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
+ 			if (data == pmc->eventsel)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index b730d799c26ed..ef63cfd57029a 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4182,6 +4182,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
++	if (is_guest_mode(vcpu)) {
++		vmx->nested.update_vmcs01_apicv_status = true;
++		return;
++	}
++
+ 	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
+ 	if (cpu_has_secondary_exec_ctrls()) {
+ 		if (kvm_vcpu_apicv_active(vcpu))
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 9c6bfcd84008b..b98c7e96697a9 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -183,6 +183,7 @@ struct nested_vmx {
+ 	bool change_vmcs01_virtual_apic_mode;
+ 	bool reload_vmcs01_apic_access_page;
+ 	bool update_vmcs01_cpu_dirty_logging;
++	bool update_vmcs01_apicv_status;
+ 
+ 	/*
+ 	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 05128162ebd58..23d176cd12a4f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2874,7 +2874,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
+ 
+ static void kvm_update_masterclock(struct kvm *kvm)
+ {
+-	kvm_hv_invalidate_tsc_page(kvm);
++	kvm_hv_request_tsc_page_update(kvm);
+ 	kvm_start_pvclock_update(kvm);
+ 	pvclock_update_vm_gtod_copy(kvm);
+ 	kvm_end_pvclock_update(kvm);
+@@ -3086,8 +3086,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ 				       offsetof(struct compat_vcpu_info, time));
+ 	if (vcpu->xen.vcpu_time_info_set)
+ 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
+-	if (!v->vcpu_idx)
+-		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
++	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+ 	return 0;
+ }
+ 
+@@ -6190,7 +6189,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
+ 	if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
+ 		return -EINVAL;
+ 
+-	kvm_hv_invalidate_tsc_page(kvm);
++	kvm_hv_request_tsc_page_update(kvm);
+ 	kvm_start_pvclock_update(kvm);
+ 	pvclock_update_vm_gtod_copy(kvm);
+ 
+@@ -10297,12 +10296,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
+ 
+ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
+ {
+-	int r;
+-
+-	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+-	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+-	return r;
++	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+ }
+ 
+ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
+@@ -11119,8 +11113,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+ 		if (r < 0)
+ 			goto fail_mmu_destroy;
+-		if (kvm_apicv_activated(vcpu->kvm))
++
++		/*
++		 * Defer evaluating inhibits until the vCPU is first run, as
++		 * this vCPU will not get notified of any changes until this
++		 * vCPU is visible to other vCPUs (marked online and added to
++		 * the set of vCPUs).  Opportunistically mark APICv active as
++		 * VMX in particularly is highly unlikely to have inhibits.
++		 * Ignore the current per-VM APICv state so that vCPU creation
++		 * is guaranteed to run with a deterministic value, the request
++		 * will ensure the vCPU gets the correct state before VM-Entry.
++		 */
++		if (enable_apicv) {
+ 			vcpu->arch.apicv_active = true;
++			kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
++		}
+ 	} else
+ 		static_branch_inc(&kvm_has_noapic_vcpu);
+ 
+diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
+index 45cc0ae0af6f9..c7b9f12896f20 100644
+--- a/arch/xtensa/kernel/coprocessor.S
++++ b/arch/xtensa/kernel/coprocessor.S
+@@ -29,7 +29,7 @@
+ 	.if XTENSA_HAVE_COPROCESSOR(x);					\
+ 		.align 4;						\
+ 	.Lsave_cp_regs_cp##x:						\
+-		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
++		xchal_cp##x##_store a2 a3 a4 a5 a6;			\
+ 		jx	a0;						\
+ 	.endif
+ 
+@@ -46,7 +46,7 @@
+ 	.if XTENSA_HAVE_COPROCESSOR(x);					\
+ 		.align 4;						\
+ 	.Lload_cp_regs_cp##x:						\
+-		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
++		xchal_cp##x##_load a2 a3 a4 a5 a6;			\
+ 		jx	a0;						\
+ 	.endif
+ 
+diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
+index 0dde21e0d3de4..ad1841cecdfb7 100644
+--- a/arch/xtensa/kernel/jump_label.c
++++ b/arch/xtensa/kernel/jump_label.c
+@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
+ {
+ 	struct patch *patch = data;
+ 
+-	if (atomic_inc_return(&patch->cpu_count) == 1) {
++	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+ 		local_patch_text(patch->addr, patch->data, patch->sz);
+ 		atomic_inc(&patch->cpu_count);
+ 	} else {
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 4a86340133e46..f8703db99c734 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		return compat_put_long(argp,
+ 			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
+ 	case BLKGETSIZE:
+-		if (bdev_nr_sectors(bdev) > ~0UL)
++		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
+ 			return -EFBIG;
+ 		return compat_put_ulong(argp, bdev_nr_sectors(bdev));
+ 
+diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
+index 0c5a51970fbf5..014ccb0f45dc4 100644
+--- a/drivers/ata/pata_marvell.c
++++ b/drivers/ata/pata_marvell.c
+@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
+ 	switch(ap->port_no)
+ 	{
+ 	case 0:
++		if (!ap->ioaddr.bmdma_addr)
++			return ATA_CBL_PATA_UNK;
+ 		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+ 			return ATA_CBL_PATA40;
+ 		return ATA_CBL_PATA80;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 1476156af74b4..def564d1e8faf 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ {
+ 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+ 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+-	struct at_xdmac_desc	*desc, *_desc;
++	struct at_xdmac_desc	*desc, *_desc, *iter;
+ 	struct list_head	*descs_list;
+ 	enum dma_status		ret;
+ 	int			residue, retry;
+@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	 * microblock.
+ 	 */
+ 	descs_list = &desc->descs_list;
+-	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+-		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
+-		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
++	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
++		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
++		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
++		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
++			desc = iter;
+ 			break;
++		}
+ 	}
+ 	residue += cur_ubc << dwidth;
+ 
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
+index 329fc2e57b703..b5b8f8181e776 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
+@@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
+ 			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ 		/* Linked list */
+ 		#ifdef CONFIG_64BIT
+-			SET_CH_64(dw, chan->dir, chan->id, llp.reg,
+-				  chunk->ll_region.paddr);
++			/* llp is not aligned on 64bit -> keep 32bit accesses */
++			SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
++				  lower_32_bits(chunk->ll_region.paddr));
++			SET_CH_32(dw, chan->dir, chan->id, llp.msb,
++				  upper_32_bits(chunk->ll_region.paddr));
+ 		#else /* CONFIG_64BIT */
+ 			SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ 				  lower_32_bits(chunk->ll_region.paddr));
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 3061fe857d69f..f652da6ab47df 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
+ {
+ 	lockdep_assert_held(&wq->wq_lock);
+ 
+-	idxd_wq_disable_cleanup(wq);
+ 	wq->size = 0;
+ 	wq->group = NULL;
+ }
+@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+ 
+ 		if (wq->state == IDXD_WQ_ENABLED) {
+ 			idxd_wq_disable_cleanup(wq);
+-			idxd_wq_device_reset_cleanup(wq);
+ 			wq->state = IDXD_WQ_DISABLED;
+ 		}
++		idxd_wq_device_reset_cleanup(wq);
+ 	}
+ }
+ 
+ void idxd_device_clear_state(struct idxd_device *idxd)
+ {
++	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++		return;
++
+ 	idxd_groups_clear_state(idxd);
+ 	idxd_engines_clear_state(idxd);
+ 	idxd_device_wqs_clear_state(idxd);
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index e289fd48711ad..c01db23e3333f 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+  */
+ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+ {
+-	int rc, retries = 0;
++	unsigned int retries = wq->enqcmds_retries;
++	int rc;
+ 
+ 	do {
+ 		rc = enqcmds(portal, desc);
+ 		if (rc == 0)
+ 			break;
+ 		cpu_relax();
+-	} while (retries++ < wq->enqcmds_retries);
++	} while (retries--);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7e19ab92b61a8..dfd549685c467 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
+ 	u64 xfer_size;
+ 	int rc;
+ 
++	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++		return -EPERM;
++
+ 	if (wq->state != IDXD_WQ_DISABLED)
+ 		return -EPERM;
+ 
+@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
+ 	u64 batch_size;
+ 	int rc;
+ 
++	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
++		return -EPERM;
++
+ 	if (wq->state != IDXD_WQ_DISABLED)
+ 		return -EPERM;
+ 
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 75ec0754d4ad4..b1e6173fcc271 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
+ 	s32 per_2_firi_addr;
+ 	s32 mcu_2_firi_addr;
+ 	s32 uart_2_per_addr;
+-	s32 uart_2_mcu_ram_addr;
++	s32 uart_2_mcu_addr;
+ 	s32 per_2_app_addr;
+ 	s32 mcu_2_app_addr;
+ 	s32 per_2_per_addr;
+ 	s32 uartsh_2_per_addr;
+-	s32 uartsh_2_mcu_ram_addr;
++	s32 uartsh_2_mcu_addr;
+ 	s32 per_2_shp_addr;
+ 	s32 mcu_2_shp_addr;
+ 	s32 ata_2_mcu_addr;
+@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
+ 	s32 mcu_2_ecspi_addr;
+ 	s32 mcu_2_sai_addr;
+ 	s32 sai_2_mcu_addr;
+-	s32 uart_2_mcu_addr;
+-	s32 uartsh_2_mcu_addr;
++	s32 uart_2_mcu_rom_addr;
++	s32 uartsh_2_mcu_rom_addr;
+ 	/* End of v3 array */
+ 	s32 mcu_2_zqspi_addr;
+ 	/* End of v4 array */
+@@ -1780,17 +1780,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
+ 			saddr_arr[i] = addr_arr[i];
+ 
+ 	/*
+-	 * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
+-	 * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
+-	 * to be compatible with legacy freescale/nxp sdma firmware, and they
+-	 * are located in the bottom part of sdma_script_start_addrs which are
+-	 * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
++	 * For compatibility with NXP internal legacy kernel before 4.19 which
++	 * is based on uart ram script and mainline kernel based on uart rom
++	 * script, both uart ram/rom scripts are present in newer sdma
++	 * firmware. Use the rom versions if they are present (V3 or newer).
+ 	 */
+-	if (addr->uart_2_mcu_addr)
+-		sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
+-	if (addr->uartsh_2_mcu_addr)
+-		sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
+-
++	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
++		if (addr->uart_2_mcu_rom_addr)
++			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
++		if (addr->uartsh_2_mcu_rom_addr)
++			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
++	}
+ }
+ 
+ static void sdma_load_firmware(const struct firmware *fw, void *context)
+@@ -1869,7 +1869,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
+ 	u32 reg, val, shift, num_map, i;
+ 	int ret = 0;
+ 
+-	if (IS_ERR(np) || IS_ERR(gpr_np))
++	if (IS_ERR(np) || !gpr_np)
+ 		goto out;
+ 
+ 	event_remap = of_find_property(np, propname, NULL);
+@@ -1917,7 +1917,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
+ 	}
+ 
+ out:
+-	if (!IS_ERR(gpr_np))
++	if (gpr_np)
+ 		of_node_put(gpr_np);
+ 
+ 	return ret;
+diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
+index 375e7e647df6b..a1517ef1f4a01 100644
+--- a/drivers/dma/mediatek/mtk-uart-apdma.c
++++ b/drivers/dma/mediatek/mtk-uart-apdma.c
+@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+ 	unsigned int status;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(mtkd->ddev.dev);
++	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
+ 	if (ret < 0) {
+ 		pm_runtime_put_noidle(chan->device->dev);
+ 		return ret;
+@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
+ 	ret = readx_poll_timeout(readl, c->base + VFF_EN,
+ 			  status, !status, 10, 100);
+ 	if (ret)
+-		return ret;
++		goto err_pm;
+ 
+ 	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
+ 			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
+ 	if (ret < 0) {
+ 		dev_err(chan->device->dev, "Can't request dma IRQ\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_pm;
+ 	}
+ 
+ 	if (mtkd->support_33bits)
+ 		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+ 
++err_pm:
++	pm_runtime_put_noidle(mtkd->ddev.dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
+index f05ff02c0656e..40b1abeca8562 100644
+--- a/drivers/edac/synopsys_edac.c
++++ b/drivers/edac/synopsys_edac.c
+@@ -164,6 +164,11 @@
+ #define ECC_STAT_CECNT_SHIFT		8
+ #define ECC_STAT_BITNUM_MASK		0x7F
+ 
++/* ECC error count register definitions */
++#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
++#define ECC_ERRCNT_UECNT_SHIFT		16
++#define ECC_ERRCNT_CECNT_MASK		0xFFFF
++
+ /* DDR QOS Interrupt register definitions */
+ #define DDR_QOS_IRQ_STAT_OFST		0x20200
+ #define DDR_QOSUE_MASK			0x4
+@@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+ 	base = priv->baseaddr;
+ 	p = &priv->stat;
+ 
++	regval = readl(base + ECC_ERRCNT_OFST);
++	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
++	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
++	if (!p->ce_cnt)
++		goto ue_err;
++
+ 	regval = readl(base + ECC_STAT_OFST);
+ 	if (!regval)
+ 		return 1;
+ 
+-	p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
+-	p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
+-	if (!p->ce_cnt)
+-		goto ue_err;
+-
+ 	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
+ 
+ 	regval = readl(base + ECC_CEADDR0_OFST);
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index e48108e694f8d..7dad6f57d9704 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
+ 	ctl->alg_region = *alg_region;
+ 	if (subname && dsp->fw_ver >= 2) {
+ 		ctl->subname_len = subname_len;
+-		ctl->subname = kmemdup(subname,
+-				       strlen(subname) + 1, GFP_KERNEL);
++		ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
+ 		if (!ctl->subname) {
+ 			ret = -ENOMEM;
+ 			goto err_ctl;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 344e376b2ee99..d5a5cf2691026 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
+ 
+ 	gpiochip_set_irq_hooks(gc);
+ 
+-	acpi_gpiochip_request_interrupts(gc);
+-
+ 	/*
+ 	 * Using barrier() here to prevent compiler from reordering
+ 	 * gc->irq.initialized before initialization of above
+@@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
+ 
+ 	gc->irq.initialized = true;
+ 
++	acpi_gpiochip_request_interrupts(gc);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 87ed48d5530dc..8bd265b408470 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -138,6 +138,10 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
+ 		cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+ 		break;
+ 	}
++
++	if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED)
++		return false;
++
+ 	cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ 	cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
+ 	cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index b00de57cc957e..cd32e1470b3cb 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 		return false;
+ 	}
+ 
++	/* Wa_16011303918:adl-p */
++	if (crtc_state->vrr.enable &&
++	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
++		return false;
++	}
++
++	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
++		return false;
++	}
++
+ 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
+@@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 	if (!crtc_state->enable_psr2_sel_fetch &&
+ 	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
+ 		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
+-		return false;
++		goto unsupported;
+ 	}
+ 
+ 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
+ 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
+-		return false;
++		goto unsupported;
+ 	}
+ 
+ 	if (!crtc_state->enable_psr2_sel_fetch &&
+@@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ 			    crtc_hdisplay, crtc_vdisplay,
+ 			    psr_max_h, psr_max_v);
+-		return false;
+-	}
+-
+-	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+-		drm_dbg_kms(&dev_priv->drm,
+-			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+-		return false;
+-	}
+-
+-	/* Wa_16011303918:adl-p */
+-	if (crtc_state->vrr.enable &&
+-	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+-		drm_dbg_kms(&dev_priv->drm,
+-			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+-		return false;
++		goto unsupported;
+ 	}
+ 
+ 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
+ 	return true;
++
++unsupported:
++	crtc_state->enable_psr2_sel_fetch = false;
++	return false;
+ }
+ 
+ void intel_psr_compute_config(struct intel_dp *intel_dp,
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index fb261930ad1c7..e8a8240a68686 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -601,29 +601,20 @@ static const struct of_device_id dt_match[] = {
+ };
+ 
+ #ifdef CONFIG_PM
+-static int adreno_resume(struct device *dev)
++static int adreno_runtime_resume(struct device *dev)
+ {
+ 	struct msm_gpu *gpu = dev_to_gpu(dev);
+ 
+ 	return gpu->funcs->pm_resume(gpu);
+ }
+ 
+-static int active_submits(struct msm_gpu *gpu)
+-{
+-	int active_submits;
+-	mutex_lock(&gpu->active_lock);
+-	active_submits = gpu->active_submits;
+-	mutex_unlock(&gpu->active_lock);
+-	return active_submits;
+-}
+-
+-static int adreno_suspend(struct device *dev)
++static int adreno_runtime_suspend(struct device *dev)
+ {
+ 	struct msm_gpu *gpu = dev_to_gpu(dev);
+ 	int remaining;
+ 
+ 	remaining = wait_event_timeout(gpu->retire_event,
+-				       active_submits(gpu) == 0,
++				       gpu->active_submits == 0,
+ 				       msecs_to_jiffies(1000));
+ 	if (remaining == 0) {
+ 		dev_err(dev, "Timeout waiting for GPU to suspend\n");
+@@ -636,7 +627,7 @@ static int adreno_suspend(struct device *dev)
+ 
+ static const struct dev_pm_ops adreno_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+-	SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
++	SET_RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
+ };
+ 
+ static struct platform_driver adreno_driver = {
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+index c6b69afcbac89..50e854207c70a 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+@@ -90,7 +90,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
+ 		__drm_atomic_helper_plane_destroy_state(plane->state);
+ 
+ 	kfree(to_mdp5_plane_state(plane->state));
++	plane->state = NULL;
+ 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
++	if (!mdp5_state)
++		return;
+ 
+ 	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ 		mdp5_state->base.zpos = STAGE_BASE;
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index 5d2ff67910586..acfe1b31e0792 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ 	va_list va;
+ 
+ 	new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
++	if (!new_blk)
++		return;
+ 
+ 	va_start(va, fmt);
+ 
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index 46029c5610c80..145047e193946 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+ 
+ 	ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+ 	if (ret)
+-		dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
++		dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
+ }
+ 
+ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
+ 	return 0;
+ }
+ 
+-static int rpi_touchscreen_enable(struct drm_panel *panel)
++static int rpi_touchscreen_prepare(struct drm_panel *panel)
+ {
+ 	struct rpi_touchscreen *ts = panel_to_ts(panel);
+ 	int i;
+@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
+ 	rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+ 	msleep(100);
+ 
++	return 0;
++}
++
++static int rpi_touchscreen_enable(struct drm_panel *panel)
++{
++	struct rpi_touchscreen *ts = panel_to_ts(panel);
++
+ 	/* Turn on the backlight. */
+ 	rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+ 
+@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+ static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+ 	.disable = rpi_touchscreen_disable,
+ 	.unprepare = rpi_touchscreen_noop,
+-	.prepare = rpi_touchscreen_noop,
++	.prepare = rpi_touchscreen_prepare,
+ 	.enable = rpi_touchscreen_enable,
+ 	.get_modes = rpi_touchscreen_get_modes,
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
+index b991ba1bcd513..f63efd8d5e524 100644
+--- a/drivers/gpu/drm/radeon/radeon_sync.c
++++ b/drivers/gpu/drm/radeon/radeon_sync.c
+@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
+ 	struct dma_fence *f;
+ 	int r = 0;
+ 
+-	dma_resv_for_each_fence(&cursor, resv, shared, f) {
++	dma_resv_for_each_fence(&cursor, resv, !shared, f) {
+ 		fence = to_radeon_fence(f);
+ 		if (fence && fence->rdev == rdev)
+ 			radeon_sync_fence(sync, fence);
+diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
+index 9300d3354c512..64dfefeb03f5e 100644
+--- a/drivers/gpu/drm/vc4/vc4_dsi.c
++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
+@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+ 	unsigned long phy_clock;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret) {
+ 		DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
+ 		return;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index 31aecc46624b3..04c8a378aeed6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
+ 	return container_of(bo, struct vmw_buffer_object, base);
+ }
+ 
++/**
++ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
++ * @bo: ttm buffer object to be checked
++ *
++ * Uses destroy function associated with the object to determine if this is
++ * a &vmw_buffer_object.
++ *
++ * Returns:
++ * true if the object is of &vmw_buffer_object type, false if not.
++ */
++static bool bo_is_vmw(struct ttm_buffer_object *bo)
++{
++	return bo->destroy == &vmw_bo_bo_free ||
++	       bo->destroy == &vmw_gem_destroy;
++}
+ 
+ /**
+  * vmw_bo_pin_in_placement - Validate a buffer to placement.
+@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ 
+ 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
+ 		vmw_bo_unreference(&vbo);
+-		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+-			     ret != -EBUSY)) {
++		if (unlikely(ret != 0)) {
++			if (ret == -ERESTARTSYS || ret == -EBUSY)
++				return -EBUSY;
+ 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ 				  (unsigned int) arg->handle);
+ 			return ret;
+@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
+ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+ {
+ 	/* Is @bo embedded in a struct vmw_buffer_object? */
+-	if (vmw_bo_is_vmw_bo(bo))
++	if (!bo_is_vmw(bo))
+ 		return;
+ 
+ 	/* Kill any cached kernel maps before swapout */
+@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ 	struct vmw_buffer_object *vbo;
+ 
+ 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
+-	if (vmw_bo_is_vmw_bo(bo))
++	if (!bo_is_vmw(bo))
+ 		return;
+ 
+ 	vbo = container_of(bo, struct vmw_buffer_object, base);
+@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
+ 		vmw_resource_unbind_list(vbo);
+ }
+-
+-/**
+- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+- * @bo: buffer object to be checked
+- *
+- * Uses destroy function associated with the object to determine if this is
+- * a &vmw_buffer_object.
+- *
+- * Returns:
+- * true if the object is of &vmw_buffer_object type, false if not.
+- */
+-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+-{
+-	if (bo->destroy == &vmw_bo_bo_free ||
+-	    bo->destroy == &vmw_gem_destroy)
+-		return true;
+-
+-	return false;
+-}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index fe36efdb7ff52..f685d426af7e3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -997,13 +997,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 		goto out_no_fman;
+ 	}
+ 
+-	drm_vma_offset_manager_init(&dev_priv->vma_manager,
+-				    DRM_FILE_PAGE_OFFSET_START,
+-				    DRM_FILE_PAGE_OFFSET_SIZE);
+ 	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
+ 			      dev_priv->drm.dev,
+ 			      dev_priv->drm.anon_inode->i_mapping,
+-			      &dev_priv->vma_manager,
++			      dev_priv->drm.vma_offset_manager,
+ 			      dev_priv->map_mode == vmw_dma_alloc_coherent,
+ 			      false);
+ 	if (unlikely(ret != 0)) {
+@@ -1173,7 +1170,6 @@ static void vmw_driver_unload(struct drm_device *dev)
+ 	vmw_devcaps_destroy(dev_priv);
+ 	vmw_vram_manager_fini(dev_priv);
+ 	ttm_device_fini(&dev_priv->bdev);
+-	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
+ 	vmw_release_device_late(dev_priv);
+ 	vmw_fence_manager_takedown(dev_priv->fman);
+ 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+@@ -1397,7 +1393,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+ 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+ 
+ 	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+-				     &dev_priv->vma_manager);
++				     dev_priv->drm.vma_offset_manager);
+ }
+ 
+ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 00e8e27e48846..ace7ca150b036 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	    container_of(base, struct vmw_user_surface, prime.base);
+ 	struct vmw_resource *res = &user_srf->srf.res;
+ 
++	if (base->shareable && res && res->backup)
++		drm_gem_object_put(&res->backup->base.base);
++
+ 	*p_base = NULL;
+ 	vmw_resource_unreference(&res);
+ }
+@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 			goto out_unlock;
+ 		}
+ 		vmw_bo_reference(res->backup);
++		drm_gem_object_get(&res->backup->base.base);
+ 	}
+ 
+ 	tmp = vmw_resource_reference(&srf->res);
+@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 							&res->backup);
+ 		if (ret == 0)
+ 			vmw_bo_reference(res->backup);
+-
+ 	}
+ 
+ 	if (unlikely(ret != 0)) {
+@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
+ 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
+ 		rep->buffer_size = res->backup->base.base.size;
+ 		rep->buffer_handle = backup_handle;
++		if (user_srf->prime.base.shareable)
++			drm_gem_object_get(&res->backup->base.base);
+ 	} else {
+ 		rep->buffer_map_handle = 0;
+ 		rep->buffer_size = 0;
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 43375b38ee592..8a7ce41b8c56e 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ 	 * revision register.
+ 	 */
+ 	error = pm_runtime_get_sync(dev);
+-	if (error) {
++	if (error < 0) {
+ 		dev_err(dev, "pm_runtime_get_sync() failed\n");
+ 		pm_runtime_put_noidle(dev);
+ 		return error;
+diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
+index db3ec47681596..7a730c9d4bdf6 100644
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
+ source "drivers/net/ethernet/arc/Kconfig"
+ source "drivers/net/ethernet/asix/Kconfig"
+ source "drivers/net/ethernet/atheros/Kconfig"
+-source "drivers/net/ethernet/broadcom/Kconfig"
+-source "drivers/net/ethernet/brocade/Kconfig"
+-source "drivers/net/ethernet/cadence/Kconfig"
+-source "drivers/net/ethernet/calxeda/Kconfig"
+-source "drivers/net/ethernet/cavium/Kconfig"
+-source "drivers/net/ethernet/chelsio/Kconfig"
+-source "drivers/net/ethernet/cirrus/Kconfig"
+-source "drivers/net/ethernet/cisco/Kconfig"
+-source "drivers/net/ethernet/cortina/Kconfig"
+ 
+ config CX_ECAT
+ 	tristate "Beckhoff CX5020 EtherCAT master support"
+@@ -57,6 +48,14 @@ config CX_ECAT
+ 	  To compile this driver as a module, choose M here. The module
+ 	  will be called ec_bhf.
+ 
++source "drivers/net/ethernet/broadcom/Kconfig"
++source "drivers/net/ethernet/cadence/Kconfig"
++source "drivers/net/ethernet/calxeda/Kconfig"
++source "drivers/net/ethernet/cavium/Kconfig"
++source "drivers/net/ethernet/chelsio/Kconfig"
++source "drivers/net/ethernet/cirrus/Kconfig"
++source "drivers/net/ethernet/cisco/Kconfig"
++source "drivers/net/ethernet/cortina/Kconfig"
+ source "drivers/net/ethernet/davicom/Kconfig"
+ 
+ config DNET
+@@ -84,7 +83,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
+ source "drivers/net/ethernet/i825xx/Kconfig"
+ source "drivers/net/ethernet/ibm/Kconfig"
+ source "drivers/net/ethernet/intel/Kconfig"
+-source "drivers/net/ethernet/microsoft/Kconfig"
+ source "drivers/net/ethernet/xscale/Kconfig"
+ 
+ config JME
+@@ -127,8 +125,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
+ source "drivers/net/ethernet/mellanox/Kconfig"
+ source "drivers/net/ethernet/micrel/Kconfig"
+ source "drivers/net/ethernet/microchip/Kconfig"
+-source "drivers/net/ethernet/moxa/Kconfig"
+ source "drivers/net/ethernet/mscc/Kconfig"
++source "drivers/net/ethernet/microsoft/Kconfig"
++source "drivers/net/ethernet/moxa/Kconfig"
+ source "drivers/net/ethernet/myricom/Kconfig"
+ 
+ config FEALNX
+@@ -140,10 +139,10 @@ config FEALNX
+ 	  Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+ 	  cards. <http://www.myson.com.tw/>
+ 
++source "drivers/net/ethernet/ni/Kconfig"
+ source "drivers/net/ethernet/natsemi/Kconfig"
+ source "drivers/net/ethernet/neterion/Kconfig"
+ source "drivers/net/ethernet/netronome/Kconfig"
+-source "drivers/net/ethernet/ni/Kconfig"
+ source "drivers/net/ethernet/8390/Kconfig"
+ source "drivers/net/ethernet/nvidia/Kconfig"
+ source "drivers/net/ethernet/nxp/Kconfig"
+@@ -163,6 +162,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
+ source "drivers/net/ethernet/pasemi/Kconfig"
+ source "drivers/net/ethernet/pensando/Kconfig"
+ source "drivers/net/ethernet/qlogic/Kconfig"
++source "drivers/net/ethernet/brocade/Kconfig"
+ source "drivers/net/ethernet/qualcomm/Kconfig"
+ source "drivers/net/ethernet/rdc/Kconfig"
+ source "drivers/net/ethernet/realtek/Kconfig"
+@@ -170,10 +170,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
+ source "drivers/net/ethernet/rocker/Kconfig"
+ source "drivers/net/ethernet/samsung/Kconfig"
+ source "drivers/net/ethernet/seeq/Kconfig"
+-source "drivers/net/ethernet/sfc/Kconfig"
+ source "drivers/net/ethernet/sgi/Kconfig"
+ source "drivers/net/ethernet/silan/Kconfig"
+ source "drivers/net/ethernet/sis/Kconfig"
++source "drivers/net/ethernet/sfc/Kconfig"
+ source "drivers/net/ethernet/smsc/Kconfig"
+ source "drivers/net/ethernet/socionext/Kconfig"
+ source "drivers/net/ethernet/stmicro/Kconfig"
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 33f1a1377588b..24d715c28a355 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
+ 	if (err < 0)
+ 		goto err_exit;
+ 
+-	for (i = 0U, aq_vec = self->aq_vec[0];
+-		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
++	for (i = 0U; self->aq_vecs > i; ++i) {
++		aq_vec = self->aq_vec[i];
+ 		err = aq_vec_start(aq_vec);
+ 		if (err < 0)
+ 			goto err_exit;
+@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
+ 		mod_timer(&self->polling_timer, jiffies +
+ 			  AQ_CFG_POLLING_TIMER_INTERVAL);
+ 	} else {
+-		for (i = 0U, aq_vec = self->aq_vec[0];
+-			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
++		for (i = 0U; self->aq_vecs > i; ++i) {
++			aq_vec = self->aq_vec[i];
+ 			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+ 						    aq_vec_isr, aq_vec,
+ 						    aq_vec_get_affinity_mask(aq_vec));
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index 797a95142d1f4..3a529ee8c8340 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -444,22 +444,22 @@ err_exit:
+ 
+ static int aq_pm_freeze(struct device *dev)
+ {
+-	return aq_suspend_common(dev, false);
++	return aq_suspend_common(dev, true);
+ }
+ 
+ static int aq_pm_suspend_poweroff(struct device *dev)
+ {
+-	return aq_suspend_common(dev, true);
++	return aq_suspend_common(dev, false);
+ }
+ 
+ static int aq_pm_thaw(struct device *dev)
+ {
+-	return atl_resume_common(dev, false);
++	return atl_resume_common(dev, true);
+ }
+ 
+ static int aq_pm_resume_restore(struct device *dev)
+ {
+-	return atl_resume_common(dev, true);
++	return atl_resume_common(dev, false);
+ }
+ 
+ static const struct dev_pm_ops aq_pm_ops = {
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+index f4774cf051c97..6ab1f3212d246 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
+ 	if (!self) {
+ 		err = -EINVAL;
+ 	} else {
+-		for (i = 0U, ring = self->ring[0];
+-			self->tx_rings > i; ++i, ring = self->ring[i]) {
++		for (i = 0U; self->tx_rings > i; ++i) {
++			ring = self->ring[i];
+ 			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+ 			ring[AQ_VEC_RX_ID].stats.rx.polls++;
+ 			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
+ 	self->aq_hw_ops = aq_hw_ops;
+ 	self->aq_hw = aq_hw;
+ 
+-	for (i = 0U, ring = self->ring[0];
+-		self->tx_rings > i; ++i, ring = self->ring[i]) {
++	for (i = 0U; self->tx_rings > i; ++i) {
++		ring = self->ring[i];
+ 		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
+ 		if (err < 0)
+ 			goto err_exit;
+@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
+ 	unsigned int i = 0U;
+ 	int err = 0;
+ 
+-	for (i = 0U, ring = self->ring[0];
+-		self->tx_rings > i; ++i, ring = self->ring[i]) {
++	for (i = 0U; self->tx_rings > i; ++i) {
++		ring = self->ring[i];
+ 		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
+ 							&ring[AQ_VEC_TX_ID]);
+ 		if (err < 0)
+@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
+ 	struct aq_ring_s *ring = NULL;
+ 	unsigned int i = 0U;
+ 
+-	for (i = 0U, ring = self->ring[0];
+-		self->tx_rings > i; ++i, ring = self->ring[i]) {
++	for (i = 0U; self->tx_rings > i; ++i) {
++		ring = self->ring[i];
+ 		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
+ 						 &ring[AQ_VEC_TX_ID]);
+ 
+@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
+ 	if (!self)
+ 		goto err_exit;
+ 
+-	for (i = 0U, ring = self->ring[0];
+-		self->tx_rings > i; ++i, ring = self->ring[i]) {
++	for (i = 0U; self->tx_rings > i; ++i) {
++		ring = self->ring[i];
+ 		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ 		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
+ 	}
+@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
+ 	if (!self)
+ 		goto err_exit;
+ 
+-	for (i = 0U, ring = self->ring[0];
+-		self->tx_rings > i; ++i, ring = self->ring[i]) {
++	for (i = 0U; self->tx_rings > i; ++i) {
++		ring = self->ring[i];
+ 		aq_ring_free(&ring[AQ_VEC_TX_ID]);
+ 		if (i < self->rx_rings)
+ 			aq_ring_free(&ring[AQ_VEC_RX_ID]);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index d13f06cf0308a..c4f4b13ac4691 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1656,6 +1656,7 @@ static void macb_tx_restart(struct macb_queue *queue)
+ 	unsigned int head = queue->tx_head;
+ 	unsigned int tail = queue->tx_tail;
+ 	struct macb *bp = queue->bp;
++	unsigned int head_idx, tbqp;
+ 
+ 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ 		queue_writel(queue, ISR, MACB_BIT(TXUBR));
+@@ -1663,6 +1664,13 @@ static void macb_tx_restart(struct macb_queue *queue)
+ 	if (head == tail)
+ 		return;
+ 
++	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
++	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
++	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
++
++	if (tbqp == head_idx)
++		return;
++
+ 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 763d2c7b5fb1a..5750f9a56393a 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
+ 	info->phc_index = -1;
+ 
+ 	fman_node = of_get_parent(mac_node);
+-	if (fman_node)
++	if (fman_node) {
+ 		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
++		of_node_put(fman_node);
++	}
+ 
+-	if (ptp_node)
++	if (ptp_node) {
+ 		ptp_dev = of_find_device_by_node(ptp_node);
++		of_node_put(ptp_node);
++	}
+ 
+ 	if (ptp_dev)
+ 		ptp = platform_get_drvdata(ptp_dev);
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index d60e2016d03c6..e6c8e6d5234f8 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+ {
+ 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ 	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+-	u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
+-	u16 lat_enc_d = 0;	/* latency decoded */
++	u32 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
++	u32 lat_enc_d = 0;	/* latency decoded */
+ 	u16 lat_enc = 0;	/* latency encoded */
+ 
+ 	if (link) {
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index 73edc24d81d54..c54b72f9fd345 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -342,7 +342,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	np = netdev_priv(netdev);
+ 	vsi = np->vsi;
+ 
+-	if (ice_is_reset_in_progress(vsi->back->state))
++	if (ice_is_reset_in_progress(vsi->back->state) ||
++	    test_bit(ICE_VF_DIS, vsi->back->state))
+ 		return NETDEV_TX_BUSY;
+ 
+ 	repr = ice_netdev_to_repr(netdev);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
+index bd58d9d2e5653..6a413331572b6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
+@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+ 
+ static inline int ice_eswitch_configure(struct ice_pf *pf)
+ {
+-	return -EOPNOTSUPP;
++	return 0;
+ }
+ 
+ static inline int ice_eswitch_rebuild(struct ice_pf *pf)
+diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
+index 4eb0599714f43..13cdb5ea594d2 100644
+--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
+@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ 	status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
+ 				       orom_data, hw->flash.banks.orom_size);
+ 	if (status) {
++		vfree(orom_data);
+ 		ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ 		return status;
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
+index 66ea566488d12..59d5c467ea6e3 100644
+--- a/drivers/net/ethernet/intel/igc/igc_i225.c
++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
+@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+ {
+ 	u32 swfw_sync;
+ 
+-	while (igc_get_hw_semaphore_i225(hw))
+-		; /* Empty */
++	/* Releasing the resource requires first getting the HW semaphore.
++	 * If we fail to get the semaphore, there is nothing we can do,
++	 * except log an error and quit. We are not allowed to hang here
++	 * indefinitely, as it may cause denial of service or system crash.
++	 */
++	if (igc_get_hw_semaphore_i225(hw)) {
++		hw_dbg("Failed to release SW_FW_SYNC.\n");
++		return;
++	}
+ 
+ 	swfw_sync = rd32(IGC_SW_FW_SYNC);
+ 	swfw_sync &= ~mask;
+diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
+index 40dbf4b432345..6961f65d36b9a 100644
+--- a/drivers/net/ethernet/intel/igc/igc_phy.c
++++ b/drivers/net/ethernet/intel/igc/igc_phy.c
+@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
+ 	 * the lower time out
+ 	 */
+ 	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+-		usleep_range(500, 1000);
++		udelay(50);
+ 		mdic = rd32(IGC_MDIC);
+ 		if (mdic & IGC_MDIC_READY)
+ 			break;
+@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
+ 	 * the lower time out
+ 	 */
+ 	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+-		usleep_range(500, 1000);
++		udelay(50);
+ 		mdic = rd32(IGC_MDIC);
+ 		if (mdic & IGC_MDIC_READY)
+ 			break;
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 0d6e3215e98f5..653e9f1e35b5c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
+ 	igc_ptp_write_i225(adapter, &ts);
+ }
+ 
++static void igc_ptm_stop(struct igc_adapter *adapter)
++{
++	struct igc_hw *hw = &adapter->hw;
++	u32 ctrl;
++
++	ctrl = rd32(IGC_PTM_CTRL);
++	ctrl &= ~IGC_PTM_CTRL_EN;
++
++	wr32(IGC_PTM_CTRL, ctrl);
++}
++
+ /**
+  * igc_ptp_suspend - Disable PTP work items and prepare for suspend
+  * @adapter: Board private structure
+@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
+ 	adapter->ptp_tx_skb = NULL;
+ 	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+ 
+-	if (pci_device_is_present(adapter->pdev))
++	if (pci_device_is_present(adapter->pdev)) {
+ 		igc_ptp_time_save(adapter);
++		igc_ptm_stop(adapter);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index fd3ceb74620d5..a314040c1a6af 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -2508,6 +2508,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
+ 		val = BIT(port);
+ 
+ 	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
++	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
++	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
+ }
+ 
+ static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index a7ec9f4d46ced..d68ef72dcdde0 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+ 	writel(value, ioaddr + PTP_TCR);
+ 
+ 	/* wait for present system time initialize to complete */
+-	return readl_poll_timeout(ioaddr + PTP_TCR, value,
++	return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ 				 !(value & PTP_TCR_TSINIT),
+-				 10000, 100000);
++				 10, 100000);
+ }
+ 
+ static int config_addend(void __iomem *ioaddr, u32 addend)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 359d16780dbbc..1bf8f7c35b7d2 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -712,11 +712,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
+ 
+ 	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
+ 	if (rd == NULL)
+-		return -ENOBUFS;
++		return -ENOMEM;
+ 
+ 	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
+ 		kfree(rd);
+-		return -ENOBUFS;
++		return -ENOMEM;
+ 	}
+ 
+ 	rd->remote_ip = *ip;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 5d156e591b35c..f7961b22e0518 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
+ 	BRCMF_SDIO_FT_SUB,
+ };
+ 
+-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
++#define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
+ 
+ /* SDIO Pad drive strength to select value mappings */
+ struct sdiod_drive_str {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+index 8a22ee5816748..df85ebc6e1df0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+ 
+ 	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+-	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
++	mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
+ 
+ 	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+ 	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 6215d50ed3e7d..10f7c79caac2d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1363,6 +1363,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ 				 warn_str, cur->nidl);
+ 			return -1;
+ 		}
++		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
++			return NVME_NIDT_EUI64_LEN;
+ 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
+ 		return NVME_NIDT_EUI64_LEN;
+ 	case NVME_NIDT_NGUID:
+@@ -1371,6 +1373,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ 				 warn_str, cur->nidl);
+ 			return -1;
+ 		}
++		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
++			return NVME_NIDT_NGUID_LEN;
+ 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
+ 		return NVME_NIDT_NGUID_LEN;
+ 	case NVME_NIDT_UUID:
+@@ -1379,6 +1383,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ 				 warn_str, cur->nidl);
+ 			return -1;
+ 		}
++		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
++			return NVME_NIDT_UUID_LEN;
+ 		uuid_copy(&ids->uuid, data + sizeof(*cur));
+ 		return NVME_NIDT_UUID_LEN;
+ 	case NVME_NIDT_CSI:
+@@ -1475,12 +1481,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
+ 		goto out_free_id;
+ 
+-	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+-	    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+-		memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+-	if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+-	    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+-		memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
++
++	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
++		dev_info(ctrl->device,
++			 "Ignoring bogus Namespace Identifiers\n");
++	} else {
++		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
++		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
++			memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
++		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
++		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
++			memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
++	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 730cc80d84ff7..68c42e8311172 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -144,6 +144,11 @@ enum nvme_quirks {
+ 	 * encoding the generation sequence number.
+ 	 */
+ 	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),
++
++	/*
++	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
++	 */
++	NVME_QUIRK_BOGUS_NID			= (1 << 18),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 6a99ed6809158..e4b79bee62068 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3405,7 +3405,10 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
+ 		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
+-				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++				NVME_QUIRK_DISABLE_WRITE_ZEROES |
++				NVME_QUIRK_BOGUS_NID, },
++	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
+ 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+ 	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
+@@ -3443,6 +3446,10 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
++	{ PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
+index 295cc7952d0ed..57d20cf3da7a3 100644
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -398,6 +398,9 @@ validate_group(struct perf_event *event)
+ 	if (!validate_event(event->pmu, &fake_pmu, leader))
+ 		return -EINVAL;
+ 
++	if (event == leader)
++		return 0;
++
+ 	for_each_sibling_event(sibling, leader) {
+ 		if (!validate_event(event->pmu, &fake_pmu, sibling))
+ 			return -EINVAL;
+@@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event)
+ 		local64_set(&hwc->period_left, hwc->sample_period);
+ 	}
+ 
+-	if (event->group_leader != event) {
+-		if (validate_group(event) != 0)
+-			return -EINVAL;
+-	}
+-
+-	return 0;
++	return validate_group(event);
+ }
+ 
+ static int armpmu_event_init(struct perf_event *event)
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index c1d9ed9b7b672..19f6b456234f8 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
+ 
+ 	if (value > samsung->kbd_led.max_brightness)
+ 		value = samsung->kbd_led.max_brightness;
+-	else if (value < 0)
+-		value = 0;
+ 
+ 	samsung->kbd_led_wk = value;
+ 	queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
+diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+index 1e83150388506..a8dde46063602 100644
+--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
++++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(priv->rstc),
+ 				     "failed to get reset\n");
+ 
+-	reset_control_deassert(priv->rstc);
++	error = reset_control_deassert(priv->rstc);
++	if (error)
++		return error;
+ 
+ 	priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
+ 	priv->rcdev.of_reset_n_cells = 1;
+diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
+index 24d3395964cc4..4c5bba52b1059 100644
+--- a/drivers/reset/tegra/reset-bpmp.c
++++ b/drivers/reset/tegra/reset-bpmp.c
+@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ 	struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+ 	struct mrq_reset_request request;
+ 	struct tegra_bpmp_message msg;
++	int err;
+ 
+ 	memset(&request, 0, sizeof(request));
+ 	request.cmd = command;
+@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ 	msg.tx.data = &request;
+ 	msg.tx.size = sizeof(request);
+ 
+-	return tegra_bpmp_transfer(bpmp, &msg);
++	err = tegra_bpmp_transfer(bpmp, &msg);
++	if (err)
++		return err;
++	if (msg.rx.ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
+index 5521469ce678b..e16327a4b4c96 100644
+--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
+@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+ 		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+ 			break;
+ 
+-		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
++		if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
+ 			if (nopin->op_code == ISCSI_OP_NOOP_IN &&
+ 			    nopin->itt == (u16) RESERVED_ITT) {
+ 				printk(KERN_ALERT "bnx2i: Unsolicited "
+diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+index e21b053b4f3e1..a592ca8602f9f 100644
+--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+ 			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
+ 
+ 			/* Must suspend all rx queue activity for this ep */
+-			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++			set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ 		}
+ 		/* CONN_DISCONNECT timeout may or may not be an issue depending
+ 		 * on what transcribed in TCP layer, different targets behave
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index 8c7d4dda4cf29..4365d52c6430e 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
+ 	log_debug(1 << CXGBI_DBG_PDU_RX,
+ 		"csk 0x%p, conn 0x%p.\n", csk, conn);
+ 
+-	if (unlikely(!conn || conn->suspend_rx)) {
++	if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
+ 		log_debug(1 << CXGBI_DBG_PDU_RX,
+-			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
++			"csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
+ 			csk, conn, conn ? conn->id : 0xFF,
+-			conn ? conn->suspend_rx : 0xFF);
++			conn ? conn->flags : 0xFF);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 059dae8909ee5..f228d991038a2 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 	struct iscsi_task *task;
+ 	itt_t itt;
+ 
+-	if (session->state == ISCSI_STATE_TERMINATE)
++	if (session->state == ISCSI_STATE_TERMINATE ||
++	    !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
+ 		return NULL;
+ 
+ 	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
+@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
+ 	if (conn->stop_stage == 0)
+ 		session->state = ISCSI_STATE_FAILED;
+ 
+-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
++	set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ 	return true;
+ }
+ 
+@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
+ 	 * Do this after dropping the extra ref because if this was a requeue
+ 	 * it's removed from that list and cleanup_queued_task would miss it.
+ 	 */
+-	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
++	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
+ 		/*
+ 		 * Save the task and ref in case we weren't cleaning up this
+ 		 * task and get woken up again.
+@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ 	int rc = 0;
+ 
+ 	spin_lock_bh(&conn->session->frwd_lock);
+-	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
++	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
+ 		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
+ 		spin_unlock_bh(&conn->session->frwd_lock);
+ 		return -ENODATA;
+@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 		goto fault;
+ 	}
+ 
+-	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
++	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
+ 		reason = FAILURE_SESSION_IN_RECOVERY;
+ 		sc->result = DID_REQUEUE << 16;
+ 		goto fault;
+@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
+ void iscsi_suspend_queue(struct iscsi_conn *conn)
+ {
+ 	spin_lock_bh(&conn->session->frwd_lock);
+-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ 	spin_unlock_bh(&conn->session->frwd_lock);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
+@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
+ 	struct Scsi_Host *shost = conn->session->host;
+ 	struct iscsi_host *ihost = shost_priv(shost);
+ 
+-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ 	if (ihost->workq)
+ 		flush_workqueue(ihost->workq);
+ }
+@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+ 
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+-	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++	clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ 	iscsi_conn_queue_work(conn);
+ }
+ 
+@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
+ 	iscsi_suspend_tx(conn);
+ 
+ 	spin_lock_bh(&session->frwd_lock);
++	clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
++
+ 	if (!is_active) {
+ 		/*
+ 		 * if logout timed out before userspace could even send a PDU
+@@ -3311,6 +3314,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 	spin_lock_bh(&session->frwd_lock);
+ 	if (is_leading)
+ 		session->leadconn = conn;
++
++	set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+ 	/*
+@@ -3323,8 +3328,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ 	/*
+ 	 * Unblock xmitworker(), Login Phase will pass through.
+ 	 */
+-	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+-	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++	clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
++	clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_bind);
+diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
+index 2e9ffe3d1a55e..883005757ddb8 100644
+--- a/drivers/scsi/libiscsi_tcp.c
++++ b/drivers/scsi/libiscsi_tcp.c
+@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+ 	 */
+ 	conn->last_recv = jiffies;
+ 
+-	if (unlikely(conn->suspend_rx)) {
++	if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
+ 		ISCSI_DBG_TCP(conn, "Rx suspended!\n");
+ 		*status = ISCSI_TCP_SUSPENDED;
+ 		return 0;
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 282ecb4e39bbd..e1fe989ad7b33 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -859,6 +859,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
+ 	return qedi_iscsi_send_ioreq(task);
+ }
+ 
++static void qedi_offload_work(struct work_struct *work)
++{
++	struct qedi_endpoint *qedi_ep =
++		container_of(work, struct qedi_endpoint, offload_work);
++	struct qedi_ctx *qedi;
++	int wait_delay = 5 * HZ;
++	int ret;
++
++	qedi = qedi_ep->qedi;
++
++	ret = qedi_iscsi_offload_conn(qedi_ep);
++	if (ret) {
++		QEDI_ERR(&qedi->dbg_ctx,
++			 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
++			 qedi_ep->iscsi_cid, qedi_ep, ret);
++		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
++		return;
++	}
++
++	ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
++					       (qedi_ep->state ==
++					       EP_STATE_OFLDCONN_COMPL),
++					       wait_delay);
++	if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
++		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
++		QEDI_ERR(&qedi->dbg_ctx,
++			 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
++			 qedi_ep->iscsi_cid, qedi_ep);
++	}
++}
++
+ static struct iscsi_endpoint *
+ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ 		int non_blocking)
+@@ -907,6 +938,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+ 	}
+ 	qedi_ep = ep->dd_data;
+ 	memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
++	INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ 	qedi_ep->state = EP_STATE_IDLE;
+ 	qedi_ep->iscsi_cid = (u32)-1;
+ 	qedi_ep->qedi = qedi;
+@@ -1055,12 +1087,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+ 	qedi_ep = ep->dd_data;
+ 	qedi = qedi_ep->qedi;
+ 
++	flush_work(&qedi_ep->offload_work);
++
+ 	if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+ 		goto ep_exit_recover;
+ 
+-	if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+-		flush_work(&qedi_ep->offload_work);
+-
+ 	if (qedi_ep->conn) {
+ 		qedi_conn = qedi_ep->conn;
+ 		abrt_conn = qedi_conn->abrt_conn;
+@@ -1234,37 +1265,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+ 	return rc;
+ }
+ 
+-static void qedi_offload_work(struct work_struct *work)
+-{
+-	struct qedi_endpoint *qedi_ep =
+-		container_of(work, struct qedi_endpoint, offload_work);
+-	struct qedi_ctx *qedi;
+-	int wait_delay = 5 * HZ;
+-	int ret;
+-
+-	qedi = qedi_ep->qedi;
+-
+-	ret = qedi_iscsi_offload_conn(qedi_ep);
+-	if (ret) {
+-		QEDI_ERR(&qedi->dbg_ctx,
+-			 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+-			 qedi_ep->iscsi_cid, qedi_ep, ret);
+-		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+-		return;
+-	}
+-
+-	ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+-					       (qedi_ep->state ==
+-					       EP_STATE_OFLDCONN_COMPL),
+-					       wait_delay);
+-	if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+-		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+-		QEDI_ERR(&qedi->dbg_ctx,
+-			 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+-			 qedi_ep->iscsi_cid, qedi_ep);
+-	}
+-}
+-
+ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+ {
+ 	struct qedi_ctx *qedi;
+@@ -1380,7 +1380,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+ 			  qedi_ep->dst_addr, qedi_ep->dst_port);
+ 	}
+ 
+-	INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+ 	queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+ 
+ 	ret = 0;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index c7b1b2e8bb02f..bcdfcb25349ad 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -86,6 +86,9 @@ struct iscsi_internal {
+ 	struct transport_container session_cont;
+ };
+ 
++static DEFINE_IDR(iscsi_ep_idr);
++static DEFINE_MUTEX(iscsi_ep_idr_mutex);
++
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+ 
+@@ -169,6 +172,11 @@ struct device_attribute dev_attr_##_prefix##_##_name =	\
+ static void iscsi_endpoint_release(struct device *dev)
+ {
+ 	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
++
++	mutex_lock(&iscsi_ep_idr_mutex);
++	idr_remove(&iscsi_ep_idr, ep->id);
++	mutex_unlock(&iscsi_ep_idr_mutex);
++
+ 	kfree(ep);
+ }
+ 
+@@ -181,7 +189,7 @@ static ssize_t
+ show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ 	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
++	return sysfs_emit(buf, "%d\n", ep->id);
+ }
+ static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+ 
+@@ -194,48 +202,32 @@ static struct attribute_group iscsi_endpoint_group = {
+ 	.attrs = iscsi_endpoint_attrs,
+ };
+ 
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, const void *data)
+-{
+-	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	const uint64_t *epid = data;
+-
+-	return *epid == ep->id;
+-}
+-
+ struct iscsi_endpoint *
+ iscsi_create_endpoint(int dd_size)
+ {
+-	struct device *dev;
+ 	struct iscsi_endpoint *ep;
+-	uint64_t id;
+-	int err;
+-
+-	for (id = 1; id < ISCSI_MAX_EPID; id++) {
+-		dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+-					iscsi_match_epid);
+-		if (!dev)
+-			break;
+-		else
+-			put_device(dev);
+-	}
+-	if (id == ISCSI_MAX_EPID) {
+-		printk(KERN_ERR "Too many connections. Max supported %u\n",
+-		       ISCSI_MAX_EPID - 1);
+-		return NULL;
+-	}
++	int err, id;
+ 
+ 	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+ 	if (!ep)
+ 		return NULL;
+ 
++	mutex_lock(&iscsi_ep_idr_mutex);
++	id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
++	if (id < 0) {
++		mutex_unlock(&iscsi_ep_idr_mutex);
++		printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
++		       id);
++		goto free_ep;
++	}
++	mutex_unlock(&iscsi_ep_idr_mutex);
++
+ 	ep->id = id;
+ 	ep->dev.class = &iscsi_endpoint_class;
+-	dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
++	dev_set_name(&ep->dev, "ep-%d", id);
+ 	err = device_register(&ep->dev);
+         if (err)
+-                goto free_ep;
++		goto free_id;
+ 
+ 	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+ 	if (err)
+@@ -249,6 +241,10 @@ unregister_dev:
+ 	device_unregister(&ep->dev);
+ 	return NULL;
+ 
++free_id:
++	mutex_lock(&iscsi_ep_idr_mutex);
++	idr_remove(&iscsi_ep_idr, id);
++	mutex_unlock(&iscsi_ep_idr_mutex);
+ free_ep:
+ 	kfree(ep);
+ 	return NULL;
+@@ -276,14 +272,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
+  */
+ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+ {
+-	struct device *dev;
++	struct iscsi_endpoint *ep;
+ 
+-	dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+-				iscsi_match_epid);
+-	if (!dev)
+-		return NULL;
++	mutex_lock(&iscsi_ep_idr_mutex);
++	ep = idr_find(&iscsi_ep_idr, handle);
++	if (!ep)
++		goto unlock;
+ 
+-	return iscsi_dev_to_endpoint(dev);
++	get_device(&ep->dev);
++unlock:
++	mutex_unlock(&iscsi_ep_idr_mutex);
++	return ep;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+ 
+diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
+index ddd00efc48825..fbdb5124d7f7d 100644
+--- a/drivers/scsi/sr_ioctl.c
++++ b/drivers/scsi/sr_ioctl.c
+@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
+ 	int result;
+ 	unsigned char *buffer;
+ 
+-	buffer = kmalloc(32, GFP_KERNEL);
++	buffer = kzalloc(32, GFP_KERNEL);
+ 	if (!buffer)
+ 		return -ENOMEM;
+ 
+@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
+ 	cgc.data_direction = DMA_FROM_DEVICE;
+ 
+ 	result = sr_do_ioctl(cd, &cgc);
++	if (result)
++		goto err;
+ 
+ 	tochdr->cdth_trk0 = buffer[2];
+ 	tochdr->cdth_trk1 = buffer[3];
+ 
++err:
+ 	kfree(buffer);
+ 	return result;
+ }
+@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
+ 	int result;
+ 	unsigned char *buffer;
+ 
+-	buffer = kmalloc(32, GFP_KERNEL);
++	buffer = kzalloc(32, GFP_KERNEL);
+ 	if (!buffer)
+ 		return -ENOMEM;
+ 
+@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
+ 	cgc.data_direction = DMA_FROM_DEVICE;
+ 
+ 	result = sr_do_ioctl(cd, &cgc);
++	if (result)
++		goto err;
+ 
+ 	tocentry->cdte_ctrl = buffer[5] & 0xf;
+ 	tocentry->cdte_adr = buffer[5] >> 4;
+@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
+ 		tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ 			+ buffer[10]) << 8) + buffer[11];
+ 
++err:
+ 	kfree(buffer);
+ 	return result;
+ }
+@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
+ {
+ 	Scsi_CD *cd = cdi->handle;
+ 	struct packet_command cgc;
+-	char *buffer = kmalloc(32, GFP_KERNEL);
++	char *buffer = kzalloc(32, GFP_KERNEL);
+ 	int result;
+ 
+ 	if (!buffer)
+@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
+ 	cgc.data_direction = DMA_FROM_DEVICE;
+ 	cgc.timeout = IOCTL_TIMEOUT;
+ 	result = sr_do_ioctl(cd, &cgc);
++	if (result)
++		goto err;
+ 
+ 	memcpy(mcn->medium_catalog_number, buffer + 9, 13);
+ 	mcn->medium_catalog_number[13] = 0;
+ 
++err:
+ 	kfree(buffer);
+ 	return result;
+ }
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index cb285d277201c..5696e52c76e9d 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -367,7 +367,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
+ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ 				     enum ufs_trace_str_t str_t)
+ {
+-	u64 lba;
++	u64 lba = 0;
+ 	u8 opcode = 0, group_id = 0;
+ 	u32 intr, doorbell;
+ 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+@@ -384,7 +384,6 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ 		return;
+ 
+ 	opcode = cmd->cmnd[0];
+-	lba = scsi_get_lba(cmd);
+ 
+ 	if (opcode == READ_10 || opcode == WRITE_10) {
+ 		/*
+@@ -392,6 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ 		 */
+ 		transfer_len =
+ 		       be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
++		lba = scsi_get_lba(cmd);
+ 		if (opcode == WRITE_10)
+ 			group_id = lrbp->cmd->cmnd[6];
+ 	} else if (opcode == UNMAP) {
+@@ -399,6 +399,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ 		 * The number of Bytes to be unmapped beginning with the lba.
+ 		 */
+ 		transfer_len = blk_rq_bytes(rq);
++		lba = scsi_get_lba(cmd);
+ 	}
+ 
+ 	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 92d9610df1fd8..938017a60c8ed 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
+ static bool atmel_qspi_supports_op(struct spi_mem *mem,
+ 				   const struct spi_mem_op *op)
+ {
++	if (!spi_mem_default_supports_op(mem, op))
++		return false;
++
+ 	if (atmel_qspi_find_mode(op) < 0)
+ 		return false;
+ 
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 75f3560411386..b8ac24318cb3a 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1415,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
+ 	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+ 		    !op->data.dtr;
+ 
+-	/* Mixed DTR modes not supported. */
+-	if (!(all_true || all_false))
++	if (all_true) {
++		/* Right now we only support 8-8-8 DTR mode. */
++		if (op->cmd.nbytes && op->cmd.buswidth != 8)
++			return false;
++		if (op->addr.nbytes && op->addr.buswidth != 8)
++			return false;
++		if (op->data.nbytes && op->data.buswidth != 8)
++			return false;
++	} else if (all_false) {
++		/* Only 1-1-X ops are supported without DTR */
++		if (op->cmd.nbytes && op->cmd.buswidth > 1)
++			return false;
++		if (op->addr.nbytes && op->addr.buswidth > 1)
++			return false;
++	} else {
++		/* Mixed DTR modes are not supported. */
+ 		return false;
++	}
+ 
+ 	if (all_true)
+ 		return spi_mem_dtr_supports_op(mem, op);
+diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
+index 5c93730615f8d..6d203477c04b1 100644
+--- a/drivers/spi/spi-mtk-nor.c
++++ b/drivers/spi/spi-mtk-nor.c
+@@ -909,7 +909,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
+ 
+ static int __maybe_unused mtk_nor_resume(struct device *dev)
+ {
+-	return pm_runtime_force_resume(dev);
++	struct spi_controller *ctlr = dev_get_drvdata(dev);
++	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
++	int ret;
++
++	ret = pm_runtime_force_resume(dev);
++	if (ret)
++		return ret;
++
++	mtk_nor_init(sp);
++
++	return 0;
+ }
+ 
+ static const struct dev_pm_ops mtk_nor_pm_ops = {
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 792fdcfdc6add..10aa0fb946138 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -946,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ 	ssize_t rc;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
+-	if (iocb->ki_filp->f_flags & O_DIRECT)
++	if (iocb->ki_flags & IOCB_DIRECT)
+ 		return cifs_user_readv(iocb, iter);
+ 
+ 	rc = cifs_revalidate_mapping(inode);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index c3be6a541c8fc..532770c30415d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -534,12 +534,19 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+ {
+ 	/* If tcp session is not an dfs connection, then reconnect to last target server */
+ 	spin_lock(&cifs_tcp_ses_lock);
+-	if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
++	if (!server->is_dfs_conn) {
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return __cifs_reconnect(server, mark_smb_session);
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
++	mutex_lock(&server->refpath_lock);
++	if (!server->origin_fullpath || !server->leaf_fullpath) {
++		mutex_unlock(&server->refpath_lock);
++		return __cifs_reconnect(server, mark_smb_session);
++	}
++	mutex_unlock(&server->refpath_lock);
++
+ 	return reconnect_dfs_server(server);
+ }
+ #else
+@@ -3675,9 +3682,11 @@ static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
+ {
+ 	struct TCP_Server_Info *server = mnt_ctx->server;
+ 
++	mutex_lock(&server->refpath_lock);
+ 	server->origin_fullpath = mnt_ctx->origin_fullpath;
+ 	server->leaf_fullpath = mnt_ctx->leaf_fullpath;
+ 	server->current_fullpath = mnt_ctx->leaf_fullpath;
++	mutex_unlock(&server->refpath_lock);
+ 	mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
+ }
+ 
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 30e040da4f096..956f8e5cf3e74 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1422,12 +1422,14 @@ static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 
+ 	mutex_lock(&server->refpath_lock);
+-	if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+-		__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
++	if (server->origin_fullpath) {
++		if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
++							server->origin_fullpath))
++			__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
++		__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
++	}
+ 	mutex_unlock(&server->refpath_lock);
+ 
+-	__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+-
+ 	return 0;
+ }
+ 
+@@ -1530,11 +1532,14 @@ static void refresh_mounts(struct cifs_ses **sessions)
+ 		list_del_init(&tcon->ulist);
+ 
+ 		mutex_lock(&server->refpath_lock);
+-		if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+-			__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
++		if (server->origin_fullpath) {
++			if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
++								server->origin_fullpath))
++				__refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
++			__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
++		}
+ 		mutex_unlock(&server->refpath_lock);
+ 
+-		__refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
+ 		cifs_put_tcon(tcon);
+ 	}
+ }
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index bcd3b9bf8069b..9b80693224957 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2271,6 +2271,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
+  * Structure of a directory entry
+  */
+ #define EXT4_NAME_LEN 255
++/*
++ * Base length of the ext4 directory entry excluding the name length
++ */
++#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
+ 
+ struct ext4_dir_entry {
+ 	__le32	inode;			/* Inode number */
+@@ -3030,7 +3034,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
+ extern int ext4_can_truncate(struct inode *inode);
+ extern int ext4_truncate(struct inode *);
+ extern int ext4_break_layouts(struct inode *);
+-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
++extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
+ extern void ext4_set_inode_flags(struct inode *, bool init);
+ extern int ext4_alloc_da_blocks(struct inode *inode);
+ extern void ext4_set_aops(struct inode *inode);
+@@ -3062,6 +3066,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
+ 		      struct dentry *dentry, struct fileattr *fa);
+ int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+ extern void ext4_reset_inode_seed(struct inode *inode);
++int ext4_update_overhead(struct super_block *sb);
+ 
+ /* migrate.c */
+ extern int ext4_ext_migrate(struct inode *);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index c0f3f83e0c1b1..488d7c1de941e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4501,9 +4501,9 @@ retry:
+ 	return ret > 0 ? ret2 : ret;
+ }
+ 
+-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
+ 
+-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
+ 
+ static long ext4_zero_range(struct file *file, loff_t offset,
+ 			    loff_t len, int mode)
+@@ -4575,6 +4575,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+ 	inode_dio_wait(inode);
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out_mutex;
++
+ 	/* Preallocate the range including the unaligned edges */
+ 	if (partial_begin || partial_end) {
+ 		ret = ext4_alloc_file_blocks(file,
+@@ -4691,7 +4695,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (mode & FALLOC_FL_PUNCH_HOLE) {
+-		ret = ext4_punch_hole(inode, offset, len);
++		ret = ext4_punch_hole(file, offset, len);
+ 		goto exit;
+ 	}
+ 
+@@ -4700,12 +4704,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 		goto exit;
+ 
+ 	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+-		ret = ext4_collapse_range(inode, offset, len);
++		ret = ext4_collapse_range(file, offset, len);
+ 		goto exit;
+ 	}
+ 
+ 	if (mode & FALLOC_FL_INSERT_RANGE) {
+-		ret = ext4_insert_range(inode, offset, len);
++		ret = ext4_insert_range(file, offset, len);
+ 		goto exit;
+ 	}
+ 
+@@ -4741,6 +4745,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+ 	inode_dio_wait(inode);
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out;
++
+ 	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
+ 	if (ret)
+ 		goto out;
+@@ -5242,8 +5250,9 @@ out:
+  * This implements the fallocate's collapse range functionality for ext4
+  * Returns: 0 and non-zero on error.
+  */
+-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
++static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ {
++	struct inode *inode = file_inode(file);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct address_space *mapping = inode->i_mapping;
+ 	ext4_lblk_t punch_start, punch_stop;
+@@ -5295,6 +5304,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 	/* Wait for existing dio to complete */
+ 	inode_dio_wait(inode);
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out_mutex;
++
+ 	/*
+ 	 * Prevent page faults from reinstantiating pages we have released from
+ 	 * page cache.
+@@ -5388,8 +5401,9 @@ out_mutex:
+  * by len bytes.
+  * Returns 0 on success, error otherwise.
+  */
+-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
++static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ {
++	struct inode *inode = file_inode(file);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct address_space *mapping = inode->i_mapping;
+ 	handle_t *handle;
+@@ -5446,6 +5460,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+ 	/* Wait for existing dio to complete */
+ 	inode_dio_wait(inode);
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out_mutex;
++
+ 	/*
+ 	 * Prevent page faults from reinstantiating pages we have released from
+ 	 * page cache.
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 531a94f48637c..d8ff93a4b1b90 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3944,12 +3944,14 @@ int ext4_break_layouts(struct inode *inode)
+  * Returns: 0 on success or negative on failure
+  */
+ 
+-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
++int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ {
++	struct inode *inode = file_inode(file);
+ 	struct super_block *sb = inode->i_sb;
+ 	ext4_lblk_t first_block, stop_block;
+ 	struct address_space *mapping = inode->i_mapping;
+-	loff_t first_block_offset, last_block_offset;
++	loff_t first_block_offset, last_block_offset, max_length;
++	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	handle_t *handle;
+ 	unsigned int credits;
+ 	int ret = 0, ret2 = 0;
+@@ -3992,6 +3994,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 		   offset;
+ 	}
+ 
++	/*
++	 * For punch hole, offset + length must stay at least one block
++	 * below the bitmap-addressable limit (s_bitmap_maxbytes). Adjust
++	 * the length if it goes beyond that limit.
++	 */
++	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
++	if (offset + length > max_length)
++		length = max_length - offset;
++
+ 	if (offset & (sb->s_blocksize - 1) ||
+ 	    (offset + length) & (sb->s_blocksize - 1)) {
+ 		/*
+@@ -4007,6 +4017,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
+ 	inode_dio_wait(inode);
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto out_mutex;
++
+ 	/*
+ 	 * Prevent page faults from reinstantiating pages we have released from
+ 	 * page cache.
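
The clamp added to ext4_punch_hole() above keeps offset + length at least one block below s_bitmap_maxbytes. A minimal userspace sketch of that arithmetic, not part of the patch, with invented constants standing in for the superblock fields:

/* Userspace sketch of the clamp added above; the two constants are
 * invented stand-ins for s_bitmap_maxbytes and the block size. */
#include <stdio.h>

int main(void)
{
	const long long bitmap_maxbytes = 1LL << 32;	/* assumed limit */
	const long long blocksize = 4096;		/* assumed block size */
	long long offset = (1LL << 32) - 8192;
	long long length = 16384;

	/* Same rule as the patch: offset + length must stay one block
	 * below the bitmap-addressable limit. */
	long long max_length = bitmap_maxbytes - blocksize;
	if (offset + length > max_length)
		length = max_length - offset;

	printf("clamped length: %lld\n", length);	/* prints 4096 */
	return 0;
}
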
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a8022c2c6a582..da0aefe67673d 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -1652,3 +1652,19 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+ }
+ #endif
++
++static void set_overhead(struct ext4_super_block *es, const void *arg)
++{
++	es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
++}
++
++int ext4_update_overhead(struct super_block *sb)
++{
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++
++	if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
++	    sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
++		return 0;
++
++	return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
++}
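
ext4_update_overhead() above funnels the write through ext4_update_superblocks_fn() with a one-line callback. A hedged sketch of that callback-over-copies pattern; struct fake_super and update_superblocks() are invented stand-ins, not ext4 APIs:

#include <stdio.h>

struct fake_super {
	unsigned int s_overhead_clusters;
};

static void set_overhead(struct fake_super *es, const void *arg)
{
	es->s_overhead_clusters = *(const unsigned int *)arg;
}

/* Stand-in for ext4_update_superblocks_fn(): apply one mutation to
 * every superblock copy so the primary and the backups stay in sync. */
static int update_superblocks(struct fake_super *copies, int n,
			      void (*fn)(struct fake_super *, const void *),
			      const void *arg)
{
	for (int i = 0; i < n; i++)
		fn(&copies[i], arg);
	return 0;
}

int main(void)
{
	struct fake_super sbs[3] = {{ 0 }};
	unsigned int overhead = 1234;

	update_superblocks(sbs, 3, set_overhead, &overhead);
	printf("%u %u %u\n", sbs[0].s_overhead_clusters,
	       sbs[1].s_overhead_clusters, sbs[2].s_overhead_clusters);
	return 0;
}
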
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 39e223f7bf64d..f62260264f68c 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ 
+ 	de = (struct ext4_dir_entry_2 *)search_buf;
+ 	dlimit = search_buf + buf_size;
+-	while ((char *) de < dlimit) {
++	while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
+ 		/* this code is executed quadratically often */
+ 		/* do minimal checking `by hand' */
+-		if ((char *) de + de->name_len <= dlimit &&
++		if (de->name + de->name_len <= dlimit &&
+ 		    ext4_match(dir, fname, de)) {
+ 			/* found a match - just to be sure, do
+ 			 * a full check */
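
The loop above now stops while a full fixed-size entry header still fits below dlimit and measures the name from de->name rather than the struct base. A simplified userspace model of the same walk; the entry layout is a stand-in and BASE_DIR_LEN only approximates EXT4_BASE_DIR_LEN:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dir_entry {
	unsigned int inode;
	unsigned short rec_len;
	unsigned char name_len;
	unsigned char file_type;
	char name[];		/* name_len bytes, not NUL-terminated */
};

#define BASE_DIR_LEN offsetof(struct dir_entry, name)

static int scan(const char *buf, size_t buf_size,
		const char *want, size_t want_len)
{
	const char *dlimit = buf + buf_size;
	const struct dir_entry *de = (const struct dir_entry *)buf;

	/* Stop while a full fixed header still fits: dereferencing
	 * name_len past dlimit would be the out-of-bounds read the
	 * patch closes. */
	while ((const char *)de < dlimit - BASE_DIR_LEN) {
		if (de->name + de->name_len <= dlimit &&
		    de->name_len == want_len &&
		    memcmp(de->name, want, want_len) == 0)
			return 1;
		if (de->rec_len == 0)
			break;	/* corrupt entry; avoid looping forever */
		de = (const struct dir_entry *)((const char *)de + de->rec_len);
	}
	return 0;
}

int main(void)
{
	union { char bytes[32]; unsigned int align; } blk = { { 0 } };
	struct dir_entry *de = (struct dir_entry *)blk.bytes;

	de->inode = 11;
	de->rec_len = 32;
	de->name_len = 3;
	memcpy(de->name, "foo", 3);
	printf("found: %d\n", scan(blk.bytes, sizeof(blk.bytes), "foo", 3));
	return 0;
}
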
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 1d370364230e8..40b7d8485b445 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
+ 				continue;
+ 			}
+ 			clear_buffer_async_write(bh);
+-			if (bio->bi_status)
++			if (bio->bi_status) {
++				set_buffer_write_io_error(bh);
+ 				buffer_io_error(bh);
++			}
+ 		} while ((bh = bh->b_this_page) != head);
+ 		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
+ 		if (!under_io) {
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index bed29f96ccc7e..ba6530c2d2711 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4156,9 +4156,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ 	ext4_fsblk_t		first_block, last_block, b;
+ 	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
+ 	int			s, j, count = 0;
++	int			has_super = ext4_bg_has_super(sb, grp);
+ 
+ 	if (!ext4_has_feature_bigalloc(sb))
+-		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
++		return (has_super + ext4_bg_num_gdb(sb, grp) +
++			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
+ 			sbi->s_itb_per_group + 2);
+ 
+ 	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+@@ -5266,9 +5268,18 @@ no_journal:
+ 	 * Get the # of file system overhead blocks from the
+ 	 * superblock if present.
+ 	 */
+-	if (es->s_overhead_clusters)
+-		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+-	else {
++	sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
++	/* ignore the precalculated value if it is ridiculous */
++	if (sbi->s_overhead > ext4_blocks_count(es))
++		sbi->s_overhead = 0;
++	/*
++	 * If the bigalloc feature is not enabled, recalculating the
++	 * overhead doesn't take long, so we might as well just redo
++	 * it to make sure we are using the correct value.
++	 */
++	if (!ext4_has_feature_bigalloc(sb))
++		sbi->s_overhead = 0;
++	if (sbi->s_overhead == 0) {
+ 		err = ext4_calculate_overhead(sb);
+ 		if (err)
+ 			goto failed_mount_wq;
+@@ -5586,6 +5597,8 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
+ 			 "Quota mode: %s.", descr, ext4_quota_mode(sb));
+ 
++	/* Update the s_overhead_clusters if necessary */
++	ext4_update_overhead(sb);
+ 	return 0;
+ 
+ free_sbi:
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 3b34bb24d0af4..801ad9f4f2bef 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -923,15 +923,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
+ 	spin_lock_init(&rgd->rd_rsspin);
+ 	mutex_init(&rgd->rd_mutex);
+ 
+-	error = compute_bitstructs(rgd);
+-	if (error)
+-		goto fail;
+-
+ 	error = gfs2_glock_get(sdp, rgd->rd_addr,
+ 			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
+ 	if (error)
+ 		goto fail;
+ 
++	error = compute_bitstructs(rgd);
++	if (error)
++		goto fail_glock;
++
+ 	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
+ 	rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
+ 	if (rgd->rd_data > sdp->sd_max_rg_data)
+@@ -945,6 +945,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
+ 	}
+ 
+ 	error = 0; /* someone else read in the rgrp; free it and ignore it */
++fail_glock:
+ 	gfs2_glock_put(rgd->rd_gl);
+ 
+ fail:
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index a7c6c7498be0b..ed85051b12754 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
+ 	info.flags = 0;
+ 	info.length = len;
+ 	info.low_limit = current->mm->mmap_base;
+-	info.high_limit = TASK_SIZE;
++	info.high_limit = arch_get_mmap_end(addr);
+ 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ 	info.align_offset = 0;
+ 	return vm_unmapped_area(&info);
+@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
+ 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ 	info.length = len;
+ 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+-	info.high_limit = current->mm->mmap_base;
++	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
+ 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ 	info.align_offset = 0;
+ 	addr = vm_unmapped_area(&info);
+@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
+ 		VM_BUG_ON(addr != -ENOMEM);
+ 		info.flags = 0;
+ 		info.low_limit = current->mm->mmap_base;
+-		info.high_limit = TASK_SIZE;
++		info.high_limit = arch_get_mmap_end(addr);
+ 		addr = vm_unmapped_area(&info);
+ 	}
+ 
+@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 	struct hstate *h = hstate_file(file);
++	const unsigned long mmap_end = arch_get_mmap_end(addr);
+ 
+ 	if (len & ~huge_page_mask(h))
+ 		return -EINVAL;
+@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	if (addr) {
+ 		addr = ALIGN(addr, huge_page_size(h));
+ 		vma = find_vma(mm, addr);
+-		if (TASK_SIZE - len >= addr &&
++		if (mmap_end - len >= addr &&
+ 		    (!vma || addr + len <= vm_start_gap(vma)))
+ 			return addr;
+ 	}
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 619c67fd456dd..fbba8342172a0 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2612,11 +2612,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
+ 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
+ 		if (!smp_load_acquire(&req->iopoll_completed))
+ 			break;
++		nr_events++;
+ 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
+ 			continue;
+-
+ 		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
+-		nr_events++;
+ 	}
+ 
+ 	if (unlikely(!nr_events))
+@@ -3622,8 +3621,10 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 		iovec = NULL;
+ 	}
+ 	ret = io_rw_init_file(req, FMODE_READ);
+-	if (unlikely(ret))
++	if (unlikely(ret)) {
++		kfree(iovec);
+ 		return ret;
++	}
+ 	req->result = iov_iter_count(&s->iter);
+ 
+ 	if (force_nonblock) {
+@@ -3742,8 +3743,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
+ 		iovec = NULL;
+ 	}
+ 	ret = io_rw_init_file(req, FMODE_WRITE);
+-	if (unlikely(ret))
++	if (unlikely(ret)) {
++		kfree(iovec);
+ 		return ret;
++	}
+ 	req->result = iov_iter_count(&s->iter);
+ 
+ 	if (force_nonblock) {
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 5b9408e3b370d..ac7f067b7bddb 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -488,7 +488,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	jbd2_journal_wait_updates(journal);
+ 
+ 	commit_transaction->t_state = T_SWITCH;
+-	write_unlock(&journal->j_state_lock);
+ 
+ 	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
+ 			journal->j_max_transaction_buffers);
+@@ -508,6 +507,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	 * has reserved.  This is consistent with the existing behaviour
+ 	 * that multiple jbd2_journal_get_write_access() calls to the same
+ 	 * buffer are perfectly permissible.
++	 * We use journal->j_state_lock here to serialize processing of
++	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
+ 	 */
+ 	while (commit_transaction->t_reserved_list) {
+ 		jh = commit_transaction->t_reserved_list;
+@@ -527,6 +528,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 		jbd2_journal_refile_buffer(journal, jh);
+ 	}
+ 
++	write_unlock(&journal->j_state_lock);
+ 	/*
+ 	 * Now try to drop any written-back buffers from the journal's
+ 	 * checkpoint lists.  We do this *before* commit because it potentially
+diff --git a/fs/namei.c b/fs/namei.c
+index 3f1829b3ab5b7..509657fdf4f56 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ {
+ 	struct dentry *dentry = ERR_PTR(-EEXIST);
+ 	struct qstr last;
++	bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
++	unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
++	unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
+ 	int type;
+ 	int err2;
+ 	int error;
+-	bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
+ 
+-	/*
+-	 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
+-	 * other flags passed in are ignored!
+-	 */
+-	lookup_flags &= LOOKUP_REVAL;
+-
+-	error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
++	error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
+ 	if (error)
+ 		return ERR_PTR(error);
+ 
+@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ 	/* don't fail immediately if it's r/o, at least try to report other errors */
+ 	err2 = mnt_want_write(path->mnt);
+ 	/*
+-	 * Do the final lookup.
++	 * Do the final lookup.  Suppress 'create' if there is a trailing
++	 * '/' and a directory wasn't requested.
+ 	 */
+-	lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
++	if (last.name[last.len] && !want_dir)
++		create_flags = 0;
+ 	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
+-	dentry = __lookup_hash(&last, path->dentry, lookup_flags);
++	dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
+ 	if (IS_ERR(dentry))
+ 		goto unlock;
+ 
+@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ 	 * all is fine. Let's be bastards - you had / on the end, you've
+ 	 * been asking for (non-existent) directory. -ENOENT for you.
+ 	 */
+-	if (unlikely(!is_dir && last.name[last.len])) {
++	if (unlikely(!create_flags)) {
+ 		error = -ENOENT;
+ 		goto fail;
+ 	}
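
The rewrite above unpacks the old lookup_flags juggling into explicit want_dir/reval_flag/create_flags pieces and drops the create intent for a trailing '/' without LOOKUP_DIRECTORY. A toy model of just that flag decision; the flag values and string handling are invented for the example:

#include <stdio.h>
#include <string.h>

#define LOOKUP_DIRECTORY 0x01
#define LOOKUP_CREATE    0x02
#define LOOKUP_EXCL      0x04

static unsigned int create_flags_for(const char *path,
				     unsigned int lookup_flags)
{
	unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
	int want_dir = lookup_flags & LOOKUP_DIRECTORY;

	/* In the kernel, last.name[last.len] is the byte after the
	 * final component: non-NUL means a trailing '/'. Modeled here
	 * with a plain string check. */
	if (path[strlen(path) - 1] == '/' && !want_dir)
		create_flags = 0;
	return create_flags;
}

int main(void)
{
	printf("mkdir a/: %#x\n", create_flags_for("a/", LOOKUP_DIRECTORY));
	printf("creat a/: %#x\n", create_flags_for("a/", 0));	/* 0 -> -ENOENT later */
	printf("creat a : %#x\n", create_flags_for("a", 0));
	return 0;
}
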
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 80acb6885cf90..962d32468eb48 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns(
+ }
+ 
+ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
++				   struct inode *inode,
+ 				   void *value, size_t size)
+ {
+ 	struct user_namespace *user_ns = current_user_ns();
++
++	/* Leave ids untouched on non-idmapped mounts. */
++	if (no_idmapping(mnt_userns, i_user_ns(inode)))
++		mnt_userns = &init_user_ns;
+ 	if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
+ 		return;
+ 	posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
+@@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ }
+ 
+ void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
++				 struct inode *inode,
+ 				 void *value, size_t size)
+ {
+ 	struct user_namespace *user_ns = current_user_ns();
++
++	/* Leave ids untouched on non-idmapped mounts. */
++	if (no_idmapping(mnt_userns, i_user_ns(inode)))
++		mnt_userns = &init_user_ns;
+ 	if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
+ 		return;
+ 	posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
+diff --git a/fs/stat.c b/fs/stat.c
+index 28d2020ba1f42..246d138ec0669 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -334,9 +334,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
+ #  define choose_32_64(a,b) b
+ #endif
+ 
+-#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
+-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
+-
+ #ifndef INIT_STRUCT_STAT_PADDING
+ #  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
+ #endif
+@@ -345,7 +342,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
+ {
+ 	struct stat tmp;
+ 
+-	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
++	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
++		return -EOVERFLOW;
++	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
+ 		return -EOVERFLOW;
+ #if BITS_PER_LONG == 32
+ 	if (stat->size > MAX_NON_LFS)
+@@ -353,7 +352,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
+ #endif
+ 
+ 	INIT_STRUCT_STAT_PADDING(tmp);
+-	tmp.st_dev = encode_dev(stat->dev);
++	tmp.st_dev = new_encode_dev(stat->dev);
+ 	tmp.st_ino = stat->ino;
+ 	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
+ 		return -EOVERFLOW;
+@@ -363,7 +362,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
+ 		return -EOVERFLOW;
+ 	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
+ 	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
+-	tmp.st_rdev = encode_dev(stat->rdev);
++	tmp.st_rdev = new_encode_dev(stat->rdev);
+ 	tmp.st_size = stat->size;
+ 	tmp.st_atime = stat->atime.tv_sec;
+ 	tmp.st_mtime = stat->mtime.tv_sec;
+@@ -644,11 +643,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
+ {
+ 	struct compat_stat tmp;
+ 
+-	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
++	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
++		return -EOVERFLOW;
++	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
+ 		return -EOVERFLOW;
+ 
+ 	memset(&tmp, 0, sizeof(tmp));
+-	tmp.st_dev = old_encode_dev(stat->dev);
++	tmp.st_dev = new_encode_dev(stat->dev);
+ 	tmp.st_ino = stat->ino;
+ 	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
+ 		return -EOVERFLOW;
+@@ -658,7 +659,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
+ 		return -EOVERFLOW;
+ 	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
+ 	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
+-	tmp.st_rdev = old_encode_dev(stat->rdev);
++	tmp.st_rdev = new_encode_dev(stat->rdev);
+ 	if ((u64) stat->size > MAX_NON_LFS)
+ 		return -EOVERFLOW;
+ 	tmp.st_size = stat->size;
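
Both cp_new_stat() and cp_compat_stat() above now gate on old_valid_dev() only when the stat field is genuinely 16-bit, and always emit the new encoding. A small sketch of the two encodings as I understand them from the kernel's kdev_t.h (12-bit major, 20-bit minor); the helpers are local reimplementations:

#include <stdio.h>

typedef unsigned int dev32_t;	/* local stand-in for the kernel dev_t */

static unsigned int major_of(dev32_t dev) { return dev >> 20; }
static unsigned int minor_of(dev32_t dev) { return dev & 0xfffff; }

static int old_valid_dev(dev32_t dev)
{
	return major_of(dev) < 256 && minor_of(dev) < 256;
}

static unsigned int new_encode_dev(dev32_t dev)
{
	unsigned int major = major_of(dev), minor = minor_of(dev);

	return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

int main(void)
{
	dev32_t small = (8u << 20) | 1;		/* major 8, minor 1 */
	dev32_t big = (300u << 20) | 5000;	/* needs the wide format */

	/* For devices old_valid_dev() accepts, the new encoding equals
	 * the old 16-bit one (major << 8 | minor), which is why the
	 * patch can always use new_encode_dev(). */
	printf("small: valid=%d new=%#x\n", old_valid_dev(small),
	       new_encode_dev(small));
	printf("big:   valid=%d new=%#x\n", old_valid_dev(big),
	       new_encode_dev(big));
	return 0;
}
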
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 5c8c5175b385c..998045165916e 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d,
+ 		}
+ 		if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+ 		    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+-			posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
++			posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
++						      kvalue, size);
+ 	}
+ 
+ 	error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
+@@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d,
+ 	if (error > 0) {
+ 		if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+ 		    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+-			posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
++			posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
++						    kvalue, error);
+ 		if (size && copy_to_user(value, kvalue, error))
+ 			error = -EFAULT;
+ 	} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index 2ad71cc90b37d..92b10e67d5f87 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -134,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
+ #endif
+ }
+ 
+-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
++static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
+ {
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ #ifdef __BIG_ENDIAN
+@@ -372,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+  * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
+  */
+ 
+-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
+-					   const u8 addr2[6+2])
++static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
+ {
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ 	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
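
The prototype change above is cosmetic (it drops the [6+2] array notation), but the helper's trick is worth seeing: it reads a full 8 bytes per address, XORs them, and discards the 2 bytes that are not part of the MAC. A portable userspace sketch, with memcpy standing in for the kernel's unaligned loads and the assumption that 2 readable padding bytes follow each address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int ether_addr_equal_64bits(const uint8_t *a, const uint8_t *b)
{
	uint64_t x, y;

	/* memcpy models the contract that each 6-byte address is
	 * followed by 2 readable padding bytes. */
	memcpy(&x, a, 8);
	memcpy(&y, b, 8);

	/* The first 6 bytes in memory are the MAC; zero bytes 6-7 of
	 * the XOR before testing, regardless of host endianness. */
	uint64_t fold = x ^ y;
	uint8_t *f = (uint8_t *)&fold;
	f[6] = f[7] = 0;
	return fold == 0;
}

int main(void)
{
	uint8_t a[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x55, 0xaa };
	uint8_t b[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x00, 0x00 };

	printf("equal: %d\n", ether_addr_equal_64bits(a, b));	/* 1 */
	b[5] = 0x02;
	printf("equal: %d\n", ether_addr_equal_64bits(a, b));	/* 0 */
	return 0;
}
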
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 0abbd685703b9..a1fcf57493479 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -999,6 +999,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ }
+ 
+ void mem_cgroup_flush_stats(void);
++void mem_cgroup_flush_stats_delayed(void);
+ 
+ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ 			      int val);
+@@ -1442,6 +1443,10 @@ static inline void mem_cgroup_flush_stats(void)
+ {
+ }
+ 
++static inline void mem_cgroup_flush_stats_delayed(void)
++{
++}
++
+ static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+ 					    enum node_stat_item idx, int val)
+ {
+diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
+index 060e8d2031814..1766e1de69560 100644
+--- a/include/linux/posix_acl_xattr.h
++++ b/include/linux/posix_acl_xattr.h
+@@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size)
+ 
+ #ifdef CONFIG_FS_POSIX_ACL
+ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
++				   struct inode *inode,
+ 				   void *value, size_t size);
+ void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
++				   struct inode *inode,
+ 				 void *value, size_t size);
+ #else
+ static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
++						 struct inode *inode,
+ 						 void *value, size_t size)
+ {
+ }
+ static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
++					       struct inode *inode,
+ 					       void *value, size_t size)
+ {
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e806326eca723..4b4cc633b2665 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1440,6 +1440,7 @@ struct task_struct {
+ 	int				pagefault_disabled;
+ #ifdef CONFIG_MMU
+ 	struct task_struct		*oom_reaper_list;
++	struct timer_list		oom_reaper_timer;
+ #endif
+ #ifdef CONFIG_VMAP_STACK
+ 	struct vm_struct		*stack_vm_area;
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index aa5f09ca5bcf4..0ee0515d5a175 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -135,6 +135,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
+ #endif /* CONFIG_MEMCG */
+ 
+ #ifdef CONFIG_MMU
++#ifndef arch_get_mmap_end
++#define arch_get_mmap_end(addr)	(TASK_SIZE)
++#endif
++
++#ifndef arch_get_mmap_base
++#define arch_get_mmap_base(addr, base) (base)
++#endif
++
+ extern void arch_pick_mmap_layout(struct mm_struct *mm,
+ 				  struct rlimit *rlim_stack);
+ extern unsigned long
+diff --git a/include/net/esp.h b/include/net/esp.h
+index 90cd02ff77ef6..9c5637d41d951 100644
+--- a/include/net/esp.h
++++ b/include/net/esp.h
+@@ -4,8 +4,6 @@
+ 
+ #include <linux/skbuff.h>
+ 
+-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
+-
+ struct ip_esp_hdr;
+ 
+ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index 6bd7e5a85ce76..ff82983b7ab41 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -75,8 +75,8 @@ struct netns_ipv6 {
+ 	struct list_head	fib6_walkers;
+ 	rwlock_t		fib6_walker_lock;
+ 	spinlock_t		fib6_gc_lock;
+-	unsigned int		 ip6_rt_gc_expire;
+-	unsigned long		 ip6_rt_last_gc;
++	atomic_t		ip6_rt_gc_expire;
++	unsigned long		ip6_rt_last_gc;
+ 	unsigned char		flowlabel_has_excl;
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ 	bool			fib6_has_custom_rules;
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 4ee233e5a6ffa..d1e282f0d6f18 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -52,8 +52,10 @@ enum {
+ 
+ #define ISID_SIZE			6
+ 
+-/* Connection suspend "bit" */
+-#define ISCSI_SUSPEND_BIT		1
++/* Connection flags */
++#define ISCSI_CONN_FLAG_SUSPEND_TX	BIT(0)
++#define ISCSI_CONN_FLAG_SUSPEND_RX	BIT(1)
++#define ISCSI_CONN_FLAG_BOUND		BIT(2)
+ 
+ #define ISCSI_ITT_MASK			0x1fff
+ #define ISCSI_TOTAL_CMDS_MAX		4096
+@@ -199,8 +201,7 @@ struct iscsi_conn {
+ 	struct list_head	cmdqueue;	/* data-path cmd queue */
+ 	struct list_head	requeue;	/* tasks needing another run */
+ 	struct work_struct	xmitwork;	/* per-conn. xmit workqueue */
+-	unsigned long		suspend_tx;	/* suspend Tx */
+-	unsigned long		suspend_rx;	/* suspend Rx */
++	unsigned long		flags;		/* ISCSI_CONN_FLAGs */
+ 
+ 	/* negotiated params */
+ 	unsigned		max_recv_dlength; /* initiator_max_recv_dsl*/
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 037c77fb5dc55..3ecf9702287be 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -296,7 +296,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+ struct iscsi_endpoint {
+ 	void *dd_data;			/* LLD private data */
+ 	struct device dev;
+-	uint64_t id;
++	int id;
+ 	struct iscsi_cls_conn *conn;
+ };
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0ee9ffceb9764..baa0fe350246f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6352,7 +6352,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ again:
+ 	mutex_lock(&event->mmap_mutex);
+ 	if (event->rb) {
+-		if (event->rb->nr_pages != nr_pages) {
++		if (data_page_nr(event->rb) != nr_pages) {
+ 			ret = -EINVAL;
+ 			goto unlock;
+ 		}
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 082832738c8fd..5150d5f84c033 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
+ }
+ #endif
+ 
++static inline int data_page_nr(struct perf_buffer *rb)
++{
++	return rb->nr_pages << page_order(rb);
++}
++
+ static inline unsigned long perf_data_size(struct perf_buffer *rb)
+ {
+ 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 52868716ec358..fb35b926024ca 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
+ }
+ 
+ #else
+-static int data_page_nr(struct perf_buffer *rb)
+-{
+-	return rb->nr_pages << page_order(rb);
+-}
+-
+ static struct page *
+ __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
+ {
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index f7df715ec28e6..7afa40fe5cc43 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
+ 	if (!irq_work_claim(work))
+ 		return false;
+ 
+-	kasan_record_aux_stack(work);
++	kasan_record_aux_stack_noalloc(work);
+ 
+ 	preempt_disable();
+ 	if (cpu != smp_processor_id()) {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index cddcf2f4f5251..2f461f0592789 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3776,11 +3776,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ 
+ 	se->avg.runnable_sum = se->avg.runnable_avg * divider;
+ 
+-	se->avg.load_sum = divider;
+-	if (se_weight(se)) {
+-		se->avg.load_sum =
+-			div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
+-	}
++	se->avg.load_sum = se->avg.load_avg * divider;
++	if (se_weight(se) < se->avg.load_sum)
++		se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
++	else
++		se->avg.load_sum = 1;
+ 
+ 	enqueue_load_avg(cfs_rq, se);
+ 	cfs_rq->avg.util_avg += se->avg.util_avg;
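
The attach path above now multiplies load_avg by the divider first and divides by the weight afterwards, clamping to 1 whenever the weight is not below the product. A worked example with invented numbers (47742 is roughly the PELT LOAD_AVG_MAX and is used here only for scale):

#include <stdint.h>
#include <stdio.h>

static uint64_t attach_load_sum(uint64_t load_avg, uint64_t divider,
				uint64_t weight)
{
	uint64_t load_sum = load_avg * divider;

	if (weight < load_sum)
		load_sum /= weight;	/* stands in for div_u64() */
	else
		load_sum = 1;		/* clamp when weight >= product */
	return load_sum;
}

int main(void)
{
	/* Ordinary case: 512 * 47742 / 1024 = 23871. */
	printf("%llu\n",
	       (unsigned long long)attach_load_sum(512, 47742, 1024));

	/* Case the fix targets: a tiny product against a huge weight
	 * used to truncate to 0; it now clamps to 1. */
	printf("%llu\n",
	       (unsigned long long)attach_load_sum(1, 100, 1048576));
	return 0;
}
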
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 88ca87435e3da..32e1669d5b649 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+ 	if (xa_is_sibling(entry)) {
+ 		offset = xa_to_sibling(entry);
+ 		entry = xa_entry(xas->xa, node, offset);
++		if (node->shift && xa_is_node(entry))
++			entry = XA_RETRY_ENTRY;
+ 	}
+ 
+ 	xas->xa_offset = offset;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9b89a340a6629..563100f2a693e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -628,6 +628,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
+ static DEFINE_SPINLOCK(stats_flush_lock);
+ static DEFINE_PER_CPU(unsigned int, stats_updates);
+ static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
++static u64 flush_next_time;
++
++#define FLUSH_TIME (2UL*HZ)
+ 
+ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
+ {
+@@ -649,6 +652,7 @@ static void __mem_cgroup_flush_stats(void)
+ 	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
+ 		return;
+ 
++	flush_next_time = jiffies_64 + 2*FLUSH_TIME;
+ 	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
+ 	atomic_set(&stats_flush_threshold, 0);
+ 	spin_unlock_irqrestore(&stats_flush_lock, flag);
+@@ -660,10 +664,16 @@ void mem_cgroup_flush_stats(void)
+ 		__mem_cgroup_flush_stats();
+ }
+ 
++void mem_cgroup_flush_stats_delayed(void)
++{
++	if (time_after64(jiffies_64, flush_next_time))
++		mem_cgroup_flush_stats();
++}
++
+ static void flush_memcg_stats_dwork(struct work_struct *w)
+ {
+ 	__mem_cgroup_flush_stats();
+-	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
++	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+ }
+ 
+ /**
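
mem_cgroup_flush_stats_delayed() above makes hot-path callers a no-op unless the periodic worker has fallen behind by more than a full interval. A tiny model of that rate limit, with a plain counter standing in for jiffies_64:

#include <stdio.h>

#define FLUSH_TIME 2		/* stands in for 2*HZ */

static unsigned long long now;	/* stands in for jiffies_64 */
static unsigned long long flush_next_time;
static int flushes;

static void do_flush(void)
{
	/* As in the patch: push the deadline two intervals out. */
	flush_next_time = now + 2 * FLUSH_TIME;
	flushes++;
}

static void flush_stats_delayed(void)
{
	/* Only flush when the periodic worker has missed its slot. */
	if (now > flush_next_time)
		do_flush();
}

int main(void)
{
	for (now = 0; now < 20; now++)
		flush_stats_delayed();	/* hot path: mostly a no-op */
	printf("flushes in 20 ticks: %d\n", flushes);	/* 4, not 20 */
	return 0;
}
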
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 97a9ed8f87a96..15dcedbc17306 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1779,6 +1779,19 @@ try_again:
+ 	}
+ 
+ 	if (PageTransHuge(hpage)) {
++		/*
++		 * Bail out before SetPageHasHWPoisoned() if hpage is
++		 * huge_zero_page, although PG_has_hwpoisoned is not
++		 * checked in set_huge_zero_page().
++		 *
++		 * TODO: Handle memory failure of huge_zero_page thoroughly.
++		 */
++		if (is_huge_zero_page(hpage)) {
++			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
++			res = -EBUSY;
++			goto unlock_mutex;
++		}
++
+ 		/*
+ 		 * The flag must be set after the refcount is bumped
+ 		 * otherwise it may race with THP split.
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 18875c216f8db..eb39f17cb86eb 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2119,14 +2119,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
+ 	return addr;
+ }
+ 
+-#ifndef arch_get_mmap_end
+-#define arch_get_mmap_end(addr)	(TASK_SIZE)
+-#endif
+-
+-#ifndef arch_get_mmap_base
+-#define arch_get_mmap_base(addr, base) (base)
+-#endif
+-
+ /* Get an address range which is currently unmapped.
+  * For shmat() with addr=0.
+  *
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 459d195d2ff64..f45ff1b7626a6 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
+ }
+ EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
+ 
++static bool
++mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
++			  unsigned long seq)
++{
++	bool ret;
++
++	spin_lock(&subscriptions->lock);
++	ret = subscriptions->invalidate_seq != seq;
++	spin_unlock(&subscriptions->lock);
++	return ret;
++}
++
+ /**
+  * mmu_interval_notifier_remove - Remove a interval notifier
+  * @interval_sub: Interval subscription to unregister
+@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
+ 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ 	if (seq)
+ 		wait_event(subscriptions->wq,
+-			   READ_ONCE(subscriptions->invalidate_seq) != seq);
++			   mmu_interval_seq_released(subscriptions, seq));
+ 
+ 	/* pairs with mmgrab in mmu_interval_notifier_insert() */
+ 	mmdrop(mm);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 832fb330376ef..a6bc4a6786ece 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -635,7 +635,7 @@ done:
+ 	 */
+ 	set_bit(MMF_OOM_SKIP, &mm->flags);
+ 
+-	/* Drop a reference taken by wake_oom_reaper */
++	/* Drop a reference taken by queue_oom_reaper */
+ 	put_task_struct(tsk);
+ }
+ 
+@@ -647,12 +647,12 @@ static int oom_reaper(void *unused)
+ 		struct task_struct *tsk = NULL;
+ 
+ 		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
+-		spin_lock(&oom_reaper_lock);
++		spin_lock_irq(&oom_reaper_lock);
+ 		if (oom_reaper_list != NULL) {
+ 			tsk = oom_reaper_list;
+ 			oom_reaper_list = tsk->oom_reaper_list;
+ 		}
+-		spin_unlock(&oom_reaper_lock);
++		spin_unlock_irq(&oom_reaper_lock);
+ 
+ 		if (tsk)
+ 			oom_reap_task(tsk);
+@@ -661,22 +661,48 @@ static int oom_reaper(void *unused)
+ 	return 0;
+ }
+ 
+-static void wake_oom_reaper(struct task_struct *tsk)
++static void wake_oom_reaper(struct timer_list *timer)
+ {
+-	/* mm is already queued? */
+-	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+-		return;
++	struct task_struct *tsk = container_of(timer, struct task_struct,
++			oom_reaper_timer);
++	struct mm_struct *mm = tsk->signal->oom_mm;
++	unsigned long flags;
+ 
+-	get_task_struct(tsk);
++	/* The victim managed to terminate on its own - see exit_mmap */
++	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
++		put_task_struct(tsk);
++		return;
++	}
+ 
+-	spin_lock(&oom_reaper_lock);
++	spin_lock_irqsave(&oom_reaper_lock, flags);
+ 	tsk->oom_reaper_list = oom_reaper_list;
+ 	oom_reaper_list = tsk;
+-	spin_unlock(&oom_reaper_lock);
++	spin_unlock_irqrestore(&oom_reaper_lock, flags);
+ 	trace_wake_reaper(tsk->pid);
+ 	wake_up(&oom_reaper_wait);
+ }
+ 
++/*
++ * Give the OOM victim time to exit naturally before invoking the OOM reaper.
++ * The timer's timeout is arbitrary... the longer it is, the longer the worst
++ * case scenario for the OOM can take. If it is too small, the oom_reaper can
++ * get in the way and release resources needed by the process exit path,
++ * e.g. the futex robust list can sit in Anon|Private memory that gets reaped
++ * before the exit path is able to wake the futex waiters.
++ */
++#define OOM_REAPER_DELAY (2*HZ)
++static void queue_oom_reaper(struct task_struct *tsk)
++{
++	/* mm is already queued? */
++	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
++		return;
++
++	get_task_struct(tsk);
++	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
++	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
++	add_timer(&tsk->oom_reaper_timer);
++}
++
+ static int __init oom_init(void)
+ {
+ 	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
+@@ -684,7 +710,7 @@ static int __init oom_init(void)
+ }
+ subsys_initcall(oom_init)
+ #else
+-static inline void wake_oom_reaper(struct task_struct *tsk)
++static inline void queue_oom_reaper(struct task_struct *tsk)
+ {
+ }
+ #endif /* CONFIG_MMU */
+@@ -935,7 +961,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
+ 	rcu_read_unlock();
+ 
+ 	if (can_oom_reap)
+-		wake_oom_reaper(victim);
++		queue_oom_reaper(victim);
+ 
+ 	mmdrop(mm);
+ 	put_task_struct(victim);
+@@ -971,7 +997,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
+ 	task_lock(victim);
+ 	if (task_will_free_mem(victim)) {
+ 		mark_oom_victim(victim);
+-		wake_oom_reaper(victim);
++		queue_oom_reaper(victim);
+ 		task_unlock(victim);
+ 		put_task_struct(victim);
+ 		return;
+@@ -1070,7 +1096,7 @@ bool out_of_memory(struct oom_control *oc)
+ 	 */
+ 	if (task_will_free_mem(current)) {
+ 		mark_oom_victim(current);
+-		wake_oom_reaper(current);
++		queue_oom_reaper(current);
+ 		return true;
+ 	}
+ 
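
queue_oom_reaper() above replaces the immediate wake-up with a one-shot timer, so the reaper is only queued OOM_REAPER_DELAY later and the callback can bail out if the victim already exited. A toy tick-loop model of that hand-off; the victim struct and tick() driver are invented for the example:

#include <stdio.h>

#define OOM_REAPER_DELAY 2	/* stands in for 2*HZ */

struct victim {
	int pid;
	int exited;		/* stands in for MMF_OOM_SKIP */
	unsigned long expires;
	int queued;
};

static void wake_oom_reaper(struct victim *v)
{
	/* The victim managed to terminate on its own: nothing to reap. */
	if (v->exited)
		return;
	v->queued = 1;
}

static void tick(struct victim *v, unsigned long now)
{
	if (now == v->expires)
		wake_oom_reaper(v);	/* the timer callback fires */
}

int main(void)
{
	struct victim fast = { .pid = 1, .expires = OOM_REAPER_DELAY };
	struct victim stuck = { .pid = 2, .expires = OOM_REAPER_DELAY };

	for (unsigned long now = 1; now <= 3; now++) {
		if (now == 1)
			fast.exited = 1;	/* exits before the timer */
		tick(&fast, now);
		tick(&stuck, now);
	}
	printf("fast queued=%d, stuck queued=%d\n",
	       fast.queued, stuck.queued);	/* 0 and 1 */
	return 0;
}
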
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 0780c2a57ff11..885e5adb0168d 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -72,12 +72,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+ 	_dst_pte = pte_mkdirty(_dst_pte);
+ 	if (page_in_cache && !vm_shared)
+ 		writable = false;
+-	if (writable) {
+-		if (wp_copy)
+-			_dst_pte = pte_mkuffd_wp(_dst_pte);
+-		else
+-			_dst_pte = pte_mkwrite(_dst_pte);
+-	}
++
++	/*
++	 * Always mark a PTE as write-protected when needed, regardless of
++	 * VM_WRITE, which the user might change.
++	 */
++	if (wp_copy)
++		_dst_pte = pte_mkuffd_wp(_dst_pte);
++	else if (writable)
++		_dst_pte = pte_mkwrite(_dst_pte);
+ 
+ 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+ 
+diff --git a/mm/workingset.c b/mm/workingset.c
+index 8c03afe1d67cb..f66a18d1deaad 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -354,7 +354,7 @@ void workingset_refault(struct folio *folio, void *shadow)
+ 
+ 	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
+ 
+-	mem_cgroup_flush_stats();
++	mem_cgroup_flush_stats_delayed();
+ 	/*
+ 	 * Compare the distance to the existing workingset size. We
+ 	 * don't activate pages that couldn't stay resident even if
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 5bce7c66c1219..8c753dcefe7fc 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -866,6 +866,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	struct canfd_frame *cf;
+ 	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+ 	int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
++	s64 hrtimer_sec = 0;
+ 	int off;
+ 	int err;
+ 
+@@ -964,7 +965,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 		isotp_create_fframe(cf, so, ae);
+ 
+ 		/* start timeout for FC */
+-		hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
++		hrtimer_sec = 1;
++		hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
++			      HRTIMER_MODE_REL_SOFT);
+ 	}
+ 
+ 	/* send the first or only CAN frame */
+@@ -977,6 +980,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	if (err) {
+ 		pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+ 			       __func__, ERR_PTR(err));
++
++		/* no transmission -> no timeout monitoring */
++		if (hrtimer_sec)
++			hrtimer_cancel(&so->txtimer);
++
+ 		goto err_out_drop;
+ 	}
+ 
+diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
+index f64b805303cd7..eb204ad36eeec 100644
+--- a/net/dsa/tag_hellcreek.c
++++ b/net/dsa/tag_hellcreek.c
+@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
+ 	u8 *tag;
+ 
++	/* Calculate checksums (if required) before adding the trailer tag to
++	 * avoid including it in calculations. That would lead to wrong
++	 * checksums after the switch strips the tag.
++	 */
++	if (skb->ip_summed == CHECKSUM_PARTIAL &&
++	    skb_checksum_help(skb))
++		return NULL;
++
+ 	/* Tag encoding */
+ 	tag  = skb_put(skb, HELLCREEK_TAG_LEN);
+ 	*tag = BIT(dp->index);
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 70e6c87fbe3df..d747166bb291c 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
+ 	struct page *page;
+ 	struct sk_buff *trailer;
+ 	int tailen = esp->tailen;
+-	unsigned int allocsz;
+ 
+ 	/* this is non-NULL only with TCP/UDP Encapsulation */
+ 	if (x->encap) {
+@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
+ 			return err;
+ 	}
+ 
+-	allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+-	if (allocsz > ESP_SKB_FRAG_MAXSIZE)
++	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
++	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
+ 		goto cow;
+ 
+ 	if (!skb_cloned(skb)) {
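
The reworked gate above (applied identically to esp6 below) checks the tail and the existing data length separately against PAGE_SIZE instead of summing them first; as I read the change, each part lands in its own page-sized allocation and so must fit on its own. A userspace sketch of the predicate with illustrative constants:

#include <stdio.h>

#define L1_CACHE_BYTES 64
#define PAGE_SIZE 4096
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static int can_use_frag_path(unsigned int data_len, unsigned int tailen)
{
	/* Mirrors the inverted test above: fall back to copying (cow)
	 * unless both pieces fit a page after cache-line alignment. */
	return ALIGN(tailen, L1_CACHE_BYTES) <= PAGE_SIZE &&
	       ALIGN(data_len, L1_CACHE_BYTES) <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", can_use_frag_path(1000, 200));	/* 1: both fit */
	printf("%d\n", can_use_frag_path(6000, 200));	/* 0: data part */
	printf("%d\n", can_use_frag_path(1000, 5000));	/* 0: tail part */
	return 0;
}
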
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 55d604c9b3b3e..f2120e92caf15 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -482,7 +482,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
+ 	struct page *page;
+ 	struct sk_buff *trailer;
+ 	int tailen = esp->tailen;
+-	unsigned int allocsz;
+ 
+ 	if (x->encap) {
+ 		int err = esp6_output_encap(x, skb, esp);
+@@ -491,8 +490,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
+ 			return err;
+ 	}
+ 
+-	allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+-	if (allocsz > ESP_SKB_FRAG_MAXSIZE)
++	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
++	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
+ 		goto cow;
+ 
+ 	if (!skb_cloned(skb)) {
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 8753e9cec3264..9762367361463 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -733,9 +733,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 	else
+ 		fl6->daddr = tunnel->parms.raddr;
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+-		return -ENOMEM;
+-
+ 	/* Push GRE header. */
+ 	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
+ 
+@@ -743,6 +740,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 		struct ip_tunnel_info *tun_info;
+ 		const struct ip_tunnel_key *key;
+ 		__be16 flags;
++		int tun_hlen;
+ 
+ 		tun_info = skb_tunnel_info_txcheck(skb);
+ 		if (IS_ERR(tun_info) ||
+@@ -760,9 +758,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 		dsfield = key->tos;
+ 		flags = key->tun_flags &
+ 			(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
+-		tunnel->tun_hlen = gre_calc_hlen(flags);
++		tun_hlen = gre_calc_hlen(flags);
+ 
+-		gre_build_header(skb, tunnel->tun_hlen,
++		if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
++			return -ENOMEM;
++
++		gre_build_header(skb, tun_hlen,
+ 				 flags, protocol,
+ 				 tunnel_id_to_key32(tun_info->key.tun_id),
+ 				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+@@ -772,6 +773,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 		if (tunnel->parms.o_flags & TUNNEL_SEQ)
+ 			tunnel->o_seqno++;
+ 
++		if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
++			return -ENOMEM;
++
+ 		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ 				 protocol, tunnel->parms.o_key,
+ 				 htonl(tunnel->o_seqno));
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index da1bf48e79370..1caeb1ef20956 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3303,6 +3303,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
+ 	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
+ 	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
+ 	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
++	unsigned int val;
+ 	int entries;
+ 
+ 	entries = dst_entries_get_fast(ops);
+@@ -3313,13 +3314,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
+ 	    entries <= rt_max_size)
+ 		goto out;
+ 
+-	net->ipv6.ip6_rt_gc_expire++;
+-	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
++	fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
+ 	entries = dst_entries_get_slow(ops);
+ 	if (entries < ops->gc_thresh)
+-		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
++		atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
+ out:
+-	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
++	val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
++	atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
+ 	return entries > rt_max_size;
+ }
+ 
+@@ -6514,7 +6515,7 @@ static int __net_init ip6_route_net_init(struct net *net)
+ 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+ 	net->ipv6.sysctl.skip_notify_on_dev_down = 0;
+ 
+-	net->ipv6.ip6_rt_gc_expire = 30*HZ;
++	atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
+ 
+ 	ret = 0;
+ out:
+diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
+index 17927966abb33..8b14a24f10404 100644
+--- a/net/l3mdev/l3mdev.c
++++ b/net/l3mdev/l3mdev.c
+@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+ 
+ 	dev = dev_get_by_index_rcu(net, ifindex);
+ 	while (dev && !netif_is_l3_master(dev))
+-		dev = netdev_master_upper_dev_get(dev);
++		dev = netdev_master_upper_dev_get_rcu(dev);
+ 
+ 	return dev ? dev->ifindex : 0;
+ }
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 47a876ccd2881..05a3795eac8e9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2263,6 +2263,13 @@ static int netlink_dump(struct sock *sk)
+ 	 * single netdev. The outcome is MSG_TRUNC error.
+ 	 */
+ 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
++
++	/* Make sure malicious BPF programs cannot read uninitialized memory
++	 * from skb->head -> skb->data.
++	 */
++	skb_reset_network_header(skb);
++	skb_reset_mac_header(skb);
++
+ 	netlink_skb_set_owner_r(skb, sk);
+ 
+ 	if (nlk->dump_done_errno > 0) {
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index c591b923016a6..d77c21ff066c9 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2436,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+ 
+ 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
++		if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
+ 			OVS_NLERR(log, "Flow action size exceeds max %u",
+ 				  MAX_ACTIONS_BUFSIZE);
+ 			return ERR_PTR(-EMSGSIZE);
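
The comparison above is rewritten because the old form subtracts in unsigned arithmetic: once next_offset exceeds the cap, MAX_ACTIONS_BUFSIZE - next_offset wraps to a huge value and an oversized request slips through. A short demonstration; the constant mirrors the OVS define but is only illustrative here:

#include <stdio.h>

#define MAX_ACTIONS_BUFSIZE (32 * 1024)

static int old_rejects(unsigned int next_offset, unsigned int req_size)
{
	/* Buggy form: the subtraction wraps when next_offset > MAX. */
	return (MAX_ACTIONS_BUFSIZE - next_offset) < req_size;
}

static int new_rejects(unsigned int next_offset, unsigned int req_size)
{
	/* Fixed form: no subtraction, so no unsigned wrap-around. */
	return (next_offset + req_size) > MAX_ACTIONS_BUFSIZE;
}

int main(void)
{
	unsigned int next_offset = MAX_ACTIONS_BUFSIZE + 16, req = 8;

	printf("old rejects: %d\n", old_rejects(next_offset, req));	/* 0 */
	printf("new rejects: %d\n", new_rejects(next_offset, req));	/* 1 */
	return 0;
}
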
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a7273af2d9009..e3c60251e708a 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2856,8 +2856,9 @@ tpacket_error:
+ 
+ 		status = TP_STATUS_SEND_REQUEST;
+ 		err = po->xmit(skb);
+-		if (unlikely(err > 0)) {
+-			err = net_xmit_errno(err);
++		if (unlikely(err != 0)) {
++			if (err > 0)
++				err = net_xmit_errno(err);
+ 			if (err && __packet_get_status(po, ph) ==
+ 				   TP_STATUS_AVAILABLE) {
+ 				/* skb was destructed already */
+@@ -3058,8 +3059,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		skb->no_fcs = 1;
+ 
+ 	err = po->xmit(skb);
+-	if (err > 0 && (err = net_xmit_errno(err)) != 0)
+-		goto out_unlock;
++	if (unlikely(err != 0)) {
++		if (err > 0)
++			err = net_xmit_errno(err);
++		if (err)
++			goto out_unlock;
++	}
+ 
+ 	dev_put(dev);
+ 
+diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
+index f15d6942da453..cc7e30733feb0 100644
+--- a/net/rxrpc/net_ns.c
++++ b/net/rxrpc/net_ns.c
+@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
+ 	struct rxrpc_net *rxnet = rxrpc_net(net);
+ 
+ 	rxnet->live = false;
++	del_timer_sync(&rxnet->peer_keepalive_timer);
+ 	cancel_work_sync(&rxnet->peer_keepalive_work);
++	/* Remove the timer again as the worker may have restarted it. */
+ 	del_timer_sync(&rxnet->peer_keepalive_timer);
+ 	rxrpc_destroy_all_calls(rxnet);
+ 	rxrpc_destroy_all_connections(rxnet);
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index cf5649292ee00..4d27300c287c4 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
+ 	return 0;
+ }
+ 
+-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
++static void __u32_destroy_key(struct tc_u_knode *n)
+ {
+ 	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+ 
+ 	tcf_exts_destroy(&n->exts);
+-	tcf_exts_put_net(&n->exts);
+ 	if (ht && --ht->refcnt == 0)
+ 		kfree(ht);
++	kfree(n);
++}
++
++static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
++{
++	tcf_exts_put_net(&n->exts);
+ #ifdef CONFIG_CLS_U32_PERF
+ 	if (free_pf)
+ 		free_percpu(n->pf);
+@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+ 	if (free_pf)
+ 		free_percpu(n->pcpu_success);
+ #endif
+-	kfree(n);
+-	return 0;
++	__u32_destroy_key(n);
+ }
+ 
+ /* u32_delete_key_rcu should be called when free'ing a copied
+@@ -811,10 +815,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
+ 	new->flags = n->flags;
+ 	RCU_INIT_POINTER(new->ht_down, ht);
+ 
+-	/* bump reference count as long as we hold pointer to structure */
+-	if (ht)
+-		ht->refcnt++;
+-
+ #ifdef CONFIG_CLS_U32_PERF
+ 	/* Statistics may be incremented by readers during update
+ 	 * so we must keep them in tact. When the node is later destroyed
+@@ -836,6 +836,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
+ 		return NULL;
+ 	}
+ 
++	/* bump reference count as long as we hold pointer to structure */
++	if (ht)
++		ht->refcnt++;
++
+ 	return new;
+ }
+ 
+@@ -900,13 +904,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 				    extack);
+ 
+ 		if (err) {
+-			u32_destroy_key(new, false);
++			__u32_destroy_key(new);
+ 			return err;
+ 		}
+ 
+ 		err = u32_replace_hw_knode(tp, new, flags, extack);
+ 		if (err) {
+-			u32_destroy_key(new, false);
++			__u32_destroy_key(new);
+ 			return err;
+ 		}
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 303c5e56e4df4..68cd110722a4a 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2538,8 +2538,10 @@ static int smc_shutdown(struct socket *sock, int how)
+ 	if (smc->use_fallback) {
+ 		rc = kernel_sock_shutdown(smc->clcsock, how);
+ 		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
+-		if (sk->sk_shutdown == SHUTDOWN_MASK)
++		if (sk->sk_shutdown == SHUTDOWN_MASK) {
+ 			sk->sk_state = SMC_CLOSED;
++			sock_put(sk);
++		}
+ 		goto out;
+ 	}
+ 	switch (how) {
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index 70fd8b13938ed..8b0a16ba27d39 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -390,22 +390,36 @@ static const struct config_entry config_table[] = {
+ 
+ /* Alder Lake */
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
++	/* Alderlake-S */
+ 	{
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ 		.device = 0x7ad0,
+ 	},
++	/* RaptorLake-S */
+ 	{
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+-		.device = 0x51c8,
++		.device = 0x7a50,
+ 	},
++	/* Alderlake-P */
+ 	{
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+-		.device = 0x51cc,
++		.device = 0x51c8,
+ 	},
+ 	{
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ 		.device = 0x51cd,
+ 	},
++	/* Alderlake-PS */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = 0x51c9,
++	},
++	/* Alderlake-M */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = 0x51cc,
++	},
++	/* Alderlake-N */
+ 	{
+ 		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ 		.device = 0x54c8,
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index cf4f277dccdda..26637a6959792 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1387,7 +1387,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
+ 
+  last_try:
+ 	/* the last try; check the empty slots in pins */
+-	for (i = 0; i < spec->num_nids; i++) {
++	for (i = 0; i < spec->pcm_used; i++) {
+ 		if (!test_bit(i, &spec->pcm_bitmap))
+ 			return i;
+ 	}
+@@ -2263,7 +2263,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
+ 	 * dev_num is the device entry number in a pin
+ 	 */
+ 
+-	if (codec->mst_no_extra_pcms)
++	if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
++		pcm_num = spec->num_cvts;
++	else if (codec->mst_no_extra_pcms)
+ 		pcm_num = spec->num_nids;
+ 	else
+ 		pcm_num = spec->num_nids + spec->dev_num - 1;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ca40c2bd8ba62..c66d31d8a498c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9116,6 +9116,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
+ 	SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
+diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
+index 33e43013ff770..0d639a33ad969 100644
+--- a/sound/soc/atmel/sam9g20_wm8731.c
++++ b/sound/soc/atmel/sam9g20_wm8731.c
+@@ -46,35 +46,6 @@
+  */
+ #undef ENABLE_MIC_INPUT
+ 
+-static struct clk *mclk;
+-
+-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
+-					struct snd_soc_dapm_context *dapm,
+-					enum snd_soc_bias_level level)
+-{
+-	static int mclk_on;
+-	int ret = 0;
+-
+-	switch (level) {
+-	case SND_SOC_BIAS_ON:
+-	case SND_SOC_BIAS_PREPARE:
+-		if (!mclk_on)
+-			ret = clk_enable(mclk);
+-		if (ret == 0)
+-			mclk_on = 1;
+-		break;
+-
+-	case SND_SOC_BIAS_OFF:
+-	case SND_SOC_BIAS_STANDBY:
+-		if (mclk_on)
+-			clk_disable(mclk);
+-		mclk_on = 0;
+-		break;
+-	}
+-
+-	return ret;
+-}
+-
+ static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
+ 	SND_SOC_DAPM_MIC("Int Mic", NULL),
+ 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
+@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
+ 	.owner = THIS_MODULE,
+ 	.dai_link = &at91sam9g20ek_dai,
+ 	.num_links = 1,
+-	.set_bias_level = at91sam9g20ek_set_bias_level,
+ 
+ 	.dapm_widgets = at91sam9g20ek_dapm_widgets,
+ 	.num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
+@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct device_node *codec_np, *cpu_np;
+-	struct clk *pllb;
+ 	struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
+ 	int ret;
+ 
+@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * Codec MCLK is supplied by PCK0 - set it up.
+-	 */
+-	mclk = clk_get(NULL, "pck0");
+-	if (IS_ERR(mclk)) {
+-		dev_err(&pdev->dev, "Failed to get MCLK\n");
+-		ret = PTR_ERR(mclk);
+-		goto err;
+-	}
+-
+-	pllb = clk_get(NULL, "pllb");
+-	if (IS_ERR(pllb)) {
+-		dev_err(&pdev->dev, "Failed to get PLLB\n");
+-		ret = PTR_ERR(pllb);
+-		goto err_mclk;
+-	}
+-	ret = clk_set_parent(mclk, pllb);
+-	clk_put(pllb);
+-	if (ret != 0) {
+-		dev_err(&pdev->dev, "Failed to set MCLK parent\n");
+-		goto err_mclk;
+-	}
+-
+-	clk_set_rate(mclk, MCLK_RATE);
+-
+ 	card->dev = &pdev->dev;
+ 
+ 	/* Parse device node info */
+@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
+ 
+ 	return ret;
+ 
+-err_mclk:
+-	clk_put(mclk);
+-	mclk = NULL;
+ err:
+ 	atmel_ssc_put_audio(0);
+ 	return ret;
+@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
+ {
+ 	struct snd_soc_card *card = platform_get_drvdata(pdev);
+ 
+-	clk_disable(mclk);
+-	mclk = NULL;
+ 	snd_soc_unregister_card(card);
+ 	atmel_ssc_put_audio(0);
+ 
+diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
+index 9ad7fc0baf072..20a07c92b2fc2 100644
+--- a/sound/soc/codecs/msm8916-wcd-digital.c
++++ b/sound/soc/codecs/msm8916-wcd-digital.c
+@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
+ 
+ 	dev_set_drvdata(dev, priv);
+ 
+-	return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
++	ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
+ 				      msm8916_wcd_digital_dai,
+ 				      ARRAY_SIZE(msm8916_wcd_digital_dai));
++	if (ret)
++		goto err_mclk;
++
++	return 0;
++
++err_mclk:
++	clk_disable_unprepare(priv->mclk);
+ err_clk:
+ 	clk_disable_unprepare(priv->ahbclk);
+ 	return ret;
+diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
+index 8fffe378618d0..cce6f4e7992f5 100644
+--- a/sound/soc/codecs/rk817_codec.c
++++ b/sound/soc/codecs/rk817_codec.c
+@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
+ 
+ 	rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
+ 
+-	rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
++	rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
+ 	if (IS_ERR(rk817_codec_data->mclk)) {
+ 		dev_dbg(&pdev->dev, "Unable to get mclk\n");
+ 		ret = -ENXIO;
+diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
+index be68d573a4906..c9ff9c89adf70 100644
+--- a/sound/soc/codecs/rt5682.c
++++ b/sound/soc/codecs/rt5682.c
+@@ -2822,14 +2822,11 @@ static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ 	for_each_component_dais(component, dai)
+ 		if (dai->id == RT5682_AIF1)
+-			break;
+-	if (!dai) {
+-		dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
+-			RT5682_AIF1);
+-		return -ENODEV;
+-	}
++			return rt5682_set_bclk1_ratio(dai, factor);
+ 
+-	return rt5682_set_bclk1_ratio(dai, factor);
++	dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
++		RT5682_AIF1);
++	return -ENODEV;
+ }
+ 
+ static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
+diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
+index 92b8753f1267b..f2296090716f3 100644
+--- a/sound/soc/codecs/rt5682s.c
++++ b/sound/soc/codecs/rt5682s.c
+@@ -2679,14 +2679,11 @@ static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ 	for_each_component_dais(component, dai)
+ 		if (dai->id == RT5682S_AIF1)
+-			break;
+-	if (!dai) {
+-		dev_err(component->dev, "dai %d not found in component\n",
+-			RT5682S_AIF1);
+-		return -ENODEV;
+-	}
++			return rt5682s_set_bclk1_ratio(dai, factor);
+ 
+-	return rt5682s_set_bclk1_ratio(dai, factor);
++	dev_err(component->dev, "dai %d not found in component\n",
++		RT5682S_AIF1);
++	return -ENODEV;
+ }
+ 
+ static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 1e75e93cf28f2..6298ebe96e941 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
+ 	if (sido_src == wcd->sido_input_src)
+ 		return 0;
+ 
+-	if (sido_src == SIDO_SOURCE_INTERNAL) {
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
+-				   WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
+-		usleep_range(100, 110);
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
+-				   WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
+-		usleep_range(100, 110);
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
+-				   WCD934X_ANA_RCO_BG_EN_MASK, 0);
+-		usleep_range(100, 110);
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
+-				   WCD934X_ANA_BUCK_PRE_EN1_MASK,
+-				   WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
+-		usleep_range(100, 110);
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
+-				   WCD934X_ANA_BUCK_PRE_EN2_MASK,
+-				   WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
+-		usleep_range(100, 110);
+-		regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
+-				   WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
+-				   WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
+-		usleep_range(100, 110);
+-	} else if (sido_src == SIDO_SOURCE_RCO_BG) {
++	if (sido_src == SIDO_SOURCE_RCO_BG) {
+ 		regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
+ 				   WCD934X_ANA_RCO_BG_EN_MASK,
+ 				   WCD934X_ANA_RCO_BG_ENABLE);
+@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
+ 	regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
+ 			   WCD934X_EXT_CLK_BUF_EN_MASK |
+ 			   WCD934X_MCLK_EN_MASK, 0x0);
+-	wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
+-
+ 	regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
+ 			   WCD934X_ANA_BIAS_EN_MASK, 0);
+ 	regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b06c5682445c0..fb43b331a36e8 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1687,8 +1687,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
+ 		switch (w->id) {
+ 		case snd_soc_dapm_pre:
+ 			if (!w->event)
+-				list_for_each_entry_safe_continue(w, n, list,
+-								  power_list);
++				continue;
+ 
+ 			if (event == SND_SOC_DAPM_STREAM_START)
+ 				ret = w->event(w,
+@@ -1700,8 +1699,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
+ 
+ 		case snd_soc_dapm_post:
+ 			if (!w->event)
+-				list_for_each_entry_safe_continue(w, n, list,
+-								  power_list);
++				continue;
+ 
+ 			if (event == SND_SOC_DAPM_STREAM_START)
+ 				ret = w->event(w,
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index cb24805668bd8..f413238117af7 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1479,12 +1479,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
+ 	template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
+ 	kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
+ 	if (!kc)
+-		goto err;
++		goto hdr_err;
+ 
+ 	kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
+ 				     GFP_KERNEL);
+ 	if (!kcontrol_type)
+-		goto err;
++		goto hdr_err;
+ 
+ 	for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
+ 		control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index e72dcae5e7ee7..0db11ba559d65 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -1569,6 +1569,46 @@ static int sof_widget_load_buffer(struct snd_soc_component *scomp, int index,
+ 	return 0;
+ }
+ 
++static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
++				      struct snd_soc_dapm_widget *w)
++{
++	struct snd_soc_card *card = scomp->card;
++	struct snd_soc_pcm_runtime *rtd;
++	struct snd_soc_dai *cpu_dai;
++	int i;
++
++	if (!w->sname)
++		return;
++
++	list_for_each_entry(rtd, &card->rtd_list, list) {
++		/* does stream match DAI link ? */
++		if (!rtd->dai_link->stream_name ||
++		    strcmp(w->sname, rtd->dai_link->stream_name))
++			continue;
++
++		switch (w->id) {
++		case snd_soc_dapm_dai_out:
++			for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
++				if (cpu_dai->capture_widget == w) {
++					cpu_dai->capture_widget = NULL;
++					break;
++				}
++			}
++			break;
++		case snd_soc_dapm_dai_in:
++			for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
++				if (cpu_dai->playback_widget == w) {
++					cpu_dai->playback_widget = NULL;
++					break;
++				}
++			}
++			break;
++		default:
++			break;
++		}
++	}
++}
++
+ /* bind PCM ID to host component ID */
+ static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
+ 		     int dir)
+@@ -2449,6 +2489,9 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
+ 			kfree(dai->dai_config);
+ 			list_del(&dai->list);
+ 		}
++
++		sof_disconnect_dai_widget(scomp, widget);
++
+ 		break;
+ 	default:
+ 		break;
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 2c01649c70f61..7c6ca2b433a53 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+ 		} while (drain_urbs && timeout);
+ 		finish_wait(&ep->drain_wait, &wait);
+ 	}
++	port->active = 0;
+ 	spin_unlock_irq(&ep->buffer_lock);
+ }
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 64f5544d0a0aa..7ef7a8abcc2b1 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -599,6 +599,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x0db0, 0x419c),
+ 		.map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
+ 	},
++	{	/* MSI MAG X570S Torpedo Max */
++		.id = USB_ID(0x0db0, 0xa073),
++		.map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
++	},
+ 	{	/* MSI TRX40 */
+ 		.id = USB_ID(0x0db0, 0x543d),
+ 		.map = trx40_mobo_map,
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 167834133b9bc..b8359a0aa008a 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -8,7 +8,7 @@
+  */
+ 
+ /* handling of USB vendor/product ID pairs as 32-bit numbers */
+-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
++#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
+ #define USB_ID_VENDOR(id) ((id) >> 16)
+ #define USB_ID_PRODUCT(id) ((u16)(id))
+ 
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index 9a770bfdc8042..15d42d871b3e6 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ {
+ 	struct perf_evsel *evsel;
+ 	const struct perf_cpu_map *cpus = evlist->cpus;
+-	const struct perf_thread_map *threads = evlist->threads;
+ 
+ 	if (!ops || !ops->get || !ops->mmap)
+ 		return -EINVAL;
+@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+ 	perf_evlist__for_each_entry(evlist, evsel) {
+ 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ 		    evsel->sample_id == NULL &&
+-		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
++		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
+ 			return -ENOMEM;
+ 	}
+ 
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 1dd92d8c92799..a6bb35b0af9f9 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -349,6 +349,7 @@ static int report__setup_sample_type(struct report *rep)
+ 	struct perf_session *session = rep->session;
+ 	u64 sample_type = evlist__combined_sample_type(session->evlist);
+ 	bool is_pipe = perf_data__is_pipe(session->data);
++	struct evsel *evsel;
+ 
+ 	if (session->itrace_synth_opts->callchain ||
+ 	    session->itrace_synth_opts->add_callchain ||
+@@ -403,6 +404,19 @@ static int report__setup_sample_type(struct report *rep)
+ 	}
+ 
+ 	if (sort__mode == SORT_MODE__MEMORY) {
++		/*
++		 * FIXUP: prior to kernel 5.18, Arm SPE missed to set
++		 * PERF_SAMPLE_DATA_SRC bit in sample type.  For backward
++		 * compatibility, set the bit if it's an old perf data file.
++		 */
++		evlist__for_each_entry(session->evlist, evsel) {
++			if (strstr(evsel->name, "arm_spe") &&
++				!(sample_type & PERF_SAMPLE_DATA_SRC)) {
++				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
++				sample_type |= PERF_SAMPLE_DATA_SRC;
++			}
++		}
++
+ 		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ 			ui__error("Selected --mem-mode but no mem data. "
+ 				  "Did you call perf record without -d?\n");
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index fa478ddcd18ae..537a552fe6b3b 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -459,7 +459,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
+ 		return -EINVAL;
+ 
+ 	if (PRINT_FIELD(DATA_SRC) &&
+-	    evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
++	    evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
+ 		return -EINVAL;
+ 
+ 	if (PRINT_FIELD(WEIGHT) &&
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
+index 429f7ee735cf4..fd23c80eba315 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
+@@ -159,6 +159,17 @@ flooding_remotes_add()
+ 	local lsb
+ 	local i
+ 
++	# Prevent unwanted packets from entering the bridge and interfering
++	# with the test.
++	tc qdisc add dev br0 clsact
++	tc filter add dev br0 egress protocol all pref 1 handle 1 \
++		matchall skip_hw action drop
++	tc qdisc add dev $h1 clsact
++	tc filter add dev $h1 egress protocol all pref 1 handle 1 \
++		flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
++	tc filter add dev $h1 egress protocol all pref 2 handle 2 \
++		matchall skip_hw action drop
++
+ 	for i in $(eval echo {1..$num_remotes}); do
+ 		lsb=$((i + 1))
+ 
+@@ -195,6 +206,12 @@ flooding_filters_del()
+ 	done
+ 
+ 	tc qdisc del dev $rp2 clsact
++
++	tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
++	tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
++	tc qdisc del dev $h1 clsact
++	tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
++	tc qdisc del dev br0 clsact
+ }
+ 
+ flooding_check_packets()
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
+index fedcb7b35af9f..af5ea50ed5c0e 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
+@@ -172,6 +172,17 @@ flooding_filters_add()
+ 	local lsb
+ 	local i
+ 
++	# Prevent unwanted packets from entering the bridge and interfering
++	# with the test.
++	tc qdisc add dev br0 clsact
++	tc filter add dev br0 egress protocol all pref 1 handle 1 \
++		matchall skip_hw action drop
++	tc qdisc add dev $h1 clsact
++	tc filter add dev $h1 egress protocol all pref 1 handle 1 \
++		flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
++	tc filter add dev $h1 egress protocol all pref 2 handle 2 \
++		matchall skip_hw action drop
++
+ 	tc qdisc add dev $rp2 clsact
+ 
+ 	for i in $(eval echo {1..$num_remotes}); do
+@@ -194,6 +205,12 @@ flooding_filters_del()
+ 	done
+ 
+ 	tc qdisc del dev $rp2 clsact
++
++	tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
++	tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
++	tc qdisc del dev $h1 clsact
++	tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
++	tc qdisc del dev br0 clsact
+ }
+ 
+ flooding_check_packets()
+diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
+index b08d30bf71c51..3b940a101bc07 100644
+--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
++++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
+@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
+ 	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
+ }
+ 
++static int gic_fd;
++
+ static struct kvm_vm *test_vm_create(void)
+ {
+ 	struct kvm_vm *vm;
+ 	unsigned int i;
+-	int ret;
+ 	int nr_vcpus = test_args.nr_vcpus;
+ 
+ 	vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
+ 
+ 	ucall_init(vm, NULL);
+ 	test_init_timer_irq(vm);
+-	ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+-	if (ret < 0) {
++	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
++	if (gic_fd < 0) {
+ 		print_skip("Failed to create vgic-v3");
+ 		exit(KSFT_SKIP);
+ 	}
+@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
+ 	return vm;
+ }
+ 
++static void test_vm_cleanup(struct kvm_vm *vm)
++{
++	close(gic_fd);
++	kvm_vm_free(vm);
++}
++
+ static void test_print_help(char *name)
+ {
+ 	pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
+@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
+ 
+ 	vm = test_vm_create();
+ 	test_run(vm);
+-	kvm_vm_free(vm);
++	test_vm_cleanup(vm);
+ 
+ 	return 0;
+ }

