public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-05 18:02 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-05 18:02 UTC (permalink / raw)
  To: gentoo-commits

commit:     bf5362e40b7eaa6ea37b4205fdc8f6f106de74d9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May  5 17:54:13 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May  5 18:02:15 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bf5362e4

Update to KSPP patch

Bug: https://bugs.gentoo.org/930733

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 6134393f..87b8fa95 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,9 +6,9 @@
  source "Documentation/Kconfig"
 +
 +source "distro/Kconfig"
---- /dev/null	2024-04-27 13:10:54.188000027 -0400
-+++ b/distro/Kconfig	2024-04-27 17:56:56.723132353 -0400
-@@ -0,0 +1,295 @@
+--- /dev/null	2024-05-05 10:40:37.103999988 -0400
++++ b/distro/Kconfig	2024-05-05 13:37:37.699554927 -0400
+@@ -0,0 +1,310 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -178,7 +178,7 @@
 +		to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_COMMON and search for 
 +		GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for dependency information on your 
 +		specific architecture.
-+		Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 
++		Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 +		for X86_64
 +
 +if GENTOO_KERNEL_SELF_PROTECTION
@@ -201,10 +201,13 @@
 +	select DEBUG_SG
 +	select HARDENED_USERCOPY if HAVE_HARDENED_USERCOPY_ALLOCATOR=y
 +	select KFENCE if HAVE_ARCH_KFENCE && (!SLAB || SLUB)
++	select PAGE_TABLE_CHECK if ARCH_SUPPORTS_PAGE_TABLE_CHECK=y && EXCLUSIVE_SYSTEM_RAM=y  
++	select PAGE_TABLE_CHECK_ENFORCED if PAGE_TABLE_CHECK=y
 +	select RANDOMIZE_KSTACK_OFFSET_DEFAULT if HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET && (INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION>=140000)
 +	select SECURITY_LANDLOCK
 +	select SCHED_CORE if SCHED_SMT
 +	select BUG_ON_DATA_CORRUPTION
++	select RANDOM_KMALLOC_CACHES if SLUB_TINY=n
 +	select SCHED_STACK_END_CHECK
 +	select SECCOMP if HAVE_ARCH_SECCOMP
 +	select SECCOMP_FILTER if HAVE_ARCH_SECCOMP_FILTER
@@ -245,11 +248,13 @@
 +	default n
 +	
 +	select GCC_PLUGIN_STACKLEAK
++	select X86_KERNEL_IBT if CC_HAS_IBT=y && HAVE_OBJTOOL=y && (LD_IS_LLD=n || LLD_VERSION>=140000)
 +	select LEGACY_VSYSCALL_NONE
 + 	select PAGE_TABLE_ISOLATION
 +	select RANDOMIZE_BASE
 +	select RANDOMIZE_MEMORY
 +	select RELOCATABLE
++	select X86_USER_SHADOW_STACK if AS_WRUSS=y
 +	select VMAP_STACK
 +
 +
@@ -259,11 +264,21 @@
 +	depends on ARM64
 +	default n
 +
-+	select RANDOMIZE_BASE
-+	select RELOCATABLE
++	select ARM64_BTI
++	select ARM64_E0PD
++	select ARM64_EPAN if ARM64_PAN=y
++	select ARM64_MTE if (ARM64_AS_HAS_MTE=y && ARM64_TAGGED_ADDR_ABI=y ) && ( AS_HAS_ARMV8_5=y ) && ( AS_HAS_LSE_ATOMICS=y ) && ( ARM64_PAN=y )
++	select ARM64_PTR_AUTH
++	select ARM64_PTR_AUTH_KERNEL if ( ARM64_PTR_AUTH=y ) && (( CC_HAS_SIGN_RETURN_ADDRESS=y || CC_HAS_BRANCH_PROT_PAC_RET=y ) && AS_HAS_ARMV8_3=y ) && ( LD_IS_LLD=y || LD_VERSION >= 23301 || ( CC_IS_GCC=y && GCC_VERSION < 90100 )) && (CC_IS_CLANG=n || AS_HAS_CFI_NEGATE_RA_STATE=y ) && ((FUNCTION_GRAPH_TRACER=n || DYNAMIC_FTRACE_WITH_ARGS=y ))
++	select ARM64_BTI_KERNEL if ( ARM64_BTI=y ) && ( ARM64_PTR_AUTH_KERNEL=y ) && ( CC_HAS_BRANCH_PROT_PAC_RET_BTI=y ) && (CC_IS_GCC=n || GCC_VERSION >= 100100 ) && (CC_IS_GCC=n ) && ((FUNCTION_GRAPH_TRACER=n || DYNAMIC_FTRACE_WITH_ARGS=y ))
 +	select ARM64_SW_TTBR0_PAN
 +	select UNMAP_KERNEL_AT_EL0
 +	select GCC_PLUGIN_STACKLEAK
++	select KASAN_HW_TAGS if HAVE_ARCH_KASAN_HW_TAGS=y
++	select RANDOMIZE_BASE
++	select RELOCATABLE
++	select SHADOW_CALL_STACK if ARCH_SUPPORTS_SHADOW_CALL_STACK=y && (DYNAMIC_FTRACE_WITH_ARGS=y || DYNAMIC_FTRACE_WITH_REGS=y || FUNCTION_GRAPH_TRACER=n) && MMU=y 
++	select UNWIND_PATCH_PAC_INTO_SCS if (CC_IS_CLANG=y && CLANG_VERSION >= 150000 ) && ( ARM64_PTR_AUTH_KERNEL=y && CC_HAS_BRANCH_PROT_PAC_RET=y ) && ( SHADOW_CALL_STACK=y )
 +	select VMAP_STACK
 +
 +config GENTOO_KERNEL_SELF_PROTECTION_X86_32
@@ -304,12 +319,6 @@
 +		See the settings that become available for more details and fine-tuning.
 +
 +endmenu
-From bd3ff0b16792c18c0614c2b95e148943209f460a Mon Sep 17 00:00:00 2001
-From: Georgy Yakovlev <gyakovlev@gentoo.org>
-Date: Tue, 8 Jun 2021 13:59:57 -0700
-Subject: [PATCH 2/2] set DEFAULT_MMAP_MIN_ADDR by default
-
----
  mm/Kconfig | 2 ++
  1 file changed, 2 insertions(+)
 


^ permalink raw reply related	[flat|nested] 18+ messages in thread
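
For context on the CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 value cited in the
patch help text above: the setting forbids unprivileged mappings below that
address, which defeats exploits that turn a NULL (or near-NULL) kernel
pointer dereference into a read of attacker-controlled memory. A minimal
userspace sketch, not part of the commit, that demonstrates the enforcement:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* 0x1000 is below the 65536-byte floor recommended for X86_64. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap below mmap_min_addr refused: %s\n", strerror(errno));
	else
		printf("mapping at %p succeeded; the floor is not enforced\n", p);
	return 0;
}

On a kernel carrying the hardened default, the mmap() call fails with EPERM
for any process lacking CAP_SYS_RAWIO.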

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-17 11:32 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-17 11:32 UTC (permalink / raw)
  To: gentoo-commits

commit:     705e865cb7116eec6fa548403b2d0c5c6309dd0e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 17 11:23:48 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 17 11:23:48 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=705e865c

Linux patch 6.9.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1000_linux-6.9.1.patch | 286 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 290 insertions(+)

diff --git a/0000_README b/0000_README
index 887cea4c..02b4c7fb 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.9.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.1
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1000_linux-6.9.1.patch b/1000_linux-6.9.1.patch
new file mode 100644
index 00000000..277d2283
--- /dev/null
+++ b/1000_linux-6.9.1.patch
@@ -0,0 +1,286 @@
+diff --git a/Makefile b/Makefile
+index 967e97878ecdf..a7045435151e6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index c095a2c8f6595..39935071174a3 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -400,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	int rc;
+ 
+ 	dev_dbg(&pdev->dev, "%s called\n", __func__);
++
++	/*
++	 * Due to an erratum in some of the devices supported by the driver,
++	 * direct user submission to the device can be unsafe.
++	 * (See the INTEL-SA-01084 security advisory)
++	 *
++	 * For the devices that exhibit this behavior, require that the user
++	 * has CAP_SYS_RAWIO capabilities.
++	 */
++	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
+ 	rc = check_vma(wq, vma, __func__);
+ 	if (rc < 0)
+ 		return rc;
+@@ -414,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+ 			vma->vm_page_prot);
+ }
+ 
++static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
++				       struct dsa_hw_desc __user *udesc)
++{
++	struct idxd_wq *wq = ctx->wq;
++	struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
++	const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
++	void __iomem *portal = idxd_wq_portal_addr(wq);
++	struct dsa_hw_desc descriptor __aligned(64);
++	int rc;
++
++	rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
++	if (rc)
++		return -EFAULT;
++
++	/*
++	 * DSA devices are capable of indirect ("batch") command submission.
++	 * On devices where direct user submissions are not safe, we cannot
++	 * allow this since there is no good way for us to verify these
++	 * indirect commands.
++	 */
++	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
++		!wq->idxd->user_submission_safe)
++		return -EINVAL;
++	/*
++	 * As per the programming specification, the completion address must be
++	 * aligned to 32 or 64 bytes. If this is violated the hardware
++	 * engine can get very confused (security issue).
++	 */
++	if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
++		return -EINVAL;
++
++	if (wq_dedicated(wq))
++		iosubmit_cmds512(portal, &descriptor, 1);
++	else {
++		descriptor.priv = 0;
++		descriptor.pasid = ctx->pasid;
++		rc = idxd_enqcmds(wq, portal, &descriptor);
++		if (rc < 0)
++			return rc;
++	}
++
++	return 0;
++}
++
++static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
++			       loff_t *unused)
++{
++	struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
++	struct idxd_user_context *ctx = filp->private_data;
++	ssize_t written = 0;
++	int i;
++
++	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
++		int rc = idxd_submit_user_descriptor(ctx, udesc + i);
++
++		if (rc)
++			return written ? written : rc;
++
++		written += sizeof(struct dsa_hw_desc);
++	}
++
++	return written;
++}
++
+ static __poll_t idxd_cdev_poll(struct file *filp,
+ 			       struct poll_table_struct *wait)
+ {
+@@ -436,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
+ 	.open = idxd_cdev_open,
+ 	.release = idxd_cdev_release,
+ 	.mmap = idxd_cdev_mmap,
++	.write = idxd_cdev_write,
+ 	.poll = idxd_cdev_poll,
+ };
+ 
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 7b98944135eb4..868b724a3b75b 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -288,6 +288,7 @@ struct idxd_driver_data {
+ 	int evl_cr_off;
+ 	int cr_status_off;
+ 	int cr_result_off;
++	bool user_submission_safe;
+ 	load_device_defaults_fn_t load_device_defaults;
+ };
+ 
+@@ -374,6 +375,8 @@ struct idxd_device {
+ 
+ 	struct dentry *dbgfs_dir;
+ 	struct dentry *dbgfs_evl_file;
++
++	bool user_submission_safe;
+ };
+ 
+ static inline unsigned int evl_ent_size(struct idxd_device *idxd)
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 264c4e47d7cca..a7295943fa223 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ 		.align = 32,
+ 		.dev_type = &dsa_device_type,
+ 		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
++		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ 		.cr_status_off = offsetof(struct dsa_completion_record, status),
+ 		.cr_result_off = offsetof(struct dsa_completion_record, result),
+ 	},
+@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
+ 		.align = 64,
+ 		.dev_type = &iax_device_type,
+ 		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
++		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
+ 		.cr_status_off = offsetof(struct iax_completion_record, status),
+ 		.cr_result_off = offsetof(struct iax_completion_record, error_code),
+ 		.load_device_defaults = idxd_load_iaa_device_defaults,
+@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
+ 		 idxd->hw.version);
+ 
++	idxd->user_submission_safe = data->user_submission_safe;
++
+ 	return 0;
+ 
+  err_dev_register:
+diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
+index 315c004f58e47..e16dbf9ab324c 100644
+--- a/drivers/dma/idxd/registers.h
++++ b/drivers/dma/idxd/registers.h
+@@ -6,9 +6,6 @@
+ #include <uapi/linux/idxd.h>
+ 
+ /* PCI Config */
+-#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
+-#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe
+-
+ #define DEVICE_VERSION_1		0x100
+ #define DEVICE_VERSION_2		0x200
+ 
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 7f28f01be672b..f706eae0e76b1 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
+ static struct device_attribute dev_attr_wq_enqcmds_retries =
+ 		__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+ 
++static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
++{
++	ssize_t pos;
++	int i;
++
++	pos = 0;
++	for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
++		unsigned long val = opcap_bmap[i];
++
++		/* On systems where direct user submissions are not safe, we need to clear out
++		 * the BATCH capability from the capability mask in sysfs since we cannot support
++		 * that command on such systems.
++		 */
++		if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
++			clear_bit(DSA_OPCODE_BATCH % 64, &val);
++
++		pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
++		pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
++	}
++
++	return pos;
++}
++
+ static ssize_t wq_op_config_show(struct device *dev,
+ 				 struct device_attribute *attr, char *buf)
+ {
+ 	struct idxd_wq *wq = confdev_to_wq(dev);
+ 
+-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
++	return op_cap_show_common(dev, buf, wq->opcap_bmap);
+ }
+ 
+ static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
+@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+ 	struct idxd_device *idxd = confdev_to_idxd(dev);
+ 
+-	return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
++	return op_cap_show_common(dev, buf, idxd->opcap_bmap);
+ }
+ static DEVICE_ATTR_RO(op_cap);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 3709d18da0e6b..075d04ba3ef28 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -1657,6 +1657,10 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+ #endif
+ 
+ const struct ieee80211_ops mt7915_ops = {
++	.add_chanctx = ieee80211_emulate_add_chanctx,
++	.remove_chanctx = ieee80211_emulate_remove_chanctx,
++	.change_chanctx = ieee80211_emulate_change_chanctx,
++	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ 	.tx = mt7915_tx,
+ 	.start = mt7915_start,
+ 	.stop = mt7915_stop,
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index cb5b7f865d585..e727941f589de 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+ 		case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+ 		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+ 		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
++		case PCI_DEVICE_ID_INTEL_DSA_SPR0:
++		case PCI_DEVICE_ID_INTEL_IAX_SPR0:
+ 			return true;
+ 		default:
+ 			return false;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index c547d1d4feb1e..4beb29907c2bd 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2687,8 +2687,10 @@
+ #define PCI_DEVICE_ID_INTEL_I960	0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_0	0x0a0c
++#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_2	0x0c0c
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
++#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe
+ #define PCI_DEVICE_ID_INTEL_HDA_HSW_3	0x0d0c
+ #define PCI_DEVICE_ID_INTEL_HDA_BYT	0x0f04
+ #define PCI_DEVICE_ID_INTEL_SST_BYT	0x0f28
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 5607900383298..0aa5f01d16ffb 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -463,7 +463,8 @@ static int __key_instantiate_and_link(struct key *key,
+ 			if (authkey)
+ 				key_invalidate(authkey);
+ 
+-			key_set_expiry(key, prep->expiry);
++			if (prep->expiry != TIME64_MAX)
++				key_set_expiry(key, prep->expiry);
+ 		}
+ 	}
+ 


^ permalink raw reply related	[flat|nested] 18+ messages in thread
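
For context on the idxd changes in this patch: the INTEL-SA-01084 mitigation
gates mmap() of the work-queue portal behind CAP_SYS_RAWIO on affected
devices and adds a mediated write() path on which the kernel validates every
descriptor before submission. A hedged userspace sketch of that new path;
the device node name is an assumption (nodes are created per enabled work
queue, typically via accel-config), and a real submission additionally needs
a wq configured for user access:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/idxd.h>		/* uapi: struct dsa_hw_desc, DSA_OPCODE_* */

int main(void)
{
	struct dsa_hw_desc desc;
	ssize_t n;
	/* Node name is illustrative; actual wq nodes depend on setup. */
	int fd = open("/dev/dsa/wq0.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&desc, 0, sizeof(desc));
	desc.opcode = DSA_OPCODE_NOOP;
	/* completion_addr, when used, must be 32-byte aligned on DSA
	 * (64 on IAA) or the kernel now rejects it with EINVAL. */

	n = write(fd, &desc, sizeof(desc));
	if (n < 0)
		perror("write");	/* EINVAL: descriptor failed validation */
	close(fd);
	return 0;
}

Batch descriptors (DSA_OPCODE_BATCH) and misaligned completion addresses are
rejected on such hardware, matching the hunks above.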

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-23 12:41 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-23 12:41 UTC (permalink / raw)
  To: gentoo-commits

commit:     9528f04dd74f7ffae81a7909fb07c8a57bb4d814
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 23 12:41:03 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 23 12:41:03 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9528f04d

io_uring/sqpoll: ensure normal task_work is also run timely

Bug: https://bugs.gentoo.org/931942

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |   4 +
 2930_io-uring-sqpoll-ensure-task_work-fix.patch | 115 ++++++++++++++++++++++++
 2 files changed, 119 insertions(+)

diff --git a/0000_README b/0000_README
index 02b4c7fb..55885e7a 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2930_io-uring-sqpoll-ensure-task_work-fix.patch
+From:   https://bugs.gentoo.org/931942
+Desc:   sqpoll: ensure that normal task_work is also run timely
+
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_io-uring-sqpoll-ensure-task_work-fix.patch b/2930_io-uring-sqpoll-ensure-task_work-fix.patch
new file mode 100644
index 00000000..70e62a25
--- /dev/null
+++ b/2930_io-uring-sqpoll-ensure-task_work-fix.patch
@@ -0,0 +1,115 @@
+From mboxrd@z Thu Jan  1 00:00:00 1970
+Received: from mail-io1-f48.google.com (mail-io1-f48.google.com [209.85.166.48])
+	(using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))
+	(No client certificate requested)
+	by smtp.subspace.kernel.org (Postfix) with ESMTPS id C0BC614884F
+	for <io-uring@vger.kernel.org>; Tue, 21 May 2024 19:43:52 +0000 (UTC)
+Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=209.85.166.48
+ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
+	t=1716320634; cv=none; b=KU/mCJ7mBHO+iHipnVTh5iJc7FxW5xbo1S24b4Vfx1HWcIChj7ieZ6M0D4Dly+m6CHUZ/xGmrFxNLNl+hJyl1SruXRuZGd2zsG87whd+SMirOeAcxfkjgTN9YcSuuAs+cr6WBGo33TXA1wYYEdcKAp5+2MtFRlOEfEpneQZ1jRI=
+ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org;
+	s=arc-20240116; t=1716320634; c=relaxed/simple;
+	bh=gaIkoP5Tt9ptNy9eqUxDwFHSVRdtXZdNQxS3gSxXieM=;
+	h=Message-ID:Date:MIME-Version:To:From:Subject:Cc:Content-Type; b=g/8jkVOwbG15NZZ75HTC/Dfc8RLdXo+ufUTtRf0leBrGhctRXfFOQcPJHuIp8HY9Wrch47B9oRjqZL6/m5CaK8aKCXZAQ7dCknHsT8yf8O7aMN+fNs+3QQ7EyZpc+3NjnHZ+NbtSEtGyK2eC5F75Apq4KjVZCYUl/lUQ5sCjIp0=
+ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=kernel.dk; spf=pass smtp.mailfrom=kernel.dk; dkim=pass (2048-bit key) header.d=kernel-dk.20230601.gappssmtp.com header.i=@kernel-dk.20230601.gappssmtp.com header.b=Q/x459tT; arc=none smtp.client-ip=209.85.166.48
+Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=kernel.dk
+Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=kernel.dk
+Authentication-Results: smtp.subspace.kernel.org;
+	dkim=pass (2048-bit key) header.d=kernel-dk.20230601.gappssmtp.com header.i=@kernel-dk.20230601.gappssmtp.com header.b="Q/x459tT"
+Received: by mail-io1-f48.google.com with SMTP id ca18e2360f4ac-7e1b8606bfdso19766539f.3
+        for <io-uring@vger.kernel.org>; Tue, 21 May 2024 12:43:52 -0700 (PDT)
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+        d=kernel-dk.20230601.gappssmtp.com; s=20230601; t=1716320631; x=1716925431; darn=vger.kernel.org;
+        h=content-transfer-encoding:cc:subject:from:to:content-language
+         :user-agent:mime-version:date:message-id:from:to:cc:subject:date
+         :message-id:reply-to;
+        bh=T4xIoHSMmGrzeSfhPivp04fPK+A8FmMUIxCHFcE1zPo=;
+        b=Q/x459tTR9ak5EYWL/Ygb8HtLydtfqDpakjjofFDUwlF24E3mxFim/Nnw4x9AEj/vO
+         Nw0e7YouxTiSj9kxnTdLpVz9LuTMJxvPohmoXfgI9ReSCIJ1I95Dn70CZ1CiQW8bsad1
+         /7LdSpIPqGK1OCnLphDgXdqqyBn/URkVCoVovoxwhWgmDm4DwKMePqCdecoZ/M/o9zr5
+         yEPrJag55yEmCVL6Rfezs07paFsHgHAiX55syf6xBBP2ghaH18+oB8oeeHfbCnHxunNc
+         cTL4mATn49cvERCj4GYxEZWnSB/KVSJw2TQbs8VyyLJauzMx4Jk5S/lrhsMzDolCajWj
+         /Tyw==
+X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+        d=1e100.net; s=20230601; t=1716320631; x=1716925431;
+        h=content-transfer-encoding:cc:subject:from:to:content-language
+         :user-agent:mime-version:date:message-id:x-gm-message-state:from:to
+         :cc:subject:date:message-id:reply-to;
+        bh=T4xIoHSMmGrzeSfhPivp04fPK+A8FmMUIxCHFcE1zPo=;
+        b=W1PQkCljf2qgJy1vEyfE6GT8FahYvbyD7b8TGqRMKaqyAI6lt9kofryakDyC3RKSSQ
+         FZsB+Gx9RQEUu40SOMfUjZQegR+zKaojOx6wtx37pRW85eJG4oNto15sFFfJQSwyKuyN
+         p61QuElnpiAFyaT2QpK3M3NctjaTKRzT+DhJ4+cK1Py69C+ZCqQiaCMgtkIycVWtaUWF
+         evEF260Bry0bjSBbdVfuDaic9WhdvMo2p8c726hK/Bu1CkRs3pGoxkNEHWPQlMmyxzRw
+         zZLB6bwYwQjFAJ6/O0m04m/74Qx1TvUSmx++KafWS0Mn2iVq7rbg/2gPYjLdH/wOoIVf
+         637Q==
+X-Gm-Message-State: AOJu0YyFIIKlIrtLfYpArCPZCQ5Jzozh7A3dzTYrVMDbjr9u4nAs/Wp7
+	ixCpJUwEr1gVybpU68+EwAaRu/9iGdFdgduROcwRveqm10je+a40D0fqv3ilzfyy0QQWWxpTXCD
+	P
+X-Google-Smtp-Source: AGHT+IHIvD2vBkh0fv6wTvoSX5+gjdHhX2Vpo1oJeclj6etBfpIA8v5xmG1uQE1/CW5TRH1jaVRaqw==
+X-Received: by 2002:a05:6e02:148d:b0:36d:cdc1:d76c with SMTP id e9e14a558f8ab-371f617e0c6mr462975ab.0.1716320630872;
+        Tue, 21 May 2024 12:43:50 -0700 (PDT)
+Received: from [192.168.1.116] ([96.43.243.2])
+        by smtp.gmail.com with ESMTPSA id e9e14a558f8ab-36cb9d3f219sm66602285ab.12.2024.05.21.12.43.49
+        (version=TLS1_3 cipher=TLS_AES_128_GCM_SHA256 bits=128/128);
+        Tue, 21 May 2024 12:43:49 -0700 (PDT)
+Message-ID: <45f46362-7dc2-4ab5-ab49-0f3cac1d58fb@kernel.dk>
+Date: Tue, 21 May 2024 13:43:48 -0600
+Precedence: bulk
+X-Mailing-List: io-uring@vger.kernel.org
+List-Id: <io-uring.vger.kernel.org>
+List-Subscribe: <mailto:io-uring+subscribe@vger.kernel.org>
+List-Unsubscribe: <mailto:io-uring+unsubscribe@vger.kernel.org>
+MIME-Version: 1.0
+User-Agent: Mozilla Thunderbird
+Content-Language: en-US
+To: io-uring <io-uring@vger.kernel.org>
+From: Jens Axboe <axboe@kernel.dk>
+Subject: [PATCH v2] io_uring/sqpoll: ensure that normal task_work is also run
+ timely
+Cc: Christian Heusel <christian@heusel.eu>, Andrew Udvare <audvare@gmail.com>
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 7bit
+
+With the move to private task_work, SQPOLL neglected to also run the
+normal task_work, if any is pending. This will eventually get run, but
+we should run it with the private task_work to ensure that things like
+a final fput() is processed in a timely fashion.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/313824bc-799d-414f-96b7-e6de57c7e21d@gmail.com/
+Reported-by: Andrew Udvare <audvare@gmail.com>
+Fixes: af5d68f8892f ("io_uring/sqpoll: manage task_work privately")
+Tested-by: Christian Heusel <christian@heusel.eu>
+Tested-by: Andrew Udvare <audvare@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+
+---
+
+V2: move the task_work_run() section so we're always guaranteed it
+    runs after any task_work. Ran the previous test cases again, both
+    the yarn based one and the liburing test case, and they still work
+    as they should. Previously, if we had a retry condition due to being
+    flooded with task_work, then we'd not run the kernel side task_work.
+
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 554c7212aa46..b3722e5275e7 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -238,11 +238,13 @@ static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
+ 	if (*retry_list) {
+ 		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+ 		if (count >= max_entries)
+-			return count;
++			goto out;
+ 		max_entries -= count;
+ 	}
+-
+ 	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
++out:
++	if (task_work_pending(current))
++		task_work_run();
+ 	return count;
+ }
+ 
+-- 
+Jens Axboe


^ permalink raw reply related	[flat|nested] 18+ messages in thread
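
For context, a toy C model, not kernel code, of the control flow this patch
corrects: when the private retry list was flooded, the early return skipped
draining the normal task_work queue, so both paths now funnel through one
exit label that always drains it:

#include <stdbool.h>
#include <stdio.h>

static bool normal_work_pending = true;

static unsigned int io_sq_tw_model(bool retry_list_full, unsigned int max_entries)
{
	unsigned int count = 0;

	if (retry_list_full) {
		count = max_entries;	/* flooded with private task_work */
		if (count >= max_entries)
			goto out;	/* was "return count;" before the fix */
		max_entries -= count;
	}
	/* ... run the remaining private task_work ... */
out:
	if (normal_work_pending) {	/* now reached on every path */
		normal_work_pending = false;
		puts("normal task_work drained");
	}
	return count;
}

int main(void)
{
	io_sq_tw_model(true, 8);	/* the early-exit path still drains */
	return 0;
}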

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-25 15:19 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-25 15:19 UTC (permalink / raw)
  To: gentoo-commits

commit:     190af96490abc9ac3659f6e8a74e7ae51df04e0a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 25 15:18:56 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 25 15:18:56 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=190af964

Linux patch 6.9.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1001_linux-6.9.2.patch | 1480 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1484 insertions(+)

diff --git a/0000_README b/0000_README
index 55885e7a..111716b6 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.9.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.1
 
+Patch:  1001_linux-6.9.2.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.2
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1001_linux-6.9.2.patch b/1001_linux-6.9.2.patch
new file mode 100644
index 00000000..0b2b2476
--- /dev/null
+++ b/1001_linux-6.9.2.patch
@@ -0,0 +1,1480 @@
+diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
+index 1fe9a553c37b7..f0025d1c3d5ac 100644
+--- a/Documentation/ABI/stable/sysfs-block
++++ b/Documentation/ABI/stable/sysfs-block
+@@ -101,6 +101,16 @@ Description:
+ 		devices that support receiving integrity metadata.
+ 
+ 
++What:		/sys/block/<disk>/partscan
++Date:		May 2024
++Contact:	Christoph Hellwig <hch@lst.de>
++Description:
++		The /sys/block/<disk>/partscan files reports if partition
++		scanning is enabled for the disk.  It returns "1" if partition
++		scanning is enabled, or "0" if not.  The value type is a 32-bit
++		unsigned integer, but only "0" and "1" are valid values.
++
++
+ What:		/sys/block/<disk>/<partition>/alignment_offset
+ Date:		April 2009
+ Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+index cf1eeefdfc32f..a92e10ec402e7 100644
+--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
++++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+@@ -67,8 +67,8 @@ arg4:
+     will be performed for all tasks in the task group of ``pid``.
+ 
+ arg5:
+-    userspace pointer to an unsigned long for storing the cookie returned by
+-    ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
++    userspace pointer to an unsigned long long for storing the cookie returned
++    by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+ 
+ In order for a process to push a cookie to, or pull a cookie from a process, it
+ is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
+diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
+index 6fce035fdbf5c..7daf1a95f9cb4 100644
+--- a/Documentation/admin-guide/mm/damon/usage.rst
++++ b/Documentation/admin-guide/mm/damon/usage.rst
+@@ -153,7 +153,7 @@ Users can write below commands for the kdamond to the ``state`` file.
+ - ``clear_schemes_tried_regions``: Clear the DAMON-based operating scheme
+   action tried regions directory for each DAMON-based operation scheme of the
+   kdamond.
+-- ``update_schemes_effective_bytes``: Update the contents of
++- ``update_schemes_effective_quotas``: Update the contents of
+   ``effective_bytes`` files for each DAMON-based operation scheme of the
+   kdamond.  For more details, refer to :ref:`quotas directory <sysfs_quotas>`.
+ 
+@@ -342,7 +342,7 @@ Based on the user-specified :ref:`goal <sysfs_schemes_quota_goals>`, the
+ effective size quota is further adjusted.  Reading ``effective_bytes`` returns
+ the current effective size quota.  The file is not updated in real time, so
+ users should ask DAMON sysfs interface to update the content of the file for
+-the stats by writing a special keyword, ``update_schemes_effective_bytes`` to
++the stats by writing a special keyword, ``update_schemes_effective_quotas`` to
+ the relevant ``kdamonds/<N>/state`` file.
+ 
+ Under ``weights`` directory, three files (``sz_permil``,
+@@ -434,7 +434,7 @@ pages of all memory cgroups except ``/having_care_already``.::
+     # # further filter out all cgroups except one at '/having_care_already'
+     echo memcg > 1/type
+     echo /having_care_already > 1/memcg_path
+-    echo N > 1/matching
++    echo Y > 1/matching
+ 
+ Note that ``anon`` and ``memcg`` filters are currently supported only when
+ ``paddr`` :ref:`implementation <sysfs_context>` is being used.
+diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
+index abe7680883771..6387624423363 100755
+--- a/Documentation/sphinx/kernel_include.py
++++ b/Documentation/sphinx/kernel_include.py
+@@ -97,7 +97,6 @@ class KernelInclude(Include):
+         # HINT: this is the only line I had to change / commented out:
+         #path = utils.relative_path(None, path)
+ 
+-        path = nodes.reprunicode(path)
+         encoding = self.options.get(
+             'encoding', self.state.document.settings.input_encoding)
+         e_handler=self.state.document.settings.input_encoding_error_handler
+diff --git a/Makefile b/Makefile
+index a7045435151e6..8302496ee7a57 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
+index 44958ebaf626e..b77bbb67e77b0 100644
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -70,7 +70,7 @@
+ 	unsigned long tcp_ptr__;				\
+ 	tcp_ptr__ = __raw_cpu_read(, this_cpu_off);		\
+ 								\
+-	tcp_ptr__ += (unsigned long)(ptr);			\
++	tcp_ptr__ += (__force unsigned long)(ptr);		\
+ 	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;		\
+ })
+ #else /* CONFIG_USE_X86_SEG_SUPPORT */
+@@ -102,8 +102,8 @@
+ #endif /* CONFIG_SMP */
+ 
+ #define __my_cpu_type(var)	typeof(var) __percpu_seg_override
+-#define __my_cpu_ptr(ptr)	(__my_cpu_type(*ptr) *)(uintptr_t)(ptr)
+-#define __my_cpu_var(var)	(*__my_cpu_ptr(&var))
++#define __my_cpu_ptr(ptr)	(__my_cpu_type(*ptr)*)(__force uintptr_t)(ptr)
++#define __my_cpu_var(var)	(*__my_cpu_ptr(&(var)))
+ #define __percpu_arg(x)		__percpu_prefix "%" #x
+ #define __force_percpu_arg(x)	__force_percpu_prefix "%" #x
+ 
+diff --git a/block/genhd.c b/block/genhd.c
+index bb29a68e1d676..52a4521df067b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -345,9 +345,7 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
+ 	struct file *file;
+ 	int ret = 0;
+ 
+-	if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+-		return -EINVAL;
+-	if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++	if (!disk_has_partscan(disk))
+ 		return -EINVAL;
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
+@@ -503,8 +501,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 			goto out_unregister_bdi;
+ 
+ 		/* Make sure the first partition scan will be proceed */
+-		if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
+-		    !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++		if (get_capacity(disk) && disk_has_partscan(disk))
+ 			set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 
+ 		bdev_add(disk->part0, ddev->devt);
+@@ -1047,6 +1044,12 @@ static ssize_t diskseq_show(struct device *dev,
+ 	return sprintf(buf, "%llu\n", disk->diskseq);
+ }
+ 
++static ssize_t partscan_show(struct device *dev,
++		struct device_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", disk_has_partscan(dev_to_disk(dev)));
++}
++
+ static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
+ static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
+ static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
+@@ -1060,6 +1063,7 @@ static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
+ static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
+ static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
+ static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL);
++static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
+ 
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+ ssize_t part_fail_show(struct device *dev,
+@@ -1106,6 +1110,7 @@ static struct attribute *disk_attrs[] = {
+ 	&dev_attr_events_async.attr,
+ 	&dev_attr_events_poll_msecs.attr,
+ 	&dev_attr_diskseq.attr,
++	&dev_attr_partscan.attr,
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+ 	&dev_attr_fail.attr,
+ #endif
+diff --git a/block/partitions/core.c b/block/partitions/core.c
+index b11e88c82c8cf..37b5f92d07fec 100644
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -573,10 +573,7 @@ static int blk_add_partitions(struct gendisk *disk)
+ 	struct parsed_partitions *state;
+ 	int ret = -EAGAIN, p;
+ 
+-	if (disk->flags & GENHD_FL_NO_PART)
+-		return 0;
+-
+-	if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++	if (!disk_has_partscan(disk))
+ 		return 0;
+ 
+ 	state = check_partition(disk);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index dd6923d37931f..b21a7b246a0dc 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -5367,7 +5367,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 			goto err;
+ 		break;
+ 	case BINDER_SET_MAX_THREADS: {
+-		int max_threads;
++		u32 max_threads;
+ 
+ 		if (copy_from_user(&max_threads, ubuf,
+ 				   sizeof(max_threads))) {
+diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
+index 7270d4d222070..5b7c80b99ae86 100644
+--- a/drivers/android/binder_internal.h
++++ b/drivers/android/binder_internal.h
+@@ -421,7 +421,7 @@ struct binder_proc {
+ 	struct list_head todo;
+ 	struct binder_stats stats;
+ 	struct list_head delivered_death;
+-	int max_threads;
++	u32 max_threads;
+ 	int requested_threads;
+ 	int requested_threads_started;
+ 	int tmp_ref;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e3946f7b736e3..01b99471d1bbe 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3118,6 +3118,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
+ 			bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ 			return err;
+ 		}
++		fw_flavor = (fw_flavor & 0x00000080) >> 7;
+ 	}
+ 
+ 	mediatek = hci_get_priv(hdev);
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 2015c9fcc3c91..28166df81cf8d 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -50,7 +50,8 @@
+ 
+ #define AMD_PSTATE_TRANSITION_LATENCY	20000
+ #define AMD_PSTATE_TRANSITION_DELAY	1000
+-#define AMD_PSTATE_PREFCORE_THRESHOLD	166
++#define CPPC_HIGHEST_PERF_PERFORMANCE	196
++#define CPPC_HIGHEST_PERF_DEFAULT	166
+ 
+ /*
+  * TODO: We need more time to fine tune processors with shared memory solution
+@@ -290,6 +291,21 @@ static inline int amd_pstate_enable(bool enable)
+ 	return static_call(amd_pstate_enable)(enable);
+ }
+ 
++static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
++{
++	struct cpuinfo_x86 *c = &cpu_data(0);
++
++	/*
++	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
++	 * the highest performance level is set to 196.
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
++	 */
++	if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
++		return CPPC_HIGHEST_PERF_PERFORMANCE;
++
++	return CPPC_HIGHEST_PERF_DEFAULT;
++}
++
+ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ {
+ 	u64 cap1;
+@@ -306,7 +322,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
+ 	 * the default max perf.
+ 	 */
+ 	if (cpudata->hw_prefcore)
+-		highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
++		highest_perf = amd_pstate_highest_perf_set(cpudata);
+ 	else
+ 		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+ 
+@@ -330,7 +346,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
+ 		return ret;
+ 
+ 	if (cpudata->hw_prefcore)
+-		highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
++		highest_perf = amd_pstate_highest_perf_set(cpudata);
+ 	else
+ 		highest_perf = cppc_perf.highest_perf;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+index ac41f9c0a2834..8e7b35f764e39 100644
+--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
++++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+@@ -1055,7 +1055,12 @@ static bool setup_dsc_config(
+ 	if (!is_dsc_possible)
+ 		goto done;
+ 
+-	dsc_cfg->num_slices_v = pic_height/slice_height;
++	if (slice_height > 0) {
++		dsc_cfg->num_slices_v = pic_height / slice_height;
++	} else {
++		is_dsc_possible = false;
++		goto done;
++	}
+ 
+ 	if (target_bandwidth_kbps > 0) {
+ 		is_dsc_possible = decide_dsc_target_bpp_x16(
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+index c4d995f32191c..09ceb2a868764 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
+@@ -2504,8 +2504,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+ EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
+ 
+ /* Log the control name and value */
+-static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
+-		     struct v4l2_ctrl *ctrl,
++static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ 		     const char *prefix, const char *colon)
+ {
+ 	if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+@@ -2515,11 +2514,7 @@ static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
+ 
+ 	pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+ 
+-	if (ctrl->handler != hdl)
+-		v4l2_ctrl_lock(ctrl);
+ 	ctrl->type_ops->log(ctrl);
+-	if (ctrl->handler != hdl)
+-		v4l2_ctrl_unlock(ctrl);
+ 
+ 	if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ 			   V4L2_CTRL_FLAG_GRABBED |
+@@ -2538,7 +2533,7 @@ static void log_ctrl(const struct v4l2_ctrl_handler *hdl,
+ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ 				  const char *prefix)
+ {
+-	struct v4l2_ctrl_ref *ref;
++	struct v4l2_ctrl *ctrl;
+ 	const char *colon = "";
+ 	int len;
+ 
+@@ -2550,12 +2545,9 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ 	if (len && prefix[len - 1] != ' ')
+ 		colon = ": ";
+ 	mutex_lock(hdl->lock);
+-	list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+-		if (ref->from_other_dev ||
+-		    (ref->ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+-			continue;
+-		log_ctrl(hdl, ref->ctrl, prefix, colon);
+-	}
++	list_for_each_entry(ctrl, &hdl->ctrls, node)
++		if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
++			log_ctrl(ctrl, prefix, colon);
+ 	mutex_unlock(hdl->lock);
+ }
+ EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 502518cdb4618..6453c92f0fa7c 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -328,7 +328,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ {
+ 	struct ks8851_net *ks = _ks;
+ 	struct sk_buff_head rxq;
+-	unsigned handled = 0;
+ 	unsigned long flags;
+ 	unsigned int status;
+ 	struct sk_buff *skb;
+@@ -336,24 +335,17 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 	ks8851_lock(ks, &flags);
+ 
+ 	status = ks8851_rdreg16(ks, KS_ISR);
++	ks8851_wrreg16(ks, KS_ISR, status);
+ 
+ 	netif_dbg(ks, intr, ks->netdev,
+ 		  "%s: status 0x%04x\n", __func__, status);
+ 
+-	if (status & IRQ_LCI)
+-		handled |= IRQ_LCI;
+-
+ 	if (status & IRQ_LDI) {
+ 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ 		pmecr &= ~PMECR_WKEVT_MASK;
+ 		ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+-
+-		handled |= IRQ_LDI;
+ 	}
+ 
+-	if (status & IRQ_RXPSI)
+-		handled |= IRQ_RXPSI;
+-
+ 	if (status & IRQ_TXI) {
+ 		unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ 
+@@ -365,20 +357,12 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		if (netif_queue_stopped(ks->netdev))
+ 			netif_wake_queue(ks->netdev);
+ 		spin_unlock(&ks->statelock);
+-
+-		handled |= IRQ_TXI;
+ 	}
+ 
+-	if (status & IRQ_RXI)
+-		handled |= IRQ_RXI;
+-
+ 	if (status & IRQ_SPIBEI) {
+ 		netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
+-		handled |= IRQ_SPIBEI;
+ 	}
+ 
+-	ks8851_wrreg16(ks, KS_ISR, handled);
+-
+ 	if (status & IRQ_RXI) {
+ 		/* the datasheet says to disable the rx interrupt during
+ 		 * packet read-out, however we're masking the interrupt
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index df9d767cb5242..56ede5fa02617 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -174,6 +174,7 @@ struct ax88179_data {
+ 	u32 wol_supported;
+ 	u32 wolopts;
+ 	u8 disconnecting;
++	u8 initialized;
+ };
+ 
+ struct ax88179_int_data {
+@@ -1673,6 +1674,18 @@ static int ax88179_reset(struct usbnet *dev)
+ 	return 0;
+ }
+ 
++static int ax88179_net_reset(struct usbnet *dev)
++{
++	struct ax88179_data *ax179_data = dev->driver_priv;
++
++	if (ax179_data->initialized)
++		ax88179_reset(dev);
++	else
++		ax179_data->initialized = 1;
++
++	return 0;
++}
++
+ static int ax88179_stop(struct usbnet *dev)
+ {
+ 	u16 tmp16;
+@@ -1692,6 +1705,7 @@ static const struct driver_info ax88179_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1704,6 +1718,7 @@ static const struct driver_info ax88178a_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1716,7 +1731,7 @@ static const struct driver_info cypress_GX3_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1729,7 +1744,7 @@ static const struct driver_info dlink_dub1312_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1742,7 +1757,7 @@ static const struct driver_info sitecom_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1755,7 +1770,7 @@ static const struct driver_info samsung_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1768,7 +1783,7 @@ static const struct driver_info lenovo_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset = ax88179_reset,
++	.reset = ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1781,7 +1796,7 @@ static const struct driver_info belkin_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop	= ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1794,7 +1809,7 @@ static const struct driver_info toshiba_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop = ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1807,7 +1822,7 @@ static const struct driver_info mct_info = {
+ 	.unbind	= ax88179_unbind,
+ 	.status	= ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset	= ax88179_reset,
++	.reset	= ax88179_net_reset,
+ 	.stop	= ax88179_stop,
+ 	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1820,7 +1835,7 @@ static const struct driver_info at_umc2000_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1833,7 +1848,7 @@ static const struct driver_info at_umc200_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+@@ -1846,7 +1861,7 @@ static const struct driver_info at_umc2000sp_info = {
+ 	.unbind = ax88179_unbind,
+ 	.status = ax88179_status,
+ 	.link_reset = ax88179_link_reset,
+-	.reset  = ax88179_reset,
++	.reset  = ax88179_net_reset,
+ 	.stop   = ax88179_stop,
+ 	.flags  = FLAG_ETHER | FLAG_FRAMING_AX,
+ 	.rx_fixup = ax88179_rx_fixup,
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 4696d73c8971b..1b7254569a37a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1484,7 +1484,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+ 	size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
+ 	u32 api_ver;
+ 	int i;
+-	bool load_module = false;
+ 	bool usniffer_images = false;
+ 	bool failure = true;
+ 
+@@ -1732,19 +1731,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+ 			goto out_unbind;
+ 		}
+ 	} else {
+-		load_module = true;
++		request_module_nowait("%s", op->name);
+ 	}
+ 	mutex_unlock(&iwlwifi_opmode_table_mtx);
+ 
+ 	complete(&drv->request_firmware_complete);
+ 
+-	/*
+-	 * Load the module last so we don't block anything
+-	 * else from proceeding if the module fails to load
+-	 * or hangs loading.
+-	 */
+-	if (load_module)
+-		request_module("%s", op->name);
+ 	failure = false;
+ 	goto free;
+ 
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index a35409eda0cf2..67518291a8ad6 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -132,7 +132,7 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ {
+ 	int ret;
+-	size_t offset;
++	size_t buf_sz, offset;
+ 
+ 	/* read the ipi buf addr from FW itself first */
+ 	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+@@ -144,6 +144,14 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+ 	}
+ 	dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+ 
++	/* Make sure IPI buffer fits in the L2TCM range assigned to this core */
++	buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
++
++	if (scp->sram_size < buf_sz + offset) {
++		dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
++		return -EOVERFLOW;
++	}
++
+ 	scp->recv_buf = (struct mtk_share_obj __iomem *)
+ 			(scp->sram_base + offset);
+ 	scp->send_buf = (struct mtk_share_obj __iomem *)
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 7ce7bb1640054..58ea1e1391cee 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -19,6 +19,7 @@
+ #include <linux/console.h>
+ #include <linux/vt_kern.h>
+ #include <linux/input.h>
++#include <linux/irq_work.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_core.h>
+@@ -48,6 +49,25 @@ static struct kgdb_io		kgdboc_earlycon_io_ops;
+ static int                      (*earlycon_orig_exit)(struct console *con);
+ #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+ 
++/*
++ * When we leave the debug trap handler we need to reset the keyboard status
++ * (since the original keyboard state gets partially clobbered by kdb use of
++ * the keyboard).
++ *
++ * The path to deliver the reset is somewhat circuitous.
++ *
++ * To deliver the reset we register an input handler, reset the keyboard and
++ * then deregister the input handler. However, to get this done right, we do
++ * have to carefully manage the calling context because we can only register
++ * input handlers from task context.
++ *
++ * In particular we need to trigger the action from the debug trap handler with
++ * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code
++ * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to
++ * schedule a callback from a hardirq context. From there we have to defer the
++ * work again, this time using schedule_work(), to get a callback using the
++ * system workqueue, which runs in task context.
++ */
+ #ifdef CONFIG_KDB_KEYBOARD
+ static int kgdboc_reset_connect(struct input_handler *handler,
+ 				struct input_dev *dev,
+@@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy)
+ 
+ static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+ 
++static void kgdboc_queue_restore_input_helper(struct irq_work *unused)
++{
++	schedule_work(&kgdboc_restore_input_work);
++}
++
++static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper);
++
+ static void kgdboc_restore_input(void)
+ {
+ 	if (likely(system_state == SYSTEM_RUNNING))
+-		schedule_work(&kgdboc_restore_input_work);
++		irq_work_queue(&kgdboc_restore_input_irq_work);
+ }
+ 
+ static int kgdboc_register_kbd(char **cptr)
+@@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void)
+ 			i--;
+ 		}
+ 	}
++	irq_work_sync(&kgdboc_restore_input_irq_work);
+ 	flush_work(&kgdboc_restore_input_work);
+ }
+ #else /* ! CONFIG_KDB_KEYBOARD */
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f94f68f1e7d2b..89fc690fdf34a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1699,7 +1699,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+  */
+ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+ {
+-	struct dwc3 *dwc = dep->dwc;
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32 cmd;
+ 	int ret;
+@@ -1724,8 +1723,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 	dep->resource_index = 0;
+ 
+ 	if (!interrupt) {
+-		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+-			mdelay(1);
++		mdelay(1);
+ 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ 	} else if (!ret) {
+ 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
+index 0717cfcd9f8ca..191f86da283d0 100644
+--- a/drivers/usb/typec/tipd/core.c
++++ b/drivers/usb/typec/tipd/core.c
+@@ -28,6 +28,7 @@
+ #define TPS_REG_MODE			0x03
+ #define TPS_REG_CMD1			0x08
+ #define TPS_REG_DATA1			0x09
++#define TPS_REG_VERSION			0x0F
+ #define TPS_REG_INT_EVENT1		0x14
+ #define TPS_REG_INT_EVENT2		0x15
+ #define TPS_REG_INT_MASK1		0x16
+@@ -604,11 +605,11 @@ static irqreturn_t tps25750_interrupt(int irq, void *data)
+ 	if (!tps6598x_read_status(tps, &status))
+ 		goto err_clear_ints;
+ 
+-	if ((event[0] | event[1]) & TPS_REG_INT_POWER_STATUS_UPDATE)
++	if (event[0] & TPS_REG_INT_POWER_STATUS_UPDATE)
+ 		if (!tps6598x_read_power_status(tps))
+ 			goto err_clear_ints;
+ 
+-	if ((event[0] | event[1]) & TPS_REG_INT_DATA_STATUS_UPDATE)
++	if (event[0] & TPS_REG_INT_DATA_STATUS_UPDATE)
+ 		if (!tps6598x_read_data_status(tps))
+ 			goto err_clear_ints;
+ 
+@@ -617,7 +618,7 @@ static irqreturn_t tps25750_interrupt(int irq, void *data)
+ 	 * a plug event. Therefore, we need to check
+ 	 * for pr/dr status change to set TypeC dr/pr accordingly.
+ 	 */
+-	if ((event[0] | event[1]) & TPS_REG_INT_PLUG_EVENT ||
++	if (event[0] & TPS_REG_INT_PLUG_EVENT ||
+ 	    tps6598x_has_role_changed(tps, status))
+ 		tps6598x_handle_plug_event(tps, status);
+ 
+@@ -636,49 +637,67 @@ static irqreturn_t tps25750_interrupt(int irq, void *data)
+ 
+ static irqreturn_t tps6598x_interrupt(int irq, void *data)
+ {
++	int intev_len = TPS_65981_2_6_INTEVENT_LEN;
+ 	struct tps6598x *tps = data;
+-	u64 event1 = 0;
+-	u64 event2 = 0;
++	u64 event1[2] = { };
++	u64 event2[2] = { };
++	u32 version;
+ 	u32 status;
+ 	int ret;
+ 
+ 	mutex_lock(&tps->lock);
+ 
+-	ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
+-	ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
++	ret = tps6598x_read32(tps, TPS_REG_VERSION, &version);
++	if (ret)
++		dev_warn(tps->dev, "%s: failed to read version (%d)\n",
++			 __func__, ret);
++
++	if (TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DH ||
++	    TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DK)
++		intev_len = TPS_65987_8_INTEVENT_LEN;
++
++	ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len);
++
++	ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len);
+ 	if (ret) {
+-		dev_err(tps->dev, "%s: failed to read events\n", __func__);
++		dev_err(tps->dev, "%s: failed to read event1\n", __func__);
+ 		goto err_unlock;
+ 	}
+-	trace_tps6598x_irq(event1, event2);
++	ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT2, event2, intev_len);
++	if (ret) {
++		dev_err(tps->dev, "%s: failed to read event2\n", __func__);
++		goto err_unlock;
++	}
++	trace_tps6598x_irq(event1[0], event2[0]);
+ 
+-	if (!(event1 | event2))
++	if (!(event1[0] | event1[1] | event2[0] | event2[1]))
+ 		goto err_unlock;
+ 
+ 	if (!tps6598x_read_status(tps, &status))
+ 		goto err_clear_ints;
+ 
+-	if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ 		if (!tps6598x_read_power_status(tps))
+ 			goto err_clear_ints;
+ 
+-	if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ 		if (!tps6598x_read_data_status(tps))
+ 			goto err_clear_ints;
+ 
+ 	/* Handle plug insert or removal */
+-	if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT)
++	if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
+ 		tps6598x_handle_plug_event(tps, status);
+ 
+ err_clear_ints:
+-	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
+-	tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
++	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+ 
+ err_unlock:
+ 	mutex_unlock(&tps->lock);
+ 
+-	if (event1 | event2)
++	if (event1[0] | event1[1] | event2[0] | event2[1])
+ 		return IRQ_HANDLED;
++
+ 	return IRQ_NONE;
+ }
+ 
+diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
+index 89b24519463a1..9b23e90174521 100644
+--- a/drivers/usb/typec/tipd/tps6598x.h
++++ b/drivers/usb/typec/tipd/tps6598x.h
+@@ -253,4 +253,15 @@
+ #define TPS_PTCC_DEV				2
+ #define TPS_PTCC_APP				3
+ 
++/* Version Register */
++#define TPS_VERSION_HW_VERSION_MASK            GENMASK(31, 24)
++#define TPS_VERSION_HW_VERSION(x)              TPS_FIELD_GET(TPS_VERSION_HW_VERSION_MASK, (x))
++#define TPS_VERSION_HW_65981_2_6               0x00
++#define TPS_VERSION_HW_65987_8_DH              0xF7
++#define TPS_VERSION_HW_65987_8_DK              0xF9
++
++/* Int Event Register length */
++#define TPS_65981_2_6_INTEVENT_LEN             8
++#define TPS_65987_8_INTEVENT_LEN               11
++
+ #endif /* __TPS6598X_H__ */
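
The hunks above size the INT_EVENT register I/O by hardware version: the
TPS65987/8 DH and DK parts expose 11-byte event registers, the older parts
8 bytes, and the top byte of the VERSION register selects between them. A
minimal standalone sketch of that selection (the helper name and the test
harness are illustrative, and (x >> 24) & 0xff stands in for the driver's
TPS_FIELD_GET(TPS_VERSION_HW_VERSION_MASK, x)):

	#include <stdint.h>
	#include <stdio.h>

	#define TPS_VERSION_HW_65987_8_DH	0xF7
	#define TPS_VERSION_HW_65987_8_DK	0xF9
	#define TPS_65981_2_6_INTEVENT_LEN	8
	#define TPS_65987_8_INTEVENT_LEN	11

	/* Pick the INT_EVENT1/2 register length from the VERSION register. */
	static int tps_intevent_len(uint32_t version)
	{
		uint8_t hw = (version >> 24) & 0xff;

		if (hw == TPS_VERSION_HW_65987_8_DH ||
		    hw == TPS_VERSION_HW_65987_8_DK)
			return TPS_65987_8_INTEVENT_LEN;
		return TPS_65981_2_6_INTEVENT_LEN;
	}

	int main(void)
	{
		printf("%d\n", tps_intevent_len(0xF7000001));	/* 11 */
		printf("%d\n", tps_intevent_len(0x00000001));	/* 8 */
		return 0;
	}
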
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index d9d3c91125ca8..8be92fc1d12c9 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -275,8 +275,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ 	struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+ 	int ret;
+ 
+-	mutex_lock(&dp->con->lock);
+-
+ 	ret = typec_altmode_vdm(dp->alt, dp->header,
+ 				dp->vdo_data, dp->vdo_size);
+ 	if (ret)
+@@ -285,8 +283,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ 	dp->vdo_data = NULL;
+ 	dp->vdo_size = 0;
+ 	dp->header = 0;
+-
+-	mutex_unlock(&dp->con->lock);
+ }
+ 
+ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 69e7da33ca49a..00e62b81a7363 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -233,6 +233,19 @@ static inline unsigned int disk_openers(struct gendisk *disk)
+ 	return atomic_read(&disk->part0->bd_openers);
+ }
+ 
++/**
++ * disk_has_partscan - return %true if partition scanning is enabled on a disk
++ * @disk: disk to check
++ *
++ * Returns %true if partition scanning is enabled for @disk, or %false if
++ * partition scanning is disabled either permanently or temporarily.
++ */
++static inline bool disk_has_partscan(struct gendisk *disk)
++{
++	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
++		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
++}
++
+ /*
+  * The gendisk is refcounted by the part0 block_device, and the bd_device
+  * therein is also used for device model presentation in sysfs.
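
disk_has_partscan() folds the conditions callers used to open-code into one
predicate: the permanent GENHD_FL_NO_PART and GENHD_FL_HIDDEN flags plus the
transient GD_SUPPRESS_PART_SCAN state bit. A standalone model of the same
logic (the flag values and struct are invented for illustration; the kernel
uses test_bit() on disk->state):

	#include <stdbool.h>
	#include <stdio.h>

	#define GENHD_FL_NO_PART	(1u << 0)	/* illustrative values */
	#define GENHD_FL_HIDDEN		(1u << 1)
	#define GD_SUPPRESS_PART_SCAN	2		/* bit number in state */

	struct gendisk_model {
		unsigned int flags;
		unsigned long state;
	};

	static bool disk_has_partscan(const struct gendisk_model *disk)
	{
		return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
			!(disk->state & (1UL << GD_SUPPRESS_PART_SCAN));
	}

	int main(void)
	{
		struct gendisk_model d = { .flags = 0, .state = 0 };

		printf("%d\n", disk_has_partscan(&d));	/* 1 */
		d.flags |= GENHD_FL_HIDDEN;
		printf("%d\n", disk_has_partscan(&d));	/* 0 */
		return 0;
	}
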
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 5c12761cbc0e2..07198beb3d80b 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -1666,6 +1666,15 @@ struct hci_cp_le_set_event_mask {
+ 	__u8     mask[8];
+ } __packed;
+ 
++/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E
++ * 7.8.2 LE Read Buffer Size command
++ * MAX_LE_MTU is 0xffff.
++ * 0 is also valid. It means that no dedicated LE Buffer exists.
++ * In that case the HCI_Read_Buffer_Size command should be used instead,
++ * and the MTU is shared between BR/EDR and LE.
++ */
++#define HCI_MIN_LE_MTU 0x001b
++
+ #define HCI_OP_LE_READ_BUFFER_SIZE	0x2002
+ struct hci_rp_le_read_buffer_size {
+ 	__u8     status;
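
HCI_MIN_LE_MTU (0x001b, i.e. 27 bytes, the smallest LE ACL data packet
length the specification permits) is enforced twice below: hci_conn_add()
refuses LE/ISO links whose buffer is too small, and the LE Read Buffer Size
completion handlers in hci_event.c reject bogus controller replies. The
acceptance rule as a compact standalone function (name illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HCI_MIN_LE_MTU 0x001b

	/*
	 * An LE link is usable if the controller advertises a dedicated LE
	 * buffer of at least HCI_MIN_LE_MTU, or no LE buffer at all
	 * (le_mtu == 0) while the shared BR/EDR ACL buffer meets the minimum.
	 */
	static bool le_mtu_acceptable(uint16_t le_mtu, uint16_t acl_mtu)
	{
		return le_mtu ? le_mtu >= HCI_MIN_LE_MTU
			      : acl_mtu >= HCI_MIN_LE_MTU;
	}

	int main(void)
	{
		printf("%d %d\n", le_mtu_acceptable(27, 0),	/* 1 */
		       le_mtu_acceptable(0, 23));		/* 0 */
		return 0;
	}

This mirrors the two ERR_PTR(-ECONNREFUSED) checks added to hci_conn_add()
in the hci_conn.c hunks further down.
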
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e8f581f3f3ce6..b1c8489ff93e4 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -706,6 +706,7 @@ struct hci_conn {
+ 	__u16		handle;
+ 	__u16		sync_handle;
+ 	__u16		state;
++	__u16		mtu;
+ 	__u8		mode;
+ 	__u8		type;
+ 	__u8		role;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 05346250f7195..6ab404dda7949 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -909,11 +909,37 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ {
+ 	struct hci_conn *conn;
+ 
++	switch (type) {
++	case ACL_LINK:
++		if (!hdev->acl_mtu)
++			return ERR_PTR(-ECONNREFUSED);
++		break;
++	case ISO_LINK:
++		if (hdev->iso_mtu)
++			/* Dedicated ISO Buffer exists */
++			break;
++		fallthrough;
++	case LE_LINK:
++		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++			return ERR_PTR(-ECONNREFUSED);
++		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
++			return ERR_PTR(-ECONNREFUSED);
++		break;
++	case SCO_LINK:
++	case ESCO_LINK:
++		if (!hdev->sco_pkts)
++			/* Controller does not support SCO or eSCO over HCI */
++			return ERR_PTR(-ECONNREFUSED);
++		break;
++	default:
++		return ERR_PTR(-ECONNREFUSED);
++	}
++
+ 	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
+ 
+ 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ 	if (!conn)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	bacpy(&conn->dst, dst);
+ 	bacpy(&conn->src, &hdev->bdaddr);
+@@ -944,10 +970,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	switch (type) {
+ 	case ACL_LINK:
+ 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
++		conn->mtu = hdev->acl_mtu;
+ 		break;
+ 	case LE_LINK:
+ 		/* conn->src should reflect the local identity address */
+ 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
++		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
+ 		break;
+ 	case ISO_LINK:
+ 		/* conn->src should reflect the local identity address */
+@@ -959,6 +987,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 		else if (conn->role == HCI_ROLE_MASTER)
+ 			conn->cleanup = cis_cleanup;
+ 
++		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
++			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
+ 		break;
+ 	case SCO_LINK:
+ 		if (lmp_esco_capable(hdev))
+@@ -966,9 +996,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 					(hdev->esco_type & EDR_ESCO_MASK);
+ 		else
+ 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
++
++		conn->mtu = hdev->sco_mtu;
+ 		break;
+ 	case ESCO_LINK:
+ 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
++		conn->mtu = hdev->sco_mtu;
+ 		break;
+ 	}
+ 
+@@ -1011,7 +1044,7 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ 
+ 	handle = hci_conn_hash_alloc_unset(hdev);
+ 	if (unlikely(handle < 0))
+-		return NULL;
++		return ERR_PTR(-ECONNREFUSED);
+ 
+ 	return hci_conn_add(hdev, type, dst, role, handle);
+ }
+@@ -1317,8 +1350,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 		bacpy(&conn->dst, dst);
+ 	} else {
+ 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
+-		if (!conn)
+-			return ERR_PTR(-ENOMEM);
++		if (IS_ERR(conn))
++			return conn;
+ 		hci_conn_hold(conn);
+ 		conn->pending_sec_level = sec_level;
+ 	}
+@@ -1494,8 +1527,8 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(-EADDRINUSE);
+ 
+ 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+-	if (!conn)
+-		return ERR_PTR(-ENOMEM);
++	if (IS_ERR(conn))
++		return conn;
+ 
+ 	conn->state = BT_CONNECT;
+ 
+@@ -1538,8 +1571,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ 	BT_DBG("requesting refresh of dst_addr");
+ 
+ 	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
+-	if (!conn)
+-		return ERR_PTR(-ENOMEM);
++	if (IS_ERR(conn))
++		return conn;
+ 
+ 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
+ 		hci_conn_del(conn);
+@@ -1586,8 +1619,8 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ 	if (!acl) {
+ 		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+-		if (!acl)
+-			return ERR_PTR(-ENOMEM);
++		if (IS_ERR(acl))
++			return acl;
+ 	}
+ 
+ 	hci_conn_hold(acl);
+@@ -1655,9 +1688,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
+ 	if (!sco) {
+ 		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
+-		if (!sco) {
++		if (IS_ERR(sco)) {
+ 			hci_conn_drop(acl);
+-			return ERR_PTR(-ENOMEM);
++			return sco;
+ 		}
+ 	}
+ 
+@@ -1847,8 +1880,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ 				       qos->ucast.cis);
+ 	if (!cis) {
+ 		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+-		if (!cis)
+-			return ERR_PTR(-ENOMEM);
++		if (IS_ERR(cis))
++			return cis;
+ 		cis->cleanup = cis_cleanup;
+ 		cis->dst_type = dst_type;
+ 		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
+@@ -1983,14 +2016,8 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+ 			      struct bt_iso_io_qos *qos, __u8 phy)
+ {
+ 	/* Only set MTU if PHY is enabled */
+-	if (!qos->sdu && qos->phy) {
+-		if (hdev->iso_mtu > 0)
+-			qos->sdu = hdev->iso_mtu;
+-		else if (hdev->le_mtu > 0)
+-			qos->sdu = hdev->le_mtu;
+-		else
+-			qos->sdu = hdev->acl_mtu;
+-	}
++	if (!qos->sdu && qos->phy)
++		qos->sdu = conn->mtu;
+ 
+ 	/* Use the same PHY as ACL if set to any */
+ 	if (qos->phy == BT_ISO_PHY_ANY)
+@@ -2071,8 +2098,8 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+ 		return ERR_PTR(-EBUSY);
+ 
+ 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
+-	if (!conn)
+-		return ERR_PTR(-ENOMEM);
++	if (IS_ERR(conn))
++		return conn;
+ 
+ 	conn->iso_qos = *qos;
+ 	conn->state = BT_LISTEN;
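
With the new conn->mtu field, each link type caches its transmit MTU once
in hci_conn_add(), and the per-user fallback chains (removed from
hci_iso_qos_setup() above and from iso.c, l2cap_core.c and sco.c below)
collapse into a single read of hcon->mtu. The selection logic, as a
standalone function (struct and names illustrative):

	#include <stdint.h>
	#include <stdio.h>

	enum link_type { ACL_LINK, LE_LINK, ISO_LINK, SCO_LINK, ESCO_LINK };

	struct hdev_model {
		uint16_t acl_mtu, le_mtu, iso_mtu, sco_mtu;
	};

	/* Mirrors the conn->mtu assignments in hci_conn_add(). */
	static int conn_mtu(enum link_type type, const struct hdev_model *h)
	{
		switch (type) {
		case ACL_LINK:
			return h->acl_mtu;
		case LE_LINK:
			return h->le_mtu ? h->le_mtu : h->acl_mtu;
		case ISO_LINK:	/* ISO falls back to LE, then ACL */
			return h->iso_mtu ? h->iso_mtu :
			       h->le_mtu  ? h->le_mtu  : h->acl_mtu;
		case SCO_LINK:
		case ESCO_LINK:
			return h->sco_mtu;
		}
		return 0;
	}

	int main(void)
	{
		struct hdev_model h = { .acl_mtu = 1021, .le_mtu = 0,
					.iso_mtu = 0, .sco_mtu = 64 };

		printf("%d\n", conn_mtu(LE_LINK, &h));	/* 1021: ACL fallback */
		return 0;
	}
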
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index d72d238c1656e..4de8f0dc1a523 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -954,6 +954,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
+ 
++	if (!hdev->acl_mtu || !hdev->acl_pkts)
++		return HCI_ERROR_INVALID_PARAMETERS;
++
+ 	return rp->status;
+ }
+ 
+@@ -1263,6 +1266,9 @@ static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
+ 
+ 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+ 
++	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++		return HCI_ERROR_INVALID_PARAMETERS;
++
+ 	return rp->status;
+ }
+ 
+@@ -2342,8 +2348,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+ 		if (!conn) {
+ 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
+ 						  HCI_ROLE_MASTER);
+-			if (!conn)
+-				bt_dev_err(hdev, "no memory for new connection");
++			if (IS_ERR(conn))
++				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ 		}
+ 	}
+ 
+@@ -3154,8 +3160,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ 						      BDADDR_BREDR)) {
+ 			conn = hci_conn_add_unset(hdev, ev->link_type,
+ 						  &ev->bdaddr, HCI_ROLE_SLAVE);
+-			if (!conn) {
+-				bt_dev_err(hdev, "no memory for new conn");
++			if (IS_ERR(conn)) {
++				bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ 				goto unlock;
+ 			}
+ 		} else {
+@@ -3343,8 +3349,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ 	if (!conn) {
+ 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
+ 					  HCI_ROLE_SLAVE);
+-		if (!conn) {
+-			bt_dev_err(hdev, "no memory for new connection");
++		if (IS_ERR(conn)) {
++			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ 			goto unlock;
+ 		}
+ 	}
+@@ -3821,6 +3827,9 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
+ 	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
+ 	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
+ 
++	if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
++		return HCI_ERROR_INVALID_PARAMETERS;
++
+ 	return rp->status;
+ }
+ 
+@@ -5768,8 +5777,8 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 			goto unlock;
+ 
+ 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
+-		if (!conn) {
+-			bt_dev_err(hdev, "no memory for new connection");
++		if (IS_ERR(conn)) {
++			bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn));
+ 			goto unlock;
+ 		}
+ 
+@@ -6898,7 +6907,7 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
+ 	if (!cis) {
+ 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
+ 				   cis_handle);
+-		if (!cis) {
++		if (IS_ERR(cis)) {
+ 			hci_le_reject_cis(hdev, ev->cis_handle);
+ 			goto unlock;
+ 		}
+@@ -7007,7 +7016,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		if (!bis) {
+ 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+ 					   HCI_ROLE_SLAVE, handle);
+-			if (!bis)
++			if (IS_ERR(bis))
+ 				continue;
+ 		}
+ 
+@@ -7079,7 +7088,7 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ 	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ 				     HCI_ROLE_SLAVE);
+ 
+-	if (!pa_sync)
++	if (IS_ERR(pa_sync))
+ 		goto unlock;
+ 
+ 	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index ef0cc80b4c0cc..6bed4aa8291de 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1285,7 +1285,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 		return -ENOTCONN;
+ 	}
+ 
+-	mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu;
++	mtu = iso_pi(sk)->conn->hcon->mtu;
+ 
+ 	release_sock(sk);
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 9223b1a698e3a..3f7a82f10fe98 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6241,7 +6241,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
+ 	BT_DBG("chan %p", chan);
+ 
+ 	chan->rx_state = L2CAP_RX_STATE_RECV;
+-	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
++	chan->conn->mtu = chan->conn->hcon->mtu;
+ 
+ 	return l2cap_resegment(chan);
+ }
+@@ -6308,7 +6308,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
+ 	 */
+ 	chan->next_tx_seq = control->reqseq;
+ 	chan->unacked_frames = 0;
+-	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
++	chan->conn->mtu = chan->conn->hcon->mtu;
+ 
+ 	err = l2cap_resegment(chan);
+ 
+@@ -6848,18 +6848,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+ 
+ 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+ 
+-	switch (hcon->type) {
+-	case LE_LINK:
+-		if (hcon->hdev->le_mtu) {
+-			conn->mtu = hcon->hdev->le_mtu;
+-			break;
+-		}
+-		fallthrough;
+-	default:
+-		conn->mtu = hcon->hdev->acl_mtu;
+-		break;
+-	}
+-
++	conn->mtu = hcon->mtu;
+ 	conn->feat_mask = 0;
+ 
+ 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index e0ad30862ee41..71d36582d4efa 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -126,7 +126,6 @@ static void sco_sock_clear_timer(struct sock *sk)
+ /* ---- SCO connections ---- */
+ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
+ {
+-	struct hci_dev *hdev = hcon->hdev;
+ 	struct sco_conn *conn = hcon->sco_data;
+ 
+ 	if (conn) {
+@@ -144,9 +143,10 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
+ 
+ 	hcon->sco_data = conn;
+ 	conn->hcon = hcon;
++	conn->mtu = hcon->mtu;
+ 
+-	if (hdev->sco_mtu > 0)
+-		conn->mtu = hdev->sco_mtu;
++	if (hcon->mtu > 0)
++		conn->mtu = hcon->mtu;
+ 	else
+ 		conn->mtu = 60;
+ 
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index bc700f85f80be..ea277c55a38db 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -38,6 +38,7 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	u8 *end_work = scratch + SCRATCH_SIZE;
+ 	u8 *priv, *pub;
+ 	u16 priv_len, pub_len;
++	int ret;
+ 
+ 	priv_len = get_unaligned_be16(src) + 2;
+ 	priv = src;
+@@ -57,8 +58,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 		unsigned char bool[3], *w = bool;
+ 		/* tag 0 is emptyAuth */
+ 		w = asn1_encode_boolean(w, w + sizeof(bool), true);
+-		if (WARN(IS_ERR(w), "BUG: Boolean failed to encode"))
+-			return PTR_ERR(w);
++		if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) {
++			ret = PTR_ERR(w);
++			goto err;
++		}
+ 		work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
+ 	}
+ 
+@@ -69,8 +72,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	 * trigger, so if it does there's something nefarious going on
+ 	 */
+ 	if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
+-		 "BUG: scratch buffer is too small"))
+-		return -EINVAL;
++		 "BUG: scratch buffer is too small")) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	work = asn1_encode_integer(work, end_work, options->keyhandle);
+ 	work = asn1_encode_octet_string(work, end_work, pub, pub_len);
+@@ -79,10 +84,18 @@ static int tpm2_key_encode(struct trusted_key_payload *payload,
+ 	work1 = payload->blob;
+ 	work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob),
+ 				     scratch, work - scratch);
+-	if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed"))
+-		return PTR_ERR(work1);
++	if (IS_ERR(work1)) {
++		ret = PTR_ERR(work1);
++		pr_err("BUG: ASN.1 encoder failed with %d\n", ret);
++		goto err;
++	}
+ 
++	kfree(scratch);
+ 	return work1 - payload->blob;
++
++err:
++	kfree(scratch);
++	return ret;
+ }
+ 
+ struct tpm2_key_context {
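
The tpm2_key_encode() hunks plug a memory leak: scratch, allocated earlier
in the function (outside this hunk's context), was never freed on the
early-error returns. The fix funnels every exit, success and failure alike,
through one cleanup point. The same shape in miniature (plain malloc/free
and a fabricated failure condition stand in for the kernel code):

	#include <errno.h>
	#include <stdlib.h>

	static int encode_something(int fail_step)
	{
		unsigned char *scratch = malloc(1024);
		int ret;

		if (!scratch)
			return -ENOMEM;

		if (fail_step == 1) {	/* e.g. ASN.1 boolean failed */
			ret = -EINVAL;
			goto err;
		}
		if (fail_step == 2) {	/* e.g. scratch buffer too small */
			ret = -EINVAL;
			goto err;
		}

		free(scratch);		/* success path frees scratch too */
		return 0;
	err:
		free(scratch);		/* all failure paths funnel here */
		return ret;
	}

	int main(void)
	{
		return encode_something(0) ? 1 : 0;
	}
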
+diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
+index bbf796a5f7ba8..08cfd4baecdd7 100644
+--- a/sound/soc/intel/boards/Makefile
++++ b/sound/soc/intel/boards/Makefile
+@@ -42,6 +42,7 @@ snd-soc-sof-sdw-objs += sof_sdw.o				\
+ 			sof_sdw_rt711.o sof_sdw_rt_sdca_jack_common.o	\
+ 			sof_sdw_rt712_sdca.o sof_sdw_rt715.o	\
+ 			sof_sdw_rt715_sdca.o sof_sdw_rt722_sdca.o	\
++			sof_sdw_rt_dmic.o			\
+ 			sof_sdw_cs42l42.o sof_sdw_cs42l43.o	\
+ 			sof_sdw_cs_amp.o			\
+ 			sof_sdw_dmic.o				\
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 08f330ed5c2ea..a90b43162a54b 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -730,7 +730,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt712-sdca-dmic-aif1",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt712_sdca_dmic_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+@@ -760,7 +760,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt712-sdca-dmic-aif1",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt712_sdca_dmic_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+@@ -822,7 +822,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt715-aif2",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt715_sdca_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+@@ -837,7 +837,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt715-aif2",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt715_sdca_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+@@ -852,7 +852,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt715-aif2",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt715_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+@@ -867,7 +867,7 @@ static struct sof_sdw_codec_info codec_info_list[] = {
+ 				.dai_name = "rt715-aif2",
+ 				.dai_type = SOF_SDW_DAI_TYPE_MIC,
+ 				.dailink = {SDW_UNUSED_DAI_ID, SDW_DMIC_DAI_ID},
+-				.rtd_init = rt715_rtd_init,
++				.rtd_init = rt_dmic_rtd_init,
+ 			},
+ 		},
+ 		.dai_num = 1,
+diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
+index b1d57034361c4..8a541b6bb0ac3 100644
+--- a/sound/soc/intel/boards/sof_sdw_common.h
++++ b/sound/soc/intel/boards/sof_sdw_common.h
+@@ -190,6 +190,7 @@ int rt712_sdca_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ int rt712_spk_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ int rt715_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ int rt715_sdca_rtd_init(struct snd_soc_pcm_runtime *rtd);
++int rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ int rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ int rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd);
+ 
+diff --git a/sound/soc/intel/boards/sof_sdw_rt_dmic.c b/sound/soc/intel/boards/sof_sdw_rt_dmic.c
+new file mode 100644
+index 0000000000000..9091f5b5c6484
+--- /dev/null
++++ b/sound/soc/intel/boards/sof_sdw_rt_dmic.c
+@@ -0,0 +1,52 @@
++// SPDX-License-Identifier: GPL-2.0-only
++// Copyright (c) 2024 Intel Corporation
++
++/*
++ * sof_sdw_rt_dmic - Helpers to handle Realtek SDW DMIC from generic machine driver
++ */
++
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <sound/soc.h>
++#include <sound/soc-acpi.h>
++#include "sof_board_helpers.h"
++#include "sof_sdw_common.h"
++
++static const char * const dmics[] = {
++	"rt715",
++	"rt712-sdca-dmic",
++};
++
++int rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd)
++{
++	struct snd_soc_card *card = rtd->card;
++	struct snd_soc_component *component;
++	struct snd_soc_dai *codec_dai;
++	char *mic_name;
++
++	codec_dai = get_codec_dai_by_name(rtd, dmics, ARRAY_SIZE(dmics));
++	if (!codec_dai)
++		return -EINVAL;
++
++	component = codec_dai->component;
++
++	/*
++	 * rt715-sdca (aka rt714) is a special case that uses a different name in card->components
++	 * and component->name_prefix.
++	 */
++	if (!strcmp(component->name_prefix, "rt714"))
++		mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "rt715-sdca");
++	else
++		mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s", component->name_prefix);
++
++	card->components = devm_kasprintf(card->dev, GFP_KERNEL,
++					  "%s mic:%s", card->components,
++					  mic_name);
++	if (!card->components)
++		return -ENOMEM;
++
++	dev_dbg(card->dev, "card->components: %s\n", card->components);
++
++	return 0;
++}
++MODULE_IMPORT_NS(SND_SOC_INTEL_SOF_BOARD_HELPERS);
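
The mapping from the codec's name_prefix to the "mic:" entry appended to
card->components is direct except for rt715-sdca, which enumerates with the
prefix "rt714" (see the comment in rt_dmic_rtd_init() above). A plain-C
sketch of the resulting strings (the base components string is an example):

	#include <stdio.h>
	#include <string.h>

	/* Mirrors the prefix -> mic name mapping in rt_dmic_rtd_init(). */
	static const char *mic_name(const char *name_prefix)
	{
		if (!strcmp(name_prefix, "rt714"))
			return "rt715-sdca";	/* the special case */
		return name_prefix;
	}

	int main(void)
	{
		char buf[128];

		snprintf(buf, sizeof(buf), "%s mic:%s",
			 "cfg-dmics:4", mic_name("rt714"));
		printf("%s\n", buf);	/* "cfg-dmics:4 mic:rt715-sdca" */
		return 0;
	}
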


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-30 11:57 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-30 11:57 UTC (permalink / raw
  To: gentoo-commits

commit:     24062c254ef9346a9c275220e0212bee52969156
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 30 11:57:03 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 30 11:57:03 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=24062c25

Linux patch 6.9.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1002_linux-6.9.3.patch | 18354 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18358 insertions(+)

diff --git a/0000_README b/0000_README
index 111716b6..41e4fabf 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.9.2.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.2
 
+Patch:  1002_linux-6.9.3.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.3
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1002_linux-6.9.3.patch b/1002_linux-6.9.3.patch
new file mode 100644
index 00000000..e699e5e2
--- /dev/null
+++ b/1002_linux-6.9.3.patch
@@ -0,0 +1,18354 @@
+diff --git a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+index 3d49d21ad33df..e1f450b80db27 100644
+--- a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
++++ b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+@@ -28,6 +28,9 @@ properties:
+   reg:
+     maxItems: 1
+ 
++  clocks:
++    maxItems: 1
++
+   dmas:
+     maxItems: 1
+ 
+@@ -48,6 +51,7 @@ required:
+   - compatible
+   - dmas
+   - reg
++  - clocks
+ 
+ additionalProperties: false
+ 
+@@ -58,6 +62,7 @@ examples:
+         reg = <0x44a00000 0x10000>;
+         dmas = <&rx_dma 0>;
+         dma-names = "rx";
++        clocks = <&axi_clk>;
+         #io-backend-cells = <0>;
+     };
+ ...
+diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+index cf456f8d9ddcb..c87677f5e2a25 100644
+--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
++++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+@@ -37,15 +37,15 @@ properties:
+       active low.
+     maxItems: 1
+ 
+-  dovdd-supply:
++  DOVDD-supply:
+     description:
+       Definition of the regulator used as interface power supply.
+ 
+-  avdd-supply:
++  AVDD-supply:
+     description:
+       Definition of the regulator used as analog power supply.
+ 
+-  dvdd-supply:
++  DVDD-supply:
+     description:
+       Definition of the regulator used as digital power supply.
+ 
+@@ -59,9 +59,9 @@ required:
+   - reg
+   - clocks
+   - clock-names
+-  - dovdd-supply
+-  - avdd-supply
+-  - dvdd-supply
++  - DOVDD-supply
++  - AVDD-supply
++  - DVDD-supply
+   - reset-gpios
+   - port
+ 
+@@ -82,9 +82,9 @@ examples:
+                 clock-names = "xvclk";
+                 reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ 
+-                dovdd-supply = <&sw2_reg>;
+-                dvdd-supply = <&sw2_reg>;
+-                avdd-supply = <&reg_peri_3p15v>;
++                DOVDD-supply = <&sw2_reg>;
++                DVDD-supply = <&sw2_reg>;
++                AVDD-supply = <&reg_peri_3p15v>;
+ 
+                 port {
+                         ov2680_to_mipi: endpoint {
+diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+index b634f57cd011d..ca81c8afba79c 100644
+--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
++++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+@@ -18,13 +18,15 @@ properties:
+     oneOf:
+       - enum:
+           - loongson,ls2k1000-thermal
++          - loongson,ls2k2000-thermal
+       - items:
+           - enum:
+-              - loongson,ls2k2000-thermal
++              - loongson,ls2k0500-thermal
+           - const: loongson,ls2k1000-thermal
+ 
+   reg:
+-    maxItems: 1
++    minItems: 1
++    maxItems: 2
+ 
+   interrupts:
+     maxItems: 1
+@@ -38,6 +40,24 @@ required:
+   - interrupts
+   - '#thermal-sensor-cells'
+ 
++if:
++  properties:
++    compatible:
++      contains:
++        enum:
++          - loongson,ls2k2000-thermal
++
++then:
++  properties:
++    reg:
++      minItems: 2
++      maxItems: 2
++
++else:
++  properties:
++    reg:
++      maxItems: 1
++
+ unevaluatedProperties: false
+ 
+ examples:
+diff --git a/Makefile b/Makefile
+index 8302496ee7a57..8def0819eb55b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
+index bddc82f789421..a83d29fed1756 100644
+--- a/arch/arm/configs/sunxi_defconfig
++++ b/arch/arm/configs/sunxi_defconfig
+@@ -110,6 +110,7 @@ CONFIG_DRM_PANEL_LVDS=y
+ CONFIG_DRM_PANEL_SIMPLE=y
+ CONFIG_DRM_PANEL_EDP=y
+ CONFIG_DRM_SIMPLE_BRIDGE=y
++CONFIG_DRM_DW_HDMI=y
+ CONFIG_DRM_LIMA=y
+ CONFIG_FB_SIMPLE=y
+ CONFIG_BACKLIGHT_CLASS_DEVICE=y
+diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
+index 0a7186a93882d..d4d7451c2c129 100644
+--- a/arch/arm64/include/asm/irqflags.h
++++ b/arch/arm64/include/asm/irqflags.h
+@@ -5,7 +5,6 @@
+ #ifndef __ASM_IRQFLAGS_H
+ #define __ASM_IRQFLAGS_H
+ 
+-#include <asm/alternative.h>
+ #include <asm/barrier.h>
+ #include <asm/ptrace.h>
+ #include <asm/sysreg.h>
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index ebb0158997cab..82e8a60173828 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1535,6 +1535,27 @@ static void fpsimd_save_kernel_state(struct task_struct *task)
+ 	task->thread.kernel_fpsimd_cpu = smp_processor_id();
+ }
+ 
++/*
++ * Invalidate any task's FPSIMD state that is present on this cpu.
++ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
++ * before calling this function.
++ */
++static void fpsimd_flush_cpu_state(void)
++{
++	WARN_ON(!system_supports_fpsimd());
++	__this_cpu_write(fpsimd_last_state.st, NULL);
++
++	/*
++	 * Leaving streaming mode enabled will cause issues for any kernel
++	 * NEON and leaving streaming mode or ZA enabled may increase power
++	 * consumption.
++	 */
++	if (system_supports_sme())
++		sme_smstop();
++
++	set_thread_flag(TIF_FOREIGN_FPSTATE);
++}
++
+ void fpsimd_thread_switch(struct task_struct *next)
+ {
+ 	bool wrong_task, wrong_cpu;
+@@ -1552,7 +1573,7 @@ void fpsimd_thread_switch(struct task_struct *next)
+ 
+ 	if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
+ 		fpsimd_load_kernel_state(next);
+-		set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
++		fpsimd_flush_cpu_state();
+ 	} else {
+ 		/*
+ 		 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
+@@ -1842,27 +1863,6 @@ void fpsimd_flush_task_state(struct task_struct *t)
+ 	barrier();
+ }
+ 
+-/*
+- * Invalidate any task's FPSIMD state that is present on this cpu.
+- * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+- * before calling this function.
+- */
+-static void fpsimd_flush_cpu_state(void)
+-{
+-	WARN_ON(!system_supports_fpsimd());
+-	__this_cpu_write(fpsimd_last_state.st, NULL);
+-
+-	/*
+-	 * Leaving streaming mode enabled will cause issues for any kernel
+-	 * NEON and leaving streaming mode or ZA enabled may increase power
+-	 * consumption.
+-	 */
+-	if (system_supports_sme())
+-		sme_smstop();
+-
+-	set_thread_flag(TIF_FOREIGN_FPSTATE);
+-}
+-
+ /*
+  * Save the FPSIMD state to memory and invalidate cpu view.
+  * This function must be called with preemption disabled.
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 6ffa295851945..99837d4e8c977 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -3,8 +3,8 @@ config M68K
+ 	bool
+ 	default y
+ 	select ARCH_32BIT_OFF_T
+-	select ARCH_HAS_CPU_CACHE_ALIASING
+ 	select ARCH_HAS_BINFMT_FLAT
++	select ARCH_HAS_CPU_CACHE_ALIASING
+ 	select ARCH_HAS_CPU_FINALIZE_INIT if MMU
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DMA_PREP_COHERENT if M68K_NONCOHERENT_DMA && !COLDFIRE
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index 3bcdd32a6b366..338b474910f74 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -430,7 +430,9 @@ resume:
+ 	movec	%a0,%dfc
+ 
+ 	/* restore status register */
+-	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
++	movew	%a1@(TASK_THREAD+THREAD_SR),%d0
++	oriw	#0x0700,%d0
++	movew	%d0,%sr
+ 
+ 	rts
+ 
+diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
+index 4c8f8cbfa05f3..e7f0f72c1b36e 100644
+--- a/arch/m68k/mac/misc.c
++++ b/arch/m68k/mac/misc.c
+@@ -453,30 +453,18 @@ void mac_poweroff(void)
+ 
+ void mac_reset(void)
+ {
+-	if (macintosh_config->adb_type == MAC_ADB_II &&
+-	    macintosh_config->ident != MAC_MODEL_SE30) {
+-		/* need ROMBASE in booter */
+-		/* indeed, plus need to MAP THE ROM !! */
+-
+-		if (mac_bi_data.rombase == 0)
+-			mac_bi_data.rombase = 0x40800000;
+-
+-		/* works on some */
+-		rom_reset = (void *) (mac_bi_data.rombase + 0xa);
+-
+-		local_irq_disable();
+-		rom_reset();
+ #ifdef CONFIG_ADB_CUDA
+-	} else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+-	           macintosh_config->adb_type == MAC_ADB_CUDA) {
++	if (macintosh_config->adb_type == MAC_ADB_EGRET ||
++	    macintosh_config->adb_type == MAC_ADB_CUDA) {
+ 		cuda_restart();
++	} else
+ #endif
+ #ifdef CONFIG_ADB_PMU
+-	} else if (macintosh_config->adb_type == MAC_ADB_PB2) {
++	if (macintosh_config->adb_type == MAC_ADB_PB2) {
+ 		pmu_restart();
++	} else
+ #endif
+-	} else if (CPU_IS_030) {
+-
++	if (CPU_IS_030) {
+ 		/* 030-specific reset routine.  The idea is general, but the
+ 		 * specific registers to reset are '030-specific.  Until I
+ 		 * have a non-030 machine, I can't test anything else.
+@@ -524,6 +512,18 @@ void mac_reset(void)
+ 		    "jmp %/a0@\n\t" /* jump to the reset vector */
+ 		    ".chip 68k"
+ 		    : : "r" (offset), "a" (rombase) : "a0");
++	} else {
++		/* need ROMBASE in booter */
++		/* indeed, plus need to MAP THE ROM !! */
++
++		if (mac_bi_data.rombase == 0)
++			mac_bi_data.rombase = 0x40800000;
++
++		/* works on some */
++		rom_reset = (void *)(mac_bi_data.rombase + 0xa);
++
++		local_irq_disable();
++		rom_reset();
+ 	}
+ 
+ 	/* should never get here */
+diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
+index 86e02929f3ace..3c27d1c727189 100644
+--- a/arch/openrisc/kernel/process.c
++++ b/arch/openrisc/kernel/process.c
+@@ -65,7 +65,7 @@ void machine_restart(char *cmd)
+ }
+ 
+ /*
+- * This is used if pm_power_off has not been set by a power management
++ * This is used if a sys-off handler was not set by a power management
+  * driver, in this case we can assume we are on a simulator.  On
+  * OpenRISC simulators l.nop 1 will trigger the simulator exit.
+  */
+@@ -89,10 +89,8 @@ void machine_halt(void)
+ void machine_power_off(void)
+ {
+ 	printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
+-	if (pm_power_off != NULL)
+-		pm_power_off();
+-	else
+-		default_power_off();
++	do_kernel_power_off();
++	default_power_off();
+ }
+ 
+ /*
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index 9370888c9a7e3..90554a5558fbc 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -180,29 +180,39 @@ asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
+ 
+ asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
+ {
+-	int code = FPE_FLTUNK;
+-	unsigned long fpcsr = regs->fpcsr;
+-
+-	if (fpcsr & SPR_FPCSR_IVF)
+-		code = FPE_FLTINV;
+-	else if (fpcsr & SPR_FPCSR_OVF)
+-		code = FPE_FLTOVF;
+-	else if (fpcsr & SPR_FPCSR_UNF)
+-		code = FPE_FLTUND;
+-	else if (fpcsr & SPR_FPCSR_DZF)
+-		code = FPE_FLTDIV;
+-	else if (fpcsr & SPR_FPCSR_IXF)
+-		code = FPE_FLTRES;
+-
+-	/* Clear all flags */
+-	regs->fpcsr &= ~SPR_FPCSR_ALLF;
+-
+-	force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
++	if (user_mode(regs)) {
++		int code = FPE_FLTUNK;
++		unsigned long fpcsr = regs->fpcsr;
++
++		if (fpcsr & SPR_FPCSR_IVF)
++			code = FPE_FLTINV;
++		else if (fpcsr & SPR_FPCSR_OVF)
++			code = FPE_FLTOVF;
++		else if (fpcsr & SPR_FPCSR_UNF)
++			code = FPE_FLTUND;
++		else if (fpcsr & SPR_FPCSR_DZF)
++			code = FPE_FLTDIV;
++		else if (fpcsr & SPR_FPCSR_IXF)
++			code = FPE_FLTRES;
++
++		/* Clear all flags */
++		regs->fpcsr &= ~SPR_FPCSR_ALLF;
++
++		force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
++	} else {
++		pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc);
++		die("Die:", regs, SIGFPE);
++	}
+ }
+ 
+ asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
+ {
+-	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
++	if (user_mode(regs)) {
++		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
++	} else {
++		pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc);
++		die("Die:", regs, SIGILL);
++	}
+ }
+ 
+ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 6f0c92e8149d8..dcf61cbd31470 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -22,6 +22,7 @@ EXPORT_SYMBOL(memset);
+ #include <linux/atomic.h>
+ EXPORT_SYMBOL(__xchg8);
+ EXPORT_SYMBOL(__xchg32);
++EXPORT_SYMBOL(__cmpxchg_u8);
+ EXPORT_SYMBOL(__cmpxchg_u32);
+ EXPORT_SYMBOL(__cmpxchg_u64);
+ #ifdef CONFIG_SMP
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 8e6c84df4ca10..e205135ae1fea 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -564,10 +564,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
+ 	.msiir_offset = 0x38,
+ };
+ 
++#ifdef CONFIG_EPAPR_PARAVIRT
+ static const struct fsl_msi_feature vmpic_msi_feature = {
+ 	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
+ 	.msiir_offset = 0,
+ };
++#endif
+ 
+ static const struct of_device_id fsl_of_msi_ids[] = {
+ 	{
+diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
+index 2468c55933cd0..9d1b07932794e 100644
+--- a/arch/riscv/include/asm/csr.h
++++ b/arch/riscv/include/asm/csr.h
+@@ -281,7 +281,7 @@
+ #define CSR_HPMCOUNTER30H	0xc9e
+ #define CSR_HPMCOUNTER31H	0xc9f
+ 
+-#define CSR_SSCOUNTOVF		0xda0
++#define CSR_SCOUNTOVF		0xda0
+ 
+ #define CSR_SSTATUS		0x100
+ #define CSR_SIE			0x104
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index ec9d692838fca..fb5d1950042b7 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -498,33 +498,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ 		break;
+ 	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
+ 	case BPF_ADD | BPF_FETCH:
+-		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
+-		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
++		     rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zextw(rs, rs, ctx);
+ 		break;
+ 	case BPF_AND | BPF_FETCH:
+-		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
+-		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
++		     rv_amoand_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zextw(rs, rs, ctx);
+ 		break;
+ 	case BPF_OR | BPF_FETCH:
+-		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
+-		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
++		     rv_amoor_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zextw(rs, rs, ctx);
+ 		break;
+ 	case BPF_XOR | BPF_FETCH:
+-		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
+-		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
++		     rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zextw(rs, rs, ctx);
+ 		break;
+ 	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
+ 	case BPF_XCHG:
+-		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
+-		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
++		emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
++		     rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
+ 		if (!is64)
+ 			emit_zextw(rs, rs, ctx);
+ 		break;
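
The last two operands of the rv_amo*() encodings are the aq and rl bits of
the RISC-V A extension; flipping them from 0,0 to 1,1 turns the JITed BPF
atomics from fully relaxed operations into fully ordered ones, matching the
semantics of the kernel's value-returning atomics. An approximate C11
analogue of what amoadd.d.aqrl now provides for BPF_ADD | BPF_FETCH:

	#include <stdatomic.h>

	/* Approximate portable analogue; the hardware does this in one AMO. */
	static long bpf_style_fetch_add(_Atomic long *p, long v)
	{
		return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
	}

	int main(void)
	{
		_Atomic long x = 1;
		return bpf_style_fetch_add(&x, 2) == 1 && x == 3 ? 0 : 1;
	}
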
+diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
+index 5cc46e0dde620..9725586f42597 100644
+--- a/arch/s390/include/asm/gmap.h
++++ b/arch/s390/include/asm/gmap.h
+@@ -146,7 +146,7 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
+ 
+ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+ 			     unsigned long gaddr, unsigned long vmaddr);
+-int gmap_mark_unmergeable(void);
++int s390_disable_cow_sharing(void);
+ void s390_unlist_old_asce(struct gmap *gmap);
+ int s390_replace_asce(struct gmap *gmap);
+ void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
+diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
+index bb1b4bef1878b..4c2dc7abc2858 100644
+--- a/arch/s390/include/asm/mmu.h
++++ b/arch/s390/include/asm/mmu.h
+@@ -32,6 +32,11 @@ typedef struct {
+ 	unsigned int uses_skeys:1;
+ 	/* The mmu context uses CMM. */
+ 	unsigned int uses_cmm:1;
++	/*
++	 * The mmu context allows COW-sharing of memory pages (KSM, zeropage).
++	 * Note that COW-sharing during fork() is currently always allowed.
++	 */
++	unsigned int allow_cow_sharing:1;
+ 	/* The gmaps associated with this context are allowed to use huge pages. */
+ 	unsigned int allow_gmap_hpage_1m:1;
+ } mm_context_t;
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 929af18b09081..a7789a9f62186 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -35,6 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ 	mm->context.has_pgste = 0;
+ 	mm->context.uses_skeys = 0;
+ 	mm->context.uses_cmm = 0;
++	mm->context.allow_cow_sharing = 1;
+ 	mm->context.allow_gmap_hpage_1m = 0;
+ #endif
+ 	switch (mm->context.asce_limit) {
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 60950e7a25f58..259c2439c2517 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -566,10 +566,20 @@ static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
+ }
+ 
+ /*
+- * In the case that a guest uses storage keys
+- * faults should no longer be backed by zero pages
++ * As soon as the guest uses storage keys or enables PV, we deduplicate all
++ * mapped shared zeropages and prevent new shared zeropages from getting
++ * mapped.
+  */
+-#define mm_forbids_zeropage mm_has_pgste
++#define mm_forbids_zeropage mm_forbids_zeropage
++static inline int mm_forbids_zeropage(struct mm_struct *mm)
++{
++#ifdef CONFIG_PGSTE
++	if (!mm->context.allow_cow_sharing)
++		return 1;
++#endif
++	return 0;
++}
++
+ static inline int mm_uses_skeys(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_PGSTE
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 48de296e8905c..fb9b32f936c45 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -209,13 +209,13 @@ SECTIONS
+ 	.dynstr ALIGN(8) : {
+ 		*(.dynstr)
+ 	}
+-#endif
+ 	.hash ALIGN(8) : {
+ 		*(.hash)
+ 	}
+ 	.gnu.hash ALIGN(8) : {
+ 		*(.gnu.hash)
+ 	}
++#endif
+ 
+ 	. = ALIGN(PAGE_SIZE);
+ 	__init_end = .;		/* freed after init ends here */
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 7721eb522f43d..82e9631cd9efb 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2631,9 +2631,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
+ 		if (r)
+ 			break;
+ 
+-		mmap_write_lock(current->mm);
+-		r = gmap_mark_unmergeable();
+-		mmap_write_unlock(current->mm);
++		r = s390_disable_cow_sharing();
+ 		if (r)
+ 			break;
+ 
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 12d22a7fa32fd..474a25ca5c48f 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2549,41 +2549,6 @@ static inline void thp_split_mm(struct mm_struct *mm)
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
+-/*
+- * Remove all empty zero pages from the mapping for lazy refaulting
+- * - This must be called after mm->context.has_pgste is set, to avoid
+- *   future creation of zero pages
+- * - This must be called after THP was disabled.
+- *
+- * mm contracts with s390, that even if mm were to remove a page table,
+- * racing with the loop below and so causing pte_offset_map_lock() to fail,
+- * it will never insert a page table containing empty zero pages once
+- * mm_forbids_zeropage(mm) i.e. mm->context.has_pgste is set.
+- */
+-static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
+-			   unsigned long end, struct mm_walk *walk)
+-{
+-	unsigned long addr;
+-
+-	for (addr = start; addr != end; addr += PAGE_SIZE) {
+-		pte_t *ptep;
+-		spinlock_t *ptl;
+-
+-		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+-		if (!ptep)
+-			break;
+-		if (is_zero_pfn(pte_pfn(*ptep)))
+-			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
+-		pte_unmap_unlock(ptep, ptl);
+-	}
+-	return 0;
+-}
+-
+-static const struct mm_walk_ops zap_zero_walk_ops = {
+-	.pmd_entry	= __zap_zero_pages,
+-	.walk_lock	= PGWALK_WRLOCK,
+-};
+-
+ /*
+  * switch on pgstes for its userspace process (for kvm)
+  */
+@@ -2601,22 +2566,142 @@ int s390_enable_sie(void)
+ 	mm->context.has_pgste = 1;
+ 	/* split thp mappings and disable thp for future mappings */
+ 	thp_split_mm(mm);
+-	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
+ 	mmap_write_unlock(mm);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(s390_enable_sie);
+ 
+-int gmap_mark_unmergeable(void)
++static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
++				   unsigned long end, struct mm_walk *walk)
++{
++	unsigned long *found_addr = walk->private;
++
++	/* Return 1 if the page is a zeropage. */
++	if (is_zero_pfn(pte_pfn(*pte))) {
++		/*
++		 * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
++		 * right thing and likely don't care: FAULT_FLAG_UNSHARE
++		 * currently only works in COW mappings, which is also where
++		 * mm_forbids_zeropage() is checked.
++		 */
++		if (!is_cow_mapping(walk->vma->vm_flags))
++			return -EFAULT;
++
++		*found_addr = addr;
++		return 1;
++	}
++	return 0;
++}
++
++static const struct mm_walk_ops find_zeropage_ops = {
++	.pte_entry	= find_zeropage_pte_entry,
++	.walk_lock	= PGWALK_WRLOCK,
++};
++
++/*
++ * Unshare all shared zeropages, replacing them by anonymous pages. Note that
++ * we cannot simply zap all shared zeropages, because this could later
++ * trigger unexpected userfaultfd missing events.
++ *
++ * This must be called after mm->context.allow_cow_sharing was
++ * set to 0, to avoid future mappings of shared zeropages.
++ *
++ * mm contracts with s390, that even if mm were to remove a page table,
++ * and racing with walk_page_range_vma() calling pte_offset_map_lock()
++ * would fail, it will never insert a page table containing empty zero
++ * pages once mm_forbids_zeropage(mm) i.e.
++ * mm->context.allow_cow_sharing is set to 0.
++ */
++static int __s390_unshare_zeropages(struct mm_struct *mm)
++{
++	struct vm_area_struct *vma;
++	VMA_ITERATOR(vmi, mm, 0);
++	unsigned long addr;
++	vm_fault_t fault;
++	int rc;
++
++	for_each_vma(vmi, vma) {
++		/*
++		 * We could only look at COW mappings, but it's more future
++		 * proof to catch unexpected zeropages in other mappings and
++		 * fail.
++		 */
++		if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
++			continue;
++		addr = vma->vm_start;
++
++retry:
++		rc = walk_page_range_vma(vma, addr, vma->vm_end,
++					 &find_zeropage_ops, &addr);
++		if (rc < 0)
++			return rc;
++		else if (!rc)
++			continue;
++
++		/* addr was updated by find_zeropage_pte_entry() */
++		fault = handle_mm_fault(vma, addr,
++					FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
++					NULL);
++		if (fault & VM_FAULT_OOM)
++			return -ENOMEM;
++		/*
++		 * See break_ksm(): even after handle_mm_fault() returned 0, we
++		 * must start the lookup from the current address, because
++		 * handle_mm_fault() may back out if there's any difficulty.
++		 *
++		 * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
++		 * maybe they could trigger in the future on concurrent
++		 * truncation. In that case, the shared zeropage would be gone
++		 * and we can simply retry and make progress.
++		 */
++		cond_resched();
++		goto retry;
++	}
++
++	return 0;
++}
++
++static int __s390_disable_cow_sharing(struct mm_struct *mm)
+ {
++	int rc;
++
++	if (!mm->context.allow_cow_sharing)
++		return 0;
++
++	mm->context.allow_cow_sharing = 0;
++
++	/* Replace all shared zeropages by anonymous pages. */
++	rc = __s390_unshare_zeropages(mm);
+ 	/*
+ 	 * Make sure to disable KSM (if enabled for the whole process or
+ 	 * individual VMAs). Note that nothing currently hinders user space
+ 	 * from re-enabling it.
+ 	 */
+-	return ksm_disable(current->mm);
++	if (!rc)
++		rc = ksm_disable(mm);
++	if (rc)
++		mm->context.allow_cow_sharing = 1;
++	return rc;
++}
++
++/*
++ * Disable most COW-sharing of memory pages for the whole process:
++ * (1) Disable KSM and unmerge/unshare any KSM pages.
++ * (2) Disallow shared zeropages and unshare any zerpages that are mapped.
++ *
++ * Not that we currently don't bother with COW-shared pages that are shared
++ * with parent/child processes due to fork().
++ */
++int s390_disable_cow_sharing(void)
++{
++	int rc;
++
++	mmap_write_lock(current->mm);
++	rc = __s390_disable_cow_sharing(current->mm);
++	mmap_write_unlock(current->mm);
++	return rc;
+ }
+-EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
++EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
+ 
+ /*
+  * Enable storage key handling from now on and initialize the storage
+@@ -2685,7 +2770,7 @@ int s390_enable_skey(void)
+ 		goto out_up;
+ 
+ 	mm->context.uses_skeys = 1;
+-	rc = gmap_mark_unmergeable();
++	rc = __s390_disable_cow_sharing(mm);
+ 	if (rc) {
+ 		mm->context.uses_skeys = 0;
+ 		goto out_up;
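
The rework above replaces "zap all shared zeropages" with "unshare them":
zapping could later surface as spurious userfaultfd missing events, so each
VMA is walked until a shared-zeropage PTE is found, that address is resolved
with a FAULT_FLAG_UNSHARE fault, and the walk restarts from the same address
because handle_mm_fault() may back out. The control flow reduced to a sketch
(the struct and stub functions are illustrative, not kernel API):

	#include <errno.h>

	struct vma_model { unsigned long start, end; };

	/* Stand-in for walk_page_range_vma() + find_zeropage_pte_entry():
	 * returns 1 and updates *found when a shared zeropage is hit,
	 * 0 when the range is clean, <0 on an unexpected zeropage. */
	static int find_zeropage(struct vma_model *vma, unsigned long addr,
				 unsigned long end, unsigned long *found)
	{
		(void)vma; (void)addr; (void)end; (void)found;
		return 0;
	}

	/* Stand-in for handle_mm_fault(FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE). */
	static int unshare_at(struct vma_model *vma, unsigned long addr)
	{
		(void)vma; (void)addr;
		return 0;
	}

	static int unshare_vma(struct vma_model *vma)
	{
		unsigned long addr = vma->start;
		int rc;

		for (;;) {
			rc = find_zeropage(vma, addr, vma->end, &addr);
			if (rc < 0)
				return rc;	/* zeropage outside a COW mapping */
			if (rc == 0)
				return 0;	/* VMA is clean, move on */
			if (unshare_at(vma, addr) < 0)
				return -ENOMEM;
			/* Retry from addr: the fault may have backed out. */
		}
	}

	int main(void)
	{
		struct vma_model v = { 0x1000, 0x2000 };
		return unshare_vma(&v);
	}
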
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 5af0402e94b88..1d168a98ae21b 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1427,8 +1427,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 	EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),		\
+ 		      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,	\
+ 		      src_reg, dst_reg, off);				\
+-	if (is32 && (insn->imm & BPF_FETCH))				\
+-		EMIT_ZERO(src_reg);					\
++	if (insn->imm & BPF_FETCH) {					\
++		/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */	\
++		_EMIT2(0x07e0);						\
++		if (is32)                                               \
++			EMIT_ZERO(src_reg);				\
++	}								\
+ } while (0)
+ 		case BPF_ADD:
+ 		case BPF_ADD | BPF_FETCH:
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index aed1ea8e2c2f0..74051b8ddf3e7 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -44,17 +44,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ 	if (OPCODE_RTE(opcode))
+ 		return -EFAULT;	/* Bad breakpoint */
+ 
++	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ 	p->opcode = opcode;
+ 
+ 	return 0;
+ }
+ 
+-void __kprobes arch_copy_kprobe(struct kprobe *p)
+-{
+-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+-	p->opcode = *p->addr;
+-}
+-
+ void __kprobes arch_arm_kprobe(struct kprobe *p)
+ {
+ 	*p->addr = BREAKPOINT_INSTRUCTION;
+diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
+index 3e07074e00981..06fed5a21e8ba 100644
+--- a/arch/sh/lib/checksum.S
++++ b/arch/sh/lib/checksum.S
+@@ -33,7 +33,8 @@
+  */
+ 
+ /*	
+- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
++ * unsigned int csum_partial(const unsigned char *buf, int len,
++ *                           unsigned int sum);
+  */
+ 
+ .text
+@@ -45,31 +46,11 @@ ENTRY(csum_partial)
+ 	   * Fortunately, it is easy to convert 2-byte alignment to 4-byte
+ 	   * alignment for the unrolled loop.
+ 	   */
++	mov	r5, r1
+ 	mov	r4, r0
+-	tst	#3, r0		! Check alignment.
+-	bt/s	2f		! Jump if alignment is ok.
+-	 mov	r4, r7		! Keep a copy to check for alignment
++	tst	#2, r0		! Check alignment.
++	bt	2f		! Jump if alignment is ok.
+ 	!
+-	tst	#1, r0		! Check alignment.
+-	bt	21f		! Jump if alignment is boundary of 2bytes.
+-
+-	! buf is odd
+-	tst	r5, r5
+-	add	#-1, r5
+-	bt	9f
+-	mov.b	@r4+, r0
+-	extu.b	r0, r0
+-	addc	r0, r6		! t=0 from previous tst
+-	mov	r6, r0
+-	shll8	r6
+-	shlr16	r0
+-	shlr8	r0
+-	or	r0, r6
+-	mov	r4, r0
+-	tst	#2, r0
+-	bt	2f
+-21:
+-	! buf is 2 byte aligned (len could be 0)
+ 	add	#-2, r5		! Alignment uses up two bytes.
+ 	cmp/pz	r5		!
+ 	bt/s	1f		! Jump if we had at least two bytes.
+@@ -77,17 +58,16 @@ ENTRY(csum_partial)
+ 	bra	6f
+ 	 add	#2, r5		! r5 was < 2.  Deal with it.
+ 1:
++	mov	r5, r1		! Save new len for later use.
+ 	mov.w	@r4+, r0
+ 	extu.w	r0, r0
+ 	addc	r0, r6
+ 	bf	2f
+ 	add	#1, r6
+ 2:
+-	! buf is 4 byte aligned (len could be 0)
+-	mov	r5, r1
+ 	mov	#-5, r0
+-	shld	r0, r1
+-	tst	r1, r1
++	shld	r0, r5
++	tst	r5, r5
+ 	bt/s	4f		! if it's =0, go to 4f
+ 	 clrt
+ 	.align	2
+@@ -109,31 +89,30 @@ ENTRY(csum_partial)
+ 	addc	r0, r6
+ 	addc	r2, r6
+ 	movt	r0
+-	dt	r1
++	dt	r5
+ 	bf/s	3b
+ 	 cmp/eq	#1, r0
+-	! here, we know r1==0
+-	addc	r1, r6			! add carry to r6
++	! here, we know r5==0
++	addc	r5, r6			! add carry to r6
+ 4:
+-	mov	r5, r0
++	mov	r1, r0
+ 	and	#0x1c, r0
+ 	tst	r0, r0
+-	bt	6f
+-	! 4 bytes or more remaining
+-	mov	r0, r1
+-	shlr2	r1
++	bt/s	6f
++	 mov	r0, r5
++	shlr2	r5
+ 	mov	#0, r2
+ 5:
+ 	addc	r2, r6
+ 	mov.l	@r4+, r2
+ 	movt	r0
+-	dt	r1
++	dt	r5
+ 	bf/s	5b
+ 	 cmp/eq	#1, r0
+ 	addc	r2, r6
+-	addc	r1, r6		! r1==0 here, so it means add carry-bit
++	addc	r5, r6		! r5==0 here, so it means add carry-bit
+ 6:
+-	! 3 bytes or less remaining
++	mov	r1, r5
+ 	mov	#3, r0
+ 	and	r0, r5
+ 	tst	r5, r5
+@@ -159,16 +138,6 @@ ENTRY(csum_partial)
+ 	mov	#0, r0
+ 	addc	r0, r6
+ 9:
+-	! Check if the buffer was misaligned, if so realign sum
+-	mov	r7, r0
+-	tst	#1, r0
+-	bt	10f
+-	mov	r6, r0
+-	shll8	r6
+-	shlr16	r0
+-	shlr8	r0
+-	or	r0, r6
+-10:
+ 	rts
+ 	 mov	r6, r0
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 928820e61cb50..f5b3d14ff385b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -501,7 +501,7 @@ config X86_FRED
+ 	  When enabled, try to use Flexible Return and Event Delivery
+ 	  instead of the legacy SYSCALL/SYSENTER/IDT architecture for
+ 	  ring transitions and exception/interrupt handling if the
+-	  system supports.
++	  system supports it.
+ 
+ if X86_32
+ config X86_BIGSMP
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index bf4a10a5794f1..1dcb794c5479e 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
+ 	call	sev_enable
+ #endif
+ 
++	/* Preserve only the CR4 bits that must be preserved, and clear the rest */
++	movq	%cr4, %rax
++	andl	$(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
++	movq	%rax, %cr4
++
+ 	/*
+ 	 * configure_5level_paging() updates the number of paging levels using
+ 	 * a trampoline in 32-bit addressable memory if the current number does
+diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
+index ef73a3ab87263..791386d9a83aa 100644
+--- a/arch/x86/crypto/nh-avx2-x86_64.S
++++ b/arch/x86/crypto/nh-avx2-x86_64.S
+@@ -154,5 +154,6 @@ SYM_TYPED_FUNC_START(nh_avx2)
+ 	vpaddq		T1, T0, T0
+ 	vpaddq		T4, T0, T0
+ 	vmovdqu		T0, (HASH)
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(nh_avx2)
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 9918212faf914..0ffb072be9561 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -716,6 +716,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
+ 	popq	%r13
+ 	popq	%r12
+ 	popq	%rbx
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(sha256_transform_rorx)
+ 
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index f08496cd68708..24973f42c43ff 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -680,6 +680,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
+ 	pop	%r12
+ 	pop	%rbx
+ 
++	vzeroupper
+ 	RET
+ SYM_FUNC_END(sha512_transform_rorx)
+ 
+diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
+index 44b08b53ab32f..c1d6cd58f8094 100644
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -62,7 +62,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
+ 	asm volatile(_lock "cmpxchg16b %[ptr]"				\
+ 		     CC_SET(e)						\
+ 		     : CC_OUT(e) (ret),					\
+-		       [ptr] "+m" (*ptr),				\
++		       [ptr] "+m" (*(_ptr)),				\
+ 		       "+a" (o.low), "+d" (o.high)			\
+ 		     : "b" (n.low), "c" (n.high)			\
+ 		     : "memory");					\
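
The cmpxchg128 change is a one-character macro-hygiene fix: the asm operand
referred to *ptr where the macro parameter is _ptr, so the constraint only
bound correctly while a variable literally named ptr happened to be in
scope. A minimal reproduction of that failure mode (names invented):

	#include <stdio.h>

	/* Buggy: expands to the identifier 'val', not the macro argument. */
	#define READ_BUGGY(_p)	(*(volatile int *)&val)
	/* Fixed: uses the parameter, as the kernel macro now does. */
	#define READ_FIXED(_p)	(*(volatile int *)&(_p))

	int main(void)
	{
		int val = 1;	/* innocent bystander in scope */
		int other = 2;

		printf("%d\n", READ_BUGGY(other));	/* 1 - wrong object */
		printf("%d\n", READ_FIXED(other));	/* 2 */
		return 0;
	}
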
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 9abb8cc4cd474..b786449626267 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -567,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
+ extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 				    unsigned int *level);
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++				  unsigned int *level, bool *nx, bool *rw);
+ extern pmd_t *lookup_pmd_address(unsigned long address);
+ extern phys_addr_t slow_virt_to_phys(void *__address);
+ extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
+diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
+index 1be13b2dfe8bf..64df897c0ee30 100644
+--- a/arch/x86/include/asm/sparsemem.h
++++ b/arch/x86/include/asm/sparsemem.h
+@@ -37,8 +37,6 @@ extern int phys_to_target_node(phys_addr_t start);
+ #define phys_to_target_node phys_to_target_node
+ extern int memory_add_physaddr_to_nid(u64 start);
+ #define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+-extern int numa_fill_memblks(u64 start, u64 end);
+-#define numa_fill_memblks numa_fill_memblks
+ #endif
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 13b45b9c806da..620f0af713ca2 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -465,7 +465,7 @@ static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, siz
+ 	return !__apply_microcode_amd(mc);
+ }
+ 
+-static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
++static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
+ {
+ 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ 	struct firmware fw;
+diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
+index 1123ef3ccf901..4334033658edf 100644
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -193,11 +193,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
+ 	cur->warned = false;
+ 
+ 	/*
+-	 * If a non-zero TSC value for socket 0 may be valid then the default
+-	 * adjusted value cannot assumed to be zero either.
++	 * The default adjust value cannot be assumed to be zero on any socket.
+ 	 */
+-	if (tsc_async_resets)
+-		cur->adjusted = bootval;
++	cur->adjusted = bootval;
+ 
+ 	/*
+ 	 * Check whether this CPU is the first in a package to come up. In
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index 12af572201a29..da9347552be69 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 65e9a6e391c04..ce84ba86e69e9 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -929,6 +929,8 @@ int memory_add_physaddr_to_nid(u64 start)
+ }
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ 
++#endif
++
+ static int __init cmp_memblk(const void *a, const void *b)
+ {
+ 	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+@@ -1001,5 +1003,3 @@ int __init numa_fill_memblks(u64 start, u64 end)
+ 	}
+ 	return 0;
+ }
+-
+-#endif
+diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
+index 80c9037ffadff..19fdfbb171ed6 100644
+--- a/arch/x86/mm/pat/set_memory.c
++++ b/arch/x86/mm/pat/set_memory.c
+@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+  * Validate strict W^X semantics.
+  */
+ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
+-				  unsigned long pfn, unsigned long npg)
++				  unsigned long pfn, unsigned long npg,
++				  bool nx, bool rw)
+ {
+ 	unsigned long end;
+ 
+@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+ 	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
+ 		return new;
+ 
++	/* Non-leaf translation entries can disable writing or execution. */
++	if (!rw || nx)
++		return new;
++
+ 	end = start + npg * PAGE_SIZE - 1;
+ 	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
+ 		  (unsigned long long)pgprot_val(old),
+@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
+ 
+ /*
+  * Lookup the page table entry for a virtual address in a specific pgd.
+- * Return a pointer to the entry and the level of the mapping.
++ * Return a pointer to the entry, the level of the mapping, and the effective
++ * NX and RW bits of all page table levels.
+  */
+-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+-			     unsigned int *level)
++pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
++				  unsigned int *level, bool *nx, bool *rw)
+ {
+ 	p4d_t *p4d;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+ 
+ 	*level = PG_LEVEL_NONE;
++	*nx = false;
++	*rw = true;
+ 
+ 	if (pgd_none(*pgd))
+ 		return NULL;
+ 
++	*nx |= pgd_flags(*pgd) & _PAGE_NX;
++	*rw &= pgd_flags(*pgd) & _PAGE_RW;
++
+ 	p4d = p4d_offset(pgd, address);
+ 	if (p4d_none(*p4d))
+ 		return NULL;
+@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (p4d_leaf(*p4d) || !p4d_present(*p4d))
+ 		return (pte_t *)p4d;
+ 
++	*nx |= p4d_flags(*p4d) & _PAGE_NX;
++	*rw &= p4d_flags(*p4d) & _PAGE_RW;
++
+ 	pud = pud_offset(p4d, address);
+ 	if (pud_none(*pud))
+ 		return NULL;
+@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (pud_leaf(*pud) || !pud_present(*pud))
+ 		return (pte_t *)pud;
+ 
++	*nx |= pud_flags(*pud) & _PAGE_NX;
++	*rw &= pud_flags(*pud) & _PAGE_RW;
++
+ 	pmd = pmd_offset(pud, address);
+ 	if (pmd_none(*pmd))
+ 		return NULL;
+@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ 	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
+ 		return (pte_t *)pmd;
+ 
++	*nx |= pmd_flags(*pmd) & _PAGE_NX;
++	*rw &= pmd_flags(*pmd) & _PAGE_RW;
++
+ 	*level = PG_LEVEL_4K;
+ 
+ 	return pte_offset_kernel(pmd, address);
+ }
+ 
++/*
++ * Lookup the page table entry for a virtual address in a specific pgd.
++ * Return a pointer to the entry and the level of the mapping.
++ */
++pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
++			     unsigned int *level)
++{
++	bool nx, rw;
++
++	return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
++}
++
+ /*
+  * Lookup the page table entry for a virtual address. Return a pointer
+  * to the entry and the level of the mapping.
+@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
+ EXPORT_SYMBOL_GPL(lookup_address);
+ 
+ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+-				  unsigned int *level)
++				  unsigned int *level, bool *nx, bool *rw)
+ {
+-	if (cpa->pgd)
+-		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+-					       address, level);
++	pgd_t *pgd;
++
++	if (!cpa->pgd)
++		pgd = pgd_offset_k(address);
++	else
++		pgd = cpa->pgd + pgd_index(address);
+ 
+-	return lookup_address(address, level);
++	return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
+ }
+ 
+ /*
+@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ 	pgprot_t old_prot, new_prot, req_prot, chk_prot;
+ 	pte_t new_pte, *tmp;
+ 	enum pg_level level;
++	bool nx, rw;
+ 
+ 	/*
+ 	 * Check for races, another CPU might have split this page
+ 	 * up already:
+ 	 */
+-	tmp = _lookup_address_cpa(cpa, address, &level);
++	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (tmp != kpte)
+ 		return 1;
+ 
+@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
+ 				      psize, CPA_DETECT);
+ 
+-	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
++	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
++			      nx, rw);
+ 
+ 	/*
+ 	 * If there is a conflict, split the large page.
+@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ 	pte_t *pbase = (pte_t *)page_address(base);
+ 	unsigned int i, level;
+ 	pgprot_t ref_prot;
++	bool nx, rw;
+ 	pte_t *tmp;
+ 
+ 	spin_lock(&pgd_lock);
+@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ 	 * Check for races, another CPU might have split this page
+ 	 * up for us already:
+ 	 */
+-	tmp = _lookup_address_cpa(cpa, address, &level);
++	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (tmp != kpte) {
+ 		spin_unlock(&pgd_lock);
+ 		return 1;
+@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ 	int do_split, err;
+ 	unsigned int level;
+ 	pte_t *kpte, old_pte;
++	bool nx, rw;
+ 
+ 	address = __cpa_addr(cpa, cpa->curpage);
+ repeat:
+-	kpte = _lookup_address_cpa(cpa, address, &level);
++	kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ 	if (!kpte)
+ 		return __cpa_process_fault(cpa, address, primary);
+ 
+@@ -1619,7 +1658,8 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ 		new_prot = static_protections(new_prot, address, pfn, 1, 0,
+ 					      CPA_PROTECT);
+ 
+-		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
++		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
++				      nx, rw);
+ 
+ 		new_prot = pgprot_clear_protnone_bits(new_prot);
+ 
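
The thread through the set_memory.c hunks above is that the effective permission of a 4K page is not just its PTE: any non-leaf level of the walk can drop write or add no-exec, so verify_rwx() now receives the accumulated bits. A standalone sketch of that accumulation with hypothetical per-level flags (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct level { bool rw; bool nx; };

    int main(void)
    {
            /* Hypothetical pgd/p4d/pud/pmd flags for one walk. */
            struct level walk[] = {
                    { .rw = true,  .nx = false },   /* pgd */
                    { .rw = true,  .nx = false },   /* p4d */
                    { .rw = false, .nx = false },   /* pud: read-only here */
                    { .rw = true,  .nx = true  },   /* pmd: no-exec here */
            };
            bool rw = true, nx = false;

            for (unsigned i = 0; i < sizeof(walk) / sizeof(walk[0]); i++) {
                    rw &= walk[i].rw;       /* any level can drop write */
                    nx |= walk[i].nx;       /* any level can set no-exec */
            }
            /* Mirrors the patch: if (!rw || nx) there is no W+X exposure,
             * so verify_rwx() need not warn. */
            printf("effective rw=%d nx=%d -> W+X possible: %d\n",
                   rw, nx, rw && !nx);
            return 0;
    }
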
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index bc31863c5ee63..a18591f6e6d94 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -42,7 +42,8 @@ KCOV_INSTRUMENT := n
+ # make up the standalone purgatory.ro
+ 
+ PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
++PURGATORY_CFLAGS += -fpic -fvisibility=hidden
+ PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+ PURGATORY_CFLAGS += -fno-stack-protector
+ 
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index b029fb81ebeee..e7a44a7f617fb 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -746,6 +746,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ 		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+ 			continue;
+ 		}
++
++		/*
++		 * Do not perform relocations in .notes sections; any
++		 * values there are meant for pre-boot consumption (e.g.
++		 * startup_xen).
++		 */
++		if (sec_applies->shdr.sh_type == SHT_NOTE)
++			continue;
++
+ 		sh_symtab = sec_symtab->symtab;
+ 		sym_strtab = sec_symtab->link->strtab;
+ 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
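
The relocs.c change above encodes a simple predicate: only allocatable, non-note sections get relocations applied. The same test expressed against the generic ELF section header from <elf.h> (a sketch, not the tool's actual structures):

    #include <elf.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the walk_relocs() filter: skip sections that are not
     * loaded, and skip .notes, whose contents are consumed pre-boot
     * and must not be patched. */
    static bool should_relocate(const Elf64_Shdr *shdr)
    {
            if (!(shdr->sh_flags & SHF_ALLOC))
                    return false;
            if (shdr->sh_type == SHT_NOTE)
                    return false;
            return true;
    }

    int main(void)
    {
            Elf64_Shdr note = { .sh_type = SHT_NOTE, .sh_flags = SHF_ALLOC };

            printf("relocate .notes? %d\n", should_relocate(&note));
            return 0;
    }
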
+diff --git a/block/blk-core.c b/block/blk-core.c
+index b795ac177281a..6fcf8ed5fc1f1 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -987,10 +987,11 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
+ 	unsigned long stamp;
+ again:
+ 	stamp = READ_ONCE(part->bd_stamp);
+-	if (unlikely(time_after(now, stamp))) {
+-		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
+-			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
+-	}
++	if (unlikely(time_after(now, stamp)) &&
++	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
++	    (end || part_in_flight(part)))
++		__part_stat_add(part, io_ticks, now - stamp);
++
+ 	if (part->bd_partno) {
+ 		part = bdev_whole(part);
+ 		goto again;
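
The reworked update_io_ticks() above folds three conditions into one: the stamp is stale, this CPU wins the race to advance it, and there is actually I/O to account (a completing request, or requests still in flight). A user-space sketch of the same lockless pattern with C11 atomics (illustrative only; time_after() follows the kernel's wraparound-safe definition):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long bd_stamp;
    static unsigned long io_ticks;

    static bool time_after(unsigned long a, unsigned long b)
    {
            return (long)(b - a) < 0;
    }

    void update_io_ticks_sketch(unsigned long now, bool end, bool in_flight)
    {
            unsigned long stamp = atomic_load(&bd_stamp);

            /* Only the CPU that wins the compare-and-swap accounts the
             * elapsed ticks, and only while I/O is actually pending. */
            if (time_after(now, stamp) &&
                atomic_compare_exchange_strong(&bd_stamp, &stamp, now) &&
                (end || in_flight))
                    io_ticks += now - stamp;
    }
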
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 4e3483a16b757..ae61a9c2fc93c 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -779,6 +779,8 @@ static void blk_account_io_merge_request(struct request *req)
+ 	if (blk_do_io_stat(req)) {
+ 		part_stat_lock();
+ 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
++		part_stat_local_dec(req->part,
++				    in_flight[op_is_write(req_op(req))]);
+ 		part_stat_unlock();
+ 	}
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 32afb87efbd0e..5ac2087b05630 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -997,6 +997,8 @@ static inline void blk_account_io_done(struct request *req, u64 now)
+ 		update_io_ticks(req->part, jiffies, true);
+ 		part_stat_inc(req->part, ios[sgrp]);
+ 		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
++		part_stat_local_dec(req->part,
++				    in_flight[op_is_write(req_op(req))]);
+ 		part_stat_unlock();
+ 	}
+ }
+@@ -1019,6 +1021,8 @@ static inline void blk_account_io_start(struct request *req)
+ 
+ 		part_stat_lock();
+ 		update_io_ticks(req->part, jiffies, false);
++		part_stat_local_inc(req->part,
++				    in_flight[op_is_write(req_op(req))]);
+ 		part_stat_unlock();
+ 	}
+ }
+diff --git a/block/blk.h b/block/blk.h
+index d9f584984bc44..da5dbc6b13068 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -357,6 +357,7 @@ static inline bool blk_do_io_stat(struct request *rq)
+ }
+ 
+ void update_io_ticks(struct block_device *part, unsigned long now, bool end);
++unsigned int part_in_flight(struct block_device *part);
+ 
+ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
+ {
+diff --git a/block/fops.c b/block/fops.c
+index 679d9b752fe82..df2c68d3f198e 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -390,7 +390,7 @@ static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 
+ 	iomap->bdev = bdev;
+ 	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
+-	if (iomap->offset >= isize)
++	if (offset >= isize)
+ 		return -EIO;
+ 	iomap->type = IOMAP_MAPPED;
+ 	iomap->addr = iomap->offset;
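
The one-character fops.c fix above checks the caller's offset, not the aligned-down iomap offset, against the device size: since the aligned offset is always less than or equal to the caller's, the old check could pass for a read starting at or past EOF whenever the size is not a multiple of the logical block size. Hypothetical numbers (a sketch, not kernel code):

    #include <stdio.h>

    #define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

    int main(void)
    {
            long long isize  = 4000;        /* hypothetical, unaligned size */
            long long lbs    = 512;         /* logical block size */
            long long offset = 4000;        /* read starting exactly at EOF */
            long long aligned = ALIGN_DOWN(offset, lbs);    /* 3584 */

            /* The old check compared 'aligned' and misses this case;
             * the fixed check compares 'offset' and returns -EIO. */
            printf("old check fires: %d, fixed check fires: %d\n",
                   aligned >= isize, offset >= isize);
            return 0;
    }
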
+diff --git a/block/genhd.c b/block/genhd.c
+index 52a4521df067b..345d80ab9791b 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -118,7 +118,7 @@ static void part_stat_read_all(struct block_device *part,
+ 	}
+ }
+ 
+-static unsigned int part_in_flight(struct block_device *part)
++unsigned int part_in_flight(struct block_device *part)
+ {
+ 	unsigned int inflight = 0;
+ 	int cpu;
+diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
+index c03bc105e5753..152c85df92b20 100644
+--- a/block/partitions/cmdline.c
++++ b/block/partitions/cmdline.c
+@@ -70,8 +70,8 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+ 	}
+ 
+ 	if (*partdef == '(') {
+-		int length;
+-		char *next = strchr(++partdef, ')');
++		partdef++;
++		char *next = strsep(&partdef, ")");
+ 
+ 		if (!next) {
+ 			pr_warn("cmdline partition format is invalid.");
+@@ -79,11 +79,7 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
+ 			goto fail;
+ 		}
+ 
+-		length = min_t(int, next - partdef,
+-			       sizeof(new_subpart->name) - 1);
+-		strscpy(new_subpart->name, partdef, length);
+-
+-		partdef = ++next;
++		strscpy(new_subpart->name, next, sizeof(new_subpart->name));
+ 	} else
+ 		new_subpart->name[0] = '\0';
+ 
+@@ -117,14 +113,12 @@ static void free_subpart(struct cmdline_parts *parts)
+ 	}
+ }
+ 
+-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
++static int parse_parts(struct cmdline_parts **parts, char *bdevdef)
+ {
+ 	int ret = -EINVAL;
+ 	char *next;
+-	int length;
+ 	struct cmdline_subpart **next_subpart;
+ 	struct cmdline_parts *newparts;
+-	char buf[BDEVNAME_SIZE + 32 + 4];
+ 
+ 	*parts = NULL;
+ 
+@@ -132,28 +126,19 @@ static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+ 	if (!newparts)
+ 		return -ENOMEM;
+ 
+-	next = strchr(bdevdef, ':');
++	next = strsep(&bdevdef, ":");
+ 	if (!next) {
+ 		pr_warn("cmdline partition has no block device.");
+ 		goto fail;
+ 	}
+ 
+-	length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
+-	strscpy(newparts->name, bdevdef, length);
++	strscpy(newparts->name, next, sizeof(newparts->name));
+ 	newparts->nr_subparts = 0;
+ 
+ 	next_subpart = &newparts->subpart;
+ 
+-	while (next && *(++next)) {
+-		bdevdef = next;
+-		next = strchr(bdevdef, ',');
+-
+-		length = (!next) ? (sizeof(buf) - 1) :
+-			min_t(int, next - bdevdef, sizeof(buf) - 1);
+-
+-		strscpy(buf, bdevdef, length);
+-
+-		ret = parse_subpart(next_subpart, buf);
++	while ((next = strsep(&bdevdef, ","))) {
++		ret = parse_subpart(next_subpart, next);
+ 		if (ret)
+ 			goto fail;
+ 
+@@ -199,24 +184,17 @@ static int cmdline_parts_parse(struct cmdline_parts **parts,
+ 
+ 	*parts = NULL;
+ 
+-	next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
++	pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+ 	next_parts = parts;
+ 
+-	while (next && *pbuf) {
+-		next = strchr(pbuf, ';');
+-		if (next)
+-			*next = '\0';
+-
+-		ret = parse_parts(next_parts, pbuf);
++	while ((next = strsep(&pbuf, ";"))) {
++		ret = parse_parts(next_parts, next);
+ 		if (ret)
+ 			goto fail;
+ 
+-		if (next)
+-			pbuf = ++next;
+-
+ 		next_parts = &(*next_parts)->next_parts;
+ 	}
+ 
+@@ -250,7 +228,6 @@ static struct cmdline_parts *bdev_parts;
+ static int add_part(int slot, struct cmdline_subpart *subpart,
+ 		struct parsed_partitions *state)
+ {
+-	int label_min;
+ 	struct partition_meta_info *info;
+ 	char tmp[sizeof(info->volname) + 4];
+ 
+@@ -262,9 +239,7 @@ static int add_part(int slot, struct cmdline_subpart *subpart,
+ 
+ 	info = &state->parts[slot].info;
+ 
+-	label_min = min_t(int, sizeof(info->volname) - 1,
+-			  sizeof(subpart->name));
+-	strscpy(info->volname, subpart->name, label_min);
++	strscpy(info->volname, subpart->name, sizeof(info->volname));
+ 
+ 	snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ 	strlcat(state->pp_buf, tmp, PAGE_SIZE);
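
The cmdline partition parser above trades the strchr()/length-arithmetic/strscpy() dance for strsep(), which terminates each token in place and advances the cursor, so the intermediate bounce buffer and min_t() length math disappear. The shape of that loop in a standalone sketch (hypothetical input string):

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "part1,part2,part3";   /* hypothetical subparts */
            char *cursor = buf, *tok;

            /* strsep() NUL-terminates each token in place and moves the
             * cursor past the delimiter; no manual length math needed. */
            while ((tok = strsep(&cursor, ",")))
                    printf("subpart: %s\n", tok);
            return 0;
    }
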
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 59ec726b7c770..684767ab23e24 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ 	select MPILIB
+ 	select CRYPTO_HASH_INFO
+ 	select CRYPTO_AKCIPHER
++	select CRYPTO_SIG
+ 	select CRYPTO_HASH
+ 	help
+ 	  This option provides support for asymmetric public key type handling.
+@@ -85,5 +86,7 @@ config FIPS_SIGNATURE_SELFTEST
+ 	depends on ASYMMETRIC_KEY_TYPE
+ 	depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
+ 	depends on X509_CERTIFICATE_PARSER
++	depends on CRYPTO_RSA
++	depends on CRYPTO_SHA256
+ 
+ endif # ASYMMETRIC_KEY_TYPE
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index 736c2eb8c0f37..f677ad2177c2f 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
+ 	}
+ 	attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
+ 	buf[cnt++] = attr_ch;
+-	while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
++	while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
+ 		tmp_pos += 2;
+ 		tmpx++;
+ 		ch = get_char(vc, (u_short *)tmp_pos, &temp);
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 04e273167e92a..8e01792228d1e 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -325,6 +325,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
+ 
+ static const struct property_entry bsw_spi_properties[] = {
+ 	PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
++	PROPERTY_ENTRY_U32("num-cs", 2),
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
+index 30f3fc13c29d1..8d18af396de92 100644
+--- a/drivers/acpi/acpica/Makefile
++++ b/drivers/acpi/acpica/Makefile
+@@ -5,6 +5,7 @@
+ 
+ ccflags-y			:= -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
++CFLAGS_tbfind.o 		+= $(call cc-disable-warning, stringop-truncation)
+ 
+ # use acpi.o to put all files here into acpi.o modparam namespace
+ obj-y	+= acpi.o
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index d9fa730416f19..a87b10eef77df 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -316,9 +316,14 @@ static void acpi_bus_osc_negotiate_platform_control(void)
+ 		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
+ 	if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
+ 		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
++	if (IS_ENABLED(CONFIG_ACPI_THERMAL))
++		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
+ 
+ 	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+ 	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
++	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT;
++	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT;
++	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT;
+ 	if (IS_ENABLED(CONFIG_ACPI_PRMT))
+ 		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
+ 	if (IS_ENABLED(CONFIG_ACPI_FFH))
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index e45e64993c504..3b09fd39eeb4f 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -208,6 +208,11 @@ int __init srat_disabled(void)
+ 	return acpi_numa < 0;
+ }
+ 
++__weak int __init numa_fill_memblks(u64 start, u64 end)
++{
++	return NUMA_NO_MEMBLK;
++}
++
+ #if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+ /*
+  * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
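
The srat.c hunk above adds a __weak default for numa_fill_memblks() returning NUMA_NO_MEMBLK, so architectures that never provide a strong definition still link while x86's real implementation (now built unconditionally in numa.c) takes precedence. The weak-symbol pattern in a standalone sketch (GCC/Clang; types simplified from the kernel's u64):

    #include <stdio.h>

    #define NUMA_NO_MEMBLK  (-1)

    /* Weak default: used only if no other translation unit supplies a
     * strong numa_fill_memblks(); a strong definition linked in from
     * elsewhere silently wins at link time. */
    __attribute__((weak))
    int numa_fill_memblks(unsigned long long start, unsigned long long end)
    {
            (void)start;
            (void)end;
            return NUMA_NO_MEMBLK;
    }

    int main(void)
    {
            printf("fallback returns %d\n", numa_fill_memblks(0, 4096));
            return 0;
    }
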
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index ed33cf7192d21..eed63f95e89d0 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -2113,6 +2113,8 @@ static void __exit null_exit(void)
+ 
+ 	if (tag_set.ops)
+ 		blk_mq_free_tag_set(&tag_set);
++
++	mutex_destroy(&lock);
+ }
+ 
+ module_init(null_init);
+diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
+index 9658b33c824a7..18f34998a1204 100644
+--- a/drivers/bluetooth/btmrvl_main.c
++++ b/drivers/bluetooth/btmrvl_main.c
+@@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
+ 				((event->data[2] == MODULE_BROUGHT_UP) ||
+ 				(event->data[2] == MODULE_ALREADY_UP)) ?
+ 				"Bring-up succeed" : "Bring-up failed");
+-
+-			if (event->length > 3 && event->data[3])
+-				priv->btmrvl_dev.dev_type = HCI_AMP;
+-			else
+-				priv->btmrvl_dev.dev_type = HCI_PRIMARY;
+-
+-			BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
+ 		} else if (priv->btmrvl_dev.sendcmdflag &&
+ 				event->data[1] == MODULE_SHUTDOWN_REQ) {
+ 			BT_DBG("EVENT:%s", (event->data[2]) ?
+@@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
+ 	hdev->wakeup = btmrvl_wakeup;
+ 	SET_HCIDEV_DEV(hdev, &card->func->dev);
+ 
+-	hdev->dev_type = priv->btmrvl_dev.dev_type;
+-
+ 	ret = hci_register_dev(hdev);
+ 	if (ret < 0) {
+ 		BT_ERR("Can not register HCI device");
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 638074992c829..35fb26cbf2294 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -148,8 +148,10 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
+ 	}
+ 
+ 	build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
+-	if (!build_label)
++	if (!build_label) {
++		err = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	hci_set_fw_info(hdev, "%s", build_label);
+ 
+diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
+index 634cf8f5ed2db..0c91d7635ac39 100644
+--- a/drivers/bluetooth/btrsi.c
++++ b/drivers/bluetooth/btrsi.c
+@@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
+ 		hdev->bus = HCI_USB;
+ 
+ 	hci_set_drvdata(hdev, h_adapter);
+-	hdev->dev_type = HCI_PRIMARY;
+ 	hdev->open = rsi_hci_open;
+ 	hdev->close = rsi_hci_close;
+ 	hdev->flush = rsi_hci_flush;
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index f19d31ee37ea8..fdcfe9c50313e 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = {
+ 	/* Generic Bluetooth Type-B SDIO device */
+ 	{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
+ 
+-	/* Generic Bluetooth AMP controller */
+-	{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
+-
+ 	{ }	/* Terminating entry */
+ };
+ 
+@@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func,
+ 	hdev->bus = HCI_SDIO;
+ 	hci_set_drvdata(hdev, data);
+ 
+-	if (id->class == SDIO_CLASS_BT_AMP)
+-		hdev->dev_type = HCI_AMP;
+-	else
+-		hdev->dev_type = HCI_PRIMARY;
+-
+ 	data->hdev = hdev;
+ 
+ 	SET_HCIDEV_DEV(hdev, &func->dev);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 01b99471d1bbe..fb716849b60f3 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -4332,11 +4332,6 @@ static int btusb_probe(struct usb_interface *intf,
+ 	hdev->bus = HCI_USB;
+ 	hci_set_drvdata(hdev, data);
+ 
+-	if (id->driver_info & BTUSB_AMP)
+-		hdev->dev_type = HCI_AMP;
+-	else
+-		hdev->dev_type = HCI_PRIMARY;
+-
+ 	data->hdev = hdev;
+ 
+ 	SET_HCIDEV_DEV(hdev, &intf->dev);
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 9a7243d5db71f..0c2f15235b4cd 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -2361,7 +2361,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	bcm4377->hdev = hdev;
+ 
+ 	hdev->bus = HCI_PCI;
+-	hdev->dev_type = HCI_PRIMARY;
+ 	hdev->open = bcm4377_hci_open;
+ 	hdev->close = bcm4377_hci_close;
+ 	hdev->send = bcm4377_hci_send_frame;
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index a26367e9fb197..17a2f158a0dfa 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu)
+ 	if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
+ 		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ 
+-	if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+-		hdev->dev_type = HCI_AMP;
+-	else
+-		hdev->dev_type = HCI_PRIMARY;
+-
+ 	/* Only call open() for the protocol after hdev is fully initialized as
+ 	 * open() (or a timer/workqueue it starts) may attempt to reference it.
+ 	 */
+@@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
+ {
+ 	unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
+ 				    BIT(HCI_UART_RESET_ON_INIT) |
+-				    BIT(HCI_UART_CREATE_AMP) |
+ 				    BIT(HCI_UART_INIT_PENDING) |
+ 				    BIT(HCI_UART_EXT_CONFIG) |
+ 				    BIT(HCI_UART_VND_DETECT);
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index 85c0d9b68f5f7..89a22e9b3253a 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -366,11 +366,6 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
+ 	if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
+ 		set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ 
+-	if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
+-		hdev->dev_type = HCI_AMP;
+-	else
+-		hdev->dev_type = HCI_PRIMARY;
+-
+ 	if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ 		return 0;
+ 
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index 68c8c7e95d64d..00bf7ae82c5b7 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -37,7 +37,6 @@
+ 
+ #define HCI_UART_RAW_DEVICE	0
+ #define HCI_UART_RESET_ON_INIT	1
+-#define HCI_UART_CREATE_AMP	2
+ #define HCI_UART_INIT_PENDING	3
+ #define HCI_UART_EXT_CONFIG	4
+ #define HCI_UART_VND_DETECT	5
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 572d68d52965f..28750a40f0ed5 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -384,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ {
+ 	struct hci_dev *hdev;
+ 	struct sk_buff *skb;
+-	__u8 dev_type;
+ 
+ 	if (data->hdev)
+ 		return -EBADFD;
+ 
+-	/* bits 0-1 are dev_type (Primary or AMP) */
+-	dev_type = opcode & 0x03;
+-
+-	if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
+-		return -EINVAL;
+-
+ 	/* bits 2-5 are reserved (must be zero) */
+ 	if (opcode & 0x3c)
+ 		return -EINVAL;
+@@ -412,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 	data->hdev = hdev;
+ 
+ 	hdev->bus = HCI_VIRTUAL;
+-	hdev->dev_type = dev_type;
+ 	hci_set_drvdata(hdev, data);
+ 
+ 	hdev->open  = vhci_open_dev;
+@@ -634,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work)
+ 	struct vhci_data *data = container_of(work, struct vhci_data,
+ 					      open_timeout.work);
+ 
+-	vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY);
++	vhci_create_device(data, 0x00);
+ }
+ 
+ static int vhci_open(struct inode *inode, struct file *file)
+diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
+index 2ac70b560c46d..18208e152a367 100644
+--- a/drivers/bluetooth/virtio_bt.c
++++ b/drivers/bluetooth/virtio_bt.c
+@@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev)
+ 
+ 	switch (type) {
+ 	case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
+-	case VIRTIO_BT_CONFIG_TYPE_AMP:
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev)
+ 	vbt->hdev = hdev;
+ 
+ 	hdev->bus = HCI_VIRTIO;
+-	hdev->dev_type = type;
+ 	hci_set_drvdata(hdev, vbt);
+ 
+ 	hdev->open  = virtbt_open;
+diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
+index 379bc245c5202..0e903d6e22e30 100644
+--- a/drivers/char/hw_random/stm32-rng.c
++++ b/drivers/char/hw_random/stm32-rng.c
+@@ -220,7 +220,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 				if (err && i > RNG_NB_RECOVER_TRIES) {
+ 					dev_err((struct device *)priv->rng.priv,
+ 						"Couldn't recover from seed error\n");
+-					return -ENOTRECOVERABLE;
++					retval = -ENOTRECOVERABLE;
++					goto exit_rpm;
+ 				}
+ 
+ 				continue;
+@@ -238,7 +239,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 			if (err && i > RNG_NB_RECOVER_TRIES) {
+ 				dev_err((struct device *)priv->rng.priv,
+ 					"Couldn't recover from seed error");
+-				return -ENOTRECOVERABLE;
++				retval = -ENOTRECOVERABLE;
++				goto exit_rpm;
+ 			}
+ 
+ 			continue;
+@@ -250,6 +252,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 		max -= sizeof(u32);
+ 	}
+ 
++exit_rpm:
+ 	pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
+ 	pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
+ 
+@@ -353,13 +356,15 @@ static int stm32_rng_init(struct hwrng *rng)
+ 	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
+ 						reg & RNG_SR_DRDY,
+ 						10, 100000);
+-	if (err | (reg & ~RNG_SR_DRDY)) {
++	if (err || (reg & ~RNG_SR_DRDY)) {
+ 		clk_disable_unprepare(priv->clk);
+ 		dev_err((struct device *)priv->rng.priv,
+ 			"%s: timeout:%x SR: %x!\n", __func__, err, reg);
+ 		return -EINVAL;
+ 	}
+ 
++	clk_disable_unprepare(priv->clk);
++
+ 	return 0;
+ }
+ 
+@@ -384,6 +389,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
+ static int __maybe_unused stm32_rng_suspend(struct device *dev)
+ {
+ 	struct stm32_rng_private *priv = dev_get_drvdata(dev);
++	int err;
++
++	err = clk_prepare_enable(priv->clk);
++	if (err)
++		return err;
+ 
+ 	if (priv->data->has_cond_reset) {
+ 		priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
+@@ -465,6 +475,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
+ 		writel_relaxed(reg, priv->base + RNG_CR);
+ 	}
+ 
++	clk_disable_unprepare(priv->clk);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index 53e21ac302e6d..4c3a5e4eb77ac 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -25,10 +25,12 @@
+ #define RS9_REG_SS_AMP_0V7			0x1
+ #define RS9_REG_SS_AMP_0V8			0x2
+ #define RS9_REG_SS_AMP_0V9			0x3
++#define RS9_REG_SS_AMP_DEFAULT			RS9_REG_SS_AMP_0V8
+ #define RS9_REG_SS_AMP_MASK			0x3
+ #define RS9_REG_SS_SSC_100			0
+ #define RS9_REG_SS_SSC_M025			(1 << 3)
+ #define RS9_REG_SS_SSC_M050			(3 << 3)
++#define RS9_REG_SS_SSC_DEFAULT			RS9_REG_SS_SSC_100
+ #define RS9_REG_SS_SSC_MASK			(3 << 3)
+ #define RS9_REG_SS_SSC_LOCK			BIT(5)
+ #define RS9_REG_SR				0x2
+@@ -205,8 +207,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9)
+ 	int ret;
+ 
+ 	/* Set defaults */
+-	rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
+-	rs9->pll_ssc = RS9_REG_SS_SSC_100;
++	rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT;
++	rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT;
+ 
+ 	/* Output clock amplitude */
+ 	ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
+@@ -247,13 +249,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
+ 	int i;
+ 
+ 	/* If amplitude is non-default, update it. */
+-	if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
++	if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) {
+ 		regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
+ 				   rs9->pll_amplitude);
+ 	}
+ 
+ 	/* If SSC is non-default, update it. */
+-	if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
++	if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) {
+ 		regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
+ 				   rs9->pll_ssc);
+ 	}
+diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
+index 01a2ef8f594ef..3f62ec7507336 100644
+--- a/drivers/clk/mediatek/clk-mt8365-mm.c
++++ b/drivers/clk/mediatek/clk-mt8365-mm.c
+@@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = {
+ 	GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
+ 	GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
+ 	GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
+-	GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
++	GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20),
+ 	GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
+ 	GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
+ 	GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
+diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
+index 3a2b3f90be25d..094ec8a26d668 100644
+--- a/drivers/clk/mediatek/clk-pllfh.c
++++ b/drivers/clk/mediatek/clk-pllfh.c
+@@ -68,7 +68,7 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
+ 
+ 	node = of_find_compatible_node(NULL, NULL, compatible_node);
+ 	if (!node) {
+-		pr_err("cannot find \"%s\"\n", compatible_node);
++		pr_warn("cannot find \"%s\"\n", compatible_node);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 8ab08e7b5b6c6..1bb51a0588726 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -474,6 +474,7 @@ config SC_CAMCC_7280
+ 
+ config SC_CAMCC_8280XP
+ 	tristate "SC8280XP Camera Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SC_GCC_8280XP
+ 	help
+ 	  Support for the camera clock controller on Qualcomm Technologies, Inc
+@@ -1094,6 +1095,7 @@ config SM_GPUCC_8550
+ 
+ config SM_GPUCC_8650
+ 	tristate "SM8650 Graphics Clock Controller"
++	depends on ARM64 || COMPILE_TEST
+ 	select SM_GCC_8650
+ 	help
+ 	  Support for the graphics clock controller on SM8650 devices.
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index 678b805f13d45..5e3da5558f4e0 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -73,8 +73,9 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ 	},
+ };
+ 
++/* 1.008 GHz configuration */
+ static const struct alpha_pll_config ipq5018_pll_config = {
+-	.l = 0x32,
++	.l = 0x2a,
+ 	.config_ctl_val = 0x4001075b,
+ 	.config_ctl_hi_val = 0x304,
+ 	.main_output_mask = BIT(0),
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 8a412ef47e163..734a73f322b3a 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -213,7 +213,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ 		[PLL_OFF_USER_CTL] = 0x18,
+ 		[PLL_OFF_USER_CTL_U] = 0x1c,
+ 		[PLL_OFF_CONFIG_CTL] = 0x20,
+-		[PLL_OFF_CONFIG_CTL_U] = 0xff,
+ 		[PLL_OFF_TEST_CTL] = 0x30,
+ 		[PLL_OFF_TEST_CTL_U] = 0x34,
+ 		[PLL_OFF_STATUS] = 0x28,
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 839435362010e..e4b7464c4d0e9 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
+-	F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ 	.cmd_rcgr = 0x10f8,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_0,
+-	.freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "disp_cc_mdss_dp_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index 92e9c4e7b13dc..49bb4f58c3915 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+-	F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ 	.cmd_rcgr = 0x819c,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx0_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx1_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx2_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx3_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
+index 3672c73ac11c6..38ecea805503d 100644
+--- a/drivers/clk/qcom/dispcc-sm8550.c
++++ b/drivers/clk/qcom/dispcc-sm8550.c
+@@ -345,26 +345,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+-	F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ 	.cmd_rcgr = 0x8170,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_7,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx0_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_7,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -418,13 +409,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx1_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -478,13 +468,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx2_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -538,13 +527,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx3_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
+index 9539db0d91145..3eb64bcad487e 100644
+--- a/drivers/clk/qcom/dispcc-sm8650.c
++++ b/drivers/clk/qcom/dispcc-sm8650.c
+@@ -343,26 +343,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ 	},
+ };
+ 
+-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+-	F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+-	{ }
+-};
+-
+ static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ 	.cmd_rcgr = 0x8170,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_7,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(const struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx0_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_7,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -416,13 +407,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(const struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx1_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -476,13 +466,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(const struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx2_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+@@ -536,13 +525,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+ 	.parent_map = disp_cc_parent_map_3,
+-	.freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ 	.clkr.hw.init = &(const struct clk_init_data) {
+ 		.name = "disp_cc_mdss_dptx3_link_clk_src",
+ 		.parent_data = disp_cc_parent_data_3,
+ 		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_byte2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index 1180e48c687ac..275fb3b71ede4 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2535,6 +2535,8 @@ static struct clk_branch vmem_ahb_clk = {
+ 
+ static struct gdsc video_top_gdsc = {
+ 	.gdscr = 0x1024,
++	.cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
++	.cxc_count = 3,
+ 	.pd = {
+ 		.name = "video_top",
+ 	},
+@@ -2543,20 +2545,26 @@ static struct gdsc video_top_gdsc = {
+ 
+ static struct gdsc video_subcore0_gdsc = {
+ 	.gdscr = 0x1040,
++	.cxcs = (unsigned int []){ 0x1048 },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "video_subcore0",
+ 	},
+ 	.parent = &video_top_gdsc.pd,
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = HW_CTRL,
+ };
+ 
+ static struct gdsc video_subcore1_gdsc = {
+ 	.gdscr = 0x1044,
++	.cxcs = (unsigned int []){ 0x104c },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "video_subcore1",
+ 	},
+ 	.parent = &video_top_gdsc.pd,
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.flags = HW_CTRL,
+ };
+ 
+ static struct gdsc mdss_gdsc = {
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index 4c2872f45387f..ff3f85e906fe1 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ 	DEF_MOD("avb3",		214,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("avb4",		215,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("avb5",		216,	R8A779A0_CLK_S3D2),
+-	DEF_MOD("canfd0",	328,	R8A779A0_CLK_CANFD),
++	DEF_MOD("canfd0",	328,	R8A779A0_CLK_S3D2),
+ 	DEF_MOD("csi40",	331,	R8A779A0_CLK_CSI0),
+ 	DEF_MOD("csi41",	400,	R8A779A0_CLK_CSI0),
+ 	DEF_MOD("csi42",	401,	R8A779A0_CLK_CSI0),
+diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
+index 33532673d25d7..26b71547fdbe3 100644
+--- a/drivers/clk/renesas/r9a07g043-cpg.c
++++ b/drivers/clk/renesas/r9a07g043-cpg.c
+@@ -280,6 +280,10 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
+ 				0x5a8, 1),
+ 	DEF_MOD("tsu_pclk",	R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
+ 				0x5ac, 0),
++#ifdef CONFIG_RISCV
++	DEF_MOD("nceplic_aclk",	R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
++				0x608, 0),
++#endif
+ };
+ 
+ static struct rzg2l_reset r9a07g043_resets[] = {
+@@ -338,6 +342,10 @@ static struct rzg2l_reset r9a07g043_resets[] = {
+ 	DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
+ 	DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
+ 	DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
++#ifdef CONFIG_RISCV
++	DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0),
++#endif
++
+ };
+ 
+ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+@@ -347,6 +355,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
+ #endif
+ #ifdef CONFIG_RISCV
+ 	MOD_CLK_BASE + R9A07G043_IAX45_CLK,
++	MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK,
+ #endif
+ 	MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
+ };
+diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
+index e9c06eb93e666..f04bacacab2cb 100644
+--- a/drivers/clk/samsung/clk-exynosautov9.c
++++ b/drivers/clk/samsung/clk-exynosautov9.c
+@@ -352,13 +352,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ 	/* CMU_TOP_PURECLKCOMP */
+ 	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+-	PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
++	PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ 	    PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ };
+ 
+diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
+index d065e343a85dd..bd3c1b02715b5 100644
+--- a/drivers/clk/samsung/clk-gs101.c
++++ b/drivers/clk/samsung/clk-gs101.c
+@@ -2763,33 +2763,33 @@ static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ 	MUX(CLK_MOUT_PERIC0_USI0_UART_USER,
+ 	    "mout_peric0_usi0_uart_user", mout_peric0_usi0_uart_user_p,
+ 	    PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI14_USI_USER,
+-	    "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI1_USI_USER,
+-	    "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI2_USI_USER,
+-	    "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI3_USI_USER,
+-	    "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI4_USI_USER,
+-	    "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI5_USI_USER,
+-	    "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI6_USI_USER,
+-	    "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI7_USI_USER,
+-	    "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC0_USI8_USI_USER,
+-	    "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI14_USI_USER,
++	     "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI1_USI_USER,
++	     "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI2_USI_USER,
++	     "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI3_USI_USER,
++	     "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI4_USI_USER,
++	     "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI5_USI_USER,
++	     "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI6_USI_USER,
++	     "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI7_USI_USER,
++	     "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC0_USI8_USI_USER,
++	     "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
+ };
+ 
+ static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+@@ -2798,33 +2798,42 @@ static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ 	DIV(CLK_DOUT_PERIC0_USI0_UART,
+ 	    "dout_peric0_usi0_uart", "mout_peric0_usi0_uart_user",
+ 	    CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI14_USI,
+-	    "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI1_USI,
+-	    "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI2_USI,
+-	    "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI3_USI,
+-	    "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI4_USI,
+-	    "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI5_USI,
+-	    "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI6_USI,
+-	    "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI7_USI,
+-	    "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC0_USI8_USI,
+-	    "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4),
++	DIV_F(CLK_DOUT_PERIC0_USI14_USI,
++	      "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI1_USI,
++	      "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI2_USI,
++	      "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI3_USI,
++	      "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI4_USI,
++	      "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI5_USI,
++	      "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI6_USI,
++	      "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI7_USI,
++	      "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC0_USI8_USI,
++	      "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+@@ -2857,11 +2866,11 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0,
+ 	     "gout_peric0_peric0_top0_ipclk_0", "dout_peric0_usi1_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1,
+ 	     "gout_peric0_peric0_top0_ipclk_1", "dout_peric0_usi2_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10,
+ 	     "gout_peric0_peric0_top0_ipclk_10", "dout_peric0_i3c",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+@@ -2889,27 +2898,27 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2,
+ 	     "gout_peric0_peric0_top0_ipclk_2", "dout_peric0_usi3_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3,
+ 	     "gout_peric0_peric0_top0_ipclk_3", "dout_peric0_usi4_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4,
+ 	     "gout_peric0_peric0_top0_ipclk_4", "dout_peric0_usi5_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5,
+ 	     "gout_peric0_peric0_top0_ipclk_5", "dout_peric0_usi6_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6,
+ 	     "gout_peric0_peric0_top0_ipclk_6", "dout_peric0_usi7_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7,
+ 	     "gout_peric0_peric0_top0_ipclk_7", "dout_peric0_usi8_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8,
+ 	     "gout_peric0_peric0_top0_ipclk_8", "dout_peric0_i3c",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+@@ -2990,7 +2999,7 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2,
+ 	     "gout_peric0_peric0_top1_ipclk_2", "dout_peric0_usi14_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	/* Disabling this clock makes the system hang. Mark the clock as critical. */
+ 	GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0,
+ 	     "gout_peric0_peric0_top1_pclk_0", "mout_peric0_bus_user",
+@@ -3230,47 +3239,53 @@ static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ 	MUX(CLK_MOUT_PERIC1_I3C_USER,
+ 	    "mout_peric1_i3c_user", mout_peric1_nonbususer_p,
+ 	    PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI0_USI_USER,
+-	    "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI10_USI_USER,
+-	    "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI11_USI_USER,
+-	    "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI12_USI_USER,
+-	    "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI13_USI_USER,
+-	    "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
+-	MUX(CLK_MOUT_PERIC1_USI9_USI_USER,
+-	    "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
+-	    PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI0_USI_USER,
++	     "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI10_USI_USER,
++	     "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI11_USI_USER,
++	     "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI12_USI_USER,
++	     "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI13_USI_USER,
++	     "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
++	nMUX(CLK_MOUT_PERIC1_USI9_USI_USER,
++	     "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
++	     PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
+ };
+ 
+ static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ 	DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", "mout_peric1_i3c_user",
+ 	    CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI0_USI,
+-	    "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI10_USI,
+-	    "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI11_USI,
+-	    "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI12_USI,
+-	    "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI13_USI,
+-	    "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4),
+-	DIV(CLK_DOUT_PERIC1_USI9_USI,
+-	    "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
+-	    CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4),
++	DIV_F(CLK_DOUT_PERIC1_USI0_USI,
++	      "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC1_USI10_USI,
++	      "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC1_USI11_USI,
++	      "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC1_USI12_USI,
++	      "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC1_USI13_USI,
++	      "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
++	DIV_F(CLK_DOUT_PERIC1_USI9_USI,
++	      "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
++	      CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4,
++	      CLK_SET_RATE_PARENT, 0),
+ };
+ 
+ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+@@ -3305,27 +3320,27 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1,
+ 	     "gout_peric1_peric1_top0_ipclk_1", "dout_peric1_usi0_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2,
+ 	     "gout_peric1_peric1_top0_ipclk_2", "dout_peric1_usi9_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3,
+ 	     "gout_peric1_peric1_top0_ipclk_3", "dout_peric1_usi10_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4,
+ 	     "gout_peric1_peric1_top0_ipclk_4", "dout_peric1_usi11_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5,
+ 	     "gout_peric1_peric1_top0_ipclk_5", "dout_peric1_usi12_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6,
+ 	     "gout_peric1_peric1_top0_ipclk_6", "dout_peric1_usi13_usi",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
+-	     21, 0, 0),
++	     21, CLK_SET_RATE_PARENT, 0),
+ 	GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8,
+ 	     "gout_peric1_peric1_top0_ipclk_8", "dout_peric1_i3c",
+ 	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
+diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
+index a763309e6f129..556167350bff5 100644
+--- a/drivers/clk/samsung/clk.h
++++ b/drivers/clk/samsung/clk.h
+@@ -133,7 +133,7 @@ struct samsung_mux_clock {
+ 		.name		= cname,			\
+ 		.parent_names	= pnames,			\
+ 		.num_parents	= ARRAY_SIZE(pnames),		\
+-		.flags		= (f) | CLK_SET_RATE_NO_REPARENT, \
++		.flags		= f,				\
+ 		.offset		= o,				\
+ 		.shift		= s,				\
+ 		.width		= w,				\
+@@ -141,9 +141,16 @@ struct samsung_mux_clock {
+ 	}
+ 
+ #define MUX(_id, cname, pnames, o, s, w)			\
+-	__MUX(_id, cname, pnames, o, s, w, 0, 0)
++	__MUX(_id, cname, pnames, o, s, w, CLK_SET_RATE_NO_REPARENT, 0)
+ 
+ #define MUX_F(_id, cname, pnames, o, s, w, f, mf)		\
++	__MUX(_id, cname, pnames, o, s, w, (f) | CLK_SET_RATE_NO_REPARENT, mf)
++
++/* Used by MUX clocks where reparenting on clock rate change is allowed. */
++#define nMUX(_id, cname, pnames, o, s, w)			\
++	__MUX(_id, cname, pnames, o, s, w, 0, 0)
++
++#define nMUX_F(_id, cname, pnames, o, s, w, f, mf)		\
+ 	__MUX(_id, cname, pnames, o, s, w, f, mf)
+ 
+ /**
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 1a1857b0a6f48..ea8438550b490 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -481,9 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+ {
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	struct private_data *priv;
++
+ 	if (!policy)
+ 		return 0;
+-	struct private_data *priv = policy->driver_data;
++
++	priv = policy->driver_data;
+ 
+ 	cpufreq_cpu_put(policy);
+ 
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 64420d9cfd1ed..15f1d41920a33 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-	struct cppc_cpudata *cpu_data = policy->driver_data;
++	struct cppc_cpudata *cpu_data;
+ 	u64 delivered_perf;
+ 	int ret;
+ 
++	if (!policy)
++		return -ENODEV;
++
++	cpu_data = policy->driver_data;
++
+ 	cpufreq_cpu_put(policy);
+ 
+ 	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
+@@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
+ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
+ {
+ 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-	struct cppc_cpudata *cpu_data = policy->driver_data;
++	struct cppc_cpudata *cpu_data;
+ 	u64 desired_perf;
+ 	int ret;
+ 
++	if (!policy)
++		return -ENODEV;
++
++	cpu_data = policy->driver_data;
++
+ 	cpufreq_cpu_put(policy);
+ 
+ 	ret = cppc_get_desired_perf(cpu, &desired_perf);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 66e10a19d76ab..fd9c3ed21f49c 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1679,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+ 	 */
+ 	if (cpufreq_driver->offline) {
+ 		cpufreq_driver->offline(policy);
+-	} else if (cpufreq_driver->exit) {
+-		cpufreq_driver->exit(policy);
+-		policy->freq_table = NULL;
++		return;
+ 	}
++
++	if (cpufreq_driver->exit)
++		cpufreq_driver->exit(policy);
++
++	policy->freq_table = NULL;
+ }
+ 
+ static int cpufreq_offline(unsigned int cpu)
+@@ -1740,7 +1743,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+ 	}
+ 
+ 	/* We did light-weight exit earlier, do full tear down now */
+-	if (cpufreq_driver->offline)
++	if (cpufreq_driver->offline && cpufreq_driver->exit)
+ 		cpufreq_driver->exit(policy);
+ 
+ 	up_write(&policy->rwsem);
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index 07989bb8c220a..3fdc64b5a65e7 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
+ 	if (hash_iv_len) {
+ 		packet_log("  Hash IV Length %u bytes\n", hash_iv_len);
+ 		packet_dump("  hash IV: ", ptr, hash_iv_len);
+-		ptr += ciph_key_len;
++		ptr += hash_iv_len;
+ 	}
+ 
+ 	if (ciph_iv_len) {
+diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
+index 4733012377601..ff6ceb4feee04 100644
+--- a/drivers/crypto/ccp/sp-platform.c
++++ b/drivers/crypto/ccp/sp-platform.c
+@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
+ 	},
+ };
+ 
+-#ifdef CONFIG_ACPI
+ static const struct acpi_device_id sp_acpi_match[] = {
+ 	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+-#endif
+ 
+-#ifdef CONFIG_OF
+ static const struct of_device_id sp_of_match[] = {
+ 	{ .compatible = "amd,ccp-seattle-v1a",
+ 	  .data = (const void *)&dev_vdata[0] },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(of, sp_of_match);
+-#endif
+ 
+ static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_OF
+ 	const struct of_device_id *match;
+ 
+ 	match = of_match_node(sp_of_match, pdev->dev.of_node);
+ 	if (match && match->data)
+ 		return (struct sp_dev_vdata *)match->data;
+-#endif
++
+ 	return NULL;
+ }
+ 
+ static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+ {
+-#ifdef CONFIG_ACPI
+ 	const struct acpi_device_id *match;
+ 
+ 	match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ 	if (match && match->driver_data)
+ 		return (struct sp_dev_vdata *)match->driver_data;
+-#endif
++
+ 	return NULL;
+ }
+ 
+@@ -212,12 +206,8 @@ static int sp_platform_resume(struct platform_device *pdev)
+ static struct platform_driver sp_platform_driver = {
+ 	.driver = {
+ 		.name = "ccp",
+-#ifdef CONFIG_ACPI
+ 		.acpi_match_table = sp_acpi_match,
+-#endif
+-#ifdef CONFIG_OF
+ 		.of_match_table = sp_of_match,
+-#endif
+ 	},
+ 	.probe = sp_platform_probe,
+ 	.remove_new = sp_platform_remove,
+diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+index 1102c47f8293d..1d0ef47a9f250 100644
+--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+@@ -296,7 +296,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+ {
+ 	if (adf_gen4_init_thd2arb_map(accel_dev))
+ 		dev_warn(&GET_DEV(accel_dev),
+-			 "Generate of the thread to arbiter map failed");
++			 "Failed to generate thread to arbiter mapping");
+ 
+ 	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+ }
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+index 927506cf271d0..fb34fd7f03952 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+@@ -208,7 +208,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+ {
+ 	if (adf_gen4_init_thd2arb_map(accel_dev))
+ 		dev_warn(&GET_DEV(accel_dev),
+-			 "Generate of the thread to arbiter map failed");
++			 "Failed to generate thread to arbiter mapping");
+ 
+ 	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+ }
+diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+index 9762f2bf7727f..d26564cebdec4 100644
+--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
++++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+@@ -197,7 +197,9 @@ module_pci_driver(adf_driver);
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Intel");
+ MODULE_FIRMWARE(ADF_4XXX_FW);
++MODULE_FIRMWARE(ADF_402XX_FW);
+ MODULE_FIRMWARE(ADF_4XXX_MMP);
++MODULE_FIRMWARE(ADF_402XX_MMP);
+ MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+ MODULE_VERSION(ADF_DRV_VERSION);
+ MODULE_SOFTDEP("pre: crypto-intel_qat");
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
+index 7fc7a77f6aed9..c7ad8cf07863b 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
+@@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+ 	tl_data->sl_exec_counters = sl_exec_counters;
+ 	tl_data->rp_counters = rp_counters;
+ 	tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
++	tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE;
+ }
+ EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
+index d4f2db3c53d8c..e10f0024f4b85 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
+@@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev)
+ 	}
+ 
+ 	if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
+-		dev_info(&GET_DEV(accel_dev), "not supported\n");
++		dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n");
+ 		ret = -EOPNOTSUPP;
+ 		goto ret_free;
+ 	}
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+index 2ff714d11bd2f..74fb0c2ed2412 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+@@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data)
+ 	return 0;
+ }
+ 
++static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
++				      u8 max_slices_per_type)
++{
++	u8 *sl_counter = (u8 *)slice_count;
++	int i;
++
++	for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
++		if (sl_counter[i] > max_slices_per_type)
++			return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
+ {
+ 	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+@@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
+ 		return ret;
+ 	}
+ 
++	ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
++	if (ret) {
++		dev_err(dev, "invalid value returned by FW\n");
++		adf_send_admin_tl_stop(accel_dev);
++		return ret;
++	}
++
+ 	telemetry->hbuffs = state;
+ 	atomic_set(&telemetry->state, state);
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+index 9be81cd3b8860..e54a406cc1b4a 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
++++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+@@ -40,6 +40,7 @@ struct adf_tl_hw_data {
+ 	u8 num_dev_counters;
+ 	u8 num_rp_counters;
+ 	u8 max_rp;
++	u8 max_sl_cnt;
+ };
+ 
+ struct adf_telemetry {
+diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+index 79b4e74804f6d..6bfc59e677478 100644
+--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
++++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+@@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
+ 		return -ENOMEM;
+ 	cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
+ 				  DMA_BIDIRECTIONAL);
++	if (dma_mapping_error(&pdev->dev, cptr_dma)) {
++		kfree(hctx);
++		return -ENOMEM;
++	}
+ 
+ 	cn10k_cpt_hw_ctx_set(hctx, 1);
+ 	er_ctx->hw_ctx = hctx;
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 797e1ebff2997..f24b67c64d5ec 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -192,7 +192,7 @@ static u64 dev_dax_size(struct dev_dax *dev_dax)
+ 	u64 size = 0;
+ 	int i;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
++	lockdep_assert_held(&dax_dev_rwsem);
+ 
+ 	for (i = 0; i < dev_dax->nr_range; i++)
+ 		size += range_len(&dev_dax->ranges[i].range);
+@@ -302,7 +302,7 @@ static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
+ 	resource_size_t size = resource_size(&dax_region->res);
+ 	struct resource *res;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
++	lockdep_assert_held(&dax_region_rwsem);
+ 
+ 	for_each_dax_region_resource(dax_region, res)
+ 		size -= resource_size(res);
+@@ -447,7 +447,7 @@ static void trim_dev_dax_range(struct dev_dax *dev_dax)
+ 	struct range *range = &dev_dax->ranges[i].range;
+ 	struct dax_region *dax_region = dev_dax->region;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
++	lockdep_assert_held_write(&dax_region_rwsem);
+ 	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
+ 		(unsigned long long)range->start,
+ 		(unsigned long long)range->end);
+@@ -465,26 +465,17 @@ static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+ 		trim_dev_dax_range(dev_dax);
+ }
+ 
+-static void __unregister_dev_dax(void *dev)
++static void unregister_dev_dax(void *dev)
+ {
+ 	struct dev_dax *dev_dax = to_dev_dax(dev);
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
++	down_write(&dax_region_rwsem);
+ 	kill_dev_dax(dev_dax);
+ 	device_del(dev);
+ 	free_dev_dax_ranges(dev_dax);
+ 	put_device(dev);
+-}
+-
+-static void unregister_dev_dax(void *dev)
+-{
+-	if (rwsem_is_locked(&dax_region_rwsem))
+-		return __unregister_dev_dax(dev);
+-
+-	if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
+-		return;
+-	__unregister_dev_dax(dev);
+ 	up_write(&dax_region_rwsem);
+ }
+ 
+@@ -507,7 +498,7 @@ static int __free_dev_dax_id(struct dev_dax *dev_dax)
+ 	struct dax_region *dax_region;
+ 	int rc = dev_dax->id;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
++	lockdep_assert_held_write(&dax_dev_rwsem);
+ 
+ 	if (!dev_dax->dyn_id || dev_dax->id < 0)
+ 		return -1;
+@@ -560,15 +551,10 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
+ 	if (!victim)
+ 		return -ENXIO;
+ 
+-	rc = down_write_killable(&dax_region_rwsem);
+-	if (rc)
+-		return rc;
+-	rc = down_write_killable(&dax_dev_rwsem);
+-	if (rc) {
+-		up_write(&dax_region_rwsem);
+-		return rc;
+-	}
++	device_lock(dev);
++	device_lock(victim);
+ 	dev_dax = to_dev_dax(victim);
++	down_write(&dax_dev_rwsem);
+ 	if (victim->driver || dev_dax_size(dev_dax))
+ 		rc = -EBUSY;
+ 	else {
+@@ -589,11 +575,12 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
+ 			rc = -EBUSY;
+ 	}
+ 	up_write(&dax_dev_rwsem);
++	device_unlock(victim);
+ 
+ 	/* won the race to invalidate the device, clean it up */
+ 	if (do_del)
+ 		devm_release_action(dev, unregister_dev_dax, victim);
+-	up_write(&dax_region_rwsem);
++	device_unlock(dev);
+ 	put_device(victim);
+ 
+ 	return rc;
+@@ -705,7 +692,7 @@ static void dax_mapping_release(struct device *dev)
+ 	put_device(parent);
+ }
+ 
+-static void __unregister_dax_mapping(void *data)
++static void unregister_dax_mapping(void *data)
+ {
+ 	struct device *dev = data;
+ 	struct dax_mapping *mapping = to_dax_mapping(dev);
+@@ -713,25 +700,12 @@ static void __unregister_dax_mapping(void *data)
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+-
+ 	dev_dax->ranges[mapping->range_id].mapping = NULL;
+ 	mapping->range_id = -1;
+ 
+ 	device_unregister(dev);
+ }
+ 
+-static void unregister_dax_mapping(void *data)
+-{
+-	if (rwsem_is_locked(&dax_region_rwsem))
+-		return __unregister_dax_mapping(data);
+-
+-	if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
+-		return;
+-	__unregister_dax_mapping(data);
+-	up_write(&dax_region_rwsem);
+-}
+-
+ static struct dev_dax_range *get_dax_range(struct device *dev)
+ {
+ 	struct dax_mapping *mapping = to_dax_mapping(dev);
+@@ -830,7 +804,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+ 	struct device *dev;
+ 	int rc;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
++	lockdep_assert_held_write(&dax_region_rwsem);
+ 
+ 	if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
+ 				"region disabled\n"))
+@@ -876,7 +850,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
+ 	struct resource *alloc;
+ 	int i, rc;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
++	lockdep_assert_held_write(&dax_region_rwsem);
+ 
+ 	/* handle the seed alloc special case */
+ 	if (!size) {
+@@ -935,7 +909,7 @@ static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, r
+ 	struct device *dev = &dev_dax->dev;
+ 	int rc;
+ 
+-	WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
++	lockdep_assert_held_write(&dax_region_rwsem);
+ 
+ 	if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
+ 		return -EINVAL;
+@@ -963,11 +937,11 @@ static ssize_t size_show(struct device *dev,
+ 	unsigned long long size;
+ 	int rc;
+ 
+-	rc = down_write_killable(&dax_dev_rwsem);
++	rc = down_read_interruptible(&dax_dev_rwsem);
+ 	if (rc)
+ 		return rc;
+ 	size = dev_dax_size(dev_dax);
+-	up_write(&dax_dev_rwsem);
++	up_read(&dax_dev_rwsem);
+ 
+ 	return sysfs_emit(buf, "%llu\n", size);
+ }
+@@ -1566,12 +1540,8 @@ static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
+ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
+ {
+ 	struct dev_dax *dev_dax;
+-	int rc;
+-
+-	rc = down_write_killable(&dax_region_rwsem);
+-	if (rc)
+-		return ERR_PTR(rc);
+ 
++	down_write(&dax_region_rwsem);
+ 	dev_dax = __devm_create_dev_dax(data);
+ 	up_write(&dax_region_rwsem);
+ 
+diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
+index d0f6693ca1426..32019dc33cca7 100644
+--- a/drivers/dpll/dpll_core.c
++++ b/drivers/dpll/dpll_core.c
+@@ -449,7 +449,7 @@ static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+ 				   sizeof(*src->freq_supported);
+ 		dst->freq_supported = kmemdup(src->freq_supported,
+ 					      freq_size, GFP_KERNEL);
+-		if (!src->freq_supported)
++		if (!dst->freq_supported)
+ 			return -ENOMEM;
+ 	}
+ 	if (src->board_label) {
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 9c5b6f8bd8bd5..27996b7924c82 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -648,7 +648,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 	memset(&res, 0, sizeof(res));
+ 	res.mce  = mce;
+ 	res.addr = mce->addr & MCI_ADDR_PHYSADDR;
+-	if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
++	if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
+ 		pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
+ 		return NOTIFY_DONE;
+ 	}
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 90283f160a228..29c24578ad2bf 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -1624,7 +1624,7 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
+  * We do not yet support re-entrant calls via the qseecom interface. To prevent
+  + any potential issues with this, only allow validated machines for now.
+  */
+-static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
++static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ 	{ .compatible = "lenovo,thinkpad-x13s", },
+ 	{ }
+ };
+@@ -1713,7 +1713,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
+  */
+ bool qcom_scm_is_available(void)
+ {
+-	return !!__scm;
++	return !!READ_ONCE(__scm);
+ }
+ EXPORT_SYMBOL_GPL(qcom_scm_is_available);
+ 
+@@ -1794,10 +1794,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	if (!scm)
+ 		return -ENOMEM;
+ 
++	scm->dev = &pdev->dev;
+ 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ 	if (ret < 0)
+ 		return ret;
+ 
++	init_completion(&scm->waitq_comp);
+ 	mutex_init(&scm->scm_bw_lock);
+ 
+ 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
+@@ -1829,10 +1831,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	__scm = scm;
+-	__scm->dev = &pdev->dev;
+-
+-	init_completion(&__scm->waitq_comp);
++	/* Let all above stores be available after this */
++	smp_store_release(&__scm, scm);
+ 
+ 	irq = platform_get_irq_optional(pdev, 0);
+ 	if (irq < 0) {
+diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
+index 322aada20f742..ac34876a97f8b 100644
+--- a/drivers/firmware/raspberrypi.c
++++ b/drivers/firmware/raspberrypi.c
+@@ -9,6 +9,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/kref.h>
+ #include <linux/mailbox_client.h>
++#include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
+@@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ 	if (size & 3)
+ 		return -EINVAL;
+ 
+-	buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+-				 GFP_ATOMIC);
++	buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
++				 &bus_addr, GFP_ATOMIC);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
++	dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
+index d31788b43abcc..2605706145434 100644
+--- a/drivers/gpio/gpio-npcm-sgpio.c
++++ b/drivers/gpio/gpio-npcm-sgpio.c
+@@ -434,7 +434,7 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
+ 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ 	struct irq_chip *ic = irq_desc_get_chip(desc);
+ 	struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+-	unsigned int i, j, girq;
++	unsigned int i, j;
+ 	unsigned long reg;
+ 
+ 	chained_irq_enter(ic, desc);
+@@ -443,11 +443,9 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
+ 		const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i];
+ 
+ 		reg = ioread8(bank_reg(gpio, bank, EVENT_STS));
+-		for_each_set_bit(j, &reg, 8) {
+-			girq = irq_find_mapping(gc->irq.domain,
+-						i * 8 + gpio->nout_sgpio + j);
+-			generic_handle_domain_irq(gc->irq.domain, girq);
+-		}
++		for_each_set_bit(j, &reg, 8)
++			generic_handle_domain_irq(gc->irq.domain,
++						  i * 8 + gpio->nout_sgpio + j);
+ 	}
+ 
+ 	chained_irq_exit(ic, desc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index b7e57aa273619..b0d192c6e63eb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+ 				i += increment) {
+ 			if (j == hw_points - 1)
+ 				break;
++			if (i >= TRANSFER_FUNC_POINTS) {
++				DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
++					     i, TRANSFER_FUNC_POINTS);
++				return false;
++			}
+ 			rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ 			rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ 			rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index 9067ca78f8511..a14f99f4f14a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -999,8 +999,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
+ 		if (pipe_ctx->plane_res.dpp)
+ 			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
+ 
+-		if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
+-			pipe_ctx->plane_res.mpcc_inst >= 0)
++		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
+ 			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
+ 
+ 		if (pipe_ctx->stream_res.dsc)
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index 626709bec6f5f..2577f0cef8fcd 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
+ 		__drm_atomic_helper_connector_destroy_state(connector->state);
+ 
+ 	kfree(connector->state);
+-	__drm_atomic_helper_connector_reset(connector, &mw_state->base);
++	connector->state = NULL;
++
++	if (mw_state)
++		__drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ }
+ 
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index 9d96d28d6fe8e..59e9ad3499696 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2066,10 +2066,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
+ 	};
+ 
+ 	host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+-	if (!host) {
+-		DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+@@ -2471,15 +2469,22 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
+ 	mutex_unlock(&ctx->aux_lock);
+ }
+ 
++static void
++anx7625_audio_update_connector_status(struct anx7625_data *ctx,
++				      enum drm_connector_status status);
++
+ static enum drm_connector_status
+ anx7625_bridge_detect(struct drm_bridge *bridge)
+ {
+ 	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ 	struct device *dev = ctx->dev;
++	enum drm_connector_status status;
+ 
+ 	DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+ 
+-	return anx7625_sink_detect(ctx);
++	status = anx7625_sink_detect(ctx);
++	anx7625_audio_update_connector_status(ctx, status);
++	return status;
+ }
+ 
+ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index e226acc5c15e1..8a91ef0ae0651 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2059,6 +2059,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ 	mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+ 
+ 	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
++	if (!mhdp_state->current_mode)
++		return;
++
+ 	drm_mode_set_name(mhdp_state->current_mode);
+ 
+ 	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
+diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
+index 82d23e4df09eb..ff3284b6b1a37 100644
+--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
++++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
+@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
+ 
+ 	host = of_find_mipi_dsi_host_by_node(host_node);
+ 	of_node_put(host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = mipi_dsi_device_register_full(host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 4b2ae27f0a57f..1a9defa15663c 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -494,10 +494,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ 						 };
+ 
+ 	host = of_find_mipi_dsi_host_by_node(lt->host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index a9c7e2b07ea10..b99fe87ec7389 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -761,10 +761,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ 	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dsi_node);
+-	if (!host) {
+-		dev_err(lt9611->dev, "failed to find dsi host\n");
+-		return ERR_PTR(-EPROBE_DEFER);
+-	}
++	if (!host)
++		return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+index f4f593ad8f795..ab702471f3ab1 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+@@ -266,10 +266,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
+ 	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dsi_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return ERR_PTR(-EPROBE_DEFER);
+-	}
++	if (!host)
++		return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
+index 90a89d70d8328..fea4f00a20f83 100644
+--- a/drivers/gpu/drm/bridge/tc358775.c
++++ b/drivers/gpu/drm/bridge/tc358775.c
+@@ -610,10 +610,8 @@ static int tc_attach_host(struct tc_data *tc)
+ 						};
+ 
+ 	host = of_find_mipi_dsi_host_by_node(tc->host_node);
+-	if (!host) {
+-		dev_err(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ 	if (IS_ERR(dsi)) {
+diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+index ca3348109bcd2..6b559e0713012 100644
+--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
++++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
+@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ 		.channel = 0,
+ 		.node = NULL,
+ 	};
++	int ret;
+ 
+ 	host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
+-	if (!host) {
+-		DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+-		return -EPROBE_DEFER;
+-	}
++	if (!host)
++		return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
+ 
+ 	dlpc->dsi = mipi_dsi_device_register_full(host, &info);
+ 	if (IS_ERR(dlpc->dsi)) {
+@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
+ 	dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
+ 	dlpc->dsi->lanes = dlpc->dsi_lanes;
+ 
+-	return devm_mipi_dsi_attach(dev, dlpc->dsi);
++	ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
++	if (ret)
++		DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++
++	return ret;
+ }
+ 
+ static int dlpc3433_probe(struct i2c_client *client)
+@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
+ 	drm_bridge_add(&dlpc->bridge);
+ 
+ 	ret = dlpc_host_attach(dlpc);
+-	if (ret) {
+-		DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
++	if (ret)
+ 		goto err_remove_bridge;
+-	}
+ 
+ 	return 0;
+ 
+diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
+index 0857773e5c5fd..8bc63912fddb4 100644
+--- a/drivers/gpu/drm/ci/test.yml
++++ b/drivers/gpu/drm/ci/test.yml
+@@ -252,11 +252,11 @@ i915:cml:
+ i915:tgl:
+   extends:
+     - .i915
+-  parallel: 8
++  parallel: 5
+   variables:
+-    DEVICE_TYPE: asus-cx9400-volteer
++    DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
+     GPU_VERSION: tgl
+-    RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
++    RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
+ 
+ .amdgpu:
+   extends:
+diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
+index 521a71c61b164..17ed94885dc3b 100644
+--- a/drivers/gpu/drm/drm_bridge.c
++++ b/drivers/gpu/drm/drm_bridge.c
+@@ -687,11 +687,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ 				 */
+ 				list_for_each_entry_from(next, &encoder->bridge_chain,
+ 							 chain_node) {
+-					if (next->pre_enable_prev_first) {
++					if (!next->pre_enable_prev_first) {
+ 						next = list_prev_entry(next, chain_node);
+ 						limit = next;
+ 						break;
+ 					}
++
++					if (list_is_last(&next->chain_node,
++							 &encoder->bridge_chain)) {
++						limit = next;
++						break;
++					}
+ 				}
+ 
+ 				/* Call these bridges in reverse order */
+@@ -774,7 +780,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ 					/* Found first bridge that does NOT
+ 					 * request prev to be enabled first
+ 					 */
+-					limit = list_prev_entry(next, chain_node);
++					limit = next;
+ 					break;
+ 				}
+ 			}
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 923c4423151c1..9064cdeb1319b 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -7324,7 +7324,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
+ static bool displayid_is_tiled_block(const struct displayid_iter *iter,
+ 				     const struct displayid_block *block)
+ {
+-	return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
++	return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
+ 		block->tag == DATA_BLOCK_TILED_DISPLAY) ||
+ 		(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ 		 block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index ef6e416522f8a..9874ff6d47181 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -654,7 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
+  *
+  * Return: 0 on success or a negative error code on failure.
+  */
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+ {
+ 	/* Note: Needs updating for non-default PPS or algorithm */
+ 	u8 tx[2] = { enable << 0, 0 };
+@@ -679,8 +679,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
+  *
+  * Return: 0 on success or a negative error code on failure.
+  */
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+-				       const struct drm_dsc_picture_parameter_set *pps)
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++				   const struct drm_dsc_picture_parameter_set *pps)
+ {
+ 	struct mipi_dsi_msg msg = {
+ 		.channel = dsi->channel,
+diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
+index b7fef3c797e6c..4f99b4af871c0 100644
+--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
++++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
+@@ -46,7 +46,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
+ 	if (!mips_data)
+ 		return -ENOMEM;
+ 
+-	for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
++	for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
+ 		mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ 		if (!mips_data->pt_pages[page_nr]) {
+ 			err = -ENOMEM;
+@@ -102,7 +102,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
+ 	int page_nr;
+ 
+ 	vunmap(mips_data->pt);
+-	for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
++	for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ 		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ 			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index a04499c4f9ca2..29207b2756c14 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -1009,10 +1009,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 
+ 	mtk_crtc->mmsys_dev = priv->mmsys_dev;
+ 	mtk_crtc->ddp_comp_nr = path_len;
+-	mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
+-						mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
+-						sizeof(*mtk_crtc->ddp_comp),
+-						GFP_KERNEL);
++	mtk_crtc->ddp_comp = devm_kcalloc(dev,
++					  mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
++					  sizeof(*mtk_crtc->ddp_comp),
++					  GFP_KERNEL);
+ 	if (!mtk_crtc->ddp_comp)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 4f2e3feabc0f8..1bf229615b018 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -38,6 +38,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
+ 
+ 	size = round_up(size, PAGE_SIZE);
+ 
++	if (size == 0)
++		return ERR_PTR(-EINVAL);
++
+ 	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
+ 	if (!mtk_gem_obj)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index 2a82119eb58ed..2a942dc6a6dc2 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ 				 FREQ_1000_1001(params[i].pixel_freq));
+ 		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ 				 i, params[i].phy_freq,
+-				 FREQ_1000_1001(params[i].phy_freq/10)*10);
++				 FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ 		/* Match strict frequency */
+ 		if (phy_freq == params[i].phy_freq &&
+ 		    vclk_freq == params[i].vclk_freq)
+ 			return MODE_OK;
+ 		/* Match 1000/1001 variant */
+-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
++		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
+ 		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ 			return MODE_OK;
+ 	}
+@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+ 
+ 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
+ 		if ((phy_freq == params[freq].phy_freq ||
+-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
++		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ 		    (vclk_freq == params[freq].vclk_freq ||
+ 		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ 			if (vclk_freq != params[freq].vclk_freq)
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index adbd5a367395c..707489776e913 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -38,6 +38,7 @@ struct dp_aux_private {
+ 	bool no_send_stop;
+ 	bool initted;
+ 	bool is_edp;
++	bool enable_xfers;
+ 	u32 offset;
+ 	u32 segment;
+ 
+@@ -304,6 +305,17 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 		goto exit;
+ 	}
+ 
++	/*
++	 * If we're using DP and an external display isn't connected then the
++	 * transfer won't succeed. Return right away. If we don't do this we
++	 * can end up with long timeouts if someone tries to access the DP AUX
++	 * character device when no DP device is connected.
++	 */
++	if (!aux->is_edp && !aux->enable_xfers) {
++		ret = -ENXIO;
++		goto exit;
++	}
++
+ 	/*
+ 	 * For eDP it's important to give a reasonably long wait here for HPD
+ 	 * to be asserted. This is because the panel driver may have _just_
+@@ -313,7 +325,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 	 * avoid ever doing the extra long wait for DP.
+ 	 */
+ 	if (aux->is_edp) {
+-		ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
++		ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog,
++								500000);
+ 		if (ret) {
+ 			DRM_DEBUG_DP("Panel not ready for aux transactions\n");
+ 			goto exit;
+@@ -436,6 +449,14 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
+ 	return IRQ_HANDLED;
+ }
+ 
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
++{
++	struct dp_aux_private *aux;
++
++	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
++	aux->enable_xfers = enabled;
++}
++
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+ {
+ 	struct dp_aux_private *aux;
+@@ -513,7 +534,7 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+ 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ 
+ 	pm_runtime_get_sync(aux->dev);
+-	ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
++	ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ 	pm_runtime_put_sync(aux->dev);
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
+index f47d591c1f54e..4f65e892a8076 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.h
++++ b/drivers/gpu/drm/msm/dp/dp_aux.h
+@@ -12,6 +12,7 @@
+ int dp_aux_register(struct drm_dp_aux *dp_aux);
+ void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
++void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
+ void dp_aux_init(struct drm_dp_aux *dp_aux);
+ void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+ void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
+index 3e7c84cdef472..628c8181ddd48 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
+@@ -263,17 +263,18 @@ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+ 	dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ }
+ 
+-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
++int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
++					      unsigned long wait_us)
+ {
+ 	u32 state;
+ 	struct dp_catalog_private *catalog = container_of(dp_catalog,
+ 				struct dp_catalog_private, dp_catalog);
+ 
+-	/* poll for hpd connected status every 2ms and timeout after 500ms */
++	/* poll for hpd connected status every 2ms and timeout after wait_us */
+ 	return readl_poll_timeout(catalog->io.aux.base +
+ 				REG_DP_DP_HPD_INT_STATUS,
+ 				state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+-				2000, 500000);
++				min(wait_us, 2000), wait_us);
+ }
+ 
+ static void dump_regs(void __iomem *base, int len)
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
+index 75ec290127c77..72a85810607e8 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
+@@ -87,7 +87,8 @@ int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
+ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
+ void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
+ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
+-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
++int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
++					      unsigned long wait_us);
+ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+ 
+ /* DP Controller APIs */
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index c4dda1faef677..112c7e54fc7a6 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -1052,14 +1052,14 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
++	if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(ctrl->drm_dev,
+ 				"max. voltage swing level reached %d\n",
+ 				voltage_swing_level);
+ 		max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+ 	}
+ 
+-	if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
++	if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(ctrl->drm_dev,
+ 				"max. pre-emphasis level reached %d\n",
+ 				pre_emphasis_level);
+@@ -1150,7 +1150,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+ 		}
+ 
+ 		if (ctrl->link->phy_params.v_level >=
+-			DP_TRAIN_VOLTAGE_SWING_MAX) {
++			DP_TRAIN_LEVEL_MAX) {
+ 			DRM_ERROR_RATELIMITED("max v_level reached\n");
+ 			return -EAGAIN;
+ 		}
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index ffbfde9225898..36a0ef1cdc1b9 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -555,6 +555,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+ 	int ret;
+ 	struct platform_device *pdev = dp->dp_display.pdev;
+ 
++	dp_aux_enable_xfers(dp->aux, true);
++
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state =  dp->hpd_state;
+@@ -620,6 +622,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	u32 state;
+ 	struct platform_device *pdev = dp->dp_display.pdev;
+ 
++	dp_aux_enable_xfers(dp->aux, false);
++
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state = dp->hpd_state;
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
+index 49dfac1fd1ef2..ea911d9244be7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.c
++++ b/drivers/gpu/drm/msm/dp/dp_link.c
+@@ -1109,6 +1109,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ {
+ 	int i;
++	u8 max_p_level;
+ 	int v_max = 0, p_max = 0;
+ 	struct dp_link_private *link;
+ 
+@@ -1140,30 +1141,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+ 	 * Adjust the voltage swing and pre-emphasis level combination to within
+ 	 * the allowable range.
+ 	 */
+-	if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
++	if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested vSwingLevel=%d, change to %d\n",
+ 			dp_link->phy_params.v_level,
+-			DP_TRAIN_VOLTAGE_SWING_MAX);
+-		dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
++			DP_TRAIN_LEVEL_MAX);
++		dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
+ 	}
+ 
+-	if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
++	if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested preEmphasisLevel=%d, change to %d\n",
+ 			dp_link->phy_params.p_level,
+-			DP_TRAIN_PRE_EMPHASIS_MAX);
+-		dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
++			DP_TRAIN_LEVEL_MAX);
++		dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
+ 	}
+ 
+-	if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+-		&& (dp_link->phy_params.v_level ==
+-			DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
++	max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
++	if (dp_link->phy_params.p_level > max_p_level) {
+ 		drm_dbg_dp(link->drm_dev,
+ 			"Requested preEmphasisLevel=%d, change to %d\n",
+ 			dp_link->phy_params.p_level,
+-			DP_TRAIN_PRE_EMPHASIS_LVL_1);
+-		dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
++			max_p_level);
++		dp_link->phy_params.p_level = max_p_level;
+ 	}
+ 
+ 	drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
+diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
+index 83da170bc56bf..42aed9c90b732 100644
+--- a/drivers/gpu/drm/msm/dp/dp_link.h
++++ b/drivers/gpu/drm/msm/dp/dp_link.h
+@@ -19,19 +19,7 @@ struct dp_link_info {
+ 	unsigned long capabilities;
+ };
+ 
+-enum dp_link_voltage_level {
+-	DP_TRAIN_VOLTAGE_SWING_LVL_0	= 0,
+-	DP_TRAIN_VOLTAGE_SWING_LVL_1	= 1,
+-	DP_TRAIN_VOLTAGE_SWING_LVL_2	= 2,
+-	DP_TRAIN_VOLTAGE_SWING_MAX	= DP_TRAIN_VOLTAGE_SWING_LVL_2,
+-};
+-
+-enum dp_link_preemaphasis_level {
+-	DP_TRAIN_PRE_EMPHASIS_LVL_0	= 0,
+-	DP_TRAIN_PRE_EMPHASIS_LVL_1	= 1,
+-	DP_TRAIN_PRE_EMPHASIS_LVL_2	= 2,
+-	DP_TRAIN_PRE_EMPHASIS_MAX	= DP_TRAIN_PRE_EMPHASIS_LVL_2,
+-};
++#define DP_TRAIN_LEVEL_MAX	3
+ 
+ struct dp_link_test_video {
+ 	u32 test_video_pattern;
+diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+index ea10bf81582e9..0f895b8a99d62 100644
+--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
++++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
+@@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (pm_runtime_suspended(dev))
++		return 0;
++
+ 	return lcdif_rpm_suspend(dev);
+ }
+ 
+@@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
+ {
+ 	struct drm_device *drm = dev_get_drvdata(dev);
+ 
+-	lcdif_rpm_resume(dev);
++	if (!pm_runtime_suspended(dev))
++		lcdif_rpm_resume(dev);
+ 
+ 	return drm_mode_config_helper_resume(drm);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+index 6a0a4d3b8902d..027867c2a8c5b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+@@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+ 	ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ 	if (ret) {
+ 		nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+-		return PTR_ERR(ctrl);
++		return ret;
+ 	}
+ 
+ 	memcpy(data, ctrl->data, size);
+diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
+index b715301ec79f6..6c49270cb290a 100644
+--- a/drivers/gpu/drm/omapdrm/Kconfig
++++ b/drivers/gpu/drm/omapdrm/Kconfig
+@@ -4,7 +4,7 @@ config DRM_OMAP
+ 	depends on DRM && OF
+ 	depends on ARCH_OMAP2PLUS
+ 	select DRM_KMS_HELPER
+-	select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
++	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ 	select VIDEOMODE_HELPERS
+ 	select HDMI
+ 	default n
+diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
+index 6b08b137af1ad..523be34682caf 100644
+--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
++++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
+@@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
+ 	omap_gem_roll(bo, fbi->var.yoffset * npages);
+ }
+ 
++FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
++				   drm_fb_helper_damage_range,
++				   drm_fb_helper_damage_area)
++
+ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ 		struct fb_info *fbi)
+ {
+@@ -78,11 +82,9 @@ static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ 
+ static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+-	struct drm_fb_helper *helper = info->par;
+-	struct drm_framebuffer *fb = helper->fb;
+-	struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
++	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ 
+-	return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
++	return fb_deferred_io_mmap(info, vma);
+ }
+ 
+ static void omap_fbdev_fb_destroy(struct fb_info *info)
+@@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
+ 
+ 	DBG();
+ 
++	fb_deferred_io_cleanup(info);
+ 	drm_fb_helper_fini(helper);
+ 
+ 	omap_gem_unpin(bo);
+@@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
+ 	kfree(fbdev);
+ }
+ 
++/*
++ * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
++ * because we use write-combine.
++ */
+ static const struct fb_ops omap_fb_ops = {
+ 	.owner = THIS_MODULE,
+-	__FB_DEFAULT_DMAMEM_OPS_RDWR,
++	__FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
+ 	.fb_check_var	= drm_fb_helper_check_var,
+ 	.fb_set_par	= drm_fb_helper_set_par,
+ 	.fb_setcmap	= drm_fb_helper_setcmap,
+ 	.fb_blank	= drm_fb_helper_blank,
+ 	.fb_pan_display = omap_fbdev_pan_display,
+-	__FB_DEFAULT_DMAMEM_OPS_DRAW,
++	__FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
+ 	.fb_ioctl	= drm_fb_helper_ioctl,
+ 	.fb_mmap	= omap_fbdev_fb_mmap,
+ 	.fb_destroy	= omap_fbdev_fb_destroy,
+@@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
+ 	fbi->fix.smem_start = dma_addr;
+ 	fbi->fix.smem_len = bo->size;
+ 
++	/* deferred I/O */
++	helper->fbdefio.delay = HZ / 20;
++	helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
++
++	fbi->fbdefio = &helper->fbdefio;
++	ret = fb_deferred_io_init(fbi);
++	if (ret)
++		goto fail;
++
+ 	/* if we have DMM, then we can use it for scrolling by just
+ 	 * shuffling pages around in DMM rather than doing sw blit.
+ 	 */
+@@ -238,8 +254,20 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
+ 	return ret;
+ }
+ 
++static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
++{
++	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
++		return 0;
++
++	if (helper->fb->funcs->dirty)
++		return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
++
++	return 0;
++}
++
+ static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ 	.fb_probe = omap_fbdev_create,
++	.fb_dirty = omap_fbdev_dirty,
+ };
+ 
+ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index d58f90bc48fba..745f3e48f02ac 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1865,6 +1865,13 @@ static const struct panel_delay delay_200_500_e50 = {
+ 	.enable = 50,
+ };
+ 
++static const struct panel_delay delay_200_500_e50_p2e200 = {
++	.hpd_absent = 200,
++	.unprepare = 500,
++	.enable = 50,
++	.prepare_to_enable = 200,
++};
++
+ static const struct panel_delay delay_200_500_e80 = {
+ 	.hpd_absent = 200,
+ 	.unprepare = 500,
+@@ -2034,7 +2041,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ 
+-	EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
++	EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
+ 
+ 	EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
+ 	EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
+diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+index 9d87cc1a357e3..1a26205701b5e 100644
+--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
++++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+@@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
+ 	mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
+ 	mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
+ 	mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
+-	mipi_dsi_dcs_write_seq(dsi, 0x11);
+-	mipi_dsi_dcs_write_seq(dsi, 0x29);
+ 
+ 	ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
+ 	if (ret < 0) {
+@@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = {
+ static const struct ltk050h3146w_desc ltk050h3148w_data = {
+ 	.mode = &ltk050h3148w_mode,
+ 	.init = ltk050h3148w_init_sequence,
+-	.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
++	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
++		      MIPI_DSI_MODE_VIDEO_BURST,
+ };
+ 
+ static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index 648ce92014265..028fdac293f77 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -556,10 +556,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 		}
+ 		dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ 		of_node_put(dsi_r);
+-		if (!dsi_r_host) {
+-			dev_err(dev, "Cannot get secondary DSI host\n");
+-			return -EPROBE_DEFER;
+-		}
++		if (!dsi_r_host)
++			return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
+ 
+ 		nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
+ 		if (!nt->dsi[1]) {
+diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+index 76c2a8f6718c8..9c336c71562b9 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
++++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+@@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
+ 		if (hpd_asserted < 0)
+ 			ret = hpd_asserted;
+ 
+-		if (ret)
++		if (ret) {
+ 			dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
+-
+-		return ret;
+-	}
+-
+-	if (p->aux->wait_hpd_asserted) {
++			goto error;
++		}
++	} else if (p->aux->wait_hpd_asserted) {
+ 		ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
+ 
+-		if (ret)
++		if (ret) {
+ 			dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
+-
+-		return ret;
++			goto error;
++		}
+ 	}
+ 
+ 	/*
+@@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
+ 	 * right times.
+ 	 */
+ 	return 0;
++
++error:
++	drm_dp_dpcd_set_powered(p->aux, false);
++	regulator_disable(p->supply);
++
++	return ret;
+ }
+ 
+ static int atana33xc20_disable(struct drm_panel *panel)
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 20e3df1c59d48..e8fe5a69454d0 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2591,6 +2591,9 @@ static const struct panel_desc innolux_g121x1_l03 = {
+ 		.unprepare = 200,
+ 		.disable = 400,
+ 	},
++	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
++	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
++	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+ 
+ static const struct display_timing innolux_g156hce_l01_timings = {
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index fdd768bbd487c..62ebbdb16253d 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -706,6 +706,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ 	const struct drm_format_info *info;
+ 	u16 hor_scl_mode, ver_scl_mode;
+ 	u16 hscl_filter_mode, vscl_filter_mode;
++	uint16_t cbcr_src_w = src_w;
++	uint16_t cbcr_src_h = src_h;
+ 	u8 gt2 = 0;
+ 	u8 gt4 = 0;
+ 	u32 val;
+@@ -763,27 +765,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
+ 	vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
+ 
+ 	if (info->is_yuv) {
+-		src_w /= info->hsub;
+-		src_h /= info->vsub;
++		cbcr_src_w /= info->hsub;
++		cbcr_src_h /= info->vsub;
+ 
+ 		gt4 = 0;
+ 		gt2 = 0;
+ 
+-		if (src_h >= (4 * dst_h)) {
++		if (cbcr_src_h >= (4 * dst_h)) {
+ 			gt4 = 1;
+-			src_h >>= 2;
+-		} else if (src_h >= (2 * dst_h)) {
++			cbcr_src_h >>= 2;
++		} else if (cbcr_src_h >= (2 * dst_h)) {
+ 			gt2 = 1;
+-			src_h >>= 1;
++			cbcr_src_h >>= 1;
+ 		}
+ 
+-		hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+-		ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
++		hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
++		ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+ 
+-		val = vop2_scale_factor(src_w, dst_w);
++		val = vop2_scale_factor(cbcr_src_w, dst_w);
+ 		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
+ 
+-		val = vop2_scale_factor(src_h, dst_h);
++		val = vop2_scale_factor(cbcr_src_h, dst_h);
+ 		vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
+ 
+ 		vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index d8751ea203032..5f8d51b293705 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -2740,6 +2740,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
+ 		index = 1;
+ 
+ 	addr = of_get_address(dev->of_node, index, NULL, NULL);
++	if (!addr)
++		return -EINVAL;
+ 
+ 	vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
+ 	vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index 5b24d5f63701a..c2f9fc1f20a24 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -227,6 +227,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
+ 	struct amd_mp2_sensor_info info;
+ 	int i, status;
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		amd_sfh_clear_intr(mp2);
++		return;
++	}
++
+ 	for (i = 0; i < cl_data->num_hid_devices; i++) {
+ 		if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
+ 			info.sensor_idx = cl_data->sensor_idx[i];
+@@ -252,6 +257,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
+ 	struct amdtp_cl_data *cl_data = mp2->cl_data;
+ 	int i, status;
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		amd_sfh_clear_intr(mp2);
++		return;
++	}
++
+ 	for (i = 0; i < cl_data->num_hid_devices; i++) {
+ 		if (cl_data->sensor_idx[i] != HPD_IDX &&
+ 		    cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 56bd4f02f3191..4b8232360cc46 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -173,6 +173,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	/* request and enable interrupt */
+ 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
++	if (ret < 0) {
++		dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
++		return ret;
++	}
++
+ 	if (!pdev->msi_enabled && !pdev->msix_enabled)
+ 		irq_flag = IRQF_SHARED;
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 1e2cd7c8716e8..64ace0b968f07 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port,
+ 		rcu_read_lock();
+ 		ndev = rcu_dereference(sgid_attr->ndev);
+ 		if (!net_eq(dev_net(ndev), dev_addr->net) ||
+-		    ndev->ifindex != bound_if_index)
++		    ndev->ifindex != bound_if_index) {
++			rdma_put_gid_attr(sgid_attr);
+ 			sgid_attr = ERR_PTR(-ENODEV);
++		}
+ 		rcu_read_unlock();
+ 		goto out;
+ 	}
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 439d0c7c5d0ca..04258676d0726 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ 	hwq_attr.stride = sizeof(struct sq_sge);
+ 	hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ 	hwq_attr.aux_stride = psn_sz;
+-	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
++	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
++				    : 0;
+ 	/* Update msn tbl size */
+ 	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+ 		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 7250d0643b5c5..68e22f368d43a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 		return ret;
+ 	}
+ 
+-	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
++	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
+ 		goto err_put;
+@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 	return 0;
+ 
+ err_xa:
+-	xa_erase(&cq_table->array, hr_cq->cqn);
++	xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ err_put:
+ 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+ 
+@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
+ 			hr_cq->cqn);
+ 
+-	xa_erase(&cq_table->array, hr_cq->cqn);
++	xa_erase_irq(&cq_table->array, hr_cq->cqn);
+ 
+ 	/* Wait for any in-flight interrupt handling to complete */
+ 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ 	struct ib_event event;
+ 	struct ib_cq *ibcq;
+ 
+-	hr_cq = xa_load(&hr_dev->cq_table.array,
+-			cqn & (hr_dev->caps.num_cqs - 1));
+-	if (!hr_cq) {
+-		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+-		return;
+-	}
+-
+ 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+ 		return;
+ 	}
+ 
+-	refcount_inc(&hr_cq->refcount);
++	xa_lock(&hr_dev->cq_table.array);
++	hr_cq = xa_load(&hr_dev->cq_table.array,
++			cqn & (hr_dev->caps.num_cqs - 1));
++	if (hr_cq)
++		refcount_inc(&hr_cq->refcount);
++	xa_unlock(&hr_dev->cq_table.array);
++	if (!hr_cq) {
++		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
++		return;
++	}
+ 
+ 	ibcq = &hr_cq->ib_cq;
+ 	if (ibcq->event_handler) {
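
The hns_roce_cq_event() change above closes a use-after-free window: the
lookup and the refcount increment now happen under the same xarray lock.
A userspace analog of that lookup-then-get-under-one-lock pattern, with a
pthread mutex standing in for xa_lock() (all names here are illustrative,
not kernel APIs):

#include <pthread.h>
#include <stdio.h>

struct obj {
	int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[16];		/* toy stand-in for the cq xarray */

static struct obj *lookup_and_get(unsigned int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = table[id & 15];
	if (o)
		o->refcount++;	/* taken while the entry cannot vanish */
	pthread_mutex_unlock(&table_lock);
	return o;
}

int main(void)
{
	static struct obj cq = { .refcount = 1 };

	table[3] = &cq;
	if (lookup_and_get(3))
		printf("refcount now %d\n", cq.refcount);	/* 2 */
	return 0;
}
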
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index c3cbd0a494bfd..0b47c6d68804f 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -100,6 +100,9 @@
+ #define CQ_BANKID_SHIFT 2
+ #define CQ_BANKID_MASK GENMASK(1, 0)
+ 
++#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF
++#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF
++
+ enum {
+ 	SERV_TYPE_RC,
+ 	SERV_TYPE_UC,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index a4b3f19161dc1..658c522be7583 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -281,7 +281,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ 	return hem;
+ 
+ fail:
+-	hns_roce_free_hem(hr_dev, hem);
++	kfree(hem);
+ 	return NULL;
+ }
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
+index 6fb51db9682b8..9c415b2541af8 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -57,16 +57,16 @@ enum {
+ };
+ 
+ #define check_whether_bt_num_3(type, hop_num) \
+-	(type < HEM_TYPE_MTT && hop_num == 2)
++	((type) < HEM_TYPE_MTT && (hop_num) == 2)
+ 
+ #define check_whether_bt_num_2(type, hop_num) \
+-	((type < HEM_TYPE_MTT && hop_num == 1) || \
+-	(type >= HEM_TYPE_MTT && hop_num == 2))
++	(((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == 2))
+ 
+ #define check_whether_bt_num_1(type, hop_num) \
+-	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+-	(type >= HEM_TYPE_MTT && hop_num == 1) || \
+-	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
++	(((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
++	((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
+ 
+ struct hns_roce_hem {
+ 	void *buf;
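
The hns_roce_hem.h hunk above only adds parentheses, but that matters as
soon as a caller passes an expression rather than a plain variable. A
standalone sketch of the failure mode (the macro names are invented,
simplified from the check_whether_bt_num_* macros):

#include <stdio.h>

#define CHECK_BAD(type, hop_num)	(type < 2 && hop_num == 2)
#define CHECK_GOOD(type, hop_num)	((type) < 2 && (hop_num) == 2)

int main(void)
{
	int flags = 5;

	/* "flags & 3" expands unparenthesized to "flags & 3 < 2", parsed
	 * as "flags & (3 < 2)" because < binds tighter than &, so the
	 * bad macro evaluates 5 & 0 instead of the intended (5 & 3) < 2.
	 */
	printf("bad:  %d\n", CHECK_BAD(flags & 3, 2));	/* 0 - wrong   */
	printf("good: %d\n", CHECK_GOOD(flags & 3, 2));	/* 1 - correct */
	return 0;
}
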
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index ba7ae792d279d..8800464c9a0cd 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2105,7 +2105,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
+ 					 caps->gmv_bt_num *
+ 					 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
+ 
+-		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
++		caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
+ 							  caps->gmv_entry_sz);
+ 	} else {
+ 		u32 func_num = max_t(u32, 1, hr_dev->func_num);
+@@ -3711,8 +3711,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ 		   wc->status == IB_WC_WR_FLUSH_ERR))
+ 		return;
+ 
+-	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+-	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
++	ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
++			      cqe_status);
++	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ 		       cq->cqe_size, false);
+ 	wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
+ 
+@@ -5802,7 +5803,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ 			dev_info(hr_dev->dev,
+ 				 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+ 				 cq_period);
+-			cq_period = HNS_ROCE_MAX_CQ_PERIOD;
++			cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
+ 		}
+ 		cq_period *= HNS_ROCE_CLOCK_ADJUST;
+ 	}
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index df04bc8ede57b..dfed6b4ddb04d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1334,7 +1334,7 @@ struct fmea_ram_ecc {
+ 
+ /* only for RNR timeout issue of HIP08 */
+ #define HNS_ROCE_CLOCK_ADJUST 1000
+-#define HNS_ROCE_MAX_CQ_PERIOD 65
++#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65
+ #define HNS_ROCE_MAX_EQ_PERIOD 65
+ #define HNS_ROCE_RNR_TIMER_10NS 1
+ #define HNS_ROCE_1US_CFG 999
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 1dc60c2b2b7ab..d202258368ed9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -37,9 +37,11 @@
+ #include <rdma/ib_smi.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_cache.h>
++#include "hnae3.h"
+ #include "hns_roce_common.h"
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
++#include "hns_roce_hw_v2.h"
+ 
+ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
+ 			    const u8 *addr)
+@@ -192,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
+ 			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ 	props->max_pkeys = 1;
+ 	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
++	props->max_ah = INT_MAX;
++	props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
++	props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
++	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
++		props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
++
+ 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ 		props->max_srq = hr_dev->caps.num_srqs;
+ 		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 9e05b57a2d67d..80c050d7d0ea6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+-	int ret = 0;
++	int ret, sg_num = 0;
+ 
+ 	mr->npages = 0;
+ 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ 				 sizeof(dma_addr_t), GFP_KERNEL);
+ 	if (!mr->page_list)
+-		return ret;
++		return sg_num;
+ 
+-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+-	if (ret < 1) {
++	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
++	if (sg_num < 1) {
+ 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
++			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
+ 		goto err_page_list;
+ 	}
+ 
+@@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+-		ret = 0;
++		sg_num = 0;
+ 	} else {
+ 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
+-		ret = mr->npages;
+ 	}
+ 
+ err_page_list:
+ 	kvfree(mr->page_list);
+ 	mr->page_list = NULL;
+ 
+-	return ret;
++	return sg_num;
+ }
+ 
+ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 4abae94778544..8f48c6723e07e 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 		return ret;
+ 	}
+ 
+-	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
++	ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ 		goto err_put;
+@@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 	return 0;
+ 
+ err_xa:
+-	xa_erase(&srq_table->xa, srq->srqn);
++	xa_erase_irq(&srq_table->xa, srq->srqn);
+ err_put:
+ 	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
+ 
+@@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
+ 			ret, srq->srqn);
+ 
+-	xa_erase(&srq_table->xa, srq->srqn);
++	xa_erase_irq(&srq_table->xa, srq->srqn);
+ 
+ 	if (refcount_dec_and_test(&srq->refcount))
+ 		complete(&srq->free);
+diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
+index 4a71e678d09c1..89fcc09ded8a4 100644
+--- a/drivers/infiniband/hw/mana/cq.c
++++ b/drivers/infiniband/hw/mana/cq.c
+@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	}
+ 
+ 	cq->cqe = attr->cqe;
+-	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
+-			       IB_ACCESS_LOCAL_WRITE);
+-	if (IS_ERR(cq->umem)) {
+-		err = PTR_ERR(cq->umem);
+-		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
+-			  err);
+-		return err;
+-	}
+-
+-	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
++	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
+ 	if (err) {
+-		ibdev_dbg(ibdev,
+-			  "Failed to create dma region for create cq, %d\n",
+-			  err);
+-		goto err_release_umem;
++		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
++		return err;
+ 	}
+ 
+-	ibdev_dbg(ibdev,
+-		  "create_dma_region ret %d gdma_region 0x%llx\n",
+-		  err, cq->gdma_region);
+-
+-	/*
+-	 * The CQ ID is not known at this time. The ID is generated at create_qp
+-	 */
+-	cq->id = INVALID_QUEUE_ID;
+-
+ 	return 0;
+-
+-err_release_umem:
+-	ib_umem_release(cq->umem);
+-	return err;
+ }
+ 
+ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	struct ib_device *ibdev = ibcq->device;
+ 	struct mana_ib_dev *mdev;
+ 	struct gdma_context *gc;
+-	int err;
+ 
+ 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ 	gc = mdev_to_gc(mdev);
+ 
+-	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+-	if (err) {
+-		ibdev_dbg(ibdev,
+-			  "Failed to destroy dma region, %d\n", err);
+-		return err;
++	if (cq->queue.id != INVALID_QUEUE_ID) {
++		kfree(gc->cq_table[cq->queue.id]);
++		gc->cq_table[cq->queue.id] = NULL;
+ 	}
+ 
+-	if (cq->id != INVALID_QUEUE_ID) {
+-		kfree(gc->cq_table[cq->id]);
+-		gc->cq_table[cq->id] = NULL;
+-	}
+-
+-	ib_umem_release(cq->umem);
++	mana_ib_destroy_queue(mdev, &cq->queue);
+ 
+ 	return 0;
+ }
+@@ -113,8 +81,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+ 	struct gdma_context *gc = mdev_to_gc(mdev);
+ 	struct gdma_queue *gdma_cq;
+ 
++	if (cq->queue.id >= gc->max_num_cqs)
++		return -EINVAL;
+ 	/* Create CQ table entry */
+-	WARN_ON(gc->cq_table[cq->id]);
++	WARN_ON(gc->cq_table[cq->queue.id]);
+ 	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ 	if (!gdma_cq)
+ 		return -ENOMEM;
+@@ -122,7 +92,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+ 	gdma_cq->cq.context = cq;
+ 	gdma_cq->type = GDMA_CQ;
+ 	gdma_cq->cq.callback = mana_ib_cq_handler;
+-	gdma_cq->id = cq->id;
+-	gc->cq_table[cq->id] = gdma_cq;
++	gdma_cq->id = cq->queue.id;
++	gc->cq_table[cq->queue.id] = gdma_cq;
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 71e33feee61bb..4524c6b807487 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -237,6 +237,49 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+ 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
+ }
+ 
++int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
++			 struct mana_ib_queue *queue)
++{
++	struct ib_umem *umem;
++	int err;
++
++	queue->umem = NULL;
++	queue->id = INVALID_QUEUE_ID;
++	queue->gdma_region = GDMA_INVALID_DMA_REGION;
++
++	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
++	if (IS_ERR(umem)) {
++		err = PTR_ERR(umem);
++		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
++		return err;
++	}
++
++	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
++	if (err) {
++		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
++		goto free_umem;
++	}
++	queue->umem = umem;
++
++	ibdev_dbg(&mdev->ib_dev,
++		  "create_dma_region ret %d gdma_region 0x%llx\n",
++		  err, queue->gdma_region);
++
++	return 0;
++free_umem:
++	ib_umem_release(umem);
++	return err;
++}
++
++void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
++{
++	/* Ignore the return code, as there is not much we can do about it;
++	 * the error message is printed inside the callee.
++	 */
++	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
++	ib_umem_release(queue->umem);
++}
++
+ static int
+ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
+ 			    struct gdma_context *gc,
+diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
+index f83390eebb7d7..6acb5c281c368 100644
+--- a/drivers/infiniband/hw/mana/mana_ib.h
++++ b/drivers/infiniband/hw/mana/mana_ib.h
+@@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
+ 	u32 max_inline_data_size;
+ };
+ 
++struct mana_ib_queue {
++	struct ib_umem *umem;
++	u64 gdma_region;
++	u64 id;
++};
++
+ struct mana_ib_dev {
+ 	struct ib_device ib_dev;
+ 	struct gdma_dev *gdma_dev;
+@@ -82,10 +88,8 @@ struct mana_ib_mr {
+ 
+ struct mana_ib_cq {
+ 	struct ib_cq ibcq;
+-	struct ib_umem *umem;
++	struct mana_ib_queue queue;
+ 	int cqe;
+-	u64 gdma_region;
+-	u64 id;
+ 	u32 comp_vector;
+ };
+ 
+@@ -169,6 +173,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
+ 				  mana_handle_t gdma_region);
+ 
++int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
++			 struct mana_ib_queue *queue);
++void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
++
+ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
+ 				struct ib_wq_init_attr *init_attr,
+ 				struct ib_udata *udata);
+diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
+index 6e7627745c957..d7485ee6a6854 100644
+--- a/drivers/infiniband/hw/mana/qp.c
++++ b/drivers/infiniband/hw/mana/qp.c
+@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
+ 		wq_spec.gdma_region = wq->gdma_region;
+ 		wq_spec.queue_size = wq->wq_buf_size;
+ 
+-		cq_spec.gdma_region = cq->gdma_region;
++		cq_spec.gdma_region = cq->queue.gdma_region;
+ 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
+ 		cq_spec.modr_ctx_id = 0;
+ 		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
+@@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
+ 
+ 		/* The GDMA regions are now owned by the WQ object */
+ 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
+-		cq->gdma_region = GDMA_INVALID_DMA_REGION;
++		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ 
+ 		wq->id = wq_spec.queue_index;
+-		cq->id = cq_spec.queue_index;
++		cq->queue.id = cq_spec.queue_index;
+ 
+ 		ibdev_dbg(&mdev->ib_dev,
+ 			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
+-			  ret, wq->rx_object, wq->id, cq->id);
++			  ret, wq->rx_object, wq->id, cq->queue.id);
+ 
+-		resp.entries[i].cqid = cq->id;
++		resp.entries[i].cqid = cq->queue.id;
+ 		resp.entries[i].wqid = wq->id;
+ 
+ 		mana_ind_table[i] = wq->rx_object;
+@@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
+ 		if (ret)
+ 			goto fail;
+ 
+-		gdma_cq_allocated[i] = gc->cq_table[cq->id];
++		gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
+ 	}
+ 	resp.num_entries = i;
+ 
+@@ -264,7 +264,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
+ 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+ 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+ 
+-		gc->cq_table[cq->id] = NULL;
++		gc->cq_table[cq->queue.id] = NULL;
+ 		kfree(gdma_cq_allocated[i]);
+ 
+ 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
+@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ 	wq_spec.gdma_region = qp->sq_gdma_region;
+ 	wq_spec.queue_size = ucmd.sq_buf_size;
+ 
+-	cq_spec.gdma_region = send_cq->gdma_region;
++	cq_spec.gdma_region = send_cq->queue.gdma_region;
+ 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
+ 	cq_spec.modr_ctx_id = 0;
+ 	eq_vec = send_cq->comp_vector % gc->max_num_queues;
+@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ 
+ 	/* The GDMA regions are now owned by the WQ object */
+ 	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
+-	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
++	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ 
+ 	qp->sq_id = wq_spec.queue_index;
+-	send_cq->id = cq_spec.queue_index;
++	send_cq->queue.id = cq_spec.queue_index;
+ 
+ 	/* Create CQ table entry */
+ 	err = mana_ib_install_cq_cb(mdev, send_cq);
+@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ 
+ 	ibdev_dbg(&mdev->ib_dev,
+ 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
+-		  qp->tx_object, qp->sq_id, send_cq->id);
++		  qp->tx_object, qp->sq_id, send_cq->queue.id);
+ 
+ 	resp.sqid = qp->sq_id;
+-	resp.cqid = send_cq->id;
++	resp.cqid = send_cq->queue.id;
+ 	resp.tx_vp_offset = pd->tx_vp_offset;
+ 
+ 	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+@@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
+ 
+ err_release_gdma_cq:
+ 	kfree(gdma_cq);
+-	gc->cq_table[send_cq->id] = NULL;
++	gc->cq_table[send_cq->queue.id] = NULL;
+ 
+ err_destroy_wq_obj:
+ 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
+index 96ffbbaf0a73d..5a22be14d958f 100644
+--- a/drivers/infiniband/hw/mlx5/mem.c
++++ b/drivers/infiniband/hw/mlx5/mem.c
+@@ -30,6 +30,7 @@
+  * SOFTWARE.
+  */
+ 
++#include <linux/io.h>
+ #include <rdma/ib_umem_odp.h>
+ #include "mlx5_ib.h"
+ #include <linux/jiffies.h>
+@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ 	__be32 mmio_wqe[16] = {};
+ 	unsigned long flags;
+ 	unsigned int idx;
+-	int i;
+ 
+ 	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ 		return -EIO;
+@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ 	 * we hit doorbell
+ 	 */
+ 	wmb();
+-	for (i = 0; i < 8; i++)
+-		mlx5_write64(&mmio_wqe[i * 2],
+-			     bf->bfreg->map + bf->offset + i * 8);
+-	io_stop_wc();
++	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
++			 sizeof(mmio_wqe) / 8);
+ 
+ 	bf->offset ^= bf->buf_size;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index a8de35c07c9ef..f255a12e26a02 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
+ 	unsigned int ndescs;
+ 	struct wait_queue_head wait;
+ 	refcount_t usecount;
+-	/* User Mkey must hold either a rb_key or a cache_ent. */
++	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
+ 	struct mlx5r_cache_rb_key rb_key;
+ 	struct mlx5_cache_ent *cache_ent;
++	u8 cacheable : 1;
+ };
+ 
+ #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index a8ee2ca1f4a17..ecc111ed5d86e 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ 		if (IS_ERR(mr))
+ 			return mr;
+ 		mr->mmkey.rb_key = rb_key;
++		mr->mmkey.cacheable = true;
+ 		return mr;
+ 	}
+ 
+@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ 	mr->ibmr.pd = pd;
+ 	mr->umem = umem;
+ 	mr->page_shift = order_base_2(page_size);
++	mr->mmkey.cacheable = true;
+ 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
+ 
+ 	return mr;
+@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ 	unsigned int diffs = current_access_flags ^ target_access_flags;
+ 
+ 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+-		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
++		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
++		      IB_ACCESS_REMOTE_ATOMIC))
+ 		return false;
+ 	return mlx5r_umr_can_reconfig(dev, current_access_flags,
+ 				      target_access_flags);
+@@ -1835,6 +1838,23 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
+ 	return ret;
+ }
+ 
++static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
++{
++	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
++	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
++
++	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
++		return 0;
++
++	if (ent) {
++		spin_lock_irq(&ent->mkeys_queue.lock);
++		ent->in_use--;
++		mr->mmkey.cache_ent = NULL;
++		spin_unlock_irq(&ent->mkeys_queue.lock);
++	}
++	return destroy_mkey(dev, mr);
++}
++
+ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
+@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ 	}
+ 
+ 	/* Stop DMA */
+-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
+-		if (mlx5r_umr_revoke_mr(mr) ||
+-		    cache_ent_find_and_store(dev, mr))
+-			mr->mmkey.cache_ent = NULL;
+-
+-	if (!mr->mmkey.cache_ent) {
+-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
+-		if (rc)
+-			return rc;
+-	}
++	rc = mlx5_revoke_mr(mr);
++	if (rc)
++		return rc;
+ 
+ 	if (mr->umem) {
+ 		bool is_odp = is_odp_mr(mr);
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index b78b8c0856abd..c997b7cbf2a9e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -131,12 +131,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
+ {
+ 	int must_sched;
+ 
+-	skb_queue_tail(&qp->resp_pkts, skb);
+-
+-	must_sched = skb_queue_len(&qp->resp_pkts) > 1;
++	must_sched = skb_queue_len(&qp->resp_pkts) > 0;
+ 	if (must_sched != 0)
+ 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+ 
++	skb_queue_tail(&qp->resp_pkts, skb);
++
+ 	if (must_sched)
+ 		rxe_sched_task(&qp->comp.task);
+ 	else
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index cd59666158b18..e5827064ab1e2 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -366,18 +366,10 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+ 	rxe_get(pkt->qp);
+ 	atomic_inc(&pkt->qp->skb_out);
+ 
+-	if (skb->protocol == htons(ETH_P_IP)) {
++	if (skb->protocol == htons(ETH_P_IP))
+ 		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
++	else
+ 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+-	} else {
+-		rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n",
+-				skb->protocol);
+-		atomic_dec(&pkt->qp->skb_out);
+-		rxe_put(pkt->qp);
+-		kfree_skb(skb);
+-		return -EINVAL;
+-	}
+ 
+ 	if (unlikely(net_xmit_eval(err))) {
+ 		rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 614581989b381..a49784e5156c5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
+ {
+ 	int err = 0;
+ 	unsigned long flags;
++	int good = 0;
+ 
+ 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ 	while (ibwr) {
+@@ -895,12 +896,15 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
+ 		if (err) {
+ 			*bad_wr = ibwr;
+ 			break;
++		} else {
++			good++;
+ 		}
+ 		ibwr = ibwr->next;
+ 	}
+ 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ 
+-	if (!err)
++	/* kick off processing of any posted WQEs */
++	if (good)
+ 		rxe_sched_task(&qp->req.task);
+ 
+ 	spin_lock_irqsave(&qp->state_lock, flags);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 4bd161e86f8dd..562df2b3ef187 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ 
+ 	ppriv = ipoib_priv(pdev);
+ 
+-	snprintf(intf_name, sizeof(intf_name), "%s.%04x",
+-		 ppriv->dev->name, pkey);
++	/* If you increase IFNAMSIZ, update snprintf below
++	 * to allow longer names.
++	 */
++	BUILD_BUG_ON(IFNAMSIZ != 16);
++	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
++		 pkey);
+ 
+ 	ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ 	if (IS_ERR(ndev)) {
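
The ipoib hunk above uses a "%.10s" precision so the worst-case interface
name is exactly IFNAMSIZ bytes and the pkey suffix can never be truncated.
A standalone sketch of the arithmetic (the parent name is a made-up worst
case):

#include <stdio.h>

#define IFNAMSIZ	16

int main(void)
{
	char intf_name[IFNAMSIZ];
	const char *parent = "ib0123456789abc";	/* 15-char parent name */
	unsigned short pkey = 0x8001;

	/* "%.10s" caps the parent at 10 bytes, so the worst case is
	 * 10 + 1 ('.') + 4 ("%04x") + 1 ('\0') = 16 bytes: exactly
	 * IFNAMSIZ.
	 */
	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", parent, pkey);
	printf("%s\n", intf_name);	/* ib01234567.8001 */
	return 0;
}
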
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 7114854375678..fd4997ba263c7 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1378,19 +1378,19 @@ static int input_print_modalias_bits(char *buf, int size,
+ 				     char name, const unsigned long *bm,
+ 				     unsigned int min_bit, unsigned int max_bit)
+ {
+-	int len = 0, i;
++	int bit = min_bit;
++	int len = 0;
+ 
+ 	len += snprintf(buf, max(size, 0), "%c", name);
+-	for (i = min_bit; i < max_bit; i++)
+-		if (bm[BIT_WORD(i)] & BIT_MASK(i))
+-			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
++	for_each_set_bit_from(bit, bm, max_bit)
++		len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
+ 	return len;
+ }
+ 
+-static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+-				int add_cr)
++static int input_print_modalias_parts(char *buf, int size, int full_len,
++				      const struct input_dev *id)
+ {
+-	int len;
++	int len, klen, remainder, space;
+ 
+ 	len = snprintf(buf, max(size, 0),
+ 		       "input:b%04Xv%04Xp%04Xe%04X-",
+@@ -1399,8 +1399,48 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+ 
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'e', id->evbit, 0, EV_MAX);
+-	len += input_print_modalias_bits(buf + len, size - len,
++
++	/*
++	 * Calculate the remaining space in the buffer, making sure we
++	 * have room for the terminating '\0'.
++	 */
++	space = max(size - (len + 1), 0);
++
++	klen = input_print_modalias_bits(buf + len, size - len,
+ 				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
++	len += klen;
++
++	/*
++	 * If we have more data than we can fit in the buffer, check
++	 * if we can trim key data to fit in the rest. We will indicate
++	 * that key data is incomplete by adding "+" sign at the end, like
++	 * this: "k1,2,3,45,+,".
++	 *
++	 * Note that the shortest key info (if present) is "k+," so we
++	 * can only try to trim if key data is longer than that.
++	 */
++	if (full_len && size < full_len + 1 && klen > 3) {
++		remainder = full_len - len;
++		/*
++		 * We can only trim if we have space for the remainder
++		 * and also for at least "k+," which is 3 more characters.
++		 */
++		if (remainder <= space - 3) {
++			/*
++			 * We are guaranteed to have 'k' in the buffer, so
++			 * we need at least 3 additional bytes for storing
++			 * "+," in addition to the remainder.
++			 */
++			for (int i = size - 1 - remainder - 3; i >= 0; i--) {
++				if (buf[i] == 'k' || buf[i] == ',') {
++					strcpy(buf + i + 1, "+,");
++					len = i + 3; /* Not counting '\0' */
++					break;
++				}
++			}
++		}
++	}
++
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'r', id->relbit, 0, REL_MAX);
+ 	len += input_print_modalias_bits(buf + len, size - len,
+@@ -1416,12 +1456,25 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
+ 	len += input_print_modalias_bits(buf + len, size - len,
+ 				'w', id->swbit, 0, SW_MAX);
+ 
+-	if (add_cr)
+-		len += snprintf(buf + len, max(size - len, 0), "\n");
+-
+ 	return len;
+ }
+ 
++static int input_print_modalias(char *buf, int size, const struct input_dev *id)
++{
++	int full_len;
++
++	/*
++	 * Printing is done in 2 passes: the first figures out the total
++	 * length needed for the modalias string, and the second tries to
++	 * trim key data if the buffer is too small for the entire modalias.
++	 * If the buffer is too small regardless, it will fill as much as it
++	 * can (without trimming key data) into the buffer and leave it to
++	 * the caller to figure out what to do with the result.
++	 */
++	full_len = input_print_modalias_parts(NULL, 0, 0, id);
++	return input_print_modalias_parts(buf, size, full_len, id);
++}
++
+ static ssize_t input_dev_show_modalias(struct device *dev,
+ 				       struct device_attribute *attr,
+ 				       char *buf)
+@@ -1429,7 +1482,9 @@ static ssize_t input_dev_show_modalias(struct device *dev,
+ 	struct input_dev *id = to_input_dev(dev);
+ 	ssize_t len;
+ 
+-	len = input_print_modalias(buf, PAGE_SIZE, id, 1);
++	len = input_print_modalias(buf, PAGE_SIZE, id);
++	if (len < PAGE_SIZE - 2)
++		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ 
+ 	return min_t(int, len, PAGE_SIZE);
+ }
+@@ -1641,6 +1696,23 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
+ 	return 0;
+ }
+ 
++/*
++ * This is a pretty gross hack. When building uevent data the driver core
++ * may try adding more environment variables to kobj_uevent_env without
++ * telling us, so we have no idea how much of the buffer we can use to
++ * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
++ * reduce the amount of memory we will use for the modalias environment variable.
++ *
++ * The potential additions are:
++ *
++ * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
++ * HOME=/ (6 bytes)
++ * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
++ *
++ * 68 bytes total. Allow extra headroom and reserve 96 bytes.
++ */
++#define UEVENT_ENV_EXTRA_LEN	96
++
+ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ 					 const struct input_dev *dev)
+ {
+@@ -1650,9 +1722,11 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
+ 		return -ENOMEM;
+ 
+ 	len = input_print_modalias(&env->buf[env->buflen - 1],
+-				   sizeof(env->buf) - env->buflen,
+-				   dev, 0);
+-	if (len >= (sizeof(env->buf) - env->buflen))
++				   (int)sizeof(env->buf) - env->buflen -
++					UEVENT_ENV_EXTRA_LEN,
++				   dev);
++	if (len >= ((int)sizeof(env->buf) - env->buflen -
++					UEVENT_ENV_EXTRA_LEN))
+ 		return -ENOMEM;
+ 
+ 	env->buflen += len;
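
The two-pass scheme added to input_print_modalias() above relies on
snprintf() reporting the length the output would need even with no buffer.
A minimal standalone sketch of the same measure-then-write pattern (the
function and format string are simplified stand-ins, not the driver's
code):

#include <stdio.h>
#include <stdlib.h>

static int print_ids(char *buf, size_t size, int bus, int vendor)
{
	return snprintf(buf, size, "input:b%04Xv%04X-", bus, vendor);
}

int main(void)
{
	int full_len = print_ids(NULL, 0, 0x19, 0x1234);  /* pass 1: measure */
	char *buf = malloc(full_len + 1);

	if (!buf)
		return 1;
	print_ids(buf, full_len + 1, 0x19, 0x1234);	  /* pass 2: write */
	printf("%s (%d bytes)\n", buf, full_len);
	free(buf);
	return 0;
}
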
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index ac6754a85f350..f440ca440d924 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2097,6 +2097,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
+ 			amd_iommu_max_glx_val = glxval;
+ 		else
+ 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
++
++		iommu_enable_gt(iommu);
+ 	}
+ 
+ 	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
+@@ -2773,7 +2775,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
+ 	iommu_enable_command_buffer(iommu);
+ 	iommu_enable_event_buffer(iommu);
+ 	iommu_set_exclusion_range(iommu);
+-	iommu_enable_gt(iommu);
+ 	iommu_enable_ga(iommu);
+ 	iommu_enable_xt(iommu);
+ 	iommu_enable_irtcachedis(iommu);
+@@ -2830,7 +2831,6 @@ static void early_enable_iommus(void)
+ 			iommu_disable_irtcachedis(iommu);
+ 			iommu_enable_command_buffer(iommu);
+ 			iommu_enable_event_buffer(iommu);
+-			iommu_enable_gt(iommu);
+ 			iommu_enable_ga(iommu);
+ 			iommu_enable_xt(iommu);
+ 			iommu_enable_irtcachedis(iommu);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index a7ecd90303dc4..e4a03588a8a0f 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -221,12 +221,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
+ int intel_iommu_enabled = 0;
+ EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+ 
+-static int dmar_map_gfx = 1;
+ static int intel_iommu_superpage = 1;
+ static int iommu_identity_mapping;
+ static int iommu_skip_te_disable;
++static int disable_igfx_iommu;
+ 
+-#define IDENTMAP_GFX		2
+ #define IDENTMAP_AZALIA		4
+ 
+ const struct iommu_ops intel_iommu_ops;
+@@ -265,7 +264,7 @@ static int __init intel_iommu_setup(char *str)
+ 			no_platform_optin = 1;
+ 			pr_info("IOMMU disabled\n");
+ 		} else if (!strncmp(str, "igfx_off", 8)) {
+-			dmar_map_gfx = 0;
++			disable_igfx_iommu = 1;
+ 			pr_info("Disable GFX device mapping\n");
+ 		} else if (!strncmp(str, "forcedac", 8)) {
+ 			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+@@ -2402,9 +2401,6 @@ static int device_def_domain_type(struct device *dev)
+ 
+ 		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ 			return IOMMU_DOMAIN_IDENTITY;
+-
+-		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+-			return IOMMU_DOMAIN_IDENTITY;
+ 	}
+ 
+ 	return 0;
+@@ -2705,9 +2701,6 @@ static int __init init_dmars(void)
+ 		iommu_set_root_entry(iommu);
+ 	}
+ 
+-	if (!dmar_map_gfx)
+-		iommu_identity_mapping |= IDENTMAP_GFX;
+-
+ 	check_tylersburg_isoch();
+ 
+ 	ret = si_domain_init(hw_pass_through);
+@@ -2798,7 +2791,7 @@ static void __init init_no_remapping_devices(void)
+ 		/* This IOMMU has *only* gfx devices. Either bypass it or
+ 		   set the gfx_mapped flag, as appropriate */
+ 		drhd->gfx_dedicated = 1;
+-		if (!dmar_map_gfx)
++		if (disable_igfx_iommu)
+ 			drhd->ignored = 1;
+ 	}
+ }
+@@ -4875,7 +4868,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
+ 		return;
+ 
+ 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
+-	dmar_map_gfx = 0;
++	disable_igfx_iommu = 1;
+ }
+ 
+ /* G4x/GM45 integrated gfx dmar support is totally busted. */
+@@ -4956,8 +4949,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+ 
+ 	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+ 		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+-		dmar_map_gfx = 0;
+-	} else if (dmar_map_gfx) {
++		disable_igfx_iommu = 1;
++	} else if (!disable_igfx_iommu) {
+ 		/* we have to ensure the gfx device is idle before we flush */
+ 		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
+ 		iommu_set_dma_strict();
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index a95a483def2d2..659a77f7bb833 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -3317,15 +3317,26 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+ static int __iommu_set_group_pasid(struct iommu_domain *domain,
+ 				   struct iommu_group *group, ioasid_t pasid)
+ {
+-	struct group_device *device;
+-	int ret = 0;
++	struct group_device *device, *last_gdev;
++	int ret;
+ 
+ 	for_each_group_device(group, device) {
+ 		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
+ 		if (ret)
+-			break;
++			goto err_revert;
+ 	}
+ 
++	return 0;
++
++err_revert:
++	last_gdev = device;
++	for_each_group_device(group, device) {
++		const struct iommu_ops *ops = dev_iommu_ops(device->dev);
++
++		if (device == last_gdev)
++			break;
++		ops->remove_dev_pasid(device->dev, pasid);
++	}
+ 	return ret;
+ }
+ 
+@@ -3383,10 +3394,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
+ 	}
+ 
+ 	ret = __iommu_set_group_pasid(domain, group, pasid);
+-	if (ret) {
+-		__iommu_remove_group_pasid(group, pasid);
++	if (ret)
+ 		xa_erase(&group->pasid_array, pasid);
+-	}
+ out_unlock:
+ 	mutex_unlock(&group->mutex);
+ 	return ret;
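
The __iommu_set_group_pasid() fix above is the classic partial-rollback
idiom: remember where the loop failed, then walk the list again and undo
only the entries that had already succeeded. A standalone sketch of the
idiom (apply/revert are invented stand-ins for the set_dev_pasid and
remove_dev_pasid ops):

#include <stdio.h>

static int apply(int dev)	{ return dev == 3 ? -1 : 0; }	/* dev 3 fails */
static void revert(int dev)	{ printf("revert dev %d\n", dev); }

static int set_all(const int *devs, int n)
{
	int i, failed = -1;

	for (i = 0; i < n; i++) {
		if (apply(devs[i])) {
			failed = i;
			break;
		}
	}
	if (failed < 0)
		return 0;

	for (i = 0; i < failed; i++)	/* undo the successful ones only */
		revert(devs[i]);
	return -1;
}

int main(void)
{
	int devs[] = { 1, 2, 3, 4 };

	return set_all(devs, 4) ? 1 : 0;	/* reverts devs 1 and 2 */
}
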
+diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
+index 9c8b1349ee17b..a1430ab60a8a3 100644
+--- a/drivers/irqchip/irq-alpine-msi.c
++++ b/drivers/irqchip/irq-alpine-msi.c
+@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ 	return 0;
+ 
+ err_sgi:
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 	alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ 	return err;
+ }
+diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
+index 6e1e1f011bb29..dd4d699170f4e 100644
+--- a/drivers/irqchip/irq-loongson-pch-msi.c
++++ b/drivers/irqchip/irq-loongson-pch-msi.c
+@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
+ 
+ err_hwirq:
+ 	pch_msi_free_hwirq(priv, hwirq, nr_irqs);
+-	irq_domain_free_irqs_parent(domain, virq, i - 1);
++	irq_domain_free_irqs_parent(domain, virq, i);
+ 
+ 	return err;
+ }
+diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
+index db9270da5b8e9..b6ddf1d47cb4e 100644
+--- a/drivers/macintosh/via-macii.c
++++ b/drivers/macintosh/via-macii.c
+@@ -140,24 +140,19 @@ static int macii_probe(void)
+ /* Initialize the driver */
+ static int macii_init(void)
+ {
+-	unsigned long flags;
+ 	int err;
+ 
+-	local_irq_save(flags);
+-
+ 	err = macii_init_via();
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
+ 			  macii_interrupt);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+ 	macii_state = idle;
+-out:
+-	local_irq_restore(flags);
+-	return err;
++	return 0;
+ }
+ 
+ /* initialize the hardware */
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 5eabdb06c6498..2ac43d1f1b92c 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -154,8 +154,10 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ 	struct delay_c *dc = ti->private;
+ 
+-	if (dc->kdelayd_wq)
++	if (dc->kdelayd_wq) {
++		timer_shutdown_sync(&dc->delay_timer);
+ 		destroy_workqueue(dc->kdelayd_wq);
++	}
+ 
+ 	if (dc->read.dev)
+ 		dm_put_device(ti, dc->read.dev);
+@@ -240,19 +242,18 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		ret = delay_class_ctr(ti, &dc->flush, argv);
+ 		if (ret)
+ 			goto bad;
+-		max_delay = max(max_delay, dc->write.delay);
+-		max_delay = max(max_delay, dc->flush.delay);
+ 		goto out;
+ 	}
+ 
+ 	ret = delay_class_ctr(ti, &dc->write, argv + 3);
+ 	if (ret)
+ 		goto bad;
++	max_delay = max(max_delay, dc->write.delay);
++
+ 	if (argc == 6) {
+ 		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
+ 		if (ret)
+ 			goto bad;
+-		max_delay = max(max_delay, dc->flush.delay);
+ 		goto out;
+ 	}
+ 
+@@ -267,8 +268,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		 * In case of small requested delays, use kthread instead of
+ 		 * timers and workqueue to achieve better latency.
+ 		 */
+-		dc->worker = kthread_create(&flush_worker_fn, dc,
+-					    "dm-delay-flush-worker");
++		dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
+ 		if (IS_ERR(dc->worker)) {
+ 			ret = PTR_ERR(dc->worker);
+ 			dc->worker = NULL;
+@@ -335,7 +335,7 @@ static void delay_presuspend(struct dm_target *ti)
+ 	mutex_unlock(&delayed_bios_lock);
+ 
+ 	if (!delay_is_fast(dc))
+-		del_timer_sync(&dc->delay_timer);
++		timer_delete(&dc->delay_timer);
+ 	flush_delayed_bios(dc, true);
+ }
+ 
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 059afc24c08be..0a2d37eb38ef9 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1424,7 +1424,7 @@ __acquires(bitmap->lock)
+ 	sector_t chunk = offset >> bitmap->chunkshift;
+ 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
+ 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
+-	sector_t csize;
++	sector_t csize = ((sector_t)1) << bitmap->chunkshift;
+ 	int err;
+ 
+ 	if (page >= bitmap->pages) {
+@@ -1433,6 +1433,7 @@ __acquires(bitmap->lock)
+ 		 * End-of-device while looking for a whole page or
+ 		 * user set a huge number to sysfs bitmap_set_bits.
+ 		 */
++		*blocks = csize - (offset & (csize - 1));
+ 		return NULL;
+ 	}
+ 	err = md_bitmap_checkpage(bitmap, page, create, 0);
+@@ -1441,8 +1442,7 @@ __acquires(bitmap->lock)
+ 	    bitmap->bp[page].map == NULL)
+ 		csize = ((sector_t)1) << (bitmap->chunkshift +
+ 					  PAGE_COUNTER_SHIFT);
+-	else
+-		csize = ((sector_t)1) << bitmap->chunkshift;
++
+ 	*blocks = csize - (offset & (csize - 1));
+ 
+ 	if (err < 0)
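
The md-bitmap fix above makes the early-return path report the same
*blocks value as the normal path. The expression works because chunk
sizes are powers of two, so masking with (csize - 1) yields the offset
within the current chunk. A standalone sketch (shift and offset values
are arbitrary examples):

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	int chunkshift = 9;			/* 512-sector chunks */
	sector_t csize = (sector_t)1 << chunkshift;
	sector_t offset = 1000;

	/* (offset & (csize - 1)) is the position inside the chunk, so
	 * the difference below is the number of sectors left until the
	 * next chunk boundary.
	 */
	printf("%llu blocks to chunk end\n",
	       csize - (offset & (csize - 1)));	/* 24 */
	return 0;
}
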
+diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
+index f548b1bb75fb9..e932d25ca7b3a 100644
+--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
++++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
+@@ -1475,7 +1475,7 @@ static int et8ek8_probe(struct i2c_client *client)
+ 	return ret;
+ }
+ 
+-static void __exit et8ek8_remove(struct i2c_client *client)
++static void et8ek8_remove(struct i2c_client *client)
+ {
+ 	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ 	struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+@@ -1517,7 +1517,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
+ 		.of_match_table	= et8ek8_of_table,
+ 	},
+ 	.probe		= et8ek8_probe,
+-	.remove		= __exit_p(et8ek8_remove),
++	.remove		= et8ek8_remove,
+ 	.id_table	= et8ek8_id_table,
+ };
+ 
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index c42adc5a408db..00090e7f5f9da 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1752,11 +1752,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 
+ 	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
+ 
+-	/* Register notifier for subdevices we care */
+-	r = cio2_parse_firmware(cio2);
+-	if (r)
+-		goto fail_clean_notifier;
+-
+ 	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ 			     CIO2_NAME, cio2);
+ 	if (r) {
+@@ -1764,6 +1759,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
+ 		goto fail_clean_notifier;
+ 	}
+ 
++	/* Register notifier for the subdevices we care about */
++	r = cio2_parse_firmware(cio2);
++	if (r)
++		goto fail_clean_notifier;
++
+ 	pm_runtime_put_noidle(dev);
+ 	pm_runtime_allow(dev);
+ 
+diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
+index 7481f553f9595..24ec576dc3bff 100644
+--- a/drivers/media/pci/ngene/ngene-core.c
++++ b/drivers/media/pci/ngene/ngene-core.c
+@@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan)
+ 	}
+ 
+ 	if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
+-		dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++		ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
++		if (ret != 0)
++			goto err;
+ 		set_transfer(chan, 1);
+ 		chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
+ 		set_transfer(&chan->dev->channel[2], 1);
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index 2d7b0508cc9af..6f7d27a48eff0 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -239,10 +239,6 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ 
+ 	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
+ 
+-	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+-	if (ret)
+-		goto err_disable_pclk;
+-
+ 	/* Enable DPHY clk and data lanes. */
+ 	if (csi2rx->dphy) {
+ 		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
+@@ -252,6 +248,13 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ 		}
+ 
+ 		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
++
++		ret = csi2rx_configure_ext_dphy(csi2rx);
++		if (ret) {
++			dev_err(csi2rx->dev,
++				"Failed to configure external DPHY: %d\n", ret);
++			goto err_disable_pclk;
++		}
+ 	}
+ 
+ 	/*
+@@ -291,14 +294,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ 
+ 	reset_control_deassert(csi2rx->sys_rst);
+ 
+-	if (csi2rx->dphy) {
+-		ret = csi2rx_configure_ext_dphy(csi2rx);
+-		if (ret) {
+-			dev_err(csi2rx->dev,
+-				"Failed to configure external DPHY: %d\n", ret);
+-			goto err_disable_sysclk;
+-		}
+-	}
++	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
++	if (ret)
++		goto err_disable_sysclk;
+ 
+ 	clk_disable_unprepare(csi2rx->p_clk);
+ 
+@@ -312,6 +310,10 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ 		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
+ 	}
+ 
++	if (csi2rx->dphy) {
++		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
++		phy_power_off(csi2rx->dphy);
++	}
+ err_disable_pclk:
+ 	clk_disable_unprepare(csi2rx->p_clk);
+ 
+diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+index 792336dada447..997a66318a293 100644
+--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
++++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+@@ -59,7 +59,7 @@ enum rvin_isp_id {
+ 
+ #define RVIN_REMOTES_MAX \
+ 	(((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+-	 RVIN_CSI_MAX : RVIN_ISP_MAX)
++	 (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
+ 
+ /**
+  * enum rvin_dma_state - DMA states
+diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
+index f1c5c0a6a335c..e3e6aa87fe081 100644
+--- a/drivers/media/radio/radio-shark2.c
++++ b/drivers/media/radio/radio-shark2.c
+@@ -62,7 +62,7 @@ struct shark_device {
+ #ifdef SHARK_USE_LEDS
+ 	struct work_struct led_work;
+ 	struct led_classdev leds[NO_LEDS];
+-	char led_names[NO_LEDS][32];
++	char led_names[NO_LEDS][64];
+ 	atomic_t brightness[NO_LEDS];
+ 	unsigned long brightness_new;
+ #endif
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index bbd90123a4e76..91a41aa3ced24 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/usb.h>
++#include <linux/usb/quirks.h>
+ #include <linux/usb/uvc.h>
+ #include <linux/videodev2.h>
+ #include <linux/vmalloc.h>
+@@ -2232,6 +2233,9 @@ static int uvc_probe(struct usb_interface *intf,
+ 		goto error;
+ 	}
+ 
++	if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME)
++		udev->quirks &= ~USB_QUIRK_RESET_RESUME;
++
+ 	uvc_dbg(dev, PROBE, "UVC device initialized\n");
+ 	usb_enable_autosuspend(udev);
+ 	return 0;
+@@ -2574,6 +2578,33 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) },
++	/* Logitech Rally Bar Huddle */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x087c,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
++	/* Logitech Rally Bar */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x089b,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
++	/* Logitech Rally Bar Mini */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x08d3,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ 	/* Chicony CNF7129 (Asus EEE 100HE) */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 6fb0a78b1b009..88218693f6f0b 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -73,6 +73,7 @@
+ #define UVC_QUIRK_FORCE_Y8		0x00000800
+ #define UVC_QUIRK_FORCE_BPP		0x00001000
+ #define UVC_QUIRK_WAKE_AUTOSUSPEND	0x00002000
++#define UVC_QUIRK_NO_RESET_RESUME	0x00004000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 4c6198c48dd61..45836f0a2b0a7 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -732,6 +732,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ 		memset(&sel, 0, sizeof(sel));
+ 		sel.which = crop->which;
+ 		sel.pad = crop->pad;
++		sel.stream = crop->stream;
+ 		sel.target = V4L2_SEL_TGT_CROP;
+ 
+ 		rval = v4l2_subdev_call(
+@@ -756,6 +757,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ 		memset(&sel, 0, sizeof(sel));
+ 		sel.which = crop->which;
+ 		sel.pad = crop->pad;
++		sel.stream = crop->stream;
+ 		sel.target = V4L2_SEL_TGT_CROP;
+ 		sel.r = crop->rect;
+ 
+diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
+index 95ef971b5e1cb..b28701138b4bc 100644
+--- a/drivers/misc/lkdtm/Makefile
++++ b/drivers/misc/lkdtm/Makefile
+@@ -19,7 +19,7 @@ KASAN_SANITIZE_rodata.o			:= n
+ KCSAN_SANITIZE_rodata.o			:= n
+ KCOV_INSTRUMENT_rodata.o		:= n
+ OBJECT_FILES_NON_STANDARD_rodata.o	:= y
+-CFLAGS_REMOVE_rodata.o			+= $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
++CFLAGS_REMOVE_rodata.o			+= $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI)
+ 
+ OBJCOPYFLAGS :=
+ OBJCOPYFLAGS_rodata_objcopy.o	:= \
+diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
+index b93404d656509..5b861dbff27e9 100644
+--- a/drivers/misc/lkdtm/perms.c
++++ b/drivers/misc/lkdtm/perms.c
+@@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
+ 	return fdesc;
+ }
+ 
+-static noinline void execute_location(void *dst, bool write)
++static noinline __nocfi void execute_location(void *dst, bool write)
+ {
+ 	void (*func)(void);
+ 	func_desc_t fdesc;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 0de87bc638405..6d5c755411de0 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -956,8 +956,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 
+ 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
+ 		size = mtd_otp_size(mtd, true);
+-		if (size < 0)
+-			return size;
++		if (size < 0) {
++			err = size;
++			goto err;
++		}
+ 
+ 		if (size > 0) {
+ 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
+index a74e64e0cfa32..c02e50608816a 100644
+--- a/drivers/mtd/nand/raw/nand_hynix.c
++++ b/drivers/mtd/nand/raw/nand_hynix.c
+@@ -401,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
+ 	if (ret)
+ 		pr_warn("failed to initialize read-retry infrastructure");
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 0918bd6fa81dd..5a202edfec371 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3146,6 +3146,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
+ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ {
+ 	struct gpio_desc *gpiod = chip->reset;
++	int err;
+ 
+ 	/* If there is a GPIO connected to the reset pin, toggle it */
+ 	if (gpiod) {
+@@ -3154,17 +3155,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		 * mid-byte, causing the first EEPROM read after the reset
+ 		 * from the wrong location resulting in the switch booting
+ 		 * to wrong mode and inoperable.
++		 * For this reason, switch families with EEPROM support
++		 * generally wait for EEPROM loads to complete as their pre-
++		 * and post-reset handlers.
+ 		 */
+-		if (chip->info->ops->get_eeprom)
+-			mv88e6xxx_g2_eeprom_wait(chip);
++		if (chip->info->ops->hardware_reset_pre) {
++			err = chip->info->ops->hardware_reset_pre(chip);
++			if (err)
++				dev_err(chip->dev, "pre-reset error: %d\n", err);
++		}
+ 
+ 		gpiod_set_value_cansleep(gpiod, 1);
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
+ 
+-		if (chip->info->ops->get_eeprom)
+-			mv88e6xxx_g2_eeprom_wait(chip);
++		if (chip->info->ops->hardware_reset_post) {
++			err = chip->info->ops->hardware_reset_post(chip);
++			if (err)
++				dev_err(chip->dev, "post-reset error: %d\n", err);
++		}
+ 	}
+ }
+ 
+@@ -4394,6 +4404,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4584,6 +4596,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4684,6 +4698,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4778,6 +4794,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4836,6 +4854,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4892,6 +4912,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -4951,6 +4973,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5004,6 +5028,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
+ 	.watchdog_ops = &mv88e6250_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
++	.hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
+ 	.reset = mv88e6250_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5051,6 +5077,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5110,6 +5138,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5156,6 +5186,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.set_egress_port = mv88e6095_g1_set_egress_port,
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+@@ -5206,6 +5238,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5361,6 +5395,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
+ 	.watchdog_ops = &mv88e6097_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6352_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5423,6 +5459,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5485,6 +5523,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+@@ -5550,6 +5590,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ 	.watchdog_ops = &mv88e6393x_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
++	.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
++	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
+ 	.reset = mv88e6352_g1_reset,
+ 	.rmu_disable = mv88e6390_g1_rmu_disable,
+ 	.atu_get_hash = mv88e6165_g1_atu_get_hash,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index 85eb293381a7e..c34caf9815c5c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -487,6 +487,12 @@ struct mv88e6xxx_ops {
+ 	int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ 	int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ 
++	/* Additional handlers to run before and after hard reset, to make sure
++	 * that the switch and EEPROM are in a good state.
++	 */
++	int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
++	int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
++
+ 	/* Switch Software Reset */
+ 	int (*reset)(struct mv88e6xxx_chip *chip);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 49444a72ff095..9820cd5967574 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
++static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
++{
++	/* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
++	int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
++	u16 val;
++	int err;
++
++	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
++	if (err)
++		return err;
++
++	val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
++
++	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
++	if (err)
++		return err;
++
++	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
++}
++
++/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
++static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	u16 val;
++	int err;
++
++	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
++	if (err < 0) {
++		dev_err(chip->dev, "Error reading status");
++		return err;
++	}
++
++	/* If the switch is still resetting, it may not
++	 * respond on the bus, and so the MDIO read returns
++	 * 0xffff. Differentiate between that and waiting for
++	 * the EEPROM to be done, signalled by bit 0 being set.
++	 */
++	if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
++		return -EBUSY;
++
++	return 0;
++}
++
++/* As the EEInt (EEPROM done) flag clears on read of the status register, this
++ * function must be called directly after a hard reset or EEPROM ReLoad request,
++ * or the done condition may have been missed.
++ */
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	const unsigned long timeout = jiffies + 1 * HZ;
++	int ret;
++
++	/* Wait up to 1 second for the switch to finish reading the
++	 * EEPROM.
++	 */
++	while (time_before(jiffies, timeout)) {
++		ret = mv88e6xxx_g1_is_eeprom_done(chip);
++		if (ret != -EBUSY)
++			return ret;
++	}
++
++	dev_err(chip->dev, "Timeout waiting for EEPROM done");
++	return -ETIMEDOUT;
++}
++
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
++{
++	int ret;
++
++	ret = mv88e6xxx_g1_is_eeprom_done(chip);
++	if (ret != -EBUSY)
++		return ret;
++
++	/* Pre-reset, we don't know the state of the switch - when
++	 * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
++	 * the switch is actually busy reading the EEPROM, or because
++	 * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
++	 * status register read already.
++	 *
++	 * To account for the latter case, trigger another EEPROM reload for
++	 * another chance at seeing the done flag.
++	 */
++	ret = mv88e6250_g1_eeprom_reload(chip);
++	if (ret)
++		return ret;
++
++	return mv88e6xxx_g1_wait_eeprom_done(chip);
++}
++
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 1095261f5b490..3dbb7a1b8fe11 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
++int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
++int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
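mv88e6xxx_g1_wait_eeprom_done() above is a jiffies-bounded poll: keep calling a readiness check until it stops returning -EBUSY, and give up after one second. The shape of that loop, reduced to a hedged sketch (hypothetical callback, kernel-module context assumed):

#include <linux/errno.h>
#include <linux/jiffies.h>

/* check() follows the same contract as mv88e6xxx_g1_is_eeprom_done()
 * in the hunk above: 0 when done, -EBUSY while waiting, other
 * negative codes on a hard error.
 */
static int demo_wait_ready(int (*check)(void *priv), void *priv)
{
	const unsigned long timeout = jiffies + 1 * HZ;	/* 1 s budget */
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = check(priv);
		if (ret != -EBUSY)
			return ret;	/* success (0) or hard error */
	}
	return -ETIMEDOUT;
}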
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 705c3eb19cd3f..d1fbadbf86d4a 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ {
+ 	struct gemini_ethernet_port *port = netdev_priv(netdev);
+ 	struct gemini_ethernet *geth = port->geth;
++	unsigned long flags;
+ 	u32 val, mask;
+ 
+ 	netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ 
++	spin_lock_irqsave(&geth->irq_lock, flags);
++
+ 	mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+ 
+ 	if (en)
+@@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
+ 	val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ 	val = en ? val | mask : val & ~mask;
+ 	writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
++
++	spin_unlock_irqrestore(&geth->irq_lock, flags);
+ }
+ 
+ static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+@@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+ 	union gmac_rxdesc_3 word3;
+ 	struct page *page = NULL;
+ 	unsigned int page_offs;
++	unsigned long flags;
+ 	unsigned short r, w;
+ 	union dma_rwptr rw;
+ 	dma_addr_t mapping;
+ 	int frag_nr = 0;
+ 
++	spin_lock_irqsave(&geth->irq_lock, flags);
+ 	rw.bits32 = readl(ptr_reg);
+ 	/* Reset interrupt as all packages until here are taken into account */
+ 	writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ 	       geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
++	spin_unlock_irqrestore(&geth->irq_lock, flags);
++
+ 	r = rw.bits.rptr;
+ 	w = rw.bits.wptr;
+ 
+@@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
+ 		gmac_update_hw_stats(netdev);
+ 
+ 	if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
++		spin_lock(&geth->irq_lock);
+ 		writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ 		       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+-
+-		spin_lock(&geth->irq_lock);
+ 		u64_stats_update_begin(&port->ir_stats_syncp);
+ 		++port->stats.rx_fifo_errors;
+ 		u64_stats_update_end(&port->ir_stats_syncp);
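All three gemini hunks close the same race: the interrupt-enable register was updated with an unlocked read-modify-write, so concurrent enable/disable paths could clobber each other's bits. Serializing the RMW under irq_lock with the irqsave variant fixes it for both task and interrupt context. The general pattern, sketched with hypothetical register and device names:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_IRQ_ENABLE_REG	0x0c	/* illustrative offset */

struct demo_dev {
	void __iomem *base;
	spinlock_t irq_lock;		/* serializes irq-enable RMW */
};

static void demo_irq_mask_set(struct demo_dev *d, u32 mask, bool en)
{
	unsigned long flags;
	u32 val;

	/* irqsave variant: callers run in task context, but the same
	 * register is also touched from the hard-irq handler.
	 */
	spin_lock_irqsave(&d->irq_lock, flags);
	val = readl(d->base + DEMO_IRQ_ENABLE_REG);
	val = en ? (val | mask) : (val & ~mask);
	writel(val, d->base + DEMO_IRQ_ENABLE_REG);
	spin_unlock_irqrestore(&d->irq_lock, flags);
}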
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 9f07f4947b631..5c45f42232d32 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ 	if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ 	    priv->num_tx_rings) {
+ 		NL_SET_ERR_MSG_FMT_MOD(extack,
+-				       "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
++				       "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
+ 				       num_xdp_tx_queues,
+ 				       priv->min_num_stack_tx_queues,
+ 				       priv->num_tx_rings);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 8bd213da8fb6f..a72d8a2eb0b31 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3674,29 +3674,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-/**
+- * fec_poll_controller - FEC Poll controller function
+- * @dev: The FEC network adapter
+- *
+- * Polled functionality used by netconsole and others in non interrupt mode
+- *
+- */
+-static void fec_poll_controller(struct net_device *dev)
+-{
+-	int i;
+-	struct fec_enet_private *fep = netdev_priv(dev);
+-
+-	for (i = 0; i < FEC_IRQ_NUM; i++) {
+-		if (fep->irq[i] > 0) {
+-			disable_irq(fep->irq[i]);
+-			fec_enet_interrupt(fep->irq[i], dev);
+-			enable_irq(fep->irq[i]);
+-		}
+-	}
+-}
+-#endif
+-
+ static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+ 	netdev_features_t features)
+ {
+@@ -4003,9 +3980,6 @@ static const struct net_device_ops fec_netdev_ops = {
+ 	.ndo_tx_timeout		= fec_timeout,
+ 	.ndo_set_mac_address	= fec_set_mac_address,
+ 	.ndo_eth_ioctl		= phy_do_ioctl_running,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-	.ndo_poll_controller	= fec_poll_controller,
+-#endif
+ 	.ndo_set_features	= fec_set_features,
+ 	.ndo_bpf		= fec_enet_bpf,
+ 	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
+diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
+index fc91c4d411863..4df561d64bc38 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
+@@ -1424,14 +1424,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ 		goto exit;
+ 	}
+ 
+-	conf_idx = le32_to_cpu(seg->signed_seg_idx);
+-	start = le32_to_cpu(seg->signed_buf_start);
+ 	count = le32_to_cpu(seg->signed_buf_count);
+-
+ 	state = ice_download_pkg_sig_seg(hw, seg);
+-	if (state)
++	if (state || !count)
+ 		goto exit;
+ 
++	conf_idx = le32_to_cpu(seg->signed_seg_idx);
++	start = le32_to_cpu(seg->signed_buf_start);
++
+ 	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ 					    count);
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index 986d429d11755..6972d728431cb 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -376,7 +376,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
+ 			    new_tx_count);
+ 
+ 	if (new_tx_count == vport->txq_desc_count &&
+-	    new_rx_count == vport->rxq_desc_count)
++	    new_rx_count == vport->rxq_desc_count &&
++	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
+ 		goto unlock_mutex;
+ 
+ 	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index caa13b9cedff0..d7d73295f0dc4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
+ 	.tx_irq_mask		= 0x461c,
+ 	.tx_irq_status		= 0x4618,
+ 	.pdma = {
+-		.rx_ptr		= 0x6100,
+-		.rx_cnt_cfg	= 0x6104,
+-		.pcrx_ptr	= 0x6108,
+-		.glo_cfg	= 0x6204,
+-		.rst_idx	= 0x6208,
+-		.delay_irq	= 0x620c,
+-		.irq_status	= 0x6220,
+-		.irq_mask	= 0x6228,
+-		.adma_rx_dbg0	= 0x6238,
+-		.int_grp	= 0x6250,
++		.rx_ptr		= 0x4100,
++		.rx_cnt_cfg	= 0x4104,
++		.pcrx_ptr	= 0x4108,
++		.glo_cfg	= 0x4204,
++		.rst_idx	= 0x4208,
++		.delay_irq	= 0x420c,
++		.irq_status	= 0x4220,
++		.irq_mask	= 0x4228,
++		.adma_rx_dbg0	= 0x4238,
++		.int_grp	= 0x4250,
+ 	},
+ 	.qdma = {
+ 		.qtx_cfg	= 0x4400,
+@@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ 	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ 	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ 	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+-	if (mtk_is_netsys_v2_or_greater(eth)) {
++	if (mtk_is_netsys_v3_or_greater(eth)) {
+ 		rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ 		rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ 	}
+@@ -1139,7 +1139,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
+ 		eth->scratch_ring = eth->sram_base;
+ 	else
+ 		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+-						       cnt * soc->txrx.txd_size,
++						       cnt * soc->tx.desc_size,
+ 						       &eth->phy_scratch_ring,
+ 						       GFP_KERNEL);
+ 	if (unlikely(!eth->scratch_ring))
+@@ -1155,17 +1155,17 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
+ 	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ 		return -ENOMEM;
+ 
+-	phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+ 
+ 	for (i = 0; i < cnt; i++) {
+ 		dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ 		struct mtk_tx_dma_v2 *txd;
+ 
+-		txd = eth->scratch_ring + i * soc->txrx.txd_size;
++		txd = eth->scratch_ring + i * soc->tx.desc_size;
+ 		txd->txd1 = addr;
+ 		if (i < cnt - 1)
+ 			txd->txd2 = eth->phy_scratch_ring +
+-				    (i + 1) * soc->txrx.txd_size;
++				    (i + 1) * soc->tx.desc_size;
+ 
+ 		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ 		if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+@@ -1416,7 +1416,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 	if (itxd == ring->last_free)
+ 		return -ENOMEM;
+ 
+-	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ 	memset(itx_buf, 0, sizeof(*itx_buf));
+ 
+ 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1457,7 +1457,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 
+ 			memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ 			txd_info.size = min_t(unsigned int, frag_size,
+-					      soc->txrx.dma_max_len);
++					      soc->tx.dma_max_len);
+ 			txd_info.qid = queue;
+ 			txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ 					!(frag_size - txd_info.size);
+@@ -1470,7 +1470,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 			mtk_tx_set_dma_desc(dev, txd, &txd_info);
+ 
+ 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-						    soc->txrx.txd_size);
++						    soc->tx.desc_size);
+ 			if (new_desc)
+ 				memset(tx_buf, 0, sizeof(*tx_buf));
+ 			tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1513,7 +1513,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 	} else {
+ 		int next_idx;
+ 
+-		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++		next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+ 					 ring->dma_size);
+ 		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ 	}
+@@ -1522,7 +1522,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ 
+ err_dma:
+ 	do {
+-		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ 
+ 		/* unmap dma */
+ 		mtk_tx_unmap(eth, tx_buf, NULL, false);
+@@ -1547,7 +1547,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
+ 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 			frag = &skb_shinfo(skb)->frags[i];
+ 			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+-					       eth->soc->txrx.dma_max_len);
++					       eth->soc->tx.dma_max_len);
+ 		}
+ 	} else {
+ 		nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1654,7 +1654,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+ 
+ 		ring = &eth->rx_ring[i];
+ 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ 		if (rxd->rxd2 & RX_DMA_DONE) {
+ 			ring->calc_idx_update = true;
+ 			return ring;
+@@ -1822,7 +1822,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ 	}
+ 	htxd = txd;
+ 
+-	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+ 	memset(tx_buf, 0, sizeof(*tx_buf));
+ 	htx_buf = tx_buf;
+ 
+@@ -1841,7 +1841,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ 				goto unmap;
+ 
+ 			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-						    soc->txrx.txd_size);
++						    soc->tx.desc_size);
+ 			memset(tx_buf, 0, sizeof(*tx_buf));
+ 			n_desc++;
+ 		}
+@@ -1879,7 +1879,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ 	} else {
+ 		int idx;
+ 
+-		idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++		idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+ 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ 			MT7628_TX_CTX_IDX0);
+ 	}
+@@ -1890,7 +1890,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ 
+ unmap:
+ 	while (htxd != txd) {
+-		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+ 		mtk_tx_unmap(eth, tx_buf, NULL, false);
+ 
+ 		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2021,14 +2021,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 			goto rx_done;
+ 
+ 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-		rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ 		data = ring->data[idx];
+ 
+ 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
+ 			break;
+ 
+ 		/* find out which mac the packet comes from. values start at 1 */
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+ 
+ 			switch (val) {
+@@ -2140,7 +2140,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 		skb->dev = netdev;
+ 		bytes += skb->len;
+ 
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ 			hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ 			if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2156,7 +2156,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 			rxdcsum = &trxd.rxd4;
+ 		}
+ 
+-		if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++		if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+ 			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 		else
+ 			skb_checksum_none_assert(skb);
+@@ -2280,7 +2280,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+ 			break;
+ 
+ 		tx_buf = mtk_desc_to_tx_buf(ring, desc,
+-					    eth->soc->txrx.txd_size);
++					    eth->soc->tx.desc_size);
+ 		if (!tx_buf->data)
+ 			break;
+ 
+@@ -2331,7 +2331,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+ 		}
+ 		mtk_tx_unmap(eth, tx_buf, &bq, true);
+ 
+-		desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++		desc = ring->dma + cpu * eth->soc->tx.desc_size;
+ 		ring->last_free = desc;
+ 		atomic_inc(&ring->free_count);
+ 
+@@ -2421,7 +2421,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
+ 	do {
+ 		int rx_done;
+ 
+-		mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++		mtk_w32(eth, eth->soc->rx.irq_done_mask,
+ 			reg_map->pdma.irq_status);
+ 		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ 		rx_done_total += rx_done;
+@@ -2437,10 +2437,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
+ 			return budget;
+ 
+ 	} while (mtk_r32(eth, reg_map->pdma.irq_status) &
+-		 eth->soc->txrx.rx_irq_done_mask);
++		 eth->soc->rx.irq_done_mask);
+ 
+ 	if (napi_complete_done(napi, rx_done_total))
+-		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ 
+ 	return rx_done_total;
+ }
+@@ -2449,7 +2449,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
+ {
+ 	const struct mtk_soc_data *soc = eth->soc;
+ 	struct mtk_tx_ring *ring = &eth->tx_ring;
+-	int i, sz = soc->txrx.txd_size;
++	int i, sz = soc->tx.desc_size;
+ 	struct mtk_tx_dma_v2 *txd;
+ 	int ring_size;
+ 	u32 ofs, val;
+@@ -2572,14 +2572,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
+ 	}
+ 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * soc->txrx.txd_size,
++				  ring->dma_size * soc->tx.desc_size,
+ 				  ring->dma, ring->phys);
+ 		ring->dma = NULL;
+ 	}
+ 
+ 	if (ring->dma_pdma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * soc->txrx.txd_size,
++				  ring->dma_size * soc->tx.desc_size,
+ 				  ring->dma_pdma, ring->phys_pdma);
+ 		ring->dma_pdma = NULL;
+ 	}
+@@ -2634,15 +2634,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ 	    rx_flag != MTK_RX_FLAGS_NORMAL) {
+ 		ring->dma = dma_alloc_coherent(eth->dma_dev,
+-					       rx_dma_size * eth->soc->txrx.rxd_size,
+-					       &ring->phys, GFP_KERNEL);
++				rx_dma_size * eth->soc->rx.desc_size,
++				&ring->phys, GFP_KERNEL);
+ 	} else {
+ 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+ 
+ 		ring->dma = tx_ring->dma + tx_ring_size *
+-			    eth->soc->txrx.txd_size * (ring_no + 1);
++			    eth->soc->tx.desc_size * (ring_no + 1);
+ 		ring->phys = tx_ring->phys + tx_ring_size *
+-			     eth->soc->txrx.txd_size * (ring_no + 1);
++			     eth->soc->tx.desc_size * (ring_no + 1);
+ 	}
+ 
+ 	if (!ring->dma)
+@@ -2653,7 +2653,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ 		dma_addr_t dma_addr;
+ 		void *data;
+ 
+-		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++		rxd = ring->dma + i * eth->soc->rx.desc_size;
+ 		if (ring->page_pool) {
+ 			data = mtk_page_pool_get_buff(ring->page_pool,
+ 						      &dma_addr, GFP_KERNEL);
+@@ -2690,7 +2690,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ 
+ 		rxd->rxd3 = 0;
+ 		rxd->rxd4 = 0;
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
++		if (mtk_is_netsys_v3_or_greater(eth)) {
+ 			rxd->rxd5 = 0;
+ 			rxd->rxd6 = 0;
+ 			rxd->rxd7 = 0;
+@@ -2744,7 +2744,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
+ 			if (!ring->data[i])
+ 				continue;
+ 
+-			rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++			rxd = ring->dma + i * eth->soc->rx.desc_size;
+ 			if (!rxd->rxd1)
+ 				continue;
+ 
+@@ -2761,7 +2761,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
+ 
+ 	if (!in_sram && ring->dma) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  ring->dma_size * eth->soc->txrx.rxd_size,
++				  ring->dma_size * eth->soc->rx.desc_size,
+ 				  ring->dma, ring->phys);
+ 		ring->dma = NULL;
+ 	}
+@@ -3124,7 +3124,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
+ 			netdev_reset_queue(eth->netdev[i]);
+ 	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ 		dma_free_coherent(eth->dma_dev,
+-				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++				  MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+ 				  eth->scratch_ring, eth->phy_scratch_ring);
+ 		eth->scratch_ring = NULL;
+ 		eth->phy_scratch_ring = 0;
+@@ -3174,7 +3174,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+ 
+ 	eth->rx_events++;
+ 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 		__napi_schedule(&eth->rx_napi);
+ 	}
+ 
+@@ -3200,9 +3200,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+ 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ 
+ 	if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+-	    eth->soc->txrx.rx_irq_done_mask) {
++	    eth->soc->rx.irq_done_mask) {
+ 		if (mtk_r32(eth, reg_map->pdma.irq_status) &
+-		    eth->soc->txrx.rx_irq_done_mask)
++		    eth->soc->rx.irq_done_mask)
+ 			mtk_handle_irq_rx(irq, _eth);
+ 	}
+ 	if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3220,10 +3220,10 @@ static void mtk_poll_controller(struct net_device *dev)
+ 	struct mtk_eth *eth = mac->hw;
+ 
+ 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 	mtk_handle_irq_rx(eth->irq[2], dev);
+ 	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+ 
+@@ -3387,7 +3387,7 @@ static int mtk_open(struct net_device *dev)
+ 		napi_enable(&eth->tx_napi);
+ 		napi_enable(&eth->rx_napi);
+ 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-		mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+ 		refcount_set(&eth->dma_refcnt, 1);
+ 	}
+ 	else
+@@ -3471,7 +3471,7 @@ static int mtk_stop(struct net_device *dev)
+ 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ 
+ 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ 	napi_disable(&eth->tx_napi);
+ 	napi_disable(&eth->rx_napi);
+ 
+@@ -3893,7 +3893,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ 	else
+ 		mtk_hw_reset(eth);
+ 
+-	if (mtk_is_netsys_v2_or_greater(eth)) {
++	if (mtk_is_netsys_v3_or_greater(eth)) {
+ 		/* Set FE to PDMAv2 if necessary */
+ 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ 		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
+@@ -3947,9 +3947,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+ 
+ 	/* FE int grouping */
+ 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+ 	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+-	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++	mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+ 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ 
+ 	if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5039,11 +5039,15 @@ static const struct mtk_soc_data mt2701_data = {
+ 	.required_clks = MT7623_CLKS_BITMAP,
+ 	.required_pctl = true,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5059,11 +5063,15 @@ static const struct mtk_soc_data mt7621_data = {
+ 	.offload_version = 1,
+ 	.hash_offset = 2,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5081,11 +5089,15 @@ static const struct mtk_soc_data mt7622_data = {
+ 	.hash_offset = 2,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5102,11 +5114,15 @@ static const struct mtk_soc_data mt7623_data = {
+ 	.hash_offset = 2,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ 	.disable_pll_modes = true,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5121,11 +5137,15 @@ static const struct mtk_soc_data mt7629_data = {
+ 	.required_pctl = false,
+ 	.has_accounting = true,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5143,14 +5163,18 @@ static const struct mtk_soc_data mt7981_data = {
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
+ };
+ 
+ static const struct mtk_soc_data mt7986_data = {
+@@ -5165,14 +5189,18 @@ static const struct mtk_soc_data mt7986_data = {
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
+ };
+ 
+ static const struct mtk_soc_data mt7988_data = {
+@@ -5187,11 +5215,15 @@ static const struct mtk_soc_data mt7988_data = {
+ 	.hash_offset = 4,
+ 	.has_accounting = true,
+ 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma_v2),
+-		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma_v2),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++		.dma_len_offset = 8,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma_v2),
++		.irq_done_mask = MTK_RX_DONE_INT_V2,
++		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
+ 	},
+@@ -5204,11 +5236,15 @@ static const struct mtk_soc_data rt5350_data = {
+ 	.required_clks = MT7628_CLKS_BITMAP,
+ 	.required_pctl = false,
+ 	.version = 1,
+-	.txrx = {
+-		.txd_size = sizeof(struct mtk_tx_dma),
+-		.rxd_size = sizeof(struct mtk_rx_dma),
+-		.rx_irq_done_mask = MTK_RX_DONE_INT,
+-		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++	.tx = {
++		.desc_size = sizeof(struct mtk_tx_dma),
++		.dma_max_len = MTK_TX_DMA_BUF_LEN,
++		.dma_len_offset = 16,
++	},
++	.rx = {
++		.desc_size = sizeof(struct mtk_rx_dma),
++		.irq_done_mask = MTK_RX_DONE_INT,
++		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 9ae3b8a71d0e6..39b50de1decbf 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -327,8 +327,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU	BIT(31)
+ #define TX_DMA_LS0		BIT(30)
+-#define TX_DMA_PLEN0(x)		(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x)		((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x)		(((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x)		((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC		BIT(14)
+ #define TX_DMA_PQID		GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK	GENMASK(3, 0)
+@@ -348,8 +348,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE		BIT(31)
+ #define RX_DMA_LSO		BIT(30)
+-#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x)	(((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x)	(((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG		BIT(15)
+ #define RX_DMA_ADDR64_MASK	GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
+  * @foe_entry_size		Foe table entry size.
+  * @has_accounting		Bool indicating support for accounting of
+  *				offloaded flows.
+- * @txd_size			Tx DMA descriptor size.
+- * @rxd_size			Rx DMA descriptor size.
+- * @rx_irq_done_mask		Rx irq done register mask.
+- * @rx_dma_l4_valid		Rx DMA valid register mask.
++ * @desc_size			Tx/Rx DMA descriptor size.
++ * @irq_done_mask		Rx irq done register mask.
++ * @dma_l4_valid		Rx DMA valid register mask.
+  * @dma_max_len			Max DMA tx/rx buffer length.
+  * @dma_len_offset		Tx/Rx DMA length field offset.
+  */
+@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
+ 	bool		has_accounting;
+ 	bool		disable_pll_modes;
+ 	struct {
+-		u32	txd_size;
+-		u32	rxd_size;
+-		u32	rx_irq_done_mask;
+-		u32	rx_dma_l4_valid;
++		u32	desc_size;
+ 		u32	dma_max_len;
+ 		u32	dma_len_offset;
+-	} txrx;
++	} tx;
++	struct {
++		u32	desc_size;
++		u32	irq_done_mask;
++		u32	dma_l4_valid;
++		u32	dma_max_len;
++		u32	dma_len_offset;
++	} rx;
+ };
+ 
+ #define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)
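The mtk_eth_soc.h hunk above is the heart of the MediaTek series: the shared txrx block becomes separate tx and rx sub-structs, which is what lets the mt7981/mt7986 entries earlier in the patch keep v2-sized TX descriptors while describing their RX ring with the v1 layout. A toy before/after of the refactor (field set trimmed for brevity):

#include <stdint.h>

/* Before: one block, so TX and RX were forced to share every value. */
struct soc_data_old {
	struct {
		uint32_t desc_size;
		uint32_t dma_max_len;
		uint32_t dma_len_offset;
	} txrx;
};

/* After: per-direction blocks; desc_size and friends may now differ,
 * and RX-only fields no longer sit on the TX side.
 */
struct soc_data_new {
	struct {
		uint32_t desc_size;
		uint32_t dma_max_len;
		uint32_t dma_len_offset;
	} tx;
	struct {
		uint32_t desc_size;
		uint32_t irq_done_mask;
		uint32_t dma_l4_valid;
		uint32_t dma_max_len;
		uint32_t dma_len_offset;
	} rx;
};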
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 4957412ff1f65..20768ef2e9d2b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work)
+ 	bool poll_cmd = ent->polling;
+ 	struct mlx5_cmd_layout *lay;
+ 	struct mlx5_core_dev *dev;
+-	unsigned long cb_timeout;
+-	struct semaphore *sem;
++	unsigned long timeout;
+ 	unsigned long flags;
+ 	int alloc_ret;
+ 	int cmd_mode;
+ 
++	complete(&ent->handling);
++
+ 	dev = container_of(cmd, struct mlx5_core_dev, cmd);
+-	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
++	timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ 
+-	complete(&ent->handling);
+-	sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
+-	down(sem);
+ 	if (!ent->page_queue) {
++		if (down_timeout(&cmd->vars.sem, timeout)) {
++			mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
++				       mlx5_command_str(ent->op), ent->op);
++			if (ent->callback) {
++				ent->callback(-EBUSY, ent->context);
++				mlx5_free_cmd_msg(dev, ent->out);
++				free_msg(dev, ent->in);
++				cmd_ent_put(ent);
++			} else {
++				ent->ret = -EBUSY;
++				complete(&ent->done);
++			}
++			complete(&ent->slotted);
++			return;
++		}
+ 		alloc_ret = cmd_alloc_index(cmd, ent);
+ 		if (alloc_ret < 0) {
+ 			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
+@@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work)
+ 				ent->ret = -EAGAIN;
+ 				complete(&ent->done);
+ 			}
+-			up(sem);
++			up(&cmd->vars.sem);
+ 			return;
+ 		}
+ 	} else {
++		down(&cmd->vars.pages_sem);
+ 		ent->idx = cmd->vars.max_reg_cmds;
+ 		spin_lock_irqsave(&cmd->alloc_lock, flags);
+ 		clear_bit(ent->idx, &cmd->vars.bitmask);
+@@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work)
+ 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ 	}
+ 
++	complete(&ent->slotted);
++
+ 	lay = get_inst(cmd, ent->idx);
+ 	ent->lay = lay;
+ 	memset(lay, 0, sizeof(*lay));
+@@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 	ent->ts1 = ktime_get_ns();
+ 	cmd_mode = cmd->mode;
+ 
+-	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
++	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
+ 		cmd_ent_get(ent);
+ 	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ 
+@@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ 		ent->ret = -ECANCELED;
+ 		goto out_err;
+ 	}
++
++	wait_for_completion(&ent->slotted);
++
+ 	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
+ 		wait_for_completion(&ent->done);
+ 	else if (!wait_for_completion_timeout(&ent->done, timeout))
+@@ -1157,6 +1176,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+ 	} else if (err == -ECANCELED) {
+ 		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+ 			       mlx5_command_str(ent->op), ent->op);
++	} else if (err == -EBUSY) {
++		mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
++			       mlx5_command_str(ent->op), ent->op);
+ 	}
+ 	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ 		      err, deliv_status_to_str(ent->status), ent->status);
+@@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 	ent->polling = force_polling;
+ 
+ 	init_completion(&ent->handling);
++	init_completion(&ent->slotted);
+ 	if (!callback)
+ 		init_completion(&ent->done);
+ 
+@@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 		return 0; /* mlx5_cmd_comp_handler() will put(ent) */
+ 
+ 	err = wait_func(dev, ent);
+-	if (err == -ETIMEDOUT || err == -ECANCELED)
++	if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
+ 		goto out_free;
+ 
+ 	ds = ent->ts2 - ent->ts1;
+@@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
+ 	dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ 	eqe = data;
+ 
++	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
++		return NOTIFY_DONE;
++
+ 	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+ 
+ 	return NOTIFY_OK;
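The cmd_work_handler() rework above swaps an unconditional down() for down_timeout(), so a stalled command queue can no longer park the work item forever: on timeout the entry fails with -EBUSY, and the new slotted completion lets wait_func() proceed instead of blocking on a slot that was never taken. The semaphore side in isolation (hypothetical names, kernel-module context assumed):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

/* Claim a command slot, but give up after `ms` milliseconds instead
 * of sleeping indefinitely the way a plain down() would.
 */
static int demo_claim_slot(struct semaphore *slots, unsigned int ms)
{
	if (down_timeout(slots, msecs_to_jiffies(ms)))
		return -EBUSY;	/* surface the timeout, as the hunk does */
	return 0;		/* slot held; release later with up() */
}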
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 06592b9f04242..9240cfe25d102 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
+ 			      struct mlx5e_xsk_param *xsk,
+ 			      struct mlx5_core_dev *mdev)
+ {
+-	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+-	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
++	/* AF_XDP doesn't support frames larger than PAGE_SIZE,
++	 * and xsk->chunk_size is limited to 65535 bytes.
++	 */
++	if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ 		mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+ 			      MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
+ 		return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 319930c04093b..64497b6eebd36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -6058,7 +6058,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
+ 	return 0;
+ }
+ 
+-static int _mlx5e_suspend(struct auxiliary_device *adev)
++static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
+ {
+ 	struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ 	struct mlx5e_priv *priv = mlx5e_dev->priv;
+@@ -6067,7 +6067,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
+ 	struct mlx5_core_dev *pos;
+ 	int i;
+ 
+-	if (!netif_device_present(netdev)) {
++	if (!pre_netdev_reg && !netif_device_present(netdev)) {
+ 		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ 			mlx5_sd_for_each_dev(i, mdev, pos)
+ 				mlx5e_destroy_mdev_resources(pos);
+@@ -6090,7 +6090,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 
+ 	actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ 	if (actual_adev)
+-		err = _mlx5e_suspend(actual_adev);
++		err = _mlx5e_suspend(actual_adev, false);
+ 
+ 	mlx5_sd_cleanup(mdev);
+ 	return err;
+@@ -6157,7 +6157,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
+ 	return 0;
+ 
+ err_resume:
+-	_mlx5e_suspend(adev);
++	_mlx5e_suspend(adev, true);
+ err_profile_cleanup:
+ 	profile->cleanup(priv);
+ err_destroy_netdev:
+@@ -6197,7 +6197,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
+ 	mlx5_core_uplink_netdev_set(mdev, NULL);
+ 	mlx5e_dcbnl_delete_app(priv);
+ 	unregister_netdev(priv->netdev);
+-	_mlx5e_suspend(adev);
++	_mlx5e_suspend(adev, false);
+ 	priv->profile->cleanup(priv);
+ 	mlx5e_destroy_netdev(priv);
+ 	mlx5e_devlink_port_unregister(mlx5e_dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+index 1b9bc32efd6fa..c5ea1d1d2b035 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
+ 				 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ 				 addr, vid, vport_num);
+ 			NL_SET_ERR_MSG_FMT_MOD(extack,
+-					       "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
++					       "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ 					       addr, vid, vport_num);
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 349e28a6dd8df..ef55674876cb4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -833,7 +833,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+ 					     struct mlx5_eswitch *slave_esw, int max_slaves);
+ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+ 					      struct mlx5_eswitch *slave_esw);
+-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
++int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
+ 
+ bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+ void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
+@@ -925,7 +925,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
+ static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
+ 
+ static inline int
+-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
++mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
+ {
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 844d3e3a65ddf..e8caf12f4c4f8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -2502,6 +2502,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+ 	esw_offloads_cleanup_reps(esw);
+ }
+ 
++static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
++				   struct mlx5_eswitch_rep *rep, u8 rep_type)
++{
++	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
++			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
++		return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
++
++	return 0;
++}
++
+ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
+ 				      struct mlx5_eswitch_rep *rep, u8 rep_type)
+ {
+@@ -2526,13 +2536,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+ 	int err;
+ 
+ 	rep = mlx5_eswitch_get_rep(esw, vport_num);
+-	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+-		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+-				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+-			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+-			if (err)
+-				goto err_reps;
+-		}
++	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
++		err = __esw_offloads_load_rep(esw, rep, rep_type);
++		if (err)
++			goto err_reps;
++	}
+ 
+ 	return 0;
+ 
+@@ -3277,7 +3285,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+ 		esw_vport_destroy_offloads_acl_tables(esw, vport);
+ }
+ 
+-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
++int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
+ {
+ 	struct mlx5_eswitch_rep *rep;
+ 	unsigned long i;
+@@ -3290,13 +3298,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+ 	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ 		return 0;
+ 
+-	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
++	ret = __esw_offloads_load_rep(esw, rep, REP_IB);
+ 	if (ret)
+ 		return ret;
+ 
+ 	mlx5_esw_for_each_rep(esw, i, rep) {
+ 		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
+-			mlx5_esw_offloads_rep_load(esw, rep->vport);
++			__esw_offloads_load_rep(esw, rep, REP_IB);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 69d482f7c5a29..37598d116f3b8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -814,7 +814,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
+ 	if (shared_fdb)
+ 		for (i = 0; i < ldev->ports; i++)
+ 			if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+-				mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++				mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
+ 
+ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+@@ -922,7 +922,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 			mlx5_rescan_drivers_locked(dev0);
+ 
+ 			for (i = 0; i < ldev->ports; i++) {
+-				err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++				err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ 				if (err)
+ 					break;
+ 			}
+@@ -933,7 +933,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 				mlx5_deactivate_lag(ldev);
+ 				mlx5_lag_add_devices(ldev);
+ 				for (i = 0; i < ldev->ports; i++)
+-					mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++					mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ 				mlx5_core_err(dev0, "Failed to enable lag\n");
+ 				return;
+ 			}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+index 82889f30506ea..571ea26edd0ca 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+@@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
+ 	dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ 	mlx5_rescan_drivers_locked(dev0);
+ 	for (i = 0; i < ldev->ports; i++) {
+-		err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++		err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ 		if (err)
+ 			goto err_rescan_drivers;
+ 	}
+@@ -113,7 +113,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
+ err_add_devices:
+ 	mlx5_lag_add_devices(ldev);
+ 	for (i = 0; i < ldev->ports; i++)
+-		mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
++		mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ 	mlx5_mpesw_metadata_cleanup(ldev);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 331ce47f51a17..6574c145dc1e2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1680,6 +1680,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 	int err;
+ 
++	devl_lock(devlink);
++	devl_register(devlink);
+ 	dev->state = MLX5_DEVICE_STATE_UP;
+ 	err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ 	if (err) {
+@@ -1693,27 +1695,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
+ 		goto query_hca_caps_err;
+ 	}
+ 
+-	devl_lock(devlink);
+-	devl_register(devlink);
+-
+ 	err = mlx5_devlink_params_register(priv_to_devlink(dev));
+ 	if (err) {
+ 		mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
+-		goto params_reg_err;
++		goto query_hca_caps_err;
+ 	}
+ 
+ 	devl_unlock(devlink);
+ 	return 0;
+ 
+-params_reg_err:
+-	devl_unregister(devlink);
+-	devl_unlock(devlink);
+ query_hca_caps_err:
+-	devl_unregister(devlink);
+-	devl_unlock(devlink);
+ 	mlx5_function_disable(dev, true);
+ out:
+ 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
++	devl_unregister(devlink);
++	devl_unlock(devlink);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index 7ebe712808275..b2986175d9afe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
+ 		goto remap_err;
+ 	}
+ 
++	/* Peer devlink logic expects to work on an unregistered devlink instance. */
++	err = mlx5_core_peer_devlink_set(sf_dev, devlink);
++	if (err) {
++		mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
++		goto peer_devlink_set_err;
++	}
++
+ 	if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
+ 		err = mlx5_init_one_light(mdev);
+ 	else
+@@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
+ 		goto init_one_err;
+ 	}
+ 
+-	err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+-	if (err) {
+-		mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+-		goto peer_devlink_set_err;
+-	}
+-
+ 	return 0;
+ 
+-peer_devlink_set_err:
+-	if (mlx5_dev_is_lightweight(sf_dev->mdev))
+-		mlx5_uninit_one_light(sf_dev->mdev);
+-	else
+-		mlx5_uninit_one(sf_dev->mdev);
+ init_one_err:
++peer_devlink_set_err:
+ 	iounmap(mdev->iseg);
+ remap_err:
+ 	mlx5_mdev_uninit(mdev);
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 2635ef8958c80..61d88207eed42 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, lan966x);
+ 	lan966x->dev = &pdev->dev;
+ 
+-	lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+-
+ 	if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ 		ether_addr_copy(lan966x->base_mac, mac_addr);
+ 	} else {
+@@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ 		return dev_err_probe(&pdev->dev, -ENODEV,
+ 				     "no ethernet-ports child found\n");
+ 
++	lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
++
+ 	/* init switch */
+ 	lan966x_init(lan966x);
+ 	lan966x_stats_init(lan966x);
+@@ -1257,6 +1257,8 @@ static int lan966x_probe(struct platform_device *pdev)
+ 	destroy_workqueue(lan966x->stats_queue);
+ 	mutex_destroy(&lan966x->stats_lock);
+ 
++	debugfs_remove_recursive(lan966x->debugfs_root);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+index 2729a2c5acf9c..ca814fe8a775b 100644
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -848,7 +848,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
+ 	}
+ 
+ 	if (!wait_for_completion_timeout(&ctx->comp_event,
+-					 (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
++					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
+ 		dev_err(hwc->dev, "HWC: Request timed out!\n");
+ 		err = -ETIMEDOUT;
+ 		goto out;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index c278f8893042b..8159b4c315b5d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1206,7 +1206,6 @@ static void qed_slowpath_task(struct work_struct *work)
+ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ {
+ 	struct qed_hwfn *hwfn;
+-	char name[NAME_SIZE];
+ 	int i;
+ 
+ 	if (IS_VF(cdev))
+@@ -1215,11 +1214,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
+ 	for_each_hwfn(cdev, i) {
+ 		hwfn = &cdev->hwfns[i];
+ 
+-		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+-			 cdev->pdev->bus->number,
+-			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
++		hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
++					 0, 0, cdev->pdev->bus->number,
++					 PCI_SLOT(cdev->pdev->devfn),
++					 hwfn->abs_pf_id);
+ 
+-		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ 		if (!hwfn->slowpath_wq) {
+ 			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ 			return -ENOMEM;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 0fc5fe564ae50..7df2017c9a8d6 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4335,11 +4335,11 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
+-	unsigned int frags = skb_shinfo(skb)->nr_frags;
+ 	struct rtl8169_private *tp = netdev_priv(dev);
+ 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ 	struct TxDesc *txd_first, *txd_last;
+ 	bool stop_queue, door_bell;
++	unsigned int frags;
+ 	u32 opts[2];
+ 
+ 	if (unlikely(!rtl_tx_slots_avail(tp))) {
+@@ -4362,6 +4362,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 
+ 	txd_first = tp->TxDescArray + entry;
+ 
++	frags = skb_shinfo(skb)->nr_frags;
+ 	if (frags) {
+ 		if (rtl8169_xmit_frags(tp, skb, opts, entry))
+ 			goto err_dma_1;
+@@ -4655,10 +4656,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ 		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ 	}
+ 
+-	if (napi_schedule_prep(&tp->napi)) {
+-		rtl_irq_disable(tp);
+-		__napi_schedule(&tp->napi);
+-	}
++	rtl_irq_disable(tp);
++	napi_schedule(&tp->napi);
+ out:
+ 	rtl_ack_events(tp, status);
+ 
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index 46eee747c6992..45ef5ac0788a8 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
+ 		writew(*wp++, a);
+ }
+ 
+-#define SMC_inw(a, r)		_swapw(readw((a) + (r)))
+-#define SMC_outw(lp, v, a, r)	writew(_swapw(v), (a) + (r))
++#define SMC_inw(a, r)		ioread16be((a) + (r))
++#define SMC_outw(lp, v, a, r)	iowrite16be(v, (a) + (r))
+ #define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
+ #define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index dddcaa9220cc3..64b21c83e2b84 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -261,6 +261,8 @@ struct stmmac_priv {
+ 	struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
+ 	struct stmmac_safety_stats sstats;
+ 	struct plat_stmmacenet_data *plat;
++	/* Protect est parameters */
++	struct mutex est_lock;
+ 	struct dma_features dma_cap;
+ 	struct stmmac_counters mmc;
+ 	int hw_cap_support;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+index e04830a3a1fb1..0c5aab6dd7a73 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -70,11 +70,11 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ 	/* If EST is enabled, disabled it before adjust ptp time. */
+ 	if (priv->plat->est && priv->plat->est->enable) {
+ 		est_rst = true;
+-		mutex_lock(&priv->plat->est->lock);
++		mutex_lock(&priv->est_lock);
+ 		priv->plat->est->enable = false;
+ 		stmmac_est_configure(priv, priv, priv->plat->est,
+ 				     priv->plat->clk_ptp_rate);
+-		mutex_unlock(&priv->plat->est->lock);
++		mutex_unlock(&priv->est_lock);
+ 	}
+ 
+ 	write_lock_irqsave(&priv->ptp_lock, flags);
+@@ -87,7 +87,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ 		ktime_t current_time_ns, basetime;
+ 		u64 cycle_time;
+ 
+-		mutex_lock(&priv->plat->est->lock);
++		mutex_lock(&priv->est_lock);
+ 		priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ 		current_time_ns = timespec64_to_ktime(current_time);
+ 		time.tv_nsec = priv->plat->est->btr_reserve[0];
+@@ -104,7 +104,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+ 		priv->plat->est->enable = true;
+ 		ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ 					   priv->plat->clk_ptp_rate);
+-		mutex_unlock(&priv->plat->est->lock);
++		mutex_unlock(&priv->est_lock);
+ 		if (ret)
+ 			netdev_err(priv->dev, "failed to configure EST\n");
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index cce00719937db..620c16e9be3a6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -1004,17 +1004,19 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 		if (!plat->est)
+ 			return -ENOMEM;
+ 
+-		mutex_init(&priv->plat->est->lock);
++		mutex_init(&priv->est_lock);
+ 	} else {
++		mutex_lock(&priv->est_lock);
+ 		memset(plat->est, 0, sizeof(*plat->est));
++		mutex_unlock(&priv->est_lock);
+ 	}
+ 
+ 	size = qopt->num_entries;
+ 
+-	mutex_lock(&priv->plat->est->lock);
++	mutex_lock(&priv->est_lock);
+ 	priv->plat->est->gcl_size = size;
+ 	priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+-	mutex_unlock(&priv->plat->est->lock);
++	mutex_unlock(&priv->est_lock);
+ 
+ 	for (i = 0; i < size; i++) {
+ 		s64 delta_ns = qopt->entries[i].interval;
+@@ -1045,7 +1047,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ 	}
+ 
+-	mutex_lock(&priv->plat->est->lock);
++	mutex_lock(&priv->est_lock);
+ 	/* Adjust for real system time */
+ 	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ 	current_time_ns = timespec64_to_ktime(current_time);
+@@ -1068,7 +1070,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 	tc_taprio_map_maxsdu_txq(priv, qopt);
+ 
+ 	if (fpe && !priv->dma_cap.fpesel) {
+-		mutex_unlock(&priv->plat->est->lock);
++		mutex_unlock(&priv->est_lock);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+@@ -1079,7 +1081,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 
+ 	ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ 				   priv->plat->clk_ptp_rate);
+-	mutex_unlock(&priv->plat->est->lock);
++	mutex_unlock(&priv->est_lock);
+ 	if (ret) {
+ 		netdev_err(priv->dev, "failed to configure EST\n");
+ 		goto disable;
+@@ -1096,7 +1098,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 
+ disable:
+ 	if (priv->plat->est) {
+-		mutex_lock(&priv->plat->est->lock);
++		mutex_lock(&priv->est_lock);
+ 		priv->plat->est->enable = false;
+ 		stmmac_est_configure(priv, priv, priv->plat->est,
+ 				     priv->plat->clk_ptp_rate);
+@@ -1105,7 +1107,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
+ 			priv->xstats.max_sdu_txq_drop[i] = 0;
+ 			priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ 		}
+-		mutex_unlock(&priv->plat->est->lock);
++		mutex_unlock(&priv->est_lock);
+ 	}
+ 
+ 	priv->plat->fpe_cfg->enable = false;
+diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
+index 9bd1df8308d24..d3a2fbb14140e 100644
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-static void gem_poll_controller(struct net_device *dev)
+-{
+-	struct gem *gp = netdev_priv(dev);
+-
+-	disable_irq(gp->pdev->irq);
+-	gem_interrupt(gp->pdev->irq, dev);
+-	enable_irq(gp->pdev->irq);
+-}
+-#endif
+-
+ static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct gem *gp = netdev_priv(dev);
+@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
+ 	.ndo_change_mtu		= gem_change_mtu,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_set_mac_address    = gem_set_mac_address,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-	.ndo_poll_controller    = gem_poll_controller,
+-#endif
+ };
+ 
+ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index b69af69a1ccd3..61ebac7ba6580 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -2152,7 +2152,12 @@ static int prueth_probe(struct platform_device *pdev)
+ 
+ 		prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+ 
+-		emac_phy_connect(prueth->emac[PRUETH_MAC0]);
++		ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
++		if (ret) {
++			dev_err(dev,
++				"can't connect to MII0 PHY, error -%d", ret);
++			goto netdev_unregister;
++		}
+ 		phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
+ 	}
+ 
+@@ -2164,7 +2169,12 @@ static int prueth_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+-		emac_phy_connect(prueth->emac[PRUETH_MAC1]);
++		ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
++		if (ret) {
++			dev_err(dev,
++				"can't connect to MII1 PHY, error %d", ret);
++			goto netdev_unregister;
++		}
+ 		phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index 945c13d1a9829..c09a6f7445754 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1958,6 +1958,8 @@ int wx_sw_init(struct wx *wx)
+ 		return -ENOMEM;
+ 	}
+ 
++	bitmap_zero(wx->state, WX_STATE_NBITS);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(wx_sw_init);
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 6fae161cbcb82..07ba3a270a14f 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -2690,15 +2690,63 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
+ 		wx->rss_enabled = false;
+ 	}
+ 
+-	if (changed &
+-	    (NETIF_F_HW_VLAN_CTAG_RX |
+-	     NETIF_F_HW_VLAN_STAG_RX))
++	netdev->features = features;
++
++	if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
++		wx->do_reset(netdev);
++	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
+ 		wx_set_rx_mode(netdev);
+ 
+-	return 1;
++	return 0;
+ }
+ EXPORT_SYMBOL(wx_set_features);
+ 
++#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
++					 NETIF_F_HW_VLAN_STAG_RX)
++
++#define NETIF_VLAN_INSERTION_FEATURES	(NETIF_F_HW_VLAN_CTAG_TX | \
++					 NETIF_F_HW_VLAN_STAG_TX)
++
++#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
++					 NETIF_F_HW_VLAN_STAG_FILTER)
++
++netdev_features_t wx_fix_features(struct net_device *netdev,
++				  netdev_features_t features)
++{
++	netdev_features_t changed = netdev->features ^ features;
++	struct wx *wx = netdev_priv(netdev);
++
++	if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
++		if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
++		    (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
++			features &= ~NETIF_VLAN_STRIPPING_FEATURES;
++			features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
++			wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
++		}
++	}
++
++	if (changed & NETIF_VLAN_INSERTION_FEATURES) {
++		if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
++		    (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
++			features &= ~NETIF_VLAN_INSERTION_FEATURES;
++			features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
++			wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
++		}
++	}
++
++	if (changed & NETIF_VLAN_FILTERING_FEATURES) {
++		if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
++		    (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
++			features &= ~NETIF_VLAN_FILTERING_FEATURES;
++			features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
++			wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
++		}
++	}
++
++	return features;
++}
++EXPORT_SYMBOL(wx_fix_features);
++
+ void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ 		 u32 new_rx_count, struct wx_ring *temp_ring)
+ {
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+index ec909e876720c..c41b29ea812ff 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+@@ -30,6 +30,8 @@ int wx_setup_resources(struct wx *wx);
+ void wx_get_stats64(struct net_device *netdev,
+ 		    struct rtnl_link_stats64 *stats);
+ int wx_set_features(struct net_device *netdev, netdev_features_t features);
++netdev_features_t wx_fix_features(struct net_device *netdev,
++				  netdev_features_t features);
+ void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ 		 u32 new_rx_count, struct wx_ring *temp_ring);
+ 
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index 1fdeb464d5f4a..5aaf7b1fa2db9 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -982,8 +982,13 @@ struct wx_hw_stats {
+ 	u64 qmprc;
+ };
+ 
++enum wx_state {
++	WX_STATE_RESETTING,
++	WX_STATE_NBITS,		/* must be last */
++};
+ struct wx {
+ 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
++	DECLARE_BITMAP(state, WX_STATE_NBITS);
+ 
+ 	void *priv;
+ 	u8 __iomem *hw_addr;
+@@ -1071,6 +1076,8 @@ struct wx {
+ 	u64 hw_csum_rx_good;
+ 	u64 hw_csum_rx_error;
+ 	u64 alloc_rx_buff_failed;
++
++	void (*do_reset)(struct net_device *netdev);
+ };
+ 
+ #define WX_INTR_ALL (~0ULL)
+@@ -1131,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
+ 	return container_of(config, struct wx, phylink_config);
+ }
+ 
++static inline int wx_set_state_reset(struct wx *wx)
++{
++	u8 timeout = 50;
++
++	while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
++		timeout--;
++		if (!timeout)
++			return -EBUSY;
++
++		usleep_range(1000, 2000);
++	}
++
++	return 0;
++}
++
+ #endif /* _WX_TYPE_H_ */
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+index 786a652ae64f3..46a5a3e952021 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
+ 	struct wx *wx = netdev_priv(netdev);
+ 	u32 new_rx_count, new_tx_count;
+ 	struct wx_ring *temp_ring;
+-	int i;
++	int i, err = 0;
+ 
+ 	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ 	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
+@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
+ 	    new_rx_count == wx->rx_ring_count)
+ 		return 0;
+ 
++	err = wx_set_state_reset(wx);
++	if (err)
++		return err;
++
+ 	if (!netif_running(wx->netdev)) {
+ 		for (i = 0; i < wx->num_tx_queues; i++)
+ 			wx->tx_ring[i]->count = new_tx_count;
+@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
+ 		wx->tx_ring_count = new_tx_count;
+ 		wx->rx_ring_count = new_rx_count;
+ 
+-		return 0;
++		goto clear_reset;
+ 	}
+ 
+ 	/* allocate temporary buffer to store rings in */
+ 	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ 	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+-	if (!temp_ring)
+-		return -ENOMEM;
++	if (!temp_ring) {
++		err = -ENOMEM;
++		goto clear_reset;
++	}
+ 
+ 	ngbe_down(wx);
+ 
+@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
+ 	wx_configure(wx);
+ 	ngbe_up(wx);
+ 
+-	return 0;
++clear_reset:
++	clear_bit(WX_STATE_RESETTING, wx->state);
++	return err;
+ }
+ 
+ static int ngbe_set_channels(struct net_device *dev,
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index fdd6b4f70b7a5..e894e01d030d1 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -499,6 +499,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
+ 	.ndo_start_xmit         = wx_xmit_frame,
+ 	.ndo_set_rx_mode        = wx_set_rx_mode,
+ 	.ndo_set_features       = wx_set_features,
++	.ndo_fix_features       = wx_fix_features,
+ 	.ndo_validate_addr      = eth_validate_addr,
+ 	.ndo_set_mac_address    = wx_set_mac,
+ 	.ndo_get_stats64        = wx_get_stats64,
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+index db675512ce4dc..31fde3fa7c6b5 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+@@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
+ 	struct wx *wx = netdev_priv(netdev);
+ 	u32 new_rx_count, new_tx_count;
+ 	struct wx_ring *temp_ring;
+-	int i;
++	int i, err = 0;
+ 
+ 	new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ 	new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
+@@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
+ 	    new_rx_count == wx->rx_ring_count)
+ 		return 0;
+ 
++	err = wx_set_state_reset(wx);
++	if (err)
++		return err;
++
+ 	if (!netif_running(wx->netdev)) {
+ 		for (i = 0; i < wx->num_tx_queues; i++)
+ 			wx->tx_ring[i]->count = new_tx_count;
+@@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
+ 		wx->tx_ring_count = new_tx_count;
+ 		wx->rx_ring_count = new_rx_count;
+ 
+-		return 0;
++		goto clear_reset;
+ 	}
+ 
+ 	/* allocate temporary buffer to store rings in */
+ 	i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ 	temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+-	if (!temp_ring)
+-		return -ENOMEM;
++	if (!temp_ring) {
++		err = -ENOMEM;
++		goto clear_reset;
++	}
+ 
+ 	txgbe_down(wx);
+ 
+@@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
+ 
+ 	txgbe_up(wx);
+ 
+-	return 0;
++clear_reset:
++	clear_bit(WX_STATE_RESETTING, wx->state);
++	return err;
+ }
+ 
+ static int txgbe_set_channels(struct net_device *dev,
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index bd4624d14ca03..8c7a74981b907 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -269,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx)
+ 	wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
+ 	wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ 
++	wx->do_reset = txgbe_do_reset;
++
+ 	return 0;
+ }
+ 
+@@ -421,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
+ 	return 0;
+ }
+ 
++static void txgbe_reinit_locked(struct wx *wx)
++{
++	int err = 0;
++
++	netif_trans_update(wx->netdev);
++
++	err = wx_set_state_reset(wx);
++	if (err) {
++		wx_err(wx, "wait device reset timeout\n");
++		return;
++	}
++
++	txgbe_down(wx);
++	txgbe_up(wx);
++
++	clear_bit(WX_STATE_RESETTING, wx->state);
++}
++
++void txgbe_do_reset(struct net_device *netdev)
++{
++	struct wx *wx = netdev_priv(netdev);
++
++	if (netif_running(netdev))
++		txgbe_reinit_locked(wx);
++	else
++		txgbe_reset(wx);
++}
++
+ static const struct net_device_ops txgbe_netdev_ops = {
+ 	.ndo_open               = txgbe_open,
+ 	.ndo_stop               = txgbe_close,
+@@ -428,6 +458,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
+ 	.ndo_start_xmit         = wx_xmit_frame,
+ 	.ndo_set_rx_mode        = wx_set_rx_mode,
+ 	.ndo_set_features       = wx_set_features,
++	.ndo_fix_features       = wx_fix_features,
+ 	.ndo_validate_addr      = eth_validate_addr,
+ 	.ndo_set_mac_address    = wx_set_mac,
+ 	.ndo_get_stats64        = wx_get_stats64,
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+index 1b4ff50d58571..f434a7865cb7b 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+@@ -134,6 +134,7 @@ extern char txgbe_driver_name[];
+ void txgbe_down(struct wx *wx);
+ void txgbe_up(struct wx *wx);
+ int txgbe_setup_tc(struct net_device *dev, u8 tc);
++void txgbe_do_reset(struct net_device *netdev);
+ 
+ #define NODE_PROP(_NAME, _PROP)			\
+ 	(const struct software_node) {		\
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index ddb50a0e2bc82..87780465cd0d5 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -4676,7 +4676,8 @@ static int lan8841_suspend(struct phy_device *phydev)
+ 	struct kszphy_priv *priv = phydev->priv;
+ 	struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ 
+-	ptp_cancel_worker_sync(ptp_priv->ptp_clock);
++	if (ptp_priv->ptp_clock)
++		ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+ 
+ 	return genphy_suspend(phydev);
+ }
+diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
+index 7b8afa589a53c..284375f662f1e 100644
+--- a/drivers/net/usb/aqc111.c
++++ b/drivers/net/usb/aqc111.c
+@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			continue;
+ 		}
+ 
+-		/* Clone SKB */
+-		new_skb = skb_clone(skb, GFP_ATOMIC);
++		new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
+ 
+ 		if (!new_skb)
+ 			goto err;
+ 
+-		new_skb->len = pkt_len;
++		skb_put(new_skb, pkt_len);
++		memcpy(new_skb->data, skb->data, pkt_len);
+ 		skb_pull(new_skb, AQ_RX_HW_PAD);
+-		skb_set_tail_pointer(new_skb, new_skb->len);
+ 
+-		new_skb->truesize = SKB_TRUESIZE(new_skb->len);
+ 		if (aqc111_data->rx_checksum)
+ 			aqc111_rx_checksum(new_skb, pkt_desc);
+ 
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 2fa46baa589e5..cbea246664795 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1810,9 +1810,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
+ 
+ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
+ {
+-	skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
++	u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
++
++	skb->csum = (__force __wsum)get_unaligned(csum_ptr);
+ 	skb->ip_summed = CHECKSUM_COMPLETE;
+-	skb_trim(skb, skb->len - 2);
++	skb_trim(skb, skb->len - 2); /* remove csum */
+ }
+ 
+ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+@@ -1870,25 +1872,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 				if (dev->net->features & NETIF_F_RXCSUM)
+ 					smsc95xx_rx_csum_offload(skb);
+ 				skb_trim(skb, skb->len - 4); /* remove fcs */
+-				skb->truesize = size + sizeof(struct sk_buff);
+ 
+ 				return 1;
+ 			}
+ 
+-			ax_skb = skb_clone(skb, GFP_ATOMIC);
++			ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ 			if (unlikely(!ax_skb)) {
+ 				netdev_warn(dev->net, "Error allocating skb\n");
+ 				return 0;
+ 			}
+ 
+-			ax_skb->len = size;
+-			ax_skb->data = packet;
+-			skb_set_tail_pointer(ax_skb, size);
++			skb_put(ax_skb, size);
++			memcpy(ax_skb->data, packet, size);
+ 
+ 			if (dev->net->features & NETIF_F_RXCSUM)
+ 				smsc95xx_rx_csum_offload(ax_skb);
+ 			skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
+-			ax_skb->truesize = size + sizeof(struct sk_buff);
+ 
+ 			usbnet_skb_return(dev, ax_skb);
+ 		}
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 3164451e1010c..0a662e42ed965 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			skb_pull(skb, 3);
+ 			skb->len = len;
+ 			skb_set_tail_pointer(skb, len);
+-			skb->truesize = len + sizeof(struct sk_buff);
+ 			return 2;
+ 		}
+ 
+-		/* skb_clone is used for address align */
+-		sr_skb = skb_clone(skb, GFP_ATOMIC);
++		sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
+ 		if (!sr_skb)
+ 			return 0;
+ 
+-		sr_skb->len = len;
+-		sr_skb->data = skb->data + 3;
+-		skb_set_tail_pointer(sr_skb, len);
+-		sr_skb->truesize = len + sizeof(struct sk_buff);
++		skb_put(sr_skb, len);
++		memcpy(sr_skb->data, skb->data + 3, len);
+ 		usbnet_skb_return(dev, sr_skb);
+ 
+ 		skb_pull(skb, len + SR_RX_OVERHEAD);
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 815f8f599f5d1..5a55db349cb57 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf,
+ 	struct ar5523 *ar;
+ 	int error = -ENOMEM;
+ 
++	static const u8 bulk_ep_addr[] = {
++		AR5523_CMD_TX_PIPE | USB_DIR_OUT,
++		AR5523_DATA_TX_PIPE | USB_DIR_OUT,
++		AR5523_CMD_RX_PIPE | USB_DIR_IN,
++		AR5523_DATA_RX_PIPE | USB_DIR_IN,
++		0};
++
++	if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
++		dev_err(&dev->dev,
++			"Could not find all expected endpoints\n");
++		error = -ENODEV;
++		goto out;
++	}
++
+ 	/*
+ 	 * Load firmware if the device requires it.  This will return
+ 	 * -ENXIO on success and we'll get called back afer the usb
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 9ce6f49ab2614..fa5e2e6518313 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -720,6 +720,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ 		.max_spatial_stream = 4,
+ 		.fw = {
+ 			.dir = WCN3990_HW_1_0_FW_DIR,
++			.board = WCN3990_HW_1_0_BOARD_DATA_FILE,
++			.board_size = WCN3990_BOARD_DATA_SZ,
++			.board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
+ 		},
+ 		.sw_decrypt_mcast_mgmt = true,
+ 		.rx_desc_ops = &wcn3990_rx_desc_ops,
+diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+index 394bf3c32abff..0f6de862c3a9b 100644
+--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ 	}
+ out:
+ 	mutex_unlock(&ar->conf_mutex);
+-	return count;
++	return ret ?: count;
+ }
+ 
+ static const struct file_operations fops_peer_debug_trigger = {
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 93c0730919966..9aa2d821b5078 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -133,6 +133,7 @@ enum qca9377_chip_id_rev {
+ /* WCN3990 1.0 definitions */
+ #define WCN3990_HW_1_0_DEV_VERSION	ATH10K_HW_WCN3990
+ #define WCN3990_HW_1_0_FW_DIR		ATH10K_FW_DIR "/WCN3990/hw1.0"
++#define WCN3990_HW_1_0_BOARD_DATA_FILE "board.bin"
+ 
+ #define ATH10K_FW_FILE_BASE		"firmware"
+ #define ATH10K_FW_API_MAX		6
+diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
+index ec556bb88d658..ba37e6c7ced08 100644
+--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
++++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
+@@ -491,4 +491,7 @@ struct host_interest {
+ #define QCA4019_BOARD_DATA_SZ	  12064
+ #define QCA4019_BOARD_EXT_DATA_SZ 0
+ 
++#define WCN3990_BOARD_DATA_SZ	  26328
++#define WCN3990_BOARD_EXT_DATA_SZ 0
++
+ #endif /* __TARGADDRS_H__ */
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 2e9661f4bea82..80d255aaff1be 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+ 
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+ {
+-	unsigned long time_left;
++	unsigned long time_left, i;
+ 
+ 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ 						WMI_SERVICE_READY_TIMEOUT_HZ);
+-	if (!time_left)
+-		return -ETIMEDOUT;
++	if (!time_left) {
++		/* Sometimes the PCI HIF doesn't receive an interrupt
++		 * for the service ready message even if the buffer
++		 * was completed. A PCIe sniffer shows that it's
++		 * because the corresponding CE ring doesn't fire
++		 * it. Work around this by polling the CE rings once.
++		 */
++		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
++
++		for (i = 0; i < CE_COUNT; i++)
++			ath10k_hif_send_complete_check(ar, i, 1);
++
++		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
++							WMI_SERVICE_READY_TIMEOUT_HZ);
++		if (!time_left) {
++			ath10k_warn(ar, "polling timed out\n");
++			return -ETIMEDOUT;
++		}
++
++		ath10k_warn(ar, "service ready completion received, continuing normally\n");
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 9f4bf41a3d41e..2fca415322aec 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+ 
+ 	enable_ps = arvif->ps;
+ 
+-	if (!arvif->is_started) {
+-		/* mac80211 can update vif powersave state while disconnected.
+-		 * Firmware doesn't behave nicely and consumes more power than
+-		 * necessary if PS is disabled on a non-started vdev. Hence
+-		 * force-enable PS for non-running vdevs.
+-		 */
+-		psmode = WMI_STA_PS_MODE_ENABLED;
+-	} else if (enable_ps) {
++	if (enable_ps) {
+ 		psmode = WMI_STA_PS_MODE_ENABLED;
+ 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+ 
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
+index 92845ffff44ad..40b6abccbb508 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.c
++++ b/drivers/net/wireless/ath/ath12k/qmi.c
+@@ -3178,6 +3178,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
+ 		.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ 		.fn = ath12k_qmi_msg_fw_ready_cb,
+ 	},
++
++	/* end of list */
++	{},
+ };
+ 
+ static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index 9d69a17699264..34de3d16efc09 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -1900,7 +1900,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ 		if (arg->bw_160)
+ 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
+ 		if (arg->bw_320)
+-			cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
++			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ 
+ 		/* Typically if STBC is enabled for VHT it should be enabled
+ 		 * for HT as well
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index e902ca80eba78..0226c31a6cae2 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
+ 	 * carl9170_tx_fill_rateinfo() has filled the rate information
+ 	 * before we get to this point.
+ 	 */
+-	memset_after(&txinfo->status, 0, rates);
++	memset(&txinfo->pad, 0, sizeof(txinfo->pad));
++	memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
+ 
+ 	if (atomic_read(&ar->tx_total_queued))
+ 		ar->tx_schedule = true;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index c4edf83559410..a3e03580cd9ff 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ 			ar->usb_ep_cmd_is_bulk = true;
+ 	}
+ 
++	/* Verify that all expected endpoints are present */
++	if (ar->usb_ep_cmd_is_bulk) {
++		u8 bulk_ep_addr[] = {
++			AR9170_USB_EP_RX | USB_DIR_IN,
++			AR9170_USB_EP_TX | USB_DIR_OUT,
++			AR9170_USB_EP_CMD | USB_DIR_OUT,
++			0};
++		u8 int_ep_addr[] = {
++			AR9170_USB_EP_IRQ | USB_DIR_IN,
++			0};
++		if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++		    !usb_check_int_endpoints(intf, int_ep_addr))
++			err = -ENODEV;
++	} else {
++		u8 bulk_ep_addr[] = {
++			AR9170_USB_EP_RX | USB_DIR_IN,
++			AR9170_USB_EP_TX | USB_DIR_OUT,
++			0};
++		u8 int_ep_addr[] = {
++			AR9170_USB_EP_IRQ | USB_DIR_IN,
++			AR9170_USB_EP_CMD | USB_DIR_OUT,
++			0};
++		if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++		    !usb_check_int_endpoints(intf, int_ep_addr))
++			err = -ENODEV;
++	}
++
++	if (err) {
++		carl9170_free(ar);
++		return err;
++	}
++
+ 	usb_set_intfdata(intf, ar);
+ 	SET_IEEE80211_DEV(ar->hw, &intf->dev);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index d7fb88bb6ae1a..06698a714b523 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer {
+ #define BRCMF_RANDOM_SEED_MAGIC		0xfeedc0de
+ #define BRCMF_RANDOM_SEED_LENGTH	0x100
+ 
++static noinline_for_stack void
++brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
++{
++	u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
++
++	get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
++	memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
++}
++
+ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 					const struct firmware *fw, void *nvram,
+ 					u32 nvram_len)
+@@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 				.length = cpu_to_le32(rand_len),
+ 				.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
+ 			};
+-			void *randbuf;
+ 
+ 			/* Some Apple chips/firmwares expect a buffer of random
+ 			 * data to be present before NVRAM
+@@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
+ 				    sizeof(footer));
+ 
+ 			address -= rand_len;
+-			randbuf = kzalloc(rand_len, GFP_KERNEL);
+-			get_random_bytes(randbuf, rand_len);
+-			memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
+-			kfree(randbuf);
++			brcmf_pcie_provide_random_bytes(devinfo, address);
+ 		}
+ 	} else {
+ 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+index 535edb51d1c09..5416e41189657 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation
++ * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
+  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+  */
+ #include <linux/ieee80211.h>
+@@ -259,7 +259,6 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ 				       struct ieee80211_vif *vif, bool enable)
+ {
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	int link_id;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+@@ -267,37 +266,22 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ 		return;
+ 
+ 	/* Done already */
+-	if (mvmvif->bt_coex_esr_disabled == !enable)
++	if ((mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX) == !enable)
+ 		return;
+ 
+-	mvmvif->bt_coex_esr_disabled = !enable;
+-
+-	/* Nothing to do */
+-	if (mvmvif->esr_active == enable)
+-		return;
+-
+-	if (enable) {
+-		/* Try to re-enable eSR*/
+-		iwl_mvm_mld_select_links(mvm, vif, false);
+-		return;
+-	}
+-
+-	/*
+-	 * Find the primary link, as we want to switch to it and drop the
+-	 * secondary one.
+-	 */
+-	link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+-	WARN_ON(link_id < 0);
++	if (enable)
++		mvmvif->esr_disable_reason &= ~IWL_MVM_ESR_DISABLE_COEX;
++	else
++		mvmvif->esr_disable_reason |= IWL_MVM_ESR_DISABLE_COEX;
+ 
+-	ieee80211_set_active_links_async(vif,
+-					 vif->active_links & BIT(link_id));
++	iwl_mvm_recalc_esr(mvm, vif);
+ }
+ 
+ /*
+  * This function receives the LB link id and checks if eSR should be
+  * enabled or disabled (due to BT coex)
+  */
+-bool
++static bool
+ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ 				   struct ieee80211_vif *vif,
+ 				   int link_id, int primary_link)
+@@ -338,7 +322,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ 	if (!link_rssi)
+ 		wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ 
+-	else if (!mvmvif->bt_coex_esr_disabled)
++	else if (!(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX))
+ 		 /* RSSI needs to get really low to disable eSR... */
+ 		wifi_loss_rate =
+ 			link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+@@ -354,9 +338,9 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ 	return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+ }
+ 
+-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+-				    struct ieee80211_vif *vif,
+-				    int link_id)
++void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
++				     struct ieee80211_vif *vif,
++				     int link_id)
+ {
+ 	unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ 	int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+@@ -418,7 +402,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
+ 		return;
+ 	}
+ 
+-	iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
++	iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+ 
+ 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
+ 		min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 8f4b063d6243e..7ed7444c98715 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1564,6 +1564,17 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
+ 					IWL_STA_MULTICAST);
+ }
+ 
++void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
++{
++	lockdep_assert_held(&mvm->mutex);
++
++	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
++		return;
++
++	INIT_DELAYED_WORK(&mvmvif->csa_work,
++			  iwl_mvm_channel_switch_disconnect_wk);
++}
++
+ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ 				     struct ieee80211_vif *vif)
+ {
+@@ -1574,6 +1585,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ 
+ 	mutex_lock(&mvm->mutex);
+ 
++	iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
++
+ 	mvmvif->mvm = mvm;
+ 
+ 	/* the first link always points to the default one */
+@@ -1651,15 +1664,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ 	}
+ 
+-	if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+-		vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+-
+ 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ 		mvm->p2p_device_vif = vif;
+ 
+ 	iwl_mvm_tcm_add_vif(mvm, vif);
+-	INIT_DELAYED_WORK(&mvmvif->csa_work,
+-			  iwl_mvm_channel_switch_disconnect_wk);
+ 
+ 	if (vif->type == NL80211_IFTYPE_MONITOR) {
+ 		mvm->monitor_on = true;
+@@ -1697,6 +1705,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ 				 struct ieee80211_vif *vif)
+ {
++	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++
+ 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ 		/*
+ 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
+@@ -1705,6 +1715,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ 		 */
+ 		flush_work(&mvm->roc_done_wk);
+ 	}
++
++	cancel_delayed_work_sync(&mvmvif->csa_work);
+ }
+ 
+ /* This function is doing the common part of removing the interface for
+@@ -3792,6 +3804,24 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
+ 	return callbacks->update_sta(mvm, vif, sta);
+ }
+ 
++static void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
++					   struct ieee80211_vif *vif)
++{
++	unsigned long usable_links = ieee80211_vif_usable_links(vif);
++	u8 link_id;
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		struct ieee80211_bss_conf *link_conf =
++			link_conf_dereference_protected(vif, link_id);
++
++		if (WARN_ON_ONCE(!link_conf))
++			return;
++
++		if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ)
++			iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
++	}
++}
++
+ static int
+ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+ 				      struct ieee80211_vif *vif,
+@@ -3819,6 +3849,9 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+ 		callbacks->mac_ctxt_changed(mvm, vif, false);
+ 		iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+ 
++		/* Calculate eSR mode due to BT coex */
++		iwl_mvm_bt_coex_update_vif_esr(mvm, vif);
++
+ 		/* when client is authorized (AP station marked as such),
+ 		 * try to enable more links
+ 		 */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index 084314bf6f369..df183a79db4c8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -14,6 +14,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 
+ 	mutex_lock(&mvm->mutex);
+ 
++	iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
++
+ 	mvmvif->mvm = mvm;
+ 
+ 	/* Not much to do here. The stack will not allow interface
+@@ -92,6 +94,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 		mvm->csme_vif = vif;
+ 	}
+ 
++	if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
++		vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
++
+ 	goto out_unlock;
+ 
+  out_free_bf:
+@@ -189,17 +194,13 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
+ 	mutex_unlock(&mvm->mutex);
+ }
+ 
+-static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif)
++static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
+ {
+ 	unsigned int n_active = 0;
+ 	int i;
+ 
+ 	for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+-		struct ieee80211_bss_conf *link_conf;
+-
+-		link_conf = link_conf_dereference_protected(vif, i);
+-		if (link_conf &&
+-		    rcu_access_pointer(link_conf->chanctx_conf))
++		if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
+ 			n_active++;
+ 	}
+ 
+@@ -245,18 +246,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
+ {
+ 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+-	unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++	unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
+ 	unsigned int link_id = link_conf->link_id;
+ 	int ret;
+ 
+-	/* if the assigned one was not counted yet, count it now */
+-	if (!rcu_access_pointer(link_conf->chanctx_conf))
+-		n_active++;
+-
+ 	if (WARN_ON_ONCE(!mvmvif->link[link_id]))
+ 		return -EINVAL;
+ 
++	/* if the assigned one was not counted yet, count it now */
++	if (!mvmvif->link[link_id]->phy_ctxt)
++		n_active++;
++
+ 	/* mac parameters such as HE support can change at this stage
+ 	 * For sta, need first to configure correct state from drv_sta_state
+ 	 * and only after that update mac config.
+@@ -296,13 +297,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
+ 	 * this needs the phy context assigned (and in FW?), and we cannot
+ 	 * do it later because it needs to be initialized as soon as we're
+ 	 * able to TX on the link, i.e. when active.
+-	 *
+-	 * Firmware restart isn't quite correct yet for MLO, but we don't
+-	 * need to do it in that case anyway since it will happen from the
+-	 * normal station state callback.
+ 	 */
+-	if (mvmvif->ap_sta &&
+-	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
++	if (mvmvif->ap_sta) {
+ 		struct ieee80211_link_sta *link_sta;
+ 
+ 		rcu_read_lock();
+@@ -416,7 +412,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ 
+ {
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
++	unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
+ 	unsigned int link_id = link_conf->link_id;
+ 
+ 	/* shouldn't happen, but verify link_id is valid before accessing */
+@@ -608,10 +604,49 @@ struct iwl_mvm_link_sel_data {
+ 	bool active;
+ };
+ 
+-static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a,
++static bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
++					struct iwl_mvm_link_sel_data *a,
+ 					struct iwl_mvm_link_sel_data *b)
+ {
+-	return a->band != b->band;
++	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++
++	if (a->band == b->band)
++		return false;
++
++	/* BT Coex affects eSR mode only if one of the links is on LB */
++	if (a->band == NL80211_BAND_2GHZ || b->band == NL80211_BAND_2GHZ)
++		return !(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX);
++
++	return true;
++}
++
++static u8
++iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
++				struct iwl_mvm_link_sel_data *data,
++				unsigned long usable_links)
++{
++	u8 n_data = 0;
++	unsigned long link_id;
++
++	rcu_read_lock();
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		struct ieee80211_bss_conf *link_conf =
++			rcu_dereference(vif->link_conf[link_id]);
++
++		if (WARN_ON_ONCE(!link_conf))
++			continue;
++
++		data[n_data].link_id = link_id;
++		data[n_data].band = link_conf->chanreq.oper.chan->band;
++		data[n_data].width = link_conf->chanreq.oper.width;
++		data[n_data].active = vif->active_links & BIT(link_id);
++		n_data++;
++	}
++
++	rcu_read_unlock();
++
++	return n_data;
+ }
+ 
+ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+@@ -621,7 +656,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ 	u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
+ 	u16 new_active_links;
+-	u8 link_id, n_data = 0, i, j;
++	u8 n_data, i, j;
+ 
+ 	if (!IWL_MVM_AUTO_EML_ENABLE)
+ 		return;
+@@ -646,23 +681,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	if (hweight16(vif->active_links) == max_active_links)
+ 		return;
+ 
+-	rcu_read_lock();
+-
+-	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		struct ieee80211_bss_conf *link_conf =
+-			rcu_dereference(vif->link_conf[link_id]);
+-
+-		if (WARN_ON_ONCE(!link_conf))
+-			continue;
+-
+-		data[n_data].link_id = link_id;
+-		data[n_data].band = link_conf->chanreq.oper.chan->band;
+-		data[n_data].width = link_conf->chanreq.oper.width;
+-		data[n_data].active = vif->active_links & BIT(link_id);
+-		n_data++;
+-	}
+-
+-	rcu_read_unlock();
++	n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links);
+ 
+ 	/* this is expected to be the current active link */
+ 	if (n_data == 1)
+@@ -686,7 +705,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 			if (i == j)
+ 				continue;
+ 
+-			if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j]))
++			if (iwl_mvm_mld_valid_link_pair(vif, &data[i],
++							&data[j]))
+ 				break;
+ 		}
+ 
+@@ -702,7 +722,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 				if (i == j)
+ 					continue;
+ 
+-				if (iwl_mvm_mld_valid_link_pair(&data[i],
++				if (iwl_mvm_mld_valid_link_pair(vif, &data[i],
+ 								&data[j]))
+ 					break;
+ 			}
+@@ -1264,6 +1284,33 @@ int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ 	return data[1].link_id;
+ }
+ 
++void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
++{
++	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
++	bool enable = !mvmvif->esr_disable_reason;
++	int link_id;
++
++	/* Nothing to do */
++	if (mvmvif->esr_active == enable)
++		return;
++
++	if (enable) {
++		/* Try to re-enable eSR */
++		iwl_mvm_mld_select_links(mvm, vif, false);
++		return;
++	}
++
++	/*
++	 * Find the primary link, as we want to switch to it and drop the
++	 * secondary one.
++	 */
++	link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
++	WARN_ON(link_id < 0);
++
++	ieee80211_set_active_links_async(vif,
++					 vif->active_links & BIT(link_id));
++}
++
+ /*
+  * This function receives a bitmap of usable links and check if we can enter
+  * eSR on those links.
+@@ -1272,14 +1319,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ 				  struct ieee80211_vif *vif,
+ 				  unsigned long desired_links)
+ {
+-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+-	int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+-							desired_links);
++	u16 usable_links = ieee80211_vif_usable_links(vif);
++	struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ 	const struct wiphy_iftype_ext_capab *ext_capa;
+-	bool ret = true;
+-	int link_id;
++	u8 n_data;
+ 
+-	if (primary_link < 0)
++	if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
++	    hweight16(usable_links) <= 1)
+ 		return false;
+ 
+ 	if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+@@ -1291,26 +1337,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ 	    !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
+ 		return false;
+ 
+-	for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		struct ieee80211_bss_conf *link_conf =
+-			link_conf_dereference_protected(vif, link_id);
+-
+-		if (WARN_ON_ONCE(!link_conf))
+-			continue;
++	n_data = iwl_mvm_set_link_selection_data(vif, data, desired_links);
+ 
+-		/* BT Coex effects eSR mode only if one of the link is on LB */
+-		if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+-			continue;
++	if (n_data != 2)
++		return false;
+ 
+-		ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+-							 primary_link);
+-		// Mark eSR as disabled for the next time
+-		if (!ret)
+-			mvmvif->bt_coex_esr_disabled = true;
+-		break;
+-	}
+ 
+-	return ret;
++	return iwl_mvm_mld_valid_link_pair(vif, &data[0], &data[1]);
+ }
+ 
+ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
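Editor's note: with the refactor above, iwl_mvm_can_enter_esr() reduces to building per-link data and testing one pair predicate: two links qualify for eSR only on different bands, and a 2.4 GHz member additionally requires that BT coex has not set IWL_MVM_ESR_DISABLE_COEX. A standalone sketch of that predicate (band constants and the reason bitmap are simplified stand-ins for the driver's types):

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

#define ESR_DISABLE_COEX (1u << 0)	/* mirrors IWL_MVM_ESR_DISABLE_COEX */

struct link { enum band band; };

/* Two links may run in eSR only on different bands; a 2.4 GHz link
 * additionally requires that BT coex has not flagged a veto. */
static bool valid_esr_pair(const struct link *a, const struct link *b,
			   unsigned int esr_disable_reason)
{
	if (a->band == b->band)
		return false;
	if (a->band == BAND_2GHZ || b->band == BAND_2GHZ)
		return !(esr_disable_reason & ESR_DISABLE_COEX);
	return true;
}

int main(void)
{
	struct link lb = { BAND_2GHZ }, hb = { BAND_5GHZ };

	printf("%d\n", valid_esr_pair(&lb, &hb, 0));			/* 1 */
	printf("%d\n", valid_esr_pair(&lb, &hb, ESR_DISABLE_COEX));	/* 0 */
	return 0;
}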
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+index 23e64a757cfe8..36dc291d98dd6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+@@ -9,7 +9,9 @@
+ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 			   int filter_link_id)
+ {
++	struct ieee80211_link_sta *link_sta;
+ 	struct iwl_mvm_sta *mvmsta;
++	struct ieee80211_vif *vif;
+ 	unsigned int link_id;
+ 	u32 result = 0;
+ 
+@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		return 0;
+ 
+ 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
++	vif = mvmsta->vif;
+ 
+ 	/* it's easy when the STA is not an MLD */
+ 	if (!sta->valid_links)
+ 		return BIT(mvmsta->deflink.sta_id);
+ 
+ 	/* but if it is an MLD, get the mask of all the FW STAs it has ... */
+-	for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
+-		struct iwl_mvm_link_sta *link_sta;
++	for_each_sta_active_link(vif, sta, link_sta, link_id) {
++		struct iwl_mvm_link_sta *mvm_link_sta;
+ 
+ 		/* unless we have a specific link in mind */
+ 		if (filter_link_id >= 0 && link_id != filter_link_id)
+ 			continue;
+ 
+-		link_sta =
++		mvm_link_sta =
+ 			rcu_dereference_check(mvmsta->link[link_id],
+ 					      lockdep_is_held(&mvm->mutex));
+-		if (!link_sta)
++		if (!mvm_link_sta)
+ 			continue;
+ 
+-		result |= BIT(link_sta->sta_id);
++		result |= BIT(mvm_link_sta->sta_id);
+ 	}
+ 
+ 	return result;
+@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
+ 				       struct ieee80211_sta *sta)
+ {
+ 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
++	struct ieee80211_link_sta *link_sta;
+ 	unsigned int link_id;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
+-		if (!rcu_access_pointer(sta->link[link_id]) ||
+-		    mvm_sta->link[link_id])
++	for_each_sta_active_link(vif, sta, link_sta, link_id) {
++		if (WARN_ON(mvm_sta->link[link_id]))
+ 			continue;
+ 
+ 		ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index f0b24f00938bd..d1ab35eb55b23 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -346,6 +346,14 @@ struct iwl_mvm_vif_link_info {
+ 	u16 mgmt_queue;
+ };
+ 
++/**
++ * enum iwl_mvm_esr_disable_reason - reasons for which we can't enable EMLSR
++ * @IWL_MVM_ESR_DISABLE_COEX: COEX is preventing the enablement of EMLSR
++ */
++enum iwl_mvm_esr_disable_reason {
++	IWL_MVM_ESR_DISABLE_COEX	= BIT(0),
++};
++
+ /**
+  * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+  * @mvm: pointer back to the mvm struct
+@@ -361,7 +369,6 @@ struct iwl_mvm_vif_link_info {
+  * @pm_enabled - indicate if MAC power management is allowed
+  * @monitor_active: indicates that monitor context is configured, and that the
+  *	interface should get quota etc.
+- * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
+  * @low_latency: bit flags for low latency
+  *	see enum &iwl_mvm_low_latency_cause for causes.
+  * @low_latency_actual: boolean, indicates low latency is set,
+@@ -378,6 +385,7 @@ struct iwl_mvm_vif_link_info {
+  * @deflink: default link data for use in non-MLO
+  * @link: link data for each link in MLO
+  * @esr_active: indicates eSR mode is active
++ * @esr_disable_reason: a bitmap of enum iwl_mvm_esr_disable_reason
+  * @pm_enabled: indicates powersave is enabled
+  */
+ struct iwl_mvm_vif {
+@@ -392,7 +400,6 @@ struct iwl_mvm_vif {
+ 	bool pm_enabled;
+ 	bool monitor_active;
+ 	bool esr_active;
+-	bool bt_coex_esr_disabled;
+ 
+ 	u8 low_latency: 6;
+ 	u8 low_latency_actual: 1;
+@@ -400,6 +407,7 @@ struct iwl_mvm_vif {
+ 	u8 authorized:1;
+ 	bool ps_disabled;
+ 
++	u32 esr_disable_reason;
+ 	u32 ap_beacon_time;
+ 	struct iwl_mvm_vif_bf_data bf_data;
+ 
+@@ -1572,15 +1580,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
+ 	struct iwl_trans *trans = mvm->fwrt.trans;
+ 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 
+-	lockdep_assert_held(&mvm->mutex);
+-
+ 	if (vif->type == NL80211_IFTYPE_AP)
+ 		return mvm->fw->ucode_capa.num_beacons;
+ 
++	/* Check if HW supports eSR or STR */
+ 	if ((iwl_mvm_is_esr_supported(trans) &&
+-	     !mvmvif->bt_coex_esr_disabled) ||
+-	    ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+-	     CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
++	     !(mvmvif->esr_disable_reason & ~IWL_MVM_ESR_DISABLE_COEX)) ||
++	    (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
++	     CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ 		return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
+ 
+ 	return 1;
+@@ -1776,6 +1783,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+ 
+ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+ 
++void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
++
+ /*
+  * FW notifications / CMD responses handlers
+  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+@@ -2133,12 +2142,9 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
+ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ 			   struct ieee80211_tx_info *info, u8 ac);
+-bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+-					struct ieee80211_vif *vif,
+-					int link_id, int primary_link);
+-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+-				    struct ieee80211_vif *vif,
+-				    int link_id);
++void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
++				     struct ieee80211_vif *vif,
++				     int link_id);
+ 
+ /* beacon filtering */
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+@@ -2779,4 +2785,8 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ 			struct ieee80211_channel *channel,
+ 			struct ieee80211_vif *vif,
+ 			int duration, u32 activity);
++
++/* EMLSR */
++void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
++
+ #endif /* __IWL_MVM_H__ */
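Editor's note: the mvm.h changes replace the single bt_coex_esr_disabled flag with the esr_disable_reason bitmap, so independent vetoes can accumulate and eSR is re-entered only once the last one clears. A hedged sketch of how such a reason bitmap typically drives recalculation (the recalc hook is a stand-in for iwl_mvm_recalc_esr(), and the second reason bit is hypothetical):

#include <stdio.h>

#define ESR_DISABLE_COEX  (1u << 0)	/* BT coex veto, as in the patch */
#define ESR_DISABLE_OTHER (1u << 1)	/* hypothetical future reason */

static unsigned int esr_disable_reason;

/* eSR may run only while no veto bit is set; any edge triggers a recalc. */
static void set_reason(unsigned int bit)
{
	unsigned int old = esr_disable_reason;

	esr_disable_reason |= bit;
	if (!old)
		printf("recalc: leave eSR\n");	/* iwl_mvm_recalc_esr() analogue */
}

static void clear_reason(unsigned int bit)
{
	esr_disable_reason &= ~bit;
	if (!esr_disable_reason)
		printf("recalc: try to re-enter eSR\n");
}

int main(void)
{
	set_reason(ESR_DISABLE_COEX);
	set_reason(ESR_DISABLE_OTHER);		/* already disabled, no transition */
	clear_reason(ESR_DISABLE_OTHER);	/* still vetoed by coex */
	clear_reason(ESR_DISABLE_COEX);		/* last veto gone, re-enable */
	return 0;
}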
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index b1add7942c5bf..395aef04f691f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -889,8 +889,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
+ 
+ 		if (link_info->phy_ctxt &&
+ 		    link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+-			iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
+-						       link_id);
++			iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif,
++							link_id);
+ 
+ 		/* make sure that beacon statistics don't go backwards with TCM
+ 		 * request to clear statistics
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 11559563ae381..22bc032cffc8b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -3171,8 +3171,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ 		struct iwl_mvm_vif_link_info *link_info =
+ 			scan_vif->link[mvm->scan_link_id];
+ 
+-		if (!WARN_ON(!link_info))
++		/* It is possible that by the time the scan completes, the link
++		 * has already been removed and is no longer valid.
++		 */
++		if (link_info)
+ 			memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
++		else
++			IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
+ 
+ 		ieee80211_scan_completed(mvm->hw, &info);
+ 		mvm->scan_vif = NULL;
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index ce8fea76dbb24..7a4ef5c83be48 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -2718,7 +2718,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
+ 		cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
+ 		cmd->numaddr = cpu_to_le16(mc_count);
+ 		netdev_hw_addr_list_for_each(ha, mc_list) {
+-			memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
++			memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
+ 		}
+ 	}
+ 
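Editor's note: the mwl8k hunk is a one-character fix for a stuck destination index: without the post-increment, every multicast address landed in cmd->addr[0]. A userspace reproduction of the corrected pattern (the address list and command layout are simplified):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char list[3][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x03 },
	};
	unsigned char addr[3][ETH_ALEN];
	int i = 0, n;

	/* Buggy form: memcpy(addr[i], ...) never moves past slot 0.
	 * Fixed form advances the destination slot per entry, as the
	 * patch does with addr[i++]. */
	for (n = 0; n < 3; n++)
		memcpy(addr[i++], list[n], ETH_ALEN);

	for (n = 0; n < 3; n++)
		printf("slot %d ends in %02x\n", n, addr[n][ETH_ALEN - 1]);
	return 0;
}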
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+index 7a2f5d38562b4..14304b0637158 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+@@ -4,6 +4,13 @@
+ #include "mac.h"
+ #include "../dma.h"
+ 
++static const u8 wmm_queue_map[] = {
++	[IEEE80211_AC_BK] = 0,
++	[IEEE80211_AC_BE] = 1,
++	[IEEE80211_AC_VI] = 2,
++	[IEEE80211_AC_VO] = 3,
++};
++
+ static void
+ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ {
+@@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ 	struct ieee80211_sta *sta;
+ 	struct mt7603_sta *msta;
+ 	struct mt76_wcid *wcid;
++	u8 tid = 0, hwq = 0;
+ 	void *priv;
+ 	int idx;
+ 	u32 val;
+-	u8 tid = 0;
+ 
+ 	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
+ 		goto free;
+@@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
+ 		goto free;
+ 
+ 	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
+-	val = le32_to_cpu(txd[0]);
+-	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+-	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+-	txd[0] = cpu_to_le32(val);
+ 
+ 	sta = container_of(priv, struct ieee80211_sta, drv_priv);
+ 	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
+-	if (ieee80211_is_data_qos(hdr->frame_control))
++
++	hwq = wmm_queue_map[IEEE80211_AC_BE];
++	if (ieee80211_is_data_qos(hdr->frame_control)) {
+ 		tid = *ieee80211_get_qos_ctl(hdr) &
+-		      IEEE80211_QOS_CTL_TAG1D_MASK;
+-	skb_set_queue_mapping(skb, tid_to_ac[tid]);
++			 IEEE80211_QOS_CTL_TAG1D_MASK;
++		u8 qid = tid_to_ac[tid];
++		hwq = wmm_queue_map[qid];
++		skb_set_queue_mapping(skb, qid);
++	} else if (ieee80211_is_data(hdr->frame_control)) {
++		skb_set_queue_mapping(skb, IEEE80211_AC_BE);
++		hwq = wmm_queue_map[IEEE80211_AC_BE];
++	} else {
++		skb_pull(skb, MT_TXD_SIZE);
++		if (!ieee80211_is_bufferable_mmpdu(skb))
++			goto free;
++		skb_push(skb, MT_TXD_SIZE);
++		skb_set_queue_mapping(skb, MT_TXQ_PSD);
++		hwq = MT_TX_HW_QUEUE_MGMT;
++	}
++
+ 	ieee80211_sta_set_buffered(sta, tid, true);
+ 
++	val = le32_to_cpu(txd[0]);
++	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
++	val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
++	txd[0] = cpu_to_le32(val);
++
+ 	spin_lock_bh(&dev->ps_lock);
+ 	__skb_queue_tail(&msta->psq, skb);
+ 	if (skb_queue_len(&msta->psq) >= 64) {
+@@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
+ 
+ int mt7603_dma_init(struct mt7603_dev *dev)
+ {
+-	static const u8 wmm_queue_map[] = {
+-		[IEEE80211_AC_BK] = 0,
+-		[IEEE80211_AC_BE] = 1,
+-		[IEEE80211_AC_VI] = 2,
+-		[IEEE80211_AC_VO] = 3,
+-	};
+ 	int ret;
+ 	int i;
+ 
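Editor's note: the mt7603 change hoists wmm_queue_map to file scope so the PS loopback path can reuse it: a QoS frame's TID is folded to an access category, and the category selects the hardware queue written into TXD0. A compact sketch of the two-step lookup (tid_to_ac follows the usual 802.1D folding; the numbering is illustrative of mac80211's AC enum):

#include <stdio.h>

/* mac80211 access-category numbering (VO is the highest priority). */
enum { AC_VO, AC_VI, AC_BE, AC_BK };

/* 802.1D user priority (TID 0..7) folded to an access category. */
static const unsigned char tid_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

/* AC to MT7603 hardware queue, as in the patch: BK=0, BE=1, VI=2, VO=3. */
static const unsigned char wmm_queue_map[4] = {
	[AC_BK] = 0, [AC_BE] = 1, [AC_VI] = 2, [AC_VO] = 3,
};

int main(void)
{
	unsigned int tid;

	for (tid = 0; tid < 8; tid++)
		printf("tid %u -> ac %u -> hwq %u\n", tid,
		       tid_to_ac[tid], wmm_queue_map[tid_to_ac[tid]]);
	return 0;
}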
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index cf21d06257e53..dc8a77f0a1cc4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
+ 		   MT_CLIENT_RESET_TX_R_E_2_S);
+ 
+ 	/* Start PSE client TX abort */
++	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
+ 	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
+ 	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
+ 		       MT_CLIENT_RESET_TX_R_E_1_S, 500);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index af0c2b2aacb00..fb8bd50eb7de8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ 	};
+ 	struct sk_buff *skb;
+ 
+-	if (is_mt799x(dev) && !wcid->sta)
++	if (wcid && !wcid->sta)
+ 		hdr.muar_idx = 0xe;
+ 
+ 	mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
+@@ -2527,6 +2527,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
+ 			__le16 tag;
+ 			__le16 len;
+ 			u8 suspend;
++			u8 pad[7];
+ 		} __packed hif_suspend;
+ 	} req = {
+ 		.hif_suspend = {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index 6c3696c8c7002..450f4d221184b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -1049,6 +1049,7 @@ static ssize_t
+ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ 			size_t count, loff_t *ppos)
+ {
++	int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ 	struct mt7915_phy *phy = file->private_data;
+ 	struct mt7915_dev *dev = phy->dev;
+ 	struct mt76_phy *mphy = phy->mt76;
+@@ -1057,7 +1058,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ 		.band_idx = phy->mt76->band_idx,
+ 	};
+ 	char buf[100];
+-	int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ 	enum mac80211_rx_encoding mode;
+ 	u32 offs = 0, len = 0;
+ 
+@@ -1130,8 +1130,8 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ 	if (ret)
+ 		goto out;
+ 
+-	mphy->txpower_cur = max(mphy->txpower_cur,
+-				max(pwr160, max(pwr80, max(pwr40, pwr20))));
++	pwr = max3(pwr80, pwr40, pwr20);
++	mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr);
+ out:
+ 	mutex_unlock(&dev->mt76.mutex);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index 2a0bbfe7bfa5e..b8315a89f4a9a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -535,7 +535,7 @@ struct mt7925_wow_pattern_tlv {
+ 	u8 offset;
+ 	u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
+ 	u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
+-	u8 rsv[4];
++	u8 rsv[7];
+ } __packed;
+ 
+ static inline enum connac3_mcu_cipher_type
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index b44abe2acc81b..e86c05d0eecc9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -3729,6 +3729,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
+ 	} __packed * res;
+ 	struct sk_buff *skb;
+ 	int ret;
++	u32 temp;
+ 
+ 	ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
+ 					&req, sizeof(req), true, &skb);
+@@ -3736,8 +3737,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
+ 		return ret;
+ 
+ 	res = (void *)skb->data;
++	temp = le32_to_cpu(res->temperature);
++	dev_kfree_skb(skb);
+ 
+-	return le32_to_cpu(res->temperature);
++	return temp;
+ }
+ 
+ int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state)
+@@ -4464,7 +4467,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
+ 		u8 band_idx;
+ 	} __packed req = {
+ 		.tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL),
+-		.len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4),
++		.len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4),
+ 		.power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL,
+ 		.power_limit_type = TX_POWER_LIMIT_TABLE_RATE,
+ 		.band_idx = phy->mt76->band_idx,
+@@ -4479,7 +4482,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
+ 	mphy->txpower_cur = tx_power;
+ 
+ 	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+-				 sizeof(req) + MT7996_SKU_RATE_NUM);
++				 sizeof(req) + MT7996_SKU_PATH_NUM);
+ 	if (!skb)
+ 		return -ENOMEM;
+ 
+@@ -4503,6 +4506,9 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
+ 	/* eht */
+ 	skb_put_data(skb, &la.eht[0], sizeof(la.eht));
+ 
++	/* padding */
++	skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM);
++
+ 	return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ 				     MCU_WM_UNI_CMD(TXPOWER), true);
+ }
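Editor's note: the mt7996 temperature fix plugs an skb leak by freeing the MCU response, which in turn forces the value to be copied out before the buffer goes away. The general copy-then-free shape, sketched with plain malloc/free (names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct resp { uint32_t temperature; };

/* Pattern from the patch: extract what you need from a response buffer
 * into a local, free the buffer, then return the local. Never free
 * first and read afterwards, and never return without freeing. */
static int get_temperature(void)
{
	struct resp *res = malloc(sizeof(*res));
	uint32_t temp;

	if (!res)
		return -1;
	res->temperature = 42;		/* stand-in for the firmware reply */

	temp = res->temperature;	/* copy out ... */
	free(res);			/* ... then release */
	return (int)temp;
}

int main(void)
{
	printf("temp=%d\n", get_temperature());
	return 0;
}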
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 304e5fd148034..928a9663b49e0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
+ 	struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
+ 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ 	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
+-	u32 i, intr, mask, intr1;
++	u32 i, intr, mask, intr1 = 0;
+ 
+ 	if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
+ 		mtk_wed_device_irq_set_mask(wed_hif2, 0);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index 36d1f247d55aa..ddeb40d522c5a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -50,6 +50,7 @@
+ #define MT7996_CFEND_RATE_11B		0x03	/* 11B LP, 11M */
+ 
+ #define MT7996_SKU_RATE_NUM		417
++#define MT7996_SKU_PATH_NUM		494
+ 
+ #define MT7996_MAX_TWT_AGRT		16
+ #define MT7996_MAX_STA_TWT_AGRT		8
+diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
+index 31290d8cb7f7c..92074b73ebebd 100644
+--- a/drivers/net/wireless/realtek/rtw89/ps.c
++++ b/drivers/net/wireless/realtek/rtw89/ps.c
+@@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
+ 
+ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
+ {
+-	if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
++	if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) &&
++	    !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags))
+ 		rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
+ 	else
+ 		rtw89_mac_power_mode_change(rtwdev, enter);
+diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
+index ccad026defb50..ca4835008b56e 100644
+--- a/drivers/net/wireless/realtek/rtw89/wow.c
++++ b/drivers/net/wireless/realtek/rtw89/wow.c
+@@ -457,14 +457,17 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ 	struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
+ 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
++	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ 	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	bool include_bb = !!chip->bbmcu_nr;
++	bool disable_intr_for_dlfw = false;
+ 	struct ieee80211_sta *wow_sta;
+ 	struct rtw89_sta *rtwsta = NULL;
+ 	bool is_conn = true;
+ 	int ret;
+ 
+-	rtw89_hci_disable_intr(rtwdev);
++	if (chip_id == RTL8852C || chip_id == RTL8922A)
++		disable_intr_for_dlfw = true;
+ 
+ 	wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
+ 	if (wow_sta)
+@@ -472,12 +475,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 	else
+ 		is_conn = false;
+ 
++	if (disable_intr_for_dlfw)
++		rtw89_hci_disable_intr(rtwdev);
++
+ 	ret = rtw89_fw_download(rtwdev, fw_type, include_bb);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "download fw failed\n");
+ 		return ret;
+ 	}
+ 
++	if (disable_intr_for_dlfw)
++		rtw89_hci_enable_intr(rtwdev);
++
+ 	rtw89_phy_init_rf_reg(rtwdev, true);
+ 
+ 	ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+@@ -520,7 +529,6 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
+ 	}
+ 
+ 	rtw89_mac_hw_mgnt_sec(rtwdev, wow);
+-	rtw89_hci_enable_intr(rtwdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/of/module.c b/drivers/of/module.c
+index f58e624953a20..780fd82a7ecc5 100644
+--- a/drivers/of/module.c
++++ b/drivers/of/module.c
+@@ -29,14 +29,15 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
+ 	csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
+ 			 of_node_get_device_type(np));
+ 	tsize = csize;
++	if (csize >= len)
++		csize = len > 0 ? len - 1 : 0;
+ 	len -= csize;
+-	if (str)
+-		str += csize;
++	str += csize;
+ 
+ 	of_property_for_each_string(np, "compatible", p, compat) {
+ 		csize = strlen(compat) + 1;
+ 		tsize += csize;
+-		if (csize > len)
++		if (csize >= len)
+ 			continue;
+ 
+ 		csize = snprintf(str, len, "C%s", compat);
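Editor's note: the of_modalias() fix hinges on snprintf() returning the length the output would have had: on truncation, the cursor must be clamped to the len - 1 bytes actually written before advancing, while the untruncated total is still accumulated so callers can size a retry buffer. A standalone sketch of that accounting, simplified to two fixed fields:

#include <stdio.h>

/* Append two fields into str of size len, clamping the cursor the way
 * the patched of_modalias() does when snprintf() reports truncation. */
static int build_modalias(char *str, int len)
{
	int csize, tsize;

	csize = snprintf(str, len, "of:N%sT%s", "node", "type");
	tsize = csize;			/* full (untruncated) length */
	if (csize >= len)		/* truncated: only len-1 chars landed */
		csize = len > 0 ? len - 1 : 0;
	len -= csize;
	str += csize;

	csize = snprintf(str, len, "C%s", "vendor,device");
	tsize += csize;
	return tsize;			/* callers can size a retry buffer */
}

int main(void)
{
	char small[8], big[64];

	printf("need %d, got \"%s\"\n", build_modalias(small, sizeof(small)), small);
	printf("need %d, got \"%s\"\n", build_modalias(big, sizeof(big)), big);
	return 0;
}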
+diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+index 5d1f0e9fdb08d..dba3991256586 100644
+--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
++++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
+@@ -350,15 +350,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
+ 			return false;
+ 
+ 		for (num = 0; num < counters; num++) {
++			/*
++			 * If we find a related event, then it's a valid group
++			 * since we don't need to allocate a new counter for it.
++			 */
+ 			if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
+ 				break;
+ 		}
+ 
++		/*
++		 * Otherwise it's a new event, but if there's no available counter,
++		 * fail the check since we cannot schedule all the events in
++		 * the group simultaneously.
++		 */
++		if (num == HISI_PCIE_MAX_COUNTERS)
++			return false;
++
+ 		if (num == counters)
+ 			event_group[counters++] = sibling;
+ 	}
+ 
+-	return counters <= HISI_PCIE_MAX_COUNTERS;
++	return true;
+ }
+ 
+ static int hisi_pcie_pmu_event_init(struct perf_event *event)
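Editor's note: both HiSilicon PMU hunks fix the same overflow: a sibling matching an already-counted event is free, but a genuinely new event must fail validation the moment the on-stack counter array is full; checking counters <= MAX only after the loop lets event_group[] overflow first. A simplified sketch of the corrected loop:

#include <stdbool.h>
#include <stdio.h>

#define MAX_COUNTERS 4

/* Validate that a group of event ids fits in MAX_COUNTERS hardware
 * counters, counting duplicates only once: the shape of the fixed
 * hisi_pcie/hns3 validate_event_group() loops. */
static bool validate_group(const int *ids, int n)
{
	int group[MAX_COUNTERS];
	int counters = 0, i, num;

	for (i = 0; i < n; i++) {
		for (num = 0; num < counters; num++)
			if (group[num] == ids[i])
				break;	/* related event, no new counter */

		if (num == MAX_COUNTERS)
			return false;	/* full before we could append */

		if (num == counters)
			group[counters++] = ids[i];
	}
	return true;
}

int main(void)
{
	int dup[6] = { 1, 1, 2, 2, 3, 3 };	/* 3 unique: fits */
	int many[5] = { 1, 2, 3, 4, 5 };	/* 5 unique: overflows */

	printf("%d %d\n", validate_group(dup, 6), validate_group(many, 5));
	return 0;
}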
+diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
+index 16869bf5bf4cc..60062eaa342aa 100644
+--- a/drivers/perf/hisilicon/hns3_pmu.c
++++ b/drivers/perf/hisilicon/hns3_pmu.c
+@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
+ 			return false;
+ 
+ 		for (num = 0; num < counters; num++) {
++			/*
++			 * If we find a related event, then it's a valid group
++			 * since we don't need to allocate a new counter for it.
++			 */
+ 			if (hns3_pmu_cmp_event(event_group[num], sibling))
+ 				break;
+ 		}
+ 
++		/*
++		 * Otherwise it's a new event, but if there's no available counter,
++		 * fail the check since we cannot schedule all the events in
++		 * the group simultaneously.
++		 */
++		if (num == HNS3_PMU_MAX_HW_EVENTS)
++			return false;
++
+ 		if (num == counters)
+ 			event_group[counters++] = sibling;
+ 	}
+ 
+-	return counters <= HNS3_PMU_MAX_HW_EVENTS;
++	return true;
+ }
+ 
+ static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
+@@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
+ 		return ret;
+ 	}
+ 
+-	ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
++	ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
+ 	if (ret) {
+ 		pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
+ 		return ret;
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 8cbe6e5f9c39a..3e44d2fb8bf81 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -27,7 +27,7 @@
+ 
+ #define ALT_SBI_PMU_OVERFLOW(__ovl)					\
+ asm volatile(ALTERNATIVE_2(						\
+-	"csrr %0, " __stringify(CSR_SSCOUNTOVF),			\
++	"csrr %0, " __stringify(CSR_SCOUNTOVF),				\
+ 	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
+ 		THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
+ 		CONFIG_ERRATA_THEAD_PMU,				\
+diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c
+index 54a2546bb93bf..be80f0bda9484 100644
+--- a/drivers/platform/x86/xiaomi-wmi.c
++++ b/drivers/platform/x86/xiaomi-wmi.c
+@@ -2,8 +2,10 @@
+ /* WMI driver for Xiaomi Laptops */
+ 
+ #include <linux/acpi.h>
++#include <linux/device.h>
+ #include <linux/input.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/wmi.h>
+ 
+ #include <uapi/linux/input-event-codes.h>
+@@ -20,12 +22,21 @@
+ 
+ struct xiaomi_wmi {
+ 	struct input_dev *input_dev;
++	struct mutex key_lock;	/* Protects the key event sequence */
+ 	unsigned int key_code;
+ };
+ 
++static void xiaomi_mutex_destroy(void *data)
++{
++	struct mutex *lock = data;
++
++	mutex_destroy(lock);
++}
++
+ static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+ {
+ 	struct xiaomi_wmi *data;
++	int ret;
+ 
+ 	if (wdev == NULL || context == NULL)
+ 		return -EINVAL;
+@@ -35,6 +46,11 @@ static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+ 		return -ENOMEM;
+ 	dev_set_drvdata(&wdev->dev, data);
+ 
++	mutex_init(&data->key_lock);
++	ret = devm_add_action_or_reset(&wdev->dev, xiaomi_mutex_destroy, &data->key_lock);
++	if (ret < 0)
++		return ret;
++
+ 	data->input_dev = devm_input_allocate_device(&wdev->dev);
+ 	if (data->input_dev == NULL)
+ 		return -ENOMEM;
+@@ -59,10 +75,12 @@ static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+ 	if (data == NULL)
+ 		return;
+ 
++	mutex_lock(&data->key_lock);
+ 	input_report_key(data->input_dev, data->key_code, 1);
+ 	input_sync(data->input_dev);
+ 	input_report_key(data->input_dev, data->key_code, 0);
+ 	input_sync(data->input_dev);
++	mutex_unlock(&data->key_lock);
+ }
+ 
+ static const struct wmi_device_id xiaomi_wmi_id_table[] = {
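Editor's note: the xiaomi-wmi change pairs mutex_init() with devm_add_action_or_reset() so the lock is destroyed automatically in reverse probe order, even if a later probe step fails. A toy userspace model of that devres behavior (the action stack is a stand-in for the driver core's devres list):

#include <stdio.h>

/* A toy devres: actions registered during probe run in reverse order
 * on teardown, mirroring devm_add_action_or_reset() semantics. */
typedef void (*action_fn)(void *);

static action_fn actions[8];
static void *action_data[8];
static int n_actions;

static int add_action_or_reset(action_fn fn, void *data)
{
	if (n_actions == 8) {
		fn(data);		/* "or_reset": run it on failure */
		return -1;
	}
	actions[n_actions] = fn;
	action_data[n_actions++] = data;
	return 0;
}

static void teardown(void)
{
	while (n_actions--)
		actions[n_actions](action_data[n_actions]);
}

static void mutex_destroy_action(void *lock)
{
	printf("mutex_destroy(%s)\n", (const char *)lock);
}

int main(void)
{
	add_action_or_reset(mutex_destroy_action, "key_lock");
	teardown();			/* driver unbind */
	return 0;
}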
+diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
+index 0d2c3724d0bc0..b86e11bdc07ef 100644
+--- a/drivers/power/supply/power_supply_sysfs.c
++++ b/drivers/power/supply/power_supply_sysfs.c
+@@ -271,23 +271,6 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
+ 	return count;
+ }
+ 
+-static ssize_t power_supply_show_charge_behaviour(struct device *dev,
+-						  struct power_supply *psy,
+-						  union power_supply_propval *value,
+-						  char *buf)
+-{
+-	int ret;
+-
+-	ret = power_supply_get_property(psy,
+-					POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+-					value);
+-	if (ret < 0)
+-		return ret;
+-
+-	return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+-						  value->intval, buf);
+-}
+-
+ static ssize_t power_supply_show_property(struct device *dev,
+ 					  struct device_attribute *attr,
+ 					  char *buf) {
+@@ -321,7 +304,8 @@ static ssize_t power_supply_show_property(struct device *dev,
+ 						&value, buf);
+ 		break;
+ 	case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+-		ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
++		ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
++							 value.intval, buf);
+ 		break;
+ 	case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ 		ret = sysfs_emit(buf, "%s\n", value.strval);
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 6506cfb89aa94..ee2ced88ab34f 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -4562,7 +4562,7 @@ static int ptp_ocp_dpll_direction_set(const struct dpll_pin *pin,
+ 		return -EOPNOTSUPP;
+ 	mode = direction == DPLL_PIN_DIRECTION_INPUT ?
+ 			    SMA_MODE_IN : SMA_MODE_OUT;
+-	return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr);
++	return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr + 1);
+ }
+ 
+ static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin,
+@@ -4583,7 +4583,7 @@ static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin,
+ 	tbl = bp->sma_op->tbl[sma->mode];
+ 	for (i = 0; tbl[i].name; i++)
+ 		if (tbl[i].frequency == frequency)
+-			return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr);
++			return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr + 1);
+ 	return -EINVAL;
+ }
+ 
+@@ -4600,7 +4600,7 @@ static int ptp_ocp_dpll_frequency_get(const struct dpll_pin *pin,
+ 	u32 val;
+ 	int i;
+ 
+-	val = bp->sma_op->get(bp, sma_nr);
++	val = bp->sma_op->get(bp, sma_nr + 1);
+ 	tbl = bp->sma_op->tbl[sma->mode];
+ 	for (i = 0; tbl[i].name; i++)
+ 		if (val == tbl[i].value) {
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index a02fdbc612562..e12b6ff70baba 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -147,7 +147,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	struct meson_pwm *meson = to_meson_pwm(chip);
+ 	struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
+ 	unsigned int cnt, duty_cnt;
+-	unsigned long fin_freq;
++	long fin_freq;
+ 	u64 duty, period, freq;
+ 
+ 	duty = state->duty_cycle;
+@@ -167,14 +167,15 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		freq = ULONG_MAX;
+ 
+ 	fin_freq = clk_round_rate(channel->clk, freq);
+-	if (fin_freq == 0) {
+-		dev_err(pwmchip_parent(chip), "invalid source clock frequency\n");
+-		return -EINVAL;
++	if (fin_freq <= 0) {
++		dev_err(pwmchip_parent(chip),
++			"invalid source clock frequency %llu\n", freq);
++		return fin_freq ? fin_freq : -EINVAL;
+ 	}
+ 
+-	dev_dbg(pwmchip_parent(chip), "fin_freq: %lu Hz\n", fin_freq);
++	dev_dbg(pwmchip_parent(chip), "fin_freq: %ld Hz\n", fin_freq);
+ 
+-	cnt = div_u64(fin_freq * period, NSEC_PER_SEC);
++	cnt = mul_u64_u64_div_u64(fin_freq, period, NSEC_PER_SEC);
+ 	if (cnt > 0xffff) {
+ 		dev_err(pwmchip_parent(chip), "unable to get period cnt\n");
+ 		return -EINVAL;
+@@ -189,7 +190,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		channel->hi = 0;
+ 		channel->lo = cnt;
+ 	} else {
+-		duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC);
++		duty_cnt = mul_u64_u64_div_u64(fin_freq, duty, NSEC_PER_SEC);
+ 
+ 		dev_dbg(pwmchip_parent(chip), "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
+ 
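Editor's note: the pwm-meson switch to mul_u64_u64_div_u64() matters because fin_freq * period can exceed 64 bits: a 1 GHz source clock times a multi-second period in nanoseconds wraps before the division by NSEC_PER_SEC ever happens. A userspace sketch emulating the helper with unsigned __int128 (a GCC/Clang extension, assumed available):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Emulate the kernel's mul_u64_u64_div_u64() with 128-bit math. */
static uint64_t mul_u64_u64_div_u64(uint64_t a, uint64_t b, uint64_t c)
{
	return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
	uint64_t fin_freq = 1000000000ULL;	/* 1 GHz clock */
	uint64_t period = 20000000000ULL;	/* 20 s period, in ns */

	/* The naive 64-bit product wraps; the helper does not. */
	printf("naive: %" PRIu64 "\n", (fin_freq * period) / NSEC_PER_SEC);
	printf("safe:  %" PRIu64 "\n",
	       mul_u64_u64_div_u64(fin_freq, period, NSEC_PER_SEC));
	return 0;
}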
+diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
+index 39d80da0e14af..f07b1126e7a81 100644
+--- a/drivers/pwm/pwm-sti.c
++++ b/drivers/pwm/pwm-sti.c
+@@ -624,32 +624,20 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	if (cdata->pwm_num_devs) {
+-		pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
++		pc->pwm_clk = devm_clk_get_prepared(dev, "pwm");
+ 		if (IS_ERR(pc->pwm_clk)) {
+ 			dev_err(dev, "failed to get PWM clock\n");
+ 			return PTR_ERR(pc->pwm_clk);
+ 		}
+-
+-		ret = clk_prepare(pc->pwm_clk);
+-		if (ret) {
+-			dev_err(dev, "failed to prepare clock\n");
+-			return ret;
+-		}
+ 	}
+ 
+ 	if (cdata->cpt_num_devs) {
+-		pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
++		pc->cpt_clk = devm_clk_get_prepared(dev, "capture");
+ 		if (IS_ERR(pc->cpt_clk)) {
+ 			dev_err(dev, "failed to get PWM capture clock\n");
+ 			return PTR_ERR(pc->cpt_clk);
+ 		}
+ 
+-		ret = clk_prepare(pc->cpt_clk);
+-		if (ret) {
+-			dev_err(dev, "failed to prepare clock\n");
+-			return ret;
+-		}
+-
+ 		cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
+ 		if (!cdata->ddata)
+ 			return -ENOMEM;
+@@ -664,27 +652,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
+ 		mutex_init(&ddata->lock);
+ 	}
+ 
+-	ret = pwmchip_add(chip);
+-	if (ret < 0) {
+-		clk_unprepare(pc->pwm_clk);
+-		clk_unprepare(pc->cpt_clk);
+-		return ret;
+-	}
+-
+-	platform_set_drvdata(pdev, chip);
+-
+-	return 0;
+-}
+-
+-static void sti_pwm_remove(struct platform_device *pdev)
+-{
+-	struct pwm_chip *chip = platform_get_drvdata(pdev);
+-	struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+-
+-	pwmchip_remove(chip);
+-
+-	clk_unprepare(pc->pwm_clk);
+-	clk_unprepare(pc->cpt_clk);
++	return devm_pwmchip_add(dev, chip);
+ }
+ 
+ static const struct of_device_id sti_pwm_of_match[] = {
+@@ -699,7 +667,6 @@ static struct platform_driver sti_pwm_driver = {
+ 		.of_match_table = sti_pwm_of_match,
+ 	},
+ 	.probe = sti_pwm_probe,
+-	.remove_new = sti_pwm_remove,
+ };
+ module_platform_driver(sti_pwm_driver);
+ 
+diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
+index 86993de253451..a4c5c6736b310 100644
+--- a/drivers/s390/cio/trace.h
++++ b/drivers/s390/cio/trace.h
+@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib,
+ 		__entry->devno = schib->pmcw.dev;
+ 		__entry->schib = *schib;
+ 		__entry->pmcw_ena = schib->pmcw.ena;
+-		__entry->pmcw_st = schib->pmcw.ena;
++		__entry->pmcw_st = schib->pmcw.st;
+ 		__entry->pmcw_dnv = schib->pmcw.dnv;
+ 		__entry->pmcw_dev = schib->pmcw.dev;
+ 		__entry->pmcw_lpm = schib->pmcw.lpm;
+diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
+index 52db147d9979d..f6dd077d47c9a 100644
+--- a/drivers/scsi/bfa/bfad_debugfs.c
++++ b/drivers/scsi/bfa/bfad_debugfs.c
+@@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ 	unsigned long flags;
+ 	void *kern_buf;
+ 
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+@@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ 	unsigned long flags;
+ 	void *kern_buf;
+ 
+-	kern_buf = memdup_user(buf, nbytes);
++	kern_buf = memdup_user_nul(buf, nbytes);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index af18d20f30794..49c57a9c110b5 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+ 	struct Scsi_Host *sh;
+ 
+-	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
++	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *));
+ 	if (sh == NULL) {
+ 		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+ 		return -ENOMEM;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index f6e6db8b8aba9..e97f4e01a865a 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -239,8 +239,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ 	/* help some expanders that fail to zero sas_address in the 'no
+ 	 * device' case
+ 	 */
+-	if (phy->attached_dev_type == SAS_PHY_UNUSED ||
+-	    phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
++	if (phy->attached_dev_type == SAS_PHY_UNUSED)
+ 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ 	else
+ 		memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
+index 451fd236bfd05..96174353e3898 100644
+--- a/drivers/scsi/qedf/qedf_debugfs.c
++++ b/drivers/scsi/qedf/qedf_debugfs.c
+@@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
+ 	if (!count || *ppos)
+ 		return 0;
+ 
+-	kern_buf = memdup_user(buffer, count);
++	kern_buf = memdup_user_nul(buffer, count);
+ 	if (IS_ERR(kern_buf))
+ 		return PTR_ERR(kern_buf);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 55ff3d7482b3e..a1545dad0c0ce 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ 			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ 
+-		seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
++		seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
+ 			   exch_used, ha->base_qpair->fwres.exch_limit);
+ 
+ 		if (ql2xenforce_iocb_limit == 2) {
+diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
+index b0cd071c4719b..0b2e5690dacfa 100644
+--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
+@@ -14,7 +14,8 @@
+ #define CMDQ_POLL_ENABLE_MASK	BIT(0)
+ #define CMDQ_EOC_IRQ_EN		BIT(0)
+ #define CMDQ_REG_TYPE		1
+-#define CMDQ_JUMP_RELATIVE	1
++#define CMDQ_JUMP_RELATIVE	0
++#define CMDQ_JUMP_ABSOLUTE	1
+ 
+ struct cmdq_instruction {
+ 	union {
+@@ -397,7 +398,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+ 	struct cmdq_instruction inst = {};
+ 
+ 	inst.op = CMDQ_CODE_JUMP;
+-	inst.offset = CMDQ_JUMP_RELATIVE;
++	inst.offset = CMDQ_JUMP_ABSOLUTE;
+ 	inst.value = addr >>
+ 		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ 	return cmdq_pkt_append_command(pkt, inst);
+diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
+index f913e9bd57ed4..823fd108fa039 100644
+--- a/drivers/soc/qcom/pmic_glink.c
++++ b/drivers/soc/qcom/pmic_glink.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/soc/qcom/pdr.h>
+ #include <linux/soc/qcom/pmic_glink.h>
++#include <linux/spinlock.h>
+ 
+ enum {
+ 	PMIC_GLINK_CLIENT_BATT = 0,
+@@ -36,7 +37,7 @@ struct pmic_glink {
+ 	unsigned int pdr_state;
+ 
+ 	/* serializing clients list updates */
+-	struct mutex client_lock;
++	spinlock_t client_lock;
+ 	struct list_head clients;
+ };
+ 
+@@ -58,10 +59,11 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res)
+ {
+ 	struct pmic_glink_client *client = (struct pmic_glink_client *)res;
+ 	struct pmic_glink *pg = client->pg;
++	unsigned long flags;
+ 
+-	mutex_lock(&pg->client_lock);
++	spin_lock_irqsave(&pg->client_lock, flags);
+ 	list_del(&client->node);
+-	mutex_unlock(&pg->client_lock);
++	spin_unlock_irqrestore(&pg->client_lock, flags);
+ }
+ 
+ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+@@ -72,6 +74,7 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ {
+ 	struct pmic_glink_client *client;
+ 	struct pmic_glink *pg = dev_get_drvdata(dev->parent);
++	unsigned long flags;
+ 
+ 	client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
+ 	if (!client)
+@@ -83,9 +86,14 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ 	client->pdr_notify = pdr;
+ 	client->priv = priv;
+ 
+-	mutex_lock(&pg->client_lock);
++	mutex_lock(&pg->state_lock);
++	spin_lock_irqsave(&pg->client_lock, flags);
++
+ 	list_add(&client->node, &pg->clients);
+-	mutex_unlock(&pg->client_lock);
++	client->pdr_notify(client->priv, pg->client_state);
++
++	spin_unlock_irqrestore(&pg->client_lock, flags);
++	mutex_unlock(&pg->state_lock);
+ 
+ 	devres_add(dev, client);
+ 
+@@ -107,6 +115,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ 	struct pmic_glink_client *client;
+ 	struct pmic_glink_hdr *hdr;
+ 	struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
++	unsigned long flags;
+ 
+ 	if (len < sizeof(*hdr)) {
+ 		dev_warn(pg->dev, "ignoring truncated message\n");
+@@ -115,10 +124,12 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ 
+ 	hdr = data;
+ 
++	spin_lock_irqsave(&pg->client_lock, flags);
+ 	list_for_each_entry(client, &pg->clients, node) {
+ 		if (client->id == le32_to_cpu(hdr->owner))
+ 			client->cb(data, len, client->priv);
+ 	}
++	spin_unlock_irqrestore(&pg->client_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -158,6 +169,7 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+ {
+ 	struct pmic_glink_client *client;
+ 	unsigned int new_state = pg->client_state;
++	unsigned long flags;
+ 
+ 	if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
+ 		if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+@@ -168,8 +180,10 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+ 	}
+ 
+ 	if (new_state != pg->client_state) {
++		spin_lock_irqsave(&pg->client_lock, flags);
+ 		list_for_each_entry(client, &pg->clients, node)
+ 			client->pdr_notify(client->priv, new_state);
++		spin_unlock_irqrestore(&pg->client_lock, flags);
+ 		pg->client_state = new_state;
+ 	}
+ }
+@@ -256,7 +270,7 @@ static int pmic_glink_probe(struct platform_device *pdev)
+ 	pg->dev = &pdev->dev;
+ 
+ 	INIT_LIST_HEAD(&pg->clients);
+-	mutex_init(&pg->client_lock);
++	spin_lock_init(&pg->client_lock);
+ 	mutex_init(&pg->state_lock);
+ 
+ 	match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
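Editor's note: the pmic_glink conversion trades the clients-list mutex for a spinlock because the list is now also walked from the rpmsg callback, which can run in atomic context where sleeping locks are forbidden; the irqsave variants additionally fence off interrupts. A userspace sketch of the locking shape using a pthread spinlock (there is no userspace analogue of irqsave, so that part is only noted in the comments):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Clients list guarded by a spinlock: the shape pmic_glink moves to so
 * the rpmsg callback (atomic context in the kernel) can walk it. The
 * kernel code uses spin_lock_irqsave(); a pthread spinlock stands in. */
struct client {
	int id;
	struct client *next;
};

static pthread_spinlock_t client_lock;
static struct client *clients;

static void register_client(int id)
{
	struct client *c = malloc(sizeof(*c));

	if (!c)
		return;
	c->id = id;
	pthread_spin_lock(&client_lock);
	c->next = clients;
	clients = c;
	pthread_spin_unlock(&client_lock);
}

static void callback(int owner)
{
	struct client *c;

	pthread_spin_lock(&client_lock);	/* never sleep while held */
	for (c = clients; c; c = c->next)
		if (c->id == owner)
			printf("deliver to client %d\n", c->id);
	pthread_spin_unlock(&client_lock);
}

int main(void)
{
	pthread_spin_init(&client_lock, PTHREAD_PROCESS_PRIVATE);
	register_client(1);
	register_client(2);
	callback(2);
	return 0;
}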
+diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
+index 938a4ea89c590..8c30191b21a77 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css.c
++++ b/drivers/staging/media/atomisp/pci/sh_css.c
+@@ -4690,6 +4690,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
+ 						  sizeof(struct ia_css_binary),
+ 						  GFP_KERNEL);
+ 		if (!mycs->yuv_scaler_binary) {
++			mycs->num_yuv_scaler = 0;
+ 			err = -ENOMEM;
+ 			return err;
+ 		}
+diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
+index a587f860101ae..323aa70fdeaf1 100644
+--- a/drivers/staging/media/starfive/camss/stf-camss.c
++++ b/drivers/staging/media/starfive/camss/stf-camss.c
+@@ -162,6 +162,12 @@ static int stfcamss_register_devs(struct stfcamss *stfcamss)
+ 
+ static void stfcamss_unregister_devs(struct stfcamss *stfcamss)
+ {
++	struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
++	struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
++
++	media_entity_remove_links(&isp_dev->subdev.entity);
++	media_entity_remove_links(&cap_yuv->video.vdev.entity);
++
+ 	stf_isp_unregister(&stfcamss->isp_dev);
+ 	stf_capture_unregister(stfcamss);
+ }
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index fd4bd650c77a6..4e5c213a89225 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -1530,11 +1530,15 @@ static const struct lvts_data mt7988_lvts_ap_data = {
+ static const struct lvts_data mt8192_lvts_mcu_data = {
+ 	.lvts_ctrl	= mt8192_lvts_mcu_data_ctrl,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl),
++	.temp_factor	= LVTS_COEFF_A_MT8195,
++	.temp_offset	= LVTS_COEFF_B_MT8195,
+ };
+ 
+ static const struct lvts_data mt8192_lvts_ap_data = {
+ 	.lvts_ctrl	= mt8192_lvts_ap_data_ctrl,
+ 	.num_lvts_ctrl	= ARRAY_SIZE(mt8192_lvts_ap_data_ctrl),
++	.temp_factor	= LVTS_COEFF_A_MT8195,
++	.temp_offset	= LVTS_COEFF_B_MT8195,
+ };
+ 
+ static const struct lvts_data mt8195_lvts_mcu_data = {
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index 6d7c16ccb44dc..4edee8d929a75 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -264,7 +264,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
+ 	for (i = 0; i < priv->num_sensors; i++) {
+ 		dev_dbg(priv->dev,
+ 			"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
+-			__func__, i, p1[i], p2[i]);
++			__func__, i, p1[i], p2 ? p2[i] : 0);
+ 
+ 		if (!priv->sensor[i].slope)
+ 			priv->sensor[i].slope = SLOPE_DEFAULT;
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 34a31bc720230..38b7d02384d7c 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -431,7 +431,6 @@ static void update_temperature(struct thermal_zone_device *tz)
+ 	trace_thermal_temperature(tz);
+ 
+ 	thermal_genl_sampling_temp(tz->id, temp);
+-	thermal_debug_update_temp(tz);
+ }
+ 
+ static void thermal_zone_device_check(struct work_struct *work)
+@@ -475,6 +474,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
+ 	for_each_trip(tz, trip)
+ 		handle_thermal_trip(tz, trip);
+ 
++	thermal_debug_update_temp(tz);
++
+ 	monitor_thermal_zone(tz);
+ }
+ 
+@@ -897,6 +898,7 @@ __thermal_cooling_device_register(struct device_node *np,
+ {
+ 	struct thermal_cooling_device *cdev;
+ 	struct thermal_zone_device *pos = NULL;
++	unsigned long current_state;
+ 	int id, ret;
+ 
+ 	if (!ops || !ops->get_max_state || !ops->get_cur_state ||
+@@ -934,6 +936,10 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	if (ret)
+ 		goto out_cdev_type;
+ 
++	ret = cdev->ops->get_cur_state(cdev, &current_state);
++	if (ret)
++		goto out_cdev_type;
++
+ 	thermal_cooling_device_setup_sysfs(cdev);
+ 
+ 	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+@@ -947,6 +953,8 @@ __thermal_cooling_device_register(struct device_node *np,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	thermal_debug_cdev_add(cdev, current_state);
++
+ 	/* Add 'this' new cdev to the global cdev list */
+ 	mutex_lock(&thermal_list_lock);
+ 
+@@ -962,8 +970,6 @@ __thermal_cooling_device_register(struct device_node *np,
+ 
+ 	mutex_unlock(&thermal_list_lock);
+ 
+-	thermal_debug_cdev_add(cdev);
+-
+ 	return cdev;
+ 
+ out_cooling_dev:
+diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
+index 5693cc8b231aa..403f74d663dce 100644
+--- a/drivers/thermal/thermal_debugfs.c
++++ b/drivers/thermal/thermal_debugfs.c
+@@ -435,6 +435,14 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+ 	}
+ 
+ 	cdev_dbg->current_state = new_state;
++
++	/*
++	 * Create a record for the new state if it is not there, so its
++	 * duration will be printed by cdev_dt_seq_show() as expected if it
++	 * runs before the next state transition.
++	 */
++	thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, new_state);
++
+ 	transition = (old_state << 16) | new_state;
+ 
+ 	/*
+@@ -460,8 +468,9 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+  * Allocates a cooling device object for debug, initializes the
+  * statistics and create the entries in sysfs.
+  * @cdev: a pointer to a cooling device
++ * @state: current state of the cooling device
+  */
+-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
++void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state)
+ {
+ 	struct thermal_debugfs *thermal_dbg;
+ 	struct cdev_debugfs *cdev_dbg;
+@@ -478,9 +487,16 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
+ 		INIT_LIST_HEAD(&cdev_dbg->durations[i]);
+ 	}
+ 
+-	cdev_dbg->current_state = 0;
++	cdev_dbg->current_state = state;
+ 	cdev_dbg->timestamp = ktime_get();
+ 
++	/*
++	 * Create a record for the initial cooling device state, so its
++	 * duration will be printed by cdev_dt_seq_show() as expected if it
++	 * runs before the first state transition.
++	 */
++	thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, state);
++
+ 	debugfs_create_file("trans_table", 0400, thermal_dbg->d_top,
+ 			    thermal_dbg, &tt_fops);
+ 
+@@ -555,7 +571,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+ 	struct tz_episode *tze;
+ 	struct tz_debugfs *tz_dbg;
+ 	struct thermal_debugfs *thermal_dbg = tz->debugfs;
+-	int temperature = tz->temperature;
+ 	int trip_id = thermal_zone_trip_id(tz, trip);
+ 	ktime_t now = ktime_get();
+ 
+@@ -624,12 +639,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+ 
+ 	tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+ 	tze->trip_stats[trip_id].timestamp = now;
+-	tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
+-	tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+-	tze->trip_stats[trip_id].count++;
+-	tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
+-		(temperature - tze->trip_stats[trip_id].avg) /
+-		tze->trip_stats[trip_id].count;
+ 
+ unlock:
+ 	mutex_unlock(&thermal_dbg->lock);
+diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
+index 155b9af5fe870..c28bd4c114124 100644
+--- a/drivers/thermal/thermal_debugfs.h
++++ b/drivers/thermal/thermal_debugfs.h
+@@ -2,7 +2,7 @@
+ 
+ #ifdef CONFIG_THERMAL_DEBUGFS
+ void thermal_debug_init(void);
+-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev);
++void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state);
+ void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
+ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
+ void thermal_debug_tz_add(struct thermal_zone_device *tz);
+@@ -14,7 +14,7 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+ void thermal_debug_update_temp(struct thermal_zone_device *tz);
+ #else
+ static inline void thermal_debug_init(void) {}
+-static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {}
++static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state) {}
+ static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {}
+ static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+ 						   int state) {}
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 4036566febcba..afbf7837b53ec 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -245,16 +245,18 @@ enum gsm_encoding {
+ 
+ enum gsm_mux_state {
+ 	GSM_SEARCH,
+-	GSM_START,
+-	GSM_ADDRESS,
+-	GSM_CONTROL,
+-	GSM_LEN,
+-	GSM_DATA,
+-	GSM_FCS,
+-	GSM_OVERRUN,
+-	GSM_LEN0,
+-	GSM_LEN1,
+-	GSM_SSOF,
++	GSM0_ADDRESS,
++	GSM0_CONTROL,
++	GSM0_LEN0,
++	GSM0_LEN1,
++	GSM0_DATA,
++	GSM0_FCS,
++	GSM0_SSOF,
++	GSM1_START,
++	GSM1_ADDRESS,
++	GSM1_CONTROL,
++	GSM1_DATA,
++	GSM1_OVERRUN,
+ };
+ 
+ /*
+@@ -2847,6 +2849,30 @@ static void gsm_queue(struct gsm_mux *gsm)
+ 	return;
+ }
+ 
++/**
++ * gsm0_receive_state_check_and_fix	-	check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for basic option mode.
++ */
++
++static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++	switch (gsm->state) {
++	case GSM_SEARCH:
++	case GSM0_ADDRESS:
++	case GSM0_CONTROL:
++	case GSM0_LEN0:
++	case GSM0_LEN1:
++	case GSM0_DATA:
++	case GSM0_FCS:
++	case GSM0_SSOF:
++		break;
++	default:
++		gsm->state = GSM_SEARCH;
++		break;
++	}
++}
+ 
+ /**
+  *	gsm0_receive	-	perform processing for non-transparency
+@@ -2860,26 +2886,27 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
+ {
+ 	unsigned int len;
+ 
++	gsm0_receive_state_check_and_fix(gsm);
+ 	switch (gsm->state) {
+ 	case GSM_SEARCH:	/* SOF marker */
+ 		if (c == GSM0_SOF) {
+-			gsm->state = GSM_ADDRESS;
++			gsm->state = GSM0_ADDRESS;
+ 			gsm->address = 0;
+ 			gsm->len = 0;
+ 			gsm->fcs = INIT_FCS;
+ 		}
+ 		break;
+-	case GSM_ADDRESS:	/* Address EA */
++	case GSM0_ADDRESS:	/* Address EA */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->address, c))
+-			gsm->state = GSM_CONTROL;
++			gsm->state = GSM0_CONTROL;
+ 		break;
+-	case GSM_CONTROL:	/* Control Byte */
++	case GSM0_CONTROL:	/* Control Byte */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		gsm->control = c;
+-		gsm->state = GSM_LEN0;
++		gsm->state = GSM0_LEN0;
+ 		break;
+-	case GSM_LEN0:		/* Length EA */
++	case GSM0_LEN0:		/* Length EA */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->len, c)) {
+ 			if (gsm->len > gsm->mru) {
+@@ -2889,14 +2916,14 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
+ 			}
+ 			gsm->count = 0;
+ 			if (!gsm->len)
+-				gsm->state = GSM_FCS;
++				gsm->state = GSM0_FCS;
+ 			else
+-				gsm->state = GSM_DATA;
++				gsm->state = GSM0_DATA;
+ 			break;
+ 		}
+-		gsm->state = GSM_LEN1;
++		gsm->state = GSM0_LEN1;
+ 		break;
+-	case GSM_LEN1:
++	case GSM0_LEN1:
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		len = c;
+ 		gsm->len |= len << 7;
+@@ -2907,26 +2934,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
+ 		}
+ 		gsm->count = 0;
+ 		if (!gsm->len)
+-			gsm->state = GSM_FCS;
++			gsm->state = GSM0_FCS;
+ 		else
+-			gsm->state = GSM_DATA;
++			gsm->state = GSM0_DATA;
+ 		break;
+-	case GSM_DATA:		/* Data */
++	case GSM0_DATA:		/* Data */
+ 		gsm->buf[gsm->count++] = c;
+-		if (gsm->count == gsm->len) {
++		if (gsm->count >= MAX_MRU) {
++			gsm->bad_size++;
++			gsm->state = GSM_SEARCH;
++		} else if (gsm->count >= gsm->len) {
+ 			/* Calculate final FCS for UI frames over all data */
+ 			if ((gsm->control & ~PF) != UIH) {
+ 				gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ 							     gsm->count);
+ 			}
+-			gsm->state = GSM_FCS;
++			gsm->state = GSM0_FCS;
+ 		}
+ 		break;
+-	case GSM_FCS:		/* FCS follows the packet */
++	case GSM0_FCS:		/* FCS follows the packet */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+-		gsm->state = GSM_SSOF;
++		gsm->state = GSM0_SSOF;
+ 		break;
+-	case GSM_SSOF:
++	case GSM0_SSOF:
+ 		gsm->state = GSM_SEARCH;
+ 		if (c == GSM0_SOF)
+ 			gsm_queue(gsm);
+@@ -2939,6 +2969,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
+ 	}
+ }
+ 
++/**
++ * gsm1_receive_state_check_and_fix	-	check and correct receive state
++ * @gsm: gsm data for this ldisc instance
++ *
++ * Ensures that the current receive state is valid for advanced option mode.
++ */
++
++static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
++{
++	switch (gsm->state) {
++	case GSM_SEARCH:
++	case GSM1_START:
++	case GSM1_ADDRESS:
++	case GSM1_CONTROL:
++	case GSM1_DATA:
++	case GSM1_OVERRUN:
++		break;
++	default:
++		gsm->state = GSM_SEARCH;
++		break;
++	}
++}
++
+ /**
+  *	gsm1_receive	-	perform processing for non-transparency
+  *	@gsm: gsm data for this ldisc instance
+@@ -2949,6 +3002,7 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
+ 
+ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
+ {
++	gsm1_receive_state_check_and_fix(gsm);
+ 	/* handle XON/XOFF */
+ 	if ((c & ISO_IEC_646_MASK) == XON) {
+ 		gsm->constipated = true;
+@@ -2961,11 +3015,11 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
+ 	}
+ 	if (c == GSM1_SOF) {
+ 		/* EOF is only valid in frame if we have got to the data state */
+-		if (gsm->state == GSM_DATA) {
++		if (gsm->state == GSM1_DATA) {
+ 			if (gsm->count < 1) {
+ 				/* Missing FCS */
+ 				gsm->malformed++;
+-				gsm->state = GSM_START;
++				gsm->state = GSM1_START;
+ 				return;
+ 			}
+ 			/* Remove the FCS from data */
+@@ -2981,14 +3035,14 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
+ 			gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ 			gsm->len = gsm->count;
+ 			gsm_queue(gsm);
+-			gsm->state  = GSM_START;
++			gsm->state  = GSM1_START;
+ 			return;
+ 		}
+ 		/* Any partial frame was a runt so go back to start */
+-		if (gsm->state != GSM_START) {
++		if (gsm->state != GSM1_START) {
+ 			if (gsm->state != GSM_SEARCH)
+ 				gsm->malformed++;
+-			gsm->state = GSM_START;
++			gsm->state = GSM1_START;
+ 		}
+ 		/* A SOF in GSM_START means we are still reading idling or
+ 		   framing bytes */
+@@ -3009,30 +3063,30 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
+ 		gsm->escape = false;
+ 	}
+ 	switch (gsm->state) {
+-	case GSM_START:		/* First byte after SOF */
++	case GSM1_START:		/* First byte after SOF */
+ 		gsm->address = 0;
+-		gsm->state = GSM_ADDRESS;
++		gsm->state = GSM1_ADDRESS;
+ 		gsm->fcs = INIT_FCS;
+ 		fallthrough;
+-	case GSM_ADDRESS:	/* Address continuation */
++	case GSM1_ADDRESS:	/* Address continuation */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		if (gsm_read_ea(&gsm->address, c))
+-			gsm->state = GSM_CONTROL;
++			gsm->state = GSM1_CONTROL;
+ 		break;
+-	case GSM_CONTROL:	/* Control Byte */
++	case GSM1_CONTROL:	/* Control Byte */
+ 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ 		gsm->control = c;
+ 		gsm->count = 0;
+-		gsm->state = GSM_DATA;
++		gsm->state = GSM1_DATA;
+ 		break;
+-	case GSM_DATA:		/* Data */
+-		if (gsm->count > gsm->mru) {	/* Allow one for the FCS */
+-			gsm->state = GSM_OVERRUN;
++	case GSM1_DATA:		/* Data */
++		if (gsm->count > gsm->mru || gsm->count > MAX_MRU) {	/* Allow one for the FCS */
++			gsm->state = GSM1_OVERRUN;
+ 			gsm->bad_size++;
+ 		} else
+ 			gsm->buf[gsm->count++] = c;
+ 		break;
+-	case GSM_OVERRUN:	/* Over-long - eg a dropped SOF */
++	case GSM1_OVERRUN:	/* Over-long - eg a dropped SOF */
+ 		break;
+ 	default:
+ 		pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
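
Splitting the receive states into GSM0_* and GSM1_* sets and validating the
shared state variable before dispatch means a mode switch can never leave
the parser in a state the other decoder does not handle. A standalone
sketch of that defensive pattern; the enum values and names here are
illustrative, not the kernel's:

#include <stdio.h>

enum state {
	SEARCH,					/* shared safe state */
	M0_ADDRESS, M0_CONTROL, M0_DATA,	/* basic option mode */
	M1_START, M1_ADDRESS, M1_DATA,		/* advanced option mode */
};

static enum state state = M1_DATA;	/* leftover from the other mode */

/* Mirror of gsm0_receive_state_check_and_fix(): fall back to SEARCH
 * whenever the current state is not one this decoder owns. */
static void mode0_check_and_fix(void)
{
	switch (state) {
	case SEARCH:
	case M0_ADDRESS:
	case M0_CONTROL:
	case M0_DATA:
		break;
	default:
		state = SEARCH;
		break;
	}
}

int main(void)
{
	mode0_check_and_fix();
	printf("state after check: %d (SEARCH is %d)\n", state, SEARCH);
	return 0;
}
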
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index 5daa38d9c64e0..61d81b11f6a3d 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -675,18 +675,46 @@ static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv)
+ 	clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate);
+ }
+ 
++static u32 find_quot(struct device *dev, u32 freq, u32 baud, u32 *percent)
++{
++	u32 quot;
++	u32 rate;
++	u64 hires_rate;
++	u64 hires_baud;
++	u64 hires_err;
++
++	rate = freq / 16;
++	quot = DIV_ROUND_CLOSEST(rate, baud);
++	if (!quot)
++		return 0;
++
++	/* increase resolution to get xx.xx percent */
++	hires_rate = div_u64((u64)rate * 10000, (u64)quot);
++	hires_baud = (u64)baud * 10000;
++
++	/* get the delta */
++	if (hires_rate > hires_baud)
++		hires_err = (hires_rate - hires_baud);
++	else
++		hires_err = (hires_baud - hires_rate);
++
++	*percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
++
++	dev_dbg(dev, "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
++		baud, freq, *percent / 100, *percent % 100);
++
++	return quot;
++}
++
+ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 			u32 baud)
+ {
+ 	u32 percent;
+ 	u32 best_percent = UINT_MAX;
+ 	u32 quot;
++	u32 freq;
+ 	u32 best_quot = 1;
+-	u32 rate;
+-	int best_index = -1;
+-	u64 hires_rate;
+-	u64 hires_baud;
+-	u64 hires_err;
++	u32 best_freq = 0;
+ 	int rc;
+ 	int i;
+ 	int real_baud;
+@@ -695,44 +723,35 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 	if (priv->baud_mux_clk == NULL)
+ 		return;
+ 
+-	/* Find the closest match for specified baud */
+-	for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
+-		if (priv->real_rates[i] == 0)
+-			continue;
+-		rate = priv->real_rates[i] / 16;
+-		quot = DIV_ROUND_CLOSEST(rate, baud);
+-		if (!quot)
+-			continue;
+-
+-		/* increase resolution to get xx.xx percent */
+-		hires_rate = (u64)rate * 10000;
+-		hires_baud = (u64)baud * 10000;
+-
+-		hires_err = div_u64(hires_rate, (u64)quot);
+-
+-		/* get the delta */
+-		if (hires_err > hires_baud)
+-			hires_err = (hires_err - hires_baud);
+-		else
+-			hires_err = (hires_baud - hires_err);
+-
+-		percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
+-		dev_dbg(up->dev,
+-			"Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
+-			baud, priv->real_rates[i], percent / 100,
+-			percent % 100);
+-		if (percent < best_percent) {
+-			best_percent = percent;
+-			best_index = i;
+-			best_quot = quot;
++	/* Try default_mux_rate first */
++	quot = find_quot(up->dev, priv->default_mux_rate, baud, &percent);
++	if (quot) {
++		best_percent = percent;
++		best_freq = priv->default_mux_rate;
++		best_quot = quot;
++	}
++	/* If more than 1% error, find the closest match for specified baud */
++	if (best_percent > 100) {
++		for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
++			freq = priv->real_rates[i];
++			if (freq == 0 || freq == priv->default_mux_rate)
++				continue;
++			quot = find_quot(up->dev, freq, baud, &percent);
++			if (!quot)
++				continue;
++
++			if (percent < best_percent) {
++				best_percent = percent;
++				best_freq = freq;
++				best_quot = quot;
++			}
+ 		}
+ 	}
+-	if (best_index == -1) {
++	if (!best_freq) {
+ 		dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud);
+ 		return;
+ 	}
+-	rate = priv->real_rates[best_index];
+-	rc = clk_set_rate(priv->baud_mux_clk, rate);
++	rc = clk_set_rate(priv->baud_mux_clk, best_freq);
+ 	if (rc)
+ 		dev_err(up->dev, "Error selecting BAUD MUX clock\n");
+ 
+@@ -741,8 +760,8 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 		dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n",
+ 			baud, percent / 100, percent % 100);
+ 
+-	real_baud = rate / 16 / best_quot;
+-	dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate);
++	real_baud = best_freq / 16 / best_quot;
++	dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", best_freq);
+ 	dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n",
+ 		baud, real_baud);
+ 
+@@ -751,7 +770,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
+ 	i += (i / 2);
+ 	priv->char_wait = ns_to_ktime(i);
+ 
+-	up->uartclk = rate;
++	up->uartclk = best_freq;
+ }
+ 
+ static void brcmstb_set_termios(struct uart_port *up,
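
find_quot() above folds the old inline search body into a helper that
reports the rate error in hundredths of a percent using 64-bit fixed-point
arithmetic. The same computation as a standalone program, with
DIV_ROUND_CLOSEST spelled out; the 81 MHz clock in main() is only an
example value, not taken from the driver:

#include <stdio.h>
#include <stdint.h>

static uint32_t find_quot(uint32_t freq, uint32_t baud, uint32_t *percent)
{
	uint32_t rate = freq / 16;
	uint32_t quot = (rate + baud / 2) / baud;	/* DIV_ROUND_CLOSEST */
	uint64_t hires_rate, hires_baud, hires_err;

	if (!quot)
		return 0;

	/* scale by 10000 so the result reads as xx.xx percent */
	hires_rate = (uint64_t)rate * 10000 / quot;
	hires_baud = (uint64_t)baud * 10000;
	hires_err = hires_rate > hires_baud ? hires_rate - hires_baud
					    : hires_baud - hires_rate;
	*percent = (uint32_t)((hires_err + baud / 2) / baud);
	return quot;
}

int main(void)
{
	uint32_t percent;
	uint32_t quot = find_quot(81000000, 115200, &percent);

	printf("quot=%u error=%u.%02u%%\n", quot, percent / 100, percent % 100);
	return 0;
}

Trying the default mux rate first and only scanning the rate table when the
error exceeds 1% (percent > 100 in these units) avoids needless clock
reprogramming for baud rates the default clock already serves well.
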
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index 9ff6bbe9c0863..d14988d1492fe 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -209,15 +209,19 @@ static int mtk8250_startup(struct uart_port *port)
+ 
+ static void mtk8250_shutdown(struct uart_port *port)
+ {
+-#ifdef CONFIG_SERIAL_8250_DMA
+ 	struct uart_8250_port *up = up_to_u8250p(port);
+ 	struct mtk8250_data *data = port->private_data;
++	int irq = data->rx_wakeup_irq;
+ 
++#ifdef CONFIG_SERIAL_8250_DMA
+ 	if (up->dma)
+ 		data->rx_status = DMA_RX_SHUTDOWN;
+ #endif
+ 
+-	return serial8250_do_shutdown(port);
++	serial8250_do_shutdown(port);
++
++	if (irq >= 0)
++		serial8250_do_set_mctrl(&up->port, TIOCM_RTS);
+ }
+ 
+ static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 929206a9a6e11..12915fffac279 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -554,16 +554,28 @@ static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
+ 	return reg == SC16IS7XX_RHR_REG;
+ }
+ 
++/*
++ * Configure programmable baud rate generator (divisor) according to the
++ * desired baud rate.
++ *
++ * From the datasheet, the divisor is computed according to:
++ *
++ *              XTAL1 input frequency
++ *             -----------------------
++ *                    prescaler
++ * divisor = ---------------------------
++ *            baud-rate x sampling-rate
++ */
+ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ {
+ 	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+ 	u8 lcr;
+-	u8 prescaler = 0;
++	unsigned int prescaler = 1;
+ 	unsigned long clk = port->uartclk, div = clk / 16 / baud;
+ 
+ 	if (div >= BIT(16)) {
+-		prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
+-		div /= 4;
++		prescaler = 4;
++		div /= prescaler;
+ 	}
+ 
+ 	/* Enable enhanced features */
+@@ -573,9 +585,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 			      SC16IS7XX_EFR_ENABLE_BIT);
+ 	sc16is7xx_efr_unlock(port);
+ 
++	/* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
+ 	sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ 			      SC16IS7XX_MCR_CLKSEL_BIT,
+-			      prescaler);
++			      prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
+ 
+ 	/* Backup LCR and access special register set (DLL/DLH) */
+ 	lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+@@ -591,7 +604,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
+ 	/* Restore LCR and access to general register set */
+ 	sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ 
+-	return DIV_ROUND_CLOSEST(clk / 16, div);
++	return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div);
+ }
+ 
+ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
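
The divisor formula documented above reduces to div = (clk / prescaler) /
(baud * 16), with the divide-by-4 prescaler engaged only when the 16-bit
DLL/DLH divisor would otherwise overflow. A worked example assuming a
14.7456 MHz XTAL, a common crystal for these parts but not a value taken
from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long clk = 14745600;	/* assumed XTAL1 frequency */

	for (unsigned long baud = 9600; baud <= 115200; baud *= 12) {
		unsigned int prescaler = 1;
		unsigned long div = clk / 16 / baud;

		if (div >= 1UL << 16) {	/* DLL/DLH are 16 bits wide */
			prescaler = 4;
			div /= prescaler;
		}
		printf("baud %6lu -> prescaler %u, divisor %lu\n",
		       baud, prescaler, div);
	}
	return 0;
}

For clk = 14745600 and baud = 9600 this gives div = 96 with no prescaler.
Tracking the prescaler as 1 or 4, instead of the raw MCR bit value, is what
lets the fixed return statement report the real baud rate as
(clk / prescaler) / 16 / div.
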
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 768bf87cd80d3..005d63ab1f441 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -601,8 +601,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+ 	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+ 
+ 	while (sq_head_slot != hwq->sq_tail_slot) {
+-		utrd = hwq->sqe_base_addr +
+-				sq_head_slot * sizeof(struct utp_transfer_req_desc);
++		utrd = hwq->sqe_base_addr + sq_head_slot;
+ 		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+ 		if (addr == match) {
+ 			ufshcd_mcq_nullify_sqe(utrd);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index a0f8e930167d7..1322a9c318cff 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -4289,7 +4289,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		 * Make sure UIC command completion interrupt is disabled before
+ 		 * issuing UIC command.
+ 		 */
+-		wmb();
++		ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ 		reenable_intr = true;
+ 	}
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+@@ -10400,7 +10400,7 @@ int ufshcd_system_restore(struct device *dev)
+ 	 * are updated with the latest queue addresses. Only after
+ 	 * updating these addresses, we can queue the new commands.
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
+ 
+ 	/* Resuming from hibernate, assume that link was OFF */
+ 	ufshcd_set_link_off(hba);
+@@ -10621,7 +10621,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	 * Make sure that UFS interrupts are disabled and any pending interrupt
+ 	 * status is cleared before registering UFS interrupt handler.
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ 
+ 	/* IRQ registration */
+ 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
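
These ufshcd.c hunks (and the cdns-pltfrm.c and ufs-qcom ones below)
replace CPU-side barriers with a read back of the register that was just
written. An MMIO read from a device cannot complete until earlier posted
writes to that device have landed, so the read, unlike mb(), actually
guarantees the write has taken effect. A rough sketch of the idiom over a
plain array standing in for ioremap()ed registers; REG_IRQ_ENABLE and the
accessors are invented for the example:

#include <stdint.h>

#define REG_IRQ_ENABLE	0x04

static inline void mmio_write32(volatile uint32_t *regs, unsigned int off,
				uint32_t val)
{
	regs[off / 4] = val;
}

static inline uint32_t mmio_read32(volatile uint32_t *regs, unsigned int off)
{
	return regs[off / 4];
}

static void disable_irqs_and_flush(volatile uint32_t *regs)
{
	mmio_write32(regs, REG_IRQ_ENABLE, 0);
	/* Read back: completes only after the write reached the device. */
	(void)mmio_read32(regs, REG_IRQ_ENABLE);
}

int main(void)
{
	static uint32_t fake_regs[16];	/* stand-in for a device BAR */

	disable_irqs_and_flush(fake_regs);
	return 0;
}
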
+diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
+index bb30267da4711..66811d8d1929c 100644
+--- a/drivers/ufs/host/cdns-pltfrm.c
++++ b/drivers/ufs/host/cdns-pltfrm.c
+@@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+ 	 * Make sure the register was updated,
+ 	 * UniPro layer will not work with an incorrect value.
+ 	 */
+-	mb();
++	ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 7a00004bfd036..62c343444d973 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -284,9 +284,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
+ 
+ 	if (host->hw_ver.major >= 0x05)
+ 		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+-
+-	/* make sure above configuration is applied before we return */
+-	mb();
+ }
+ 
+ /*
+@@ -415,7 +412,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+ 		    REG_UFS_CFG2);
+ 
+ 	/* Ensure that HW clock gating is enabled before next operations */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG2);
+ }
+ 
+ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
+@@ -507,7 +504,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ 		 * make sure above write gets applied before we return from
+ 		 * this function.
+ 		 */
+-		mb();
++		ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
+index 9dd9a391ebb76..b9de170983c9e 100644
+--- a/drivers/ufs/host/ufs-qcom.h
++++ b/drivers/ufs/host/ufs-qcom.h
+@@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
+ 	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ 
+ 	/*
+-	 * Make sure assertion of ufs phy reset is written to
+-	 * register before returning
++	 * Dummy read to ensure the write takes effect before doing any sort
++	 * of delay
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+ 
+ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+@@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
+ 	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
+ 
+ 	/*
+-	 * Make sure de-assertion of ufs phy reset is written to
+-	 * register before returning
++	 * Dummy read to ensure the write takes effect before doing any sort
++	 * of delay
+ 	 */
+-	mb();
++	ufshcd_readl(hba, REG_UFS_CFG1);
+ }
+ 
+ /* Host controller hardware version: major.minor.step */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 197b6d5268e94..3f46663aa563d 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1648,8 +1648,8 @@ config FB_COBALT
+ 	select FB_IOMEM_HELPERS
+ 
+ config FB_SH7760
+-	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
+-	depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
++	tristate "SH7760/SH7763/SH7720/SH7721 LCDC support"
++	depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ 		|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
+ 	select FB_IOMEM_HELPERS
+ 	help
+diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
+index db09fe87fcd4f..0ab8848ba2f10 100644
+--- a/drivers/video/fbdev/core/Kconfig
++++ b/drivers/video/fbdev/core/Kconfig
+@@ -144,6 +144,12 @@ config FB_DMAMEM_HELPERS
+ 	select FB_SYS_IMAGEBLIT
+ 	select FB_SYSMEM_FOPS
+ 
++config FB_DMAMEM_HELPERS_DEFERRED
++	bool
++	depends on FB_CORE
++	select FB_DEFERRED_IO
++	select FB_DMAMEM_HELPERS
++
+ config FB_IOMEM_FOPS
+ 	tristate
+ 	depends on FB_CORE
+diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+index eb2297b37504c..d35d2cf999988 100644
+--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
++++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
+@@ -1575,7 +1575,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
+ 	 */
+ 	info->fix = sh_mobile_lcdc_overlay_fix;
+ 	snprintf(info->fix.id, sizeof(info->fix.id),
+-		 "SH Mobile LCDC Overlay %u", ovl->index);
++		 "SHMobile ovl %u", ovl->index);
+ 	info->fix.smem_start = ovl->dma_handle;
+ 	info->fix.smem_len = ovl->fb_size;
+ 	info->fix.line_length = ovl->pitch;
+diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
+index a8fb41f1a2580..09329072004f4 100644
+--- a/drivers/video/fbdev/sis/init301.c
++++ b/drivers/video/fbdev/sis/init301.c
+@@ -172,7 +172,7 @@ static const unsigned char SiS_HiTVGroup3_2[] = {
+ };
+ 
+ /* 301C / 302ELV extended Part2 TV registers (4 tap scaler) */
+-
++#ifdef CONFIG_FB_SIS_315
+ static const unsigned char SiS_Part2CLVX_1[] = {
+     0x00,0x00,
+     0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F,0x7D,0x20,0x04,0x7F,0x7D,0x1F,0x06,0x7E,
+@@ -245,7 +245,6 @@ static const unsigned char SiS_Part2CLVX_6[] = {   /* 1080i */
+     0xFF,0xFF,
+ };
+ 
+-#ifdef CONFIG_FB_SIS_315
+ /* 661 et al LCD data structure (2.03.00) */
+ static const unsigned char SiS_LCDStruct661[] = {
+     /* 1024x768 */
+diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
+index fa5d9ca6be570..9c75de0656d8d 100644
+--- a/drivers/virt/acrn/mm.c
++++ b/drivers/virt/acrn/mm.c
+@@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ {
+ 	struct vm_memory_region_batch *regions_info;
+-	int nr_pages, i = 0, order, nr_regions = 0;
++	int nr_pages, i, order, nr_regions = 0;
+ 	struct vm_memory_mapping *region_mapping;
+ 	struct vm_memory_region_op *vm_region;
+ 	struct page **pages = NULL, *page;
+ 	void *remap_vaddr;
+ 	int ret, pinned;
+ 	u64 user_vm_pa;
+-	unsigned long pfn;
+ 	struct vm_area_struct *vma;
+ 
+ 	if (!vm || !memmap)
+ 		return -EINVAL;
+ 
++	/* Get the page number of the map region */
++	nr_pages = memmap->len >> PAGE_SHIFT;
++	if (!nr_pages)
++		return -EINVAL;
++
+ 	mmap_read_lock(current->mm);
+ 	vma = vma_lookup(current->mm, memmap->vma_base);
+ 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
++		unsigned long start_pfn, cur_pfn;
++		spinlock_t *ptl;
++		bool writable;
++		pte_t *ptep;
++
+ 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
+ 			mmap_read_unlock(current->mm);
+ 			return -EINVAL;
+ 		}
+ 
+-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
++		for (i = 0; i < nr_pages; i++) {
++			ret = follow_pte(vma->vm_mm,
++					 memmap->vma_base + i * PAGE_SIZE,
++					 &ptep, &ptl);
++			if (ret)
++				break;
++
++			cur_pfn = pte_pfn(ptep_get(ptep));
++			if (i == 0)
++				start_pfn = cur_pfn;
++			writable = !!pte_write(ptep_get(ptep));
++			pte_unmap_unlock(ptep, ptl);
++
++			/* Disallow write access if the PTE is not writable. */
++			if (!writable &&
++			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
++				ret = -EFAULT;
++				break;
++			}
++
++			/* Disallow refcounted pages. */
++			if (pfn_valid(cur_pfn) &&
++			    !PageReserved(pfn_to_page(cur_pfn))) {
++				ret = -EFAULT;
++				break;
++			}
++
++			/* Disallow non-contiguous ranges. */
++			if (cur_pfn != start_pfn + i) {
++				ret = -EINVAL;
++				break;
++			}
++		}
+ 		mmap_read_unlock(current->mm);
+-		if (ret < 0) {
++
++		if (ret) {
+ 			dev_dbg(acrn_dev.this_device,
+ 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
+ 			return ret;
+ 		}
+ 
+ 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
+-			 PFN_PHYS(pfn), memmap->len,
++			 PFN_PHYS(start_pfn), memmap->len,
+ 			 ACRN_MEM_TYPE_WB, memmap->attr);
+ 	}
+ 	mmap_read_unlock(current->mm);
+ 
+-	/* Get the page number of the map region */
+-	nr_pages = memmap->len >> PAGE_SHIFT;
+ 	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
+ 	if (!pages)
+ 		return -ENOMEM;
+@@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 	mutex_unlock(&vm->regions_mapping_lock);
+ 
+ 	/* Calculate count of vm_memory_region_op */
+-	while (i < nr_pages) {
++	for (i = 0; i < nr_pages; i += 1 << order) {
+ 		page = pages[i];
+ 		VM_BUG_ON_PAGE(PageTail(page), page);
+ 		order = compound_order(page);
+ 		nr_regions++;
+-		i += 1 << order;
+ 	}
+ 
+ 	/* Prepare the vm_memory_region_batch */
+@@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 	regions_info->vmid = vm->vmid;
+ 	regions_info->regions_gpa = virt_to_phys(vm_region);
+ 	user_vm_pa = memmap->user_vm_pa;
+-	i = 0;
+-	while (i < nr_pages) {
++	for (i = 0; i < nr_pages; i += 1 << order) {
+ 		u32 region_size;
+ 
+ 		page = pages[i];
+@@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+ 
+ 		vm_region++;
+ 		user_vm_pa += region_size;
+-		i += 1 << order;
+ 	}
+ 
+ 	/* Inform the ACRN Hypervisor to set up EPT mappings */
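
The follow_pte() loop added above replaces a single follow_pfn() call with
a page-by-page walk that rejects the mapping unless every PFN is writable
(when write access was requested), non-refcounted, and physically
contiguous with the first one. The contiguity and writability checks over a
plain array of PFNs, as a sketch with invented types:

#include <stdbool.h>
#include <stdio.h>

struct pfn_info {
	unsigned long pfn;
	bool writable;
};

static int validate_range(const struct pfn_info *p, int nr_pages,
			  bool want_write)
{
	unsigned long start_pfn = p[0].pfn;

	for (int i = 0; i < nr_pages; i++) {
		if (want_write && !p[i].writable)
			return -14;	/* -EFAULT: not writable */
		if (p[i].pfn != start_pfn + i)
			return -22;	/* -EINVAL: range not contiguous */
	}
	return 0;
}

int main(void)
{
	struct pfn_info range[] = {
		{ 0x1000, true }, { 0x1001, true }, { 0x1003, true },
	};

	/* The hole between 0x1001 and 0x1003 must fail the walk. */
	printf("rc=%d\n", validate_range(range, 3, true));
	return 0;
}
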
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 2776112dbdf8d..87f487b116577 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2773,13 +2773,19 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
+ 		goto out;
+ 	}
+ 
+-	/* See the comment at fiemap_search_slot() about why we clone. */
+-	copy_extent_buffer_full(clone, path->nodes[0]);
+ 	/*
+ 	 * Important to preserve the start field, for the optimizations when
+ 	 * checking if extents are shared (see extent_fiemap()).
++	 *
++	 * We must set ->start before calling copy_extent_buffer_full().  If we
++	 * are on sub-pagesize blocksize, we use ->start to determine the offset
++	 * into the folio where our eb exists, and if we update ->start after
++	 * the fact then any subsequent reads of the eb may read from a
++	 * different offset in the folio than where we originally copied into.
+ 	 */
+ 	clone->start = path->nodes[0]->start;
++	/* See the comment at fiemap_search_slot() about why we clone. */
++	copy_extent_buffer_full(clone, path->nodes[0]);
+ 
+ 	slot = path->slots[0];
+ 	btrfs_release_path(path);
+diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
+index 1f2f70a1b824e..decedc4ee15f6 100644
+--- a/fs/dlm/ast.c
++++ b/fs/dlm/ast.c
+@@ -12,6 +12,7 @@
+ #include <trace/events/dlm.h>
+ 
+ #include "dlm_internal.h"
++#include "lvb_table.h"
+ #include "memory.h"
+ #include "lock.h"
+ #include "user.h"
+@@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ 	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
+ 	struct dlm_callback *cb;
++	int copy_lvb = 0;
+ 	int prev_mode;
+ 
+ 	if (flags & DLM_CB_BAST) {
+@@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ 				goto out;
+ 			}
+ 		}
++	} else if (flags & DLM_CB_CAST) {
++		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
++			if (lkb->lkb_last_cast)
++				prev_mode = lkb->lkb_last_cb->mode;
++			else
++				prev_mode = -1;
++
++			if (!status && lkb->lkb_lksb->sb_lvbptr &&
++			    dlm_lvb_operations[prev_mode + 1][mode + 1])
++				copy_lvb = 1;
++		}
+ 	}
+ 
+ 	cb = dlm_allocate_cb();
+@@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ 	cb->mode = mode;
+ 	cb->sb_status = status;
+ 	cb->sb_flags = (sbflags & 0x000000FF);
++	cb->copy_lvb = copy_lvb;
+ 	kref_init(&cb->ref);
+ 	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
+ 		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index 3b4dbce849f0f..a9137c90f3483 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -222,6 +222,7 @@ struct dlm_callback {
+ 	int			sb_status;	/* copy to lksb status */
+ 	uint8_t			sb_flags;	/* copy to lksb flags */
+ 	int8_t			mode; /* rq mode of bast, gr mode of cast */
++	int			copy_lvb;
+ 
+ 	struct list_head	list;
+ 	struct kref		ref;
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 9f9b68448830e..12a483deeef5e 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -21,7 +21,6 @@
+ #include "dlm_internal.h"
+ #include "lockspace.h"
+ #include "lock.h"
+-#include "lvb_table.h"
+ #include "user.h"
+ #include "ast.h"
+ #include "config.h"
+@@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ 	struct dlm_lkb *lkb;
+ 	DECLARE_WAITQUEUE(wait, current);
+ 	struct dlm_callback *cb;
+-	int rv, ret, copy_lvb = 0;
+-	int old_mode, new_mode;
++	int rv, ret;
+ 
+ 	if (count == sizeof(struct dlm_device_version)) {
+ 		rv = copy_version_to_user(buf, count);
+@@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ 
+ 	lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
+ 
+-	/* rem_lkb_callback sets a new lkb_last_cast */
+-	old_mode = lkb->lkb_last_cast->mode;
+-
+ 	rv = dlm_dequeue_lkb_callback(lkb, &cb);
+ 	switch (rv) {
+ 	case DLM_DEQUEUE_CALLBACK_EMPTY:
+@@ -895,12 +890,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ 	if (cb->flags & DLM_CB_BAST) {
+ 		trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
+ 	} else if (cb->flags & DLM_CB_CAST) {
+-		new_mode = cb->mode;
+-
+-		if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
+-		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
+-			copy_lvb = 1;
+-
+ 		lkb->lkb_lksb->sb_status = cb->sb_status;
+ 		lkb->lkb_lksb->sb_flags = cb->sb_flags;
+ 		trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
+@@ -908,7 +897,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ 
+ 	ret = copy_result_to_user(lkb->lkb_ua,
+ 				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+-				  cb->flags, cb->mode, copy_lvb, buf, count);
++				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
+ 
+ 	kref_put(&cb->ref, dlm_release_callback);
+ 
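
Moving the copy_lvb decision into dlm_enqueue_lkb_callback() captures the
previous cast mode at enqueue time instead of reading it later in
device_read(). The decision itself is a table lookup indexed by previous
and new lock mode, shifted by one so that "no previous mode" (-1) selects
row zero. A sketch of that indexing with an invented three-mode table; the
real table and mode constants live in fs/dlm/lvb_table.h:

#include <stdio.h>

enum { MODE_NL, MODE_PR, MODE_EX, NR_MODES };

/* 1 = the completion must copy the lock value block back to the caller.
 * The values below are made up for illustration. */
static const int lvb_ops[NR_MODES + 1][NR_MODES + 1] = {
	/* columns: new mode (none, NL, PR, EX) */
	[0]           = { 0, 0, 1, 1 },	/* no previous mode */
	[MODE_NL + 1] = { 0, 0, 1, 1 },
	[MODE_PR + 1] = { 0, 0, 0, 1 },
	[MODE_EX + 1] = { 0, 0, 0, 0 },
};

static int need_copy_lvb(int prev_mode, int new_mode)
{
	return lvb_ops[prev_mode + 1][new_mode + 1];
}

int main(void)
{
	printf("first grant to EX: %d\n", need_copy_lvb(-1, MODE_EX));
	printf("EX -> NL:          %d\n", need_copy_lvb(MODE_EX, MODE_NL));
	return 0;
}
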
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 3fe41964c0d8d..7f9f68c00ef63 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
+ 	 *         | Key Identifier Size      | 1 or 2 bytes |
+ 	 *         | Key Identifier           | arbitrary    |
+ 	 *         | File Encryption Key Size | 1 or 2 bytes |
++	 *         | Cipher Code              | 1 byte       |
+ 	 *         | File Encryption Key      | arbitrary    |
++	 *         | Checksum                 | 2 bytes      |
+ 	 */
+-	data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
++	data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ 	*packet = kmalloc(data_len, GFP_KERNEL);
+ 	message = *packet;
+ 	if (!message) {
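
The corrected size accounts for every fixed field the writer emits: one
packet type byte, up to two bytes for each of the two size fields, one
cipher code byte and a two byte checksum, which is 8 bytes on top of the
hex signature and the key payload (the old value of 5 missed the cipher
code and checksum rows now listed in the comment). A small check of the
arithmetic; the 64-byte key size in main() is only an example:

#include <stdio.h>

#define ECRYPTFS_SIG_SIZE_HEX 16	/* 8-byte signature, hex encoded */

int main(void)
{
	size_t key_size = 64;	/* example file encryption key size */
	size_t fixed = 1 + 2 + 2 + 1 + 2;	/* tag, sizes, cipher, csum */
	size_t data_len = fixed + ECRYPTFS_SIG_SIZE_HEX + key_size;

	printf("fixed overhead: %zu, total buffer: %zu bytes\n",
	       fixed, data_len);
	return 0;
}
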
+diff --git a/fs/exec.c b/fs/exec.c
+index cf1df7f16e55c..0c5f06d08c355 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -67,6 +67,7 @@
+ #include <linux/time_namespace.h>
+ #include <linux/user_events.h>
+ #include <linux/rseq.h>
++#include <linux/ksm.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -267,6 +268,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+ 		goto err_free;
+ 	}
+ 
++	/*
++	 * This needs to be called with the mmap write lock
++	 * held, to avoid racing with ksmd.
++	 */
++	err = ksm_execve(mm);
++	if (err)
++		goto err_ksm;
++
+ 	/*
+ 	 * Place the stack at the largest stack address the architecture
+ 	 * supports. Later, we'll move this to an appropriate place. We don't
+@@ -288,6 +297,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+ 	bprm->p = vma->vm_end - sizeof(void *);
+ 	return 0;
+ err:
++	ksm_exit(mm);
++err_ksm:
+ 	mmap_write_unlock(mm);
+ err_free:
+ 	bprm->vma = NULL;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 537803250ca9a..6de6bf57699be 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2887,9 +2887,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ 	if (IS_ERR(folio))
+ 		return PTR_ERR(folio);
+ 
+-	/* In case writeback began while the folio was unlocked */
+-	folio_wait_stable(folio);
+-
+ #ifdef CONFIG_FS_ENCRYPTION
+ 	ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
+ #else
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 12b3f196010b8..714f83632e3f9 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6113,6 +6113,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
+ 	ext4_mb_mark_bb(sb, block, 1, true);
+ 	ar->len = 1;
+ 
++	*errp = 0;
+ 	return block;
+ }
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 5e4f65c14dfb9..a630b27a4cc6e 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2897,7 +2897,7 @@ static int ext4_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
+ 	inode = ext4_new_inode_start_handle(idmap, dir, mode,
+ 					    NULL, 0, NULL,
+ 					    EXT4_HT_DIR,
+-			EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
++			EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
+ 			  4 + EXT4_XATTR_TRANS_BLOCKS);
+ 	handle = ext4_journal_current_handle();
+ 	err = PTR_ERR(inode);
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index eac698b8dd387..b01320502624a 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -179,22 +179,22 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ 		break;
+ 	case META_SIT:
+ 		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
+-			goto err;
++			goto check_only;
+ 		break;
+ 	case META_SSA:
+ 		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
+ 			blkaddr < SM_I(sbi)->ssa_blkaddr))
+-			goto err;
++			goto check_only;
+ 		break;
+ 	case META_CP:
+ 		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
+ 			blkaddr < __start_cp_addr(sbi)))
+-			goto err;
++			goto check_only;
+ 		break;
+ 	case META_POR:
+ 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
+ 			blkaddr < MAIN_BLKADDR(sbi)))
+-			goto err;
++			goto check_only;
+ 		break;
+ 	case DATA_GENERIC:
+ 	case DATA_GENERIC_ENHANCE:
+@@ -228,6 +228,7 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ 	return true;
+ err:
+ 	f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
++check_only:
+ 	return false;
+ }
+ 
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 34540f9d011ca..2507fe34cbdf0 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+ 	return true;
+ }
+ 
+-void gfs2_glock_free(struct gfs2_glock *gl)
++static void __gfs2_glock_free(struct gfs2_glock *gl)
+ {
+-	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+-
+-	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
+ 	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
+ 	smp_mb();
+ 	wake_up_glock(gl);
+ 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
++}
++
++void gfs2_glock_free(struct gfs2_glock *gl) {
++	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++
++	__gfs2_glock_free(gl);
++	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
++		wake_up(&sdp->sd_kill_wait);
++}
++
++void gfs2_glock_free_later(struct gfs2_glock *gl) {
++	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++
++	spin_lock(&lru_lock);
++	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
++	spin_unlock(&lru_lock);
+ 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ 		wake_up(&sdp->sd_kill_wait);
+ }
+ 
++static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
++{
++	struct list_head *list = &sdp->sd_dead_glocks;
++
++	while (!list_empty(list)) {
++		struct gfs2_glock *gl;
++
++		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
++		list_del_init(&gl->gl_lru);
++		__gfs2_glock_free(gl);
++	}
++}
++
+ /**
+  * gfs2_glock_hold() - increment reference count on glock
+  * @gl: The glock to hold
+@@ -591,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ 	struct gfs2_holder *gh;
+ 	unsigned state = ret & LM_OUT_ST_MASK;
+ 
+-	spin_lock(&gl->gl_lockref.lock);
+ 	trace_gfs2_glock_state_change(gl, state);
+ 	state_change(gl, state);
+ 	gh = find_first_waiter(gl);
+@@ -639,7 +664,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ 			       gl->gl_target, state);
+ 			GLOCK_BUG_ON(gl, 1);
+ 		}
+-		spin_unlock(&gl->gl_lockref.lock);
+ 		return;
+ 	}
+ 
+@@ -662,7 +686,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+ 	}
+ out:
+ 	clear_bit(GLF_LOCK, &gl->gl_flags);
+-	spin_unlock(&gl->gl_lockref.lock);
+ }
+ 
+ static bool is_system_glock(struct gfs2_glock *gl)
+@@ -690,6 +713,7 @@ __acquires(&gl->gl_lockref.lock)
+ {
+ 	const struct gfs2_glock_operations *glops = gl->gl_ops;
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
++	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
+ 	int ret;
+ 
+@@ -718,6 +742,9 @@ __acquires(&gl->gl_lockref.lock)
+ 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
+ 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
+ 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
++	if (!glops->go_inval && !glops->go_sync)
++		goto skip_inval;
++
+ 	spin_unlock(&gl->gl_lockref.lock);
+ 	if (glops->go_sync) {
+ 		ret = glops->go_sync(gl);
+@@ -730,6 +757,7 @@ __acquires(&gl->gl_lockref.lock)
+ 				fs_err(sdp, "Error %d syncing glock \n", ret);
+ 				gfs2_dump_glock(NULL, gl, true);
+ 			}
++			spin_lock(&gl->gl_lockref.lock);
+ 			goto skip_inval;
+ 		}
+ 	}
+@@ -750,9 +778,10 @@ __acquires(&gl->gl_lockref.lock)
+ 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
+ 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ 	}
++	spin_lock(&gl->gl_lockref.lock);
+ 
+ skip_inval:
+-	gfs2_glock_hold(gl);
++	gl->gl_lockref.count++;
+ 	/*
+ 	 * Check for an error encountered since we called go_sync and go_inval.
+ 	 * If so, we can't withdraw from the glock code because the withdraw
+@@ -794,31 +823,37 @@ __acquires(&gl->gl_lockref.lock)
+ 			 */
+ 			clear_bit(GLF_LOCK, &gl->gl_flags);
+ 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+-			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+-			goto out;
++			__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
++			return;
+ 		} else {
+ 			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ 		}
+ 	}
+ 
+-	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
+-		/* lock_dlm */
+-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
++	if (ls->ls_ops->lm_lock) {
++		spin_unlock(&gl->gl_lockref.lock);
++		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
++		spin_lock(&gl->gl_lockref.lock);
++
+ 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
+ 		    target == LM_ST_UNLOCKED &&
+-		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
+-			finish_xmote(gl, target);
+-			gfs2_glock_queue_work(gl, 0);
++		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
++			/*
++			 * The lockspace has been released and the lock has
++			 * been unlocked implicitly.
++			 */
+ 		} else if (ret) {
+ 			fs_err(sdp, "lm_lock ret %d\n", ret);
+-			GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
++			target = gl->gl_state | LM_OUT_ERROR;
++		} else {
++			/* The operation will be completed asynchronously. */
++			return;
+ 		}
+-	} else { /* lock_nolock */
+-		finish_xmote(gl, target);
+-		gfs2_glock_queue_work(gl, 0);
+ 	}
+-out:
+-	spin_lock(&gl->gl_lockref.lock);
++
++	/* Complete the operation now. */
++	finish_xmote(gl, target);
++	__gfs2_glock_queue_work(gl, 0);
+ }
+ 
+ /**
+@@ -1071,11 +1106,12 @@ static void glock_work_func(struct work_struct *work)
+ 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+ 	unsigned int drop_refs = 1;
+ 
+-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
++	spin_lock(&gl->gl_lockref.lock);
++	if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
++		clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ 		finish_xmote(gl, gl->gl_reply);
+ 		drop_refs++;
+ 	}
+-	spin_lock(&gl->gl_lockref.lock);
+ 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ 	    gl->gl_state != LM_ST_UNLOCKED &&
+ 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
+@@ -2148,8 +2184,11 @@ static void thaw_glock(struct gfs2_glock *gl)
+ 		return;
+ 	if (!lockref_get_not_dead(&gl->gl_lockref))
+ 		return;
++
++	spin_lock(&gl->gl_lockref.lock);
+ 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+-	gfs2_glock_queue_work(gl, 0);
++	__gfs2_glock_queue_work(gl, 0);
++	spin_unlock(&gl->gl_lockref.lock);
+ }
+ 
+ /**
+@@ -2225,6 +2264,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
+ 	wait_event_timeout(sdp->sd_kill_wait,
+ 			   atomic_read(&sdp->sd_glock_disposal) == 0,
+ 			   HZ * 600);
++	gfs2_lm_unmount(sdp);
++	gfs2_free_dead_glocks(sdp);
+ 	glock_hash_walk(dump_glock_func, sdp);
+ }
+ 
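
gfs2_glock_free_later() parks a glock on the new sd_dead_glocks list under
lru_lock instead of freeing it immediately, and gfs2_free_dead_glocks()
reaps the list once at unmount, after the lockspace has been released. The
general deferred-free pattern, sketched with a pthread mutex and an
invented object type:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
	int id;
};

static pthread_mutex_t dead_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *dead_list;

/* Object may still be referenced by asynchronous completions: park it. */
static void free_later(struct obj *o)
{
	pthread_mutex_lock(&dead_lock);
	o->next = dead_list;
	dead_list = o;
	pthread_mutex_unlock(&dead_lock);
}

/* Called once at teardown, when no completions can be in flight. */
static void free_dead_objects(void)
{
	pthread_mutex_lock(&dead_lock);
	while (dead_list) {
		struct obj *o = dead_list;

		dead_list = o->next;
		printf("freeing obj %d\n", o->id);
		free(o);
	}
	pthread_mutex_unlock(&dead_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct obj *o = malloc(sizeof(*o));

		o->id = i;
		free_later(o);
	}
	free_dead_objects();
	return 0;
}
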
+diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
+index 0114f3e0ebe01..86987a59a0580 100644
+--- a/fs/gfs2/glock.h
++++ b/fs/gfs2/glock.h
+@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
+ void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+ void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
+ void gfs2_glock_free(struct gfs2_glock *gl);
++void gfs2_glock_free_later(struct gfs2_glock *gl);
+ 
+ int __init gfs2_glock_init(void);
+ void gfs2_glock_exit(void);
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index 45653cbc8a87d..e0e8dfeee777d 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ 	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
+ 	spin_unlock(&sdp->sd_ail_lock);
+ 	gfs2_log_unlock(sdp);
++
++	if (gfs2_withdrawing(sdp))
++		gfs2_withdraw(sdp);
+ }
+ 
+ 
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index 95a334d64da2a..60abd7050c998 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -838,6 +838,7 @@ struct gfs2_sbd {
+ 	/* For quiescing the filesystem */
+ 	struct gfs2_holder sd_freeze_gh;
+ 	struct mutex sd_freeze_mutex;
++	struct list_head sd_dead_glocks;
+ 
+ 	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
+ 	char sd_table_name[GFS2_FSNAME_LEN];
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index d1ac5d0679ea6..e028e55e67d95 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
+ 	struct gfs2_glock *gl = arg;
+ 	unsigned ret = gl->gl_state;
+ 
++	/* If the glock is dead, we only react to a dlm_unlock() reply. */
++	if (__lockref_is_dead(&gl->gl_lockref) &&
++	    gl->gl_lksb.sb_status != -DLM_EUNLOCK)
++		return;
++
+ 	gfs2_update_reply_times(gl);
+ 	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
+ 
+@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
+ {
+ 	struct gfs2_glock *gl = arg;
+ 
++	if (__lockref_is_dead(&gl->gl_lockref))
++		return;
++
+ 	switch (mode) {
+ 	case DLM_LOCK_EX:
+ 		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
+@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ 	int error;
+ 
+-	if (gl->gl_lksb.sb_lkid == 0)
+-		goto out_free;
++	BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
++
++	if (gl->gl_lksb.sb_lkid == 0) {
++		gfs2_glock_free(gl);
++		return;
++	}
+ 
+ 	clear_bit(GLF_BLOCKING, &gl->gl_flags);
+ 	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
+@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ 	gfs2_update_request_times(gl);
+ 
+ 	/* don't want to call dlm if we've unmounted the lock protocol */
+-	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
+-		goto out_free;
++	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
++		gfs2_glock_free(gl);
++		return;
++	}
+ 	/* don't want to skip dlm_unlock writing the lvb when lock has one */
+ 
+ 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+-	    !gl->gl_lksb.sb_lvbptr)
+-		goto out_free;
++	    !gl->gl_lksb.sb_lvbptr) {
++		gfs2_glock_free_later(gl);
++		return;
++	}
+ 
+ again:
+ 	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
+@@ -321,10 +337,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ 		       gl->gl_name.ln_type,
+ 		       (unsigned long long)gl->gl_name.ln_number, error);
+ 	}
+-	return;
+-
+-out_free:
+-	gfs2_glock_free(gl);
+ }
+ 
+ static void gdlm_cancel(struct gfs2_glock *gl)
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 572d58e86296f..cde7118599abb 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
+ 	atomic_set(&sdp->sd_log_in_flight, 0);
+ 	init_waitqueue_head(&sdp->sd_log_flush_wait);
+ 	mutex_init(&sdp->sd_freeze_mutex);
++	INIT_LIST_HEAD(&sdp->sd_dead_glocks);
+ 
+ 	return sdp;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index e5f79466340d2..2d780b4701a23 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -646,10 +646,7 @@ static void gfs2_put_super(struct super_block *sb)
+ 	gfs2_gl_hash_clear(sdp);
+ 	truncate_inode_pages_final(&sdp->sd_aspace);
+ 	gfs2_delete_debugfs_file(sdp);
+-	/*  Unmount the locking protocol  */
+-	gfs2_lm_unmount(sdp);
+ 
+-	/*  At this point, we're through participating in the lockspace  */
+ 	gfs2_sys_fs_del(sdp);
+ 	free_sbd(sdp);
+ }
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index f52141ce94853..fc3ecb180ac53 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
+ 			fs_err(sdp, "telling LM to unmount\n");
+ 			lm->lm_unmount(sdp);
+ 		}
+-		set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
+ 		fs_err(sdp, "File system withdrawn\n");
+ 		dump_stack();
+ 		clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index 00224f3a8d6e7..defb4162c3d5b 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -1110,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
+ 		return rc;
+ 
+ 	request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
++	if (request > c->sector_size - c->cleanmarker_size)
++		return -ERANGE;
++
+ 	rc = jffs2_reserve_space(c, request, &length,
+ 				 ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
+ 	if (rc) {
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 3a6f2cb364f8c..b635ee5adbcce 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -295,6 +295,18 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
+ 	return 0;
+ }
+ 
++static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
++				 long offset)
++{
++	int ret;
++
++	ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
++	if (ret)
++		return ret;
++	offset_set(dentry, offset);
++	return 0;
++}
++
+ /**
+  * simple_offset_remove - Remove an entry from a directory's offset map
+  * @octx: directory offset ctx to be updated
+@@ -345,6 +357,36 @@ int simple_offset_empty(struct dentry *dentry)
+ 	return ret;
+ }
+ 
++/**
++ * simple_offset_rename - handle directory offsets for rename
++ * @old_dir: parent directory of source entry
++ * @old_dentry: dentry of source entry
++ * @new_dir: parent_directory of destination entry
++ * @new_dentry: dentry of destination
++ *
++ * Caller provides appropriate serialization.
++ *
++ * User space expects the directory offset value of the replaced
++ * (new) directory entry to be unchanged after a rename.
++ *
++ * Returns zero on success, a negative errno value on failure.
++ */
++int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
++			 struct inode *new_dir, struct dentry *new_dentry)
++{
++	struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
++	struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
++	long new_offset = dentry2offset(new_dentry);
++
++	simple_offset_remove(old_ctx, old_dentry);
++
++	if (new_offset) {
++		offset_set(new_dentry, 0);
++		return simple_offset_replace(new_ctx, old_dentry, new_offset);
++	}
++	return simple_offset_add(new_ctx, old_dentry);
++}
++
+ /**
+  * simple_offset_rename_exchange - exchange rename with directory offsets
+  * @old_dir: parent of dentry being moved
+@@ -352,6 +394,9 @@ int simple_offset_empty(struct dentry *dentry)
+  * @new_dir: destination parent
+  * @new_dentry: destination dentry
+  *
++ * This API preserves the directory offset values. Caller provides
++ * appropriate serialization.
++ *
+  * Returns zero on success. Otherwise a negative errno is returned and the
+  * rename is rolled back.
+  */
+@@ -369,11 +414,11 @@ int simple_offset_rename_exchange(struct inode *old_dir,
+ 	simple_offset_remove(old_ctx, old_dentry);
+ 	simple_offset_remove(new_ctx, new_dentry);
+ 
+-	ret = simple_offset_add(new_ctx, old_dentry);
++	ret = simple_offset_replace(new_ctx, old_dentry, new_index);
+ 	if (ret)
+ 		goto out_restore;
+ 
+-	ret = simple_offset_add(old_ctx, new_dentry);
++	ret = simple_offset_replace(old_ctx, new_dentry, old_index);
+ 	if (ret) {
+ 		simple_offset_remove(new_ctx, old_dentry);
+ 		goto out_restore;
+@@ -388,10 +433,8 @@ int simple_offset_rename_exchange(struct inode *old_dir,
+ 	return 0;
+ 
+ out_restore:
+-	offset_set(old_dentry, old_index);
+-	mtree_store(&old_ctx->mt, old_index, old_dentry, GFP_KERNEL);
+-	offset_set(new_dentry, new_index);
+-	mtree_store(&new_ctx->mt, new_index, new_dentry, GFP_KERNEL);
++	(void)simple_offset_replace(old_ctx, old_dentry, old_index);
++	(void)simple_offset_replace(new_ctx, new_dentry, new_index);
+ 	return ret;
+ }
+ 
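
simple_offset_rename() above preserves the directory offset of a replaced
entry: when a rename overwrites an existing target, the source dentry is
stored at the target's old offset via simple_offset_replace() instead of
being handed a fresh one, matching what user space expects from readdir
offsets across a rename. The same semantics over a toy offset map:

#include <stdio.h>

#define MAX_ENTRIES 8

static const char *dirmap[MAX_ENTRIES];	/* offset -> name, NULL = free */
static long next_offset = 2;		/* 0 and 1 reserved, as . and .. */

static long offset_add(const char *name)
{
	long off = next_offset++;

	dirmap[off] = name;
	return off;
}

/* Rename src over an existing dst: dst's offset is reused for src. */
static void offset_rename(long src_off, long dst_off)
{
	const char *name = dirmap[src_off];

	dirmap[src_off] = NULL;
	dirmap[dst_off] = name;	/* replace in place, offset unchanged */
}

int main(void)
{
	long a = offset_add("a");
	long b = offset_add("b");

	offset_rename(a, b);	/* like: mv a b */
	printf("offset %ld now holds \"%s\"\n", b, dirmap[b]);
	return 0;
}
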
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index ecd18bffeebc7..4d23bb1d08c0a 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -48,12 +48,10 @@ enum {
+ 	NFSD_MaxBlkSize,
+ 	NFSD_MaxConnections,
+ 	NFSD_Filecache,
+-#ifdef CONFIG_NFSD_V4
+ 	NFSD_Leasetime,
+ 	NFSD_Gracetime,
+ 	NFSD_RecoveryDir,
+ 	NFSD_V4EndGrace,
+-#endif
+ 	NFSD_MaxReserved
+ };
+ 
+@@ -1360,7 +1358,9 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
+ #ifdef CONFIG_NFSD_V4
+ 		[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
+ 		[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
++#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
+ 		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
++#endif
+ 		[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
+ #endif
+ 		/* last one */ {""}
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index f1a01c191cf53..8be471ce4f195 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
+ 	if (argv->v_nmembs == 0)
+ 		return 0;
+ 
+-	if (argv->v_size > PAGE_SIZE)
++	if ((size_t)argv->v_size > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	/*
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index aa5290cb7467c..3c1e4e9eafa31 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2124,8 +2124,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
+ {
+ 	spin_lock(&sci->sc_state_lock);
+ 	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
+-		sci->sc_timer.expires = jiffies + sci->sc_interval;
+-		add_timer(&sci->sc_timer);
++		if (sci->sc_task) {
++			sci->sc_timer.expires = jiffies + sci->sc_interval;
++			add_timer(&sci->sc_timer);
++		}
+ 		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
+ 	}
+ 	spin_unlock(&sci->sc_state_lock);
+@@ -2172,19 +2174,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ 	struct nilfs_segctor_wait_request wait_req;
+ 	int err = 0;
+ 
+-	spin_lock(&sci->sc_state_lock);
+ 	init_wait(&wait_req.wq);
+ 	wait_req.err = 0;
+ 	atomic_set(&wait_req.done, 0);
++	init_waitqueue_entry(&wait_req.wq, current);
++
++	/*
++	 * To prevent a race issue where completion notifications from the
++	 * log writer thread are missed, increment the request sequence count
++	 * "sc_seq_request" and insert a wait queue entry using the current
++	 * sequence number into the "sc_wait_request" queue at the same time
++	 * within the lock section of "sc_state_lock".
++	 */
++	spin_lock(&sci->sc_state_lock);
+ 	wait_req.seq = ++sci->sc_seq_request;
++	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+ 	spin_unlock(&sci->sc_state_lock);
+ 
+-	init_waitqueue_entry(&wait_req.wq, current);
+-	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
+-	set_current_state(TASK_INTERRUPTIBLE);
+ 	wake_up(&sci->sc_wait_daemon);
+ 
+ 	for (;;) {
++		set_current_state(TASK_INTERRUPTIBLE);
++
++		/*
++		 * Synchronize only while the log writer thread is alive.
++		 * Leave flushing out after the log writer thread exits to
++		 * the cleanup work in nilfs_segctor_destroy().
++		 */
++		if (!sci->sc_task)
++			break;
++
+ 		if (atomic_read(&wait_req.done)) {
+ 			err = wait_req.err;
+ 			break;
+@@ -2200,7 +2219,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
+ 	return err;
+ }
+ 
+-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
++static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
+ {
+ 	struct nilfs_segctor_wait_request *wrq, *n;
+ 	unsigned long flags;
+@@ -2208,7 +2227,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+ 	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
+ 	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
+ 		if (!atomic_read(&wrq->done) &&
+-		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
++		    (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
+ 			wrq->err = err;
+ 			atomic_set(&wrq->done, 1);
+ 		}
+@@ -2326,10 +2345,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
+  */
+ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
+ {
++	bool thread_is_alive;
++
+ 	spin_lock(&sci->sc_state_lock);
+ 	sci->sc_seq_accepted = sci->sc_seq_request;
++	thread_is_alive = (bool)sci->sc_task;
+ 	spin_unlock(&sci->sc_state_lock);
+-	del_timer_sync(&sci->sc_timer);
++
++	/*
++	 * This function does not race with the log writer thread's
++	 * termination.  Therefore, deleting sc_timer, which should not be
++	 * done after the log writer thread exits, can be done safely outside
++	 * the area protected by sc_state_lock.
++	 */
++	if (thread_is_alive)
++		del_timer_sync(&sci->sc_timer);
+ }
+ 
+ /**
+@@ -2346,7 +2376,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ 	if (mode == SC_LSEG_SR) {
+ 		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
+ 		sci->sc_seq_done = sci->sc_seq_accepted;
+-		nilfs_segctor_wakeup(sci, err);
++		nilfs_segctor_wakeup(sci, err, false);
+ 		sci->sc_flush_request = 0;
+ 	} else {
+ 		if (mode == SC_FLUSH_FILE)
+@@ -2355,7 +2385,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
+ 			sci->sc_flush_request &= ~FLUSH_DAT_BIT;
+ 
+ 		/* re-enable timer if checkpoint creation was not done */
+-		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
++		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
+ 		    time_before(jiffies, sci->sc_timer.expires))
+ 			add_timer(&sci->sc_timer);
+ 	}
+@@ -2545,6 +2575,7 @@ static int nilfs_segctor_thread(void *arg)
+ 	int timeout = 0;
+ 
+ 	sci->sc_timer_task = current;
++	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+ 
+ 	/* start sync. */
+ 	sci->sc_task = current;
+@@ -2612,6 +2643,7 @@ static int nilfs_segctor_thread(void *arg)
+  end_thread:
+ 	/* end sync. */
+ 	sci->sc_task = NULL;
++	timer_shutdown_sync(&sci->sc_timer);
+ 	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
+ 	spin_unlock(&sci->sc_state_lock);
+ 	return 0;
+@@ -2675,7 +2707,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
+ 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
+ 	INIT_LIST_HEAD(&sci->sc_iput_queue);
+ 	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
+-	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
+ 
+ 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
+ 	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
+@@ -2729,6 +2760,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 		|| sci->sc_seq_request != sci->sc_seq_done);
+ 	spin_unlock(&sci->sc_state_lock);
+ 
++	/*
++	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
++	 * be called from delayed iput() via nilfs_evict_inode() and can race
++	 * with the above log writer thread termination.
++	 */
++	nilfs_segctor_wakeup(sci, 0, true);
++
+ 	if (flush_work(&sci->sc_iput_work))
+ 		flag = true;
+ 
+@@ -2754,7 +2792,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
+ 
+ 	down_write(&nilfs->ns_segctor_sem);
+ 
+-	timer_shutdown_sync(&sci->sc_timer);
+ 	kfree(sci);
+ }
+ 
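
The segment-constructor hunks close a lost-wakeup window: the waiter's
sequence number is now published and its wait queue entry inserted inside
the same sc_state_lock section, and the wait loop re-checks that the log
writer thread is still alive. The condition-variable analogue of that
discipline, as a sketch (the wraparound-safe sequence comparison mirrors
nilfs_cnt32_ge):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int seq_request, seq_done;
static bool writer_alive = true;

static void sync_with_writer(void)
{
	pthread_mutex_lock(&lock);
	/* Take a ticket and become visible as a waiter atomically: a
	 * completion firing between the two steps can't be missed. */
	unsigned int my_seq = ++seq_request;

	while (writer_alive && (int)(seq_done - my_seq) < 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void writer_completed_up_to(unsigned int seq)
{
	pthread_mutex_lock(&lock);
	seq_done = seq;
	pthread_cond_broadcast(&cond);	/* like nilfs_segctor_wakeup() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	writer_completed_up_to(1);	/* work done before anyone waits */
	sync_with_writer();		/* returns immediately: seq 1 done */
	return 0;
}
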
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index 263635199b60d..1937e8e612f87 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -475,6 +475,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 		vbo = (u64)bit << index_bits;
+ 		if (vbo >= i_size) {
+ 			ntfs_inode_err(dir, "Looks like your dir is corrupt");
++			ctx->pos = eod;
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index daabaad63aaf6..14284f0ed46aa 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1533,6 +1533,11 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 		goto out1;
+ 	}
+ 
++	if (data_size <= le64_to_cpu(alloc->nres.data_size)) {
++		/* Reuse index. */
++		goto out;
++	}
++
+ 	/* Increase allocation. */
+ 	err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
+ 			    &indx->alloc_run, data_size, &data_size, true,
+@@ -1546,6 +1551,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ 	if (in->name == I30_NAME)
+ 		i_size_write(&ni->vfs_inode, data_size);
+ 
++out:
+ 	*vbn = bit << indx->idx2vbn_bits;
+ 
+ 	return 0;
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index d273eda1cf45d..ba5a39a2f957d 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -37,7 +37,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 	bool is_dir;
+ 	unsigned long ino = inode->i_ino;
+ 	u32 rp_fa = 0, asize, t32;
+-	u16 roff, rsize, names = 0;
++	u16 roff, rsize, names = 0, links = 0;
+ 	const struct ATTR_FILE_NAME *fname = NULL;
+ 	const struct INDEX_ROOT *root;
+ 	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
+@@ -200,11 +200,12 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
+ 			goto out;
+ 
++		names += 1;
+ 		fname = Add2Ptr(attr, roff);
+ 		if (fname->type == FILE_NAME_DOS)
+ 			goto next_attr;
+ 
+-		names += 1;
++		links += 1;
+ 		if (name && name->len == fname->name_len &&
+ 		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
+ 					NULL, false))
+@@ -429,7 +430,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 		ni->mi.dirty = true;
+ 	}
+ 
+-	set_nlink(inode, names);
++	set_nlink(inode, links);
+ 
+ 	if (S_ISDIR(mode)) {
+ 		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 6aa3a9d44df1b..6c76503edc200 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -534,16 +534,9 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ 	if (aoff + asize > used)
+ 		return false;
+ 
+-	if (ni && is_attr_indexed(attr)) {
++	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
+ 		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
+-		struct ATTR_FILE_NAME *fname =
+-			attr->type != ATTR_NAME ?
+-				NULL :
+-				resident_data_ex(attr,
+-						 SIZEOF_ATTRIBUTE_FILENAME);
+-		if (fname && fname->type == FILE_NAME_DOS) {
+-			/* Do not decrease links count deleting DOS name. */
+-		} else if (!links) {
++		if (!links) {
+ 			/* minor error. Not critical. */
+ 		} else {
+ 			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index b26d95a8d3274..0ed534f759e92 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -1861,8 +1861,6 @@ static int __init init_ntfs_fs(void)
+ {
+ 	int err;
+ 
+-	pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
+-
+ 	if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
+ 		pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
+ 	if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
+diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
+index 4a0779e3ef792..a7b527ea50d3c 100644
+--- a/fs/openpromfs/inode.c
++++ b/fs/openpromfs/inode.c
+@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
+ 	return inode;
+ }
+ 
+-static int openprom_remount(struct super_block *sb, int *flags, char *data)
++static int openpromfs_reconfigure(struct fs_context *fc)
+ {
+-	sync_filesystem(sb);
+-	*flags |= SB_NOATIME;
++	sync_filesystem(fc->root->d_sb);
++	fc->sb_flags |= SB_NOATIME;
+ 	return 0;
+ }
+ 
+@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
+ 	.alloc_inode	= openprom_alloc_inode,
+ 	.free_inode	= openprom_free_inode,
+ 	.statfs		= simple_statfs,
+-	.remount_fs	= openprom_remount,
+ };
+ 
+ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
+@@ -415,6 +414,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
+ 
+ static const struct fs_context_operations openpromfs_context_ops = {
+ 	.get_tree	= openpromfs_get_tree,
++	.reconfigure	= openpromfs_reconfigure,
+ };
+ 
+ static int openpromfs_init_fs_context(struct fs_context *fc)
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index a2f0a2edceb8a..e0a6b758094fc 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -165,8 +165,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ 
+ 		share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
+ 				      GFP_KERNEL);
+-		if (share->path)
++		if (share->path) {
+ 			share->path_sz = strlen(share->path);
++			while (share->path_sz > 1 &&
++			       share->path[share->path_sz - 1] == '/')
++				share->path[--share->path_sz] = '\0';
++		}
+ 		share->create_mask = resp->create_mask;
+ 		share->directory_mask = resp->directory_mask;
+ 		share->force_create_mode = resp->force_create_mode;
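[The loop added above canonicalizes the share path once at parse time instead of special-casing trailing slashes later; a worked example of its effect on an illustrative buffer:

	char path[] = "srv/share//";
	size_t sz = strlen(path);

	/* "srv/share//" -> "srv/share"; the sz > 1 bound leaves a bare "/" alone */
	while (sz > 1 && path[sz - 1] == '/')
		path[--sz] = '\0';
]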
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index b9d9116fc2b3f..a8f52c4ebbdad 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -610,19 +610,24 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
+ 		if (opinfo->op_state == OPLOCK_CLOSING)
+ 			return -ENOENT;
+ 		else if (opinfo->level <= req_op_level) {
+-			if (opinfo->is_lease &&
+-			    opinfo->o_lease->state !=
+-			     (SMB2_LEASE_HANDLE_CACHING_LE |
+-			      SMB2_LEASE_READ_CACHING_LE))
++			if (opinfo->is_lease == false)
++				return 1;
++
++			if (opinfo->o_lease->state !=
++			    (SMB2_LEASE_HANDLE_CACHING_LE |
++			     SMB2_LEASE_READ_CACHING_LE))
+ 				return 1;
+ 		}
+ 	}
+ 
+ 	if (opinfo->level <= req_op_level) {
+-		if (opinfo->is_lease &&
+-		    opinfo->o_lease->state !=
+-		     (SMB2_LEASE_HANDLE_CACHING_LE |
+-		      SMB2_LEASE_READ_CACHING_LE)) {
++		if (opinfo->is_lease == false) {
++			wake_up_oplock_break(opinfo);
++			return 1;
++		}
++		if (opinfo->o_lease->state !=
++		    (SMB2_LEASE_HANDLE_CACHING_LE |
++		     SMB2_LEASE_READ_CACHING_LE)) {
+ 			wake_up_oplock_break(opinfo);
+ 			return 1;
+ 		}
+diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
+index 566497eeb3b81..bc1f6b378195f 100644
+--- a/include/drm/drm_displayid.h
++++ b/include/drm/drm_displayid.h
+@@ -30,7 +30,6 @@ struct drm_edid;
+ #define VESA_IEEE_OUI				0x3a0292
+ 
+ /* DisplayID Structure versions */
+-#define DISPLAY_ID_STRUCTURE_VER_12		0x12
+ #define DISPLAY_ID_STRUCTURE_VER_20		0x20
+ 
+ /* DisplayID Structure v1r2 Data Blocks */
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index c0aec0d4d664e..3011d33eccbd2 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -241,9 +241,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
+ int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ 					    u16 value);
+-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+-				       const struct drm_dsc_picture_parameter_set *pps);
++int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
++int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
++				   const struct drm_dsc_picture_parameter_set *pps);
+ 
+ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ 			       size_t size);
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 34829f2c517ac..168201e4c7827 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -573,9 +573,13 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+ #define OSC_SB_CPCV2_SUPPORT			0x00000040
+ #define OSC_SB_PCLPI_SUPPORT			0x00000080
+ #define OSC_SB_OSLPI_SUPPORT			0x00000100
++#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT	0x00000200
++#define OSC_SB_OVER_16_PSTATES_SUPPORT		0x00000400
++#define OSC_SB_GED_SUPPORT			0x00000800
+ #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT		0x00001000
+-#define OSC_SB_GENERIC_INITIATOR_SUPPORT	0x00002000
++#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT	0x00002000
+ #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE		0x00004000
++#define OSC_SB_GENERIC_INITIATOR_SUPPORT	0x00020000
+ #define OSC_SB_NATIVE_USB4_SUPPORT		0x00040000
+ #define OSC_SB_PRM_SUPPORT			0x00200000
+ #define OSC_SB_FFH_OPR_SUPPORT			0x00400000
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 2ba557e067fe6..f7f5a783da2aa 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -80,6 +80,7 @@ __check_bitop_pr(__test_and_set_bit);
+ __check_bitop_pr(__test_and_clear_bit);
+ __check_bitop_pr(__test_and_change_bit);
+ __check_bitop_pr(test_bit);
++__check_bitop_pr(test_bit_acquire);
+ 
+ #undef __check_bitop_pr
+ 
+diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
+index 6bfe70decc9fb..ae80a303c216b 100644
+--- a/include/linux/dev_printk.h
++++ b/include/linux/dev_printk.h
+@@ -129,6 +129,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ 		_dev_printk(level, dev, fmt, ##__VA_ARGS__);		\
+ 	})
+ 
++/*
++ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
++ * gcc's format checking.
++ */
++#define dev_no_printk(level, dev, fmt, ...)				\
++	({								\
++		if (0)							\
++			_dev_printk(level, dev, fmt, ##__VA_ARGS__);	\
++	})
++
+ /*
+  * #defines for all the dev_<level> macros to prefix with whatever
+  * possible use of #define dev_fmt(fmt) ...
+@@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
+ 	dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #else
+ #define dev_dbg(dev, fmt, ...)						\
+-({									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ #ifdef CONFIG_PRINTK
+@@ -247,20 +254,14 @@ do {									\
+ } while (0)
+ #else
+ #define dev_dbg_ratelimited(dev, fmt, ...)				\
+-do {									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-} while (0)
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ #ifdef VERBOSE_DEBUG
+ #define dev_vdbg	dev_dbg
+ #else
+ #define dev_vdbg(dev, fmt, ...)						\
+-({									\
+-	if (0)								\
+-		dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+-})
++	dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+ #endif
+ 
+ /*
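[dev_no_printk() factors out the usual compile-away idiom: because the call sits behind "if (0)", gcc still type-checks the format string against its arguments but emits no code. An illustrative use, assuming DEBUG is unset:

	static void probe_one(struct device *dev)
	{
		int depth = 8;

		/* expands to dev_no_printk(): compiles to nothing, yet a
		 * mismatched format (e.g. %s for an int) still warns at
		 * build time
		 */
		dev_dbg(dev, "queue depth %d\n", depth);
	}
]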
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 0dd27364d56fe..811e47f9d1c3f 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -694,6 +694,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
+ 	__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ 	__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+ 
++#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
++	__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
++	__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
++
+ /*
+  * Initializes struct fb_ops for deferred I/O.
+  */
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 6aeebe0a67770..6eaa190d0083c 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -734,7 +734,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
+ 	if (__compiletime_lessthan(p_size, size))
+ 		__read_overflow();
+ 	if (p_size < size)
+-		fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
++		fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
++			      __real_kmemdup(p, 0, gfp));
+ 	return __real_kmemdup(p, size, gfp);
+ }
+ 
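[When FORTIFY_SOURCE is built to warn rather than panic, fortify_panic() returns, so the hunk above now hands back __real_kmemdup(p, 0, gfp) — a zero-length duplicate, i.e. ZERO_SIZE_PTR — instead of NULL; the fortify_kunit changes later in this patch assert exactly that. The caller-visible effect, with illustrative values:

	/* src is a 16-byte buffer; requesting 17 trips the fortify check */
	void *copy = kmemdup(src, 17, GFP_KERNEL);

	/* warn-only mode: copy == ZERO_SIZE_PTR, not NULL, so any
	 * dereference faults immediately instead of reading past src
	 */
	kfree(copy);	/* kfree(ZERO_SIZE_PTR) is always safe */
]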
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 8dfd53b52744a..b09f141321105 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3340,6 +3340,8 @@ void simple_offset_init(struct offset_ctx *octx);
+ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+ int simple_offset_empty(struct dentry *dentry);
++int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
++			 struct inode *new_dir, struct dentry *new_dentry);
+ int simple_offset_rename_exchange(struct inode *old_dir,
+ 				  struct dentry *old_dentry,
+ 				  struct inode *new_dir,
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 3385a2cc5b099..ac5be38d8aaf0 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -5302,7 +5302,7 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ 		info_len += 1;
+ 
+ 	return prof->sta_info_len >= info_len &&
+-	       fixed + prof->sta_info_len <= len;
++	       fixed + prof->sta_info_len - 1 <= len;
+ }
+ 
+ /**
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index 401348e9f92b4..7e2b1de3996ac 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -59,6 +59,14 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ 	return 0;
+ }
+ 
++static inline int ksm_execve(struct mm_struct *mm)
++{
++	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
++		return __ksm_enter(mm);
++
++	return 0;
++}
++
+ static inline void ksm_exit(struct mm_struct *mm)
+ {
+ 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+@@ -107,6 +115,11 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ 	return 0;
+ }
+ 
++static inline int ksm_execve(struct mm_struct *mm)
++{
++	return 0;
++}
++
+ static inline void ksm_exit(struct mm_struct *mm)
+ {
+ }
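[ksm_execve() mirrors ksm_fork(): a process that opted in with MMF_VM_MERGE_ANY (prctl PR_SET_MEMORY_MERGE) is meant to keep KSM across exec, but the fresh mm does not yet carry MMF_VM_MERGEABLE, so __ksm_enter() must be called again. A hedged sketch of the intended call site, with the surrounding exec plumbing simplified away:

	static int init_exec_mm(struct mm_struct *mm)
	{
		int err;

		/* no-op unless MMF_VM_MERGE_ANY was carried over */
		err = ksm_execve(mm);
		if (err)
			return err;

		/* ... remaining mm setup for the new image ... */
		return 0;
	}
]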
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index bf9324a31ae97..80452bd982531 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -862,6 +862,7 @@ struct mlx5_cmd_work_ent {
+ 	void		       *context;
+ 	int			idx;
+ 	struct completion	handling;
++	struct completion	slotted;
+ 	struct completion	done;
+ 	struct mlx5_cmd        *cmd;
+ 	struct work_struct	work;
+diff --git a/include/linux/numa.h b/include/linux/numa.h
+index 915033a757315..1d43371fafd2f 100644
+--- a/include/linux/numa.h
++++ b/include/linux/numa.h
+@@ -36,12 +36,7 @@ int memory_add_physaddr_to_nid(u64 start);
+ int phys_to_target_node(u64 start);
+ #endif
+ 
+-#ifndef numa_fill_memblks
+-static inline int __init numa_fill_memblks(u64 start, u64 end)
+-{
+-	return NUMA_NO_MEMBLK;
+-}
+-#endif
++int numa_fill_memblks(u64 start, u64 end);
+ 
+ #else /* !CONFIG_NUMA */
+ static inline int numa_nearest_node(int node, unsigned int state)
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 955e31860095e..2fde40cc96778 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -126,7 +126,7 @@ struct va_format {
+ #define no_printk(fmt, ...)				\
+ ({							\
+ 	if (0)						\
+-		printk(fmt, ##__VA_ARGS__);		\
++		_printk(fmt, ##__VA_ARGS__);		\
+ 	0;						\
+ })
+ 
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index dfa1828cd756a..c0d74f97fd187 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -117,7 +117,6 @@ struct stmmac_axi {
+ 
+ #define EST_GCL		1024
+ struct stmmac_est {
+-	struct mutex lock;
+ 	int enable;
+ 	u32 btr_reserve[2];
+ 	u32 btr_offset[2];
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index 0d939e5aee4ec..c2a85fd3f5ea4 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -216,7 +216,7 @@ typedef struct {
+ struct ctl_table;
+ 
+ typedef struct ax25_dev {
+-	struct ax25_dev		*next;
++	struct list_head	list;
+ 
+ 	struct net_device	*dev;
+ 	netdevice_tracker	dev_tracker;
+@@ -330,7 +330,6 @@ int ax25_addr_size(const ax25_digi *);
+ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+ 
+ /* ax25_dev.c */
+-extern ax25_dev *ax25_dev_list;
+ extern spinlock_t ax25_dev_lock;
+ 
+ #if IS_ENABLED(CONFIG_AX25)
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index eaec5d6caa29d..b3228bd6cd6be 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -285,7 +285,7 @@ void bt_err_ratelimited(const char *fmt, ...);
+ 	bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
+ 
+ /* Connection and socket states */
+-enum {
++enum bt_sock_state {
+ 	BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
+ 	BT_OPEN,
+ 	BT_BOUND,
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 07198beb3d80b..f187510428ca6 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -33,9 +33,6 @@
+ #define HCI_MAX_FRAME_SIZE	(HCI_MAX_ACL_SIZE + 4)
+ 
+ #define HCI_LINK_KEY_SIZE	16
+-#define HCI_AMP_LINK_KEY_SIZE	(2 * HCI_LINK_KEY_SIZE)
+-
+-#define HCI_MAX_AMP_ASSOC_SIZE	672
+ 
+ #define HCI_MAX_CPB_DATA_SIZE	252
+ 
+@@ -71,26 +68,6 @@
+ #define HCI_SMD		9
+ #define HCI_VIRTIO	10
+ 
+-/* HCI controller types */
+-#define HCI_PRIMARY	0x00
+-#define HCI_AMP		0x01
+-
+-/* First BR/EDR Controller shall have ID = 0 */
+-#define AMP_ID_BREDR	0x00
+-
+-/* AMP controller types */
+-#define AMP_TYPE_BREDR	0x00
+-#define AMP_TYPE_80211	0x01
+-
+-/* AMP controller status */
+-#define AMP_STATUS_POWERED_DOWN			0x00
+-#define AMP_STATUS_BLUETOOTH_ONLY		0x01
+-#define AMP_STATUS_NO_CAPACITY			0x02
+-#define AMP_STATUS_LOW_CAPACITY			0x03
+-#define AMP_STATUS_MEDIUM_CAPACITY		0x04
+-#define AMP_STATUS_HIGH_CAPACITY		0x05
+-#define AMP_STATUS_FULL_CAPACITY		0x06
+-
+ /* HCI device quirks */
+ enum {
+ 	/* When this quirk is set, the HCI Reset command is send when
+@@ -528,7 +505,6 @@ enum {
+ #define ESCO_LINK	0x02
+ /* Low Energy links do not have defined link type. Use invented one */
+ #define LE_LINK		0x80
+-#define AMP_LINK	0x81
+ #define ISO_LINK	0x82
+ #define INVALID_LINK	0xff
+ 
+@@ -944,56 +920,6 @@ struct hci_cp_io_capability_neg_reply {
+ 	__u8     reason;
+ } __packed;
+ 
+-#define HCI_OP_CREATE_PHY_LINK		0x0435
+-struct hci_cp_create_phy_link {
+-	__u8     phy_handle;
+-	__u8     key_len;
+-	__u8     key_type;
+-	__u8     key[HCI_AMP_LINK_KEY_SIZE];
+-} __packed;
+-
+-#define HCI_OP_ACCEPT_PHY_LINK		0x0436
+-struct hci_cp_accept_phy_link {
+-	__u8     phy_handle;
+-	__u8     key_len;
+-	__u8     key_type;
+-	__u8     key[HCI_AMP_LINK_KEY_SIZE];
+-} __packed;
+-
+-#define HCI_OP_DISCONN_PHY_LINK		0x0437
+-struct hci_cp_disconn_phy_link {
+-	__u8     phy_handle;
+-	__u8     reason;
+-} __packed;
+-
+-struct ext_flow_spec {
+-	__u8       id;
+-	__u8       stype;
+-	__le16     msdu;
+-	__le32     sdu_itime;
+-	__le32     acc_lat;
+-	__le32     flush_to;
+-} __packed;
+-
+-#define HCI_OP_CREATE_LOGICAL_LINK	0x0438
+-#define HCI_OP_ACCEPT_LOGICAL_LINK	0x0439
+-struct hci_cp_create_accept_logical_link {
+-	__u8                  phy_handle;
+-	struct ext_flow_spec  tx_flow_spec;
+-	struct ext_flow_spec  rx_flow_spec;
+-} __packed;
+-
+-#define HCI_OP_DISCONN_LOGICAL_LINK	0x043a
+-struct hci_cp_disconn_logical_link {
+-	__le16   log_handle;
+-} __packed;
+-
+-#define HCI_OP_LOGICAL_LINK_CANCEL	0x043b
+-struct hci_cp_logical_link_cancel {
+-	__u8     phy_handle;
+-	__u8     flow_spec_id;
+-} __packed;
+-
+ #define HCI_OP_ENHANCED_SETUP_SYNC_CONN		0x043d
+ struct hci_coding_format {
+ 	__u8	id;
+@@ -1615,46 +1541,6 @@ struct hci_rp_read_enc_key_size {
+ 	__u8     key_size;
+ } __packed;
+ 
+-#define HCI_OP_READ_LOCAL_AMP_INFO	0x1409
+-struct hci_rp_read_local_amp_info {
+-	__u8     status;
+-	__u8     amp_status;
+-	__le32   total_bw;
+-	__le32   max_bw;
+-	__le32   min_latency;
+-	__le32   max_pdu;
+-	__u8     amp_type;
+-	__le16   pal_cap;
+-	__le16   max_assoc_size;
+-	__le32   max_flush_to;
+-	__le32   be_flush_to;
+-} __packed;
+-
+-#define HCI_OP_READ_LOCAL_AMP_ASSOC	0x140a
+-struct hci_cp_read_local_amp_assoc {
+-	__u8     phy_handle;
+-	__le16   len_so_far;
+-	__le16   max_len;
+-} __packed;
+-struct hci_rp_read_local_amp_assoc {
+-	__u8     status;
+-	__u8     phy_handle;
+-	__le16   rem_len;
+-	__u8     frag[];
+-} __packed;
+-
+-#define HCI_OP_WRITE_REMOTE_AMP_ASSOC	0x140b
+-struct hci_cp_write_remote_amp_assoc {
+-	__u8     phy_handle;
+-	__le16   len_so_far;
+-	__le16   rem_len;
+-	__u8     frag[];
+-} __packed;
+-struct hci_rp_write_remote_amp_assoc {
+-	__u8     status;
+-	__u8     phy_handle;
+-} __packed;
+-
+ #define HCI_OP_GET_MWS_TRANSPORT_CONFIG	0x140c
+ 
+ #define HCI_OP_ENABLE_DUT_MODE		0x1803
+@@ -2035,7 +1921,7 @@ struct hci_cp_le_set_ext_adv_data {
+ 	__u8  operation;
+ 	__u8  frag_pref;
+ 	__u8  length;
+-	__u8  data[];
++	__u8  data[] __counted_by(length);
+ } __packed;
+ 
+ #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA		0x2038
+@@ -2044,7 +1930,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
+ 	__u8  operation;
+ 	__u8  frag_pref;
+ 	__u8  length;
+-	__u8  data[];
++	__u8  data[] __counted_by(length);
+ } __packed;
+ 
+ #define HCI_OP_LE_SET_EXT_ADV_ENABLE		0x2039
+@@ -2070,7 +1956,7 @@ struct hci_cp_le_set_per_adv_data {
+ 	__u8  handle;
+ 	__u8  operation;
+ 	__u8  length;
+-	__u8  data[];
++	__u8  data[] __counted_by(length);
+ } __packed;
+ 
+ #define HCI_OP_LE_SET_PER_ADV_ENABLE		0x2040
+@@ -2171,7 +2057,7 @@ struct hci_cis {
+ 
+ struct hci_cp_le_create_cis {
+ 	__u8    num_cis;
+-	struct hci_cis cis[];
++	struct hci_cis cis[] __counted_by(num_cis);
+ } __packed;
+ 
+ #define HCI_OP_LE_REMOVE_CIG			0x2065
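[The __counted_by(length) annotations let CONFIG_FORTIFY_SOURCE and __builtin_dynamic_object_size bound accesses to the flexible array by the runtime value of "length". The usual construction pattern, on an illustrative struct:

	struct pkt {
		__u8 len;
		__u8 data[] __counted_by(len);
	};

	struct pkt *p = kzalloc(struct_size(p, data, n), GFP_KERNEL);

	if (p)
		p->len = n;	/* set the counter before indexing data[] */
]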
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index b1c8489ff93e4..5277c6d5134ca 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -126,7 +126,6 @@ enum suspended_state {
+ struct hci_conn_hash {
+ 	struct list_head list;
+ 	unsigned int     acl_num;
+-	unsigned int     amp_num;
+ 	unsigned int     sco_num;
+ 	unsigned int     iso_num;
+ 	unsigned int     le_num;
+@@ -247,6 +246,7 @@ struct adv_info {
+ 	bool	periodic;
+ 	__u8	mesh;
+ 	__u8	instance;
++	__u8	handle;
+ 	__u32	flags;
+ 	__u16	timeout;
+ 	__u16	remaining_time;
+@@ -341,14 +341,6 @@ struct adv_monitor {
+ /* Default authenticated payload timeout 30s */
+ #define DEFAULT_AUTH_PAYLOAD_TIMEOUT   0x0bb8
+ 
+-struct amp_assoc {
+-	__u16	len;
+-	__u16	offset;
+-	__u16	rem_len;
+-	__u16	len_so_far;
+-	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
+-};
+-
+ #define HCI_MAX_PAGES	3
+ 
+ struct hci_dev {
+@@ -361,7 +353,6 @@ struct hci_dev {
+ 	unsigned long	flags;
+ 	__u16		id;
+ 	__u8		bus;
+-	__u8		dev_type;
+ 	bdaddr_t	bdaddr;
+ 	bdaddr_t	setup_addr;
+ 	bdaddr_t	public_addr;
+@@ -467,21 +458,6 @@ struct hci_dev {
+ 	__u16		sniff_min_interval;
+ 	__u16		sniff_max_interval;
+ 
+-	__u8		amp_status;
+-	__u32		amp_total_bw;
+-	__u32		amp_max_bw;
+-	__u32		amp_min_latency;
+-	__u32		amp_max_pdu;
+-	__u8		amp_type;
+-	__u16		amp_pal_cap;
+-	__u16		amp_assoc_size;
+-	__u32		amp_max_flush_to;
+-	__u32		amp_be_flush_to;
+-
+-	struct amp_assoc	loc_assoc;
+-
+-	__u8		flow_ctl_mode;
+-
+ 	unsigned int	auto_accept_delay;
+ 
+ 	unsigned long	quirks;
+@@ -501,11 +477,6 @@ struct hci_dev {
+ 	unsigned int	le_pkts;
+ 	unsigned int	iso_pkts;
+ 
+-	__u16		block_len;
+-	__u16		block_mtu;
+-	__u16		num_blocks;
+-	__u16		block_cnt;
+-
+ 	unsigned long	acl_last_tx;
+ 	unsigned long	sco_last_tx;
+ 	unsigned long	le_last_tx;
+@@ -778,7 +749,6 @@ struct hci_conn {
+ 	void		*l2cap_data;
+ 	void		*sco_data;
+ 	void		*iso_data;
+-	struct amp_mgr	*amp_mgr;
+ 
+ 	struct list_head link_list;
+ 	struct hci_conn	*parent;
+@@ -805,7 +775,6 @@ struct hci_chan {
+ 	struct sk_buff_head data_q;
+ 	unsigned int	sent;
+ 	__u8		state;
+-	bool		amp;
+ };
+ 
+ struct hci_conn_params {
+@@ -1014,9 +983,6 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+ 	case ACL_LINK:
+ 		h->acl_num++;
+ 		break;
+-	case AMP_LINK:
+-		h->amp_num++;
+-		break;
+ 	case LE_LINK:
+ 		h->le_num++;
+ 		if (c->role == HCI_ROLE_SLAVE)
+@@ -1043,9 +1009,6 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+ 	case ACL_LINK:
+ 		h->acl_num--;
+ 		break;
+-	case AMP_LINK:
+-		h->amp_num--;
+-		break;
+ 	case LE_LINK:
+ 		h->le_num--;
+ 		if (c->role == HCI_ROLE_SLAVE)
+@@ -1067,8 +1030,6 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+ 	switch (type) {
+ 	case ACL_LINK:
+ 		return h->acl_num;
+-	case AMP_LINK:
+-		return h->amp_num;
+ 	case LE_LINK:
+ 		return h->le_num;
+ 	case SCO_LINK:
+@@ -1085,7 +1046,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+ {
+ 	struct hci_conn_hash *c = &hdev->conn_hash;
+ 
+-	return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
++	return c->acl_num + c->sco_num + c->le_num + c->iso_num;
+ }
+ 
+ static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+@@ -1611,10 +1572,6 @@ static inline void hci_conn_drop(struct hci_conn *conn)
+ 			}
+ 			break;
+ 
+-		case AMP_LINK:
+-			timeo = conn->disc_timeout;
+-			break;
+-
+ 		default:
+ 			timeo = 0;
+ 			break;
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index a4278aa618ab1..3434cfc26b6af 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -548,6 +548,9 @@ struct l2cap_chan {
+ 	__u16		tx_credits;
+ 	__u16		rx_credits;
+ 
++	/* estimated available receive buffer space or -1 if unknown */
++	ssize_t		rx_avail;
++
+ 	__u8		tx_state;
+ 	__u8		rx_state;
+ 
+@@ -682,10 +685,15 @@ struct l2cap_user {
+ /* ----- L2CAP socket info ----- */
+ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+ 
++struct l2cap_rx_busy {
++	struct list_head	list;
++	struct sk_buff		*skb;
++};
++
+ struct l2cap_pinfo {
+ 	struct bt_sock		bt;
+ 	struct l2cap_chan	*chan;
+-	struct sk_buff		*rx_busy_skb;
++	struct list_head	rx_busy;
+ };
+ 
+ enum {
+@@ -943,6 +951,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
+ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+ void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
+ int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
+ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+ int l2cap_ertm_init(struct l2cap_chan *chan);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 6ae35199d3b3c..2bcf30381d75f 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1539,11 +1539,10 @@ static inline int tcp_space_from_win(const struct sock *sk, int win)
+ 	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
+ }
+ 
+-/* Assume a conservative default of 1200 bytes of payload per 4K page.
++/* Assume a 50% default for skb->len/skb->truesize ratio.
+  * This may be adjusted later in tcp_measure_rcv_mss().
+  */
+-#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
+-				   SKB_TRUESIZE(4096))
++#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
+ 
+ static inline void tcp_scaling_ratio_init(struct sock *sk)
+ {
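[With TCP_RMEM_TO_WIN_SCALE being 8, scaling_ratio is stored as N/256, so the two defaults compare roughly as follows (the SKB_TRUESIZE figure is approximate and varies with struct layout):

	/* new: 1 << (8 - 1)                     = 128 -> 128/256 = 50%
	 * old: (1200 << 8) / SKB_TRUESIZE(4096)
	 *      ~= 307200 / ~4700                ~= 65 ->  65/256 ~= 25%
	 *
	 * The initial rcvbuf-to-window estimate roughly doubles; it is
	 * still refined per connection by tcp_measure_rcv_mss().
	 */
]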
+diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
+index 4eed9028bb119..517015ef36a84 100644
+--- a/include/trace/events/asoc.h
++++ b/include/trace/events/asoc.h
+@@ -12,6 +12,8 @@
+ #define DAPM_DIRECT "(direct)"
+ #define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
+ 
++TRACE_DEFINE_ENUM(SND_SOC_DAPM_DIR_OUT);
++
+ struct snd_soc_jack;
+ struct snd_soc_card;
+ struct snd_soc_dapm_widget;
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 3c42b9f1bada3..bcd84985faf48 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -7150,7 +7150,7 @@ struct bpf_fib_lookup {
+ 
+ 		/* output: MTU value */
+ 		__u16	mtu_result;
+-	};
++	} __attribute__((packed, aligned(2)));
+ 	/* input: L3 device index for lookup
+ 	 * output: device index from FIB lookup
+ 	 */
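[Packing the anonymous union and pinning it to 2-byte alignment keeps its size and offset identical across compilers and future member additions, which matters because bpf_fib_lookup is UAPI shared with BPF programs. The shape of the construct, on an illustrative struct:

	struct example {
		__u8	l4_protocol;
		__be16	sport;
		__be16	dport;
		union {
			__u16	tot_len;
			__u16	mtu_result;
		} __attribute__((packed, aligned(2)));	/* 2 bytes, 2-aligned */
		__u32	ifindex;
	};
]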
+diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h
+index af798f4c96804..3cc7d633456b6 100644
+--- a/include/uapi/linux/virtio_bt.h
++++ b/include/uapi/linux/virtio_bt.h
+@@ -13,7 +13,6 @@
+ 
+ enum virtio_bt_config_type {
+ 	VIRTIO_BT_CONFIG_TYPE_PRIMARY	= 0,
+-	VIRTIO_BT_CONFIG_TYPE_AMP	= 1,
+ };
+ 
+ enum virtio_bt_config_vendor {
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 522196dfb0ff5..318ed067dbf64 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -564,10 +564,7 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
+ 		 * clear the stalled flag.
+ 		 */
+ 		work = io_get_next_work(acct, worker);
+-		raw_spin_unlock(&acct->lock);
+ 		if (work) {
+-			__io_worker_busy(wq, worker);
+-
+ 			/*
+ 			 * Make sure cancelation can find this, even before
+ 			 * it becomes the active work. That avoids a window
+@@ -578,9 +575,15 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
+ 			raw_spin_lock(&worker->lock);
+ 			worker->next_work = work;
+ 			raw_spin_unlock(&worker->lock);
+-		} else {
+-			break;
+ 		}
++
++		raw_spin_unlock(&acct->lock);
++
++		if (!work)
++			break;
++
++		__io_worker_busy(wq, worker);
++
+ 		io_assign_current_work(worker, work);
+ 		__set_current_state(TASK_RUNNING);
+ 
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 6426ee382276b..03e52094630ac 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -340,7 +340,7 @@ static inline int io_run_task_work(void)
+ 
+ static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+ {
+-	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
++	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+ }
+ 
+ static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 4afb475d41974..3896e6220d0bf 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1041,6 +1041,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	struct io_kiocb *notif;
+ 
+ 	zc->done_io = 0;
++	req->flags |= REQ_F_POLL_NO_LAZY;
+ 
+ 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
+ 		return -EINVAL;
+diff --git a/io_uring/nop.c b/io_uring/nop.c
+index d956599a3c1b8..1a4e312dfe510 100644
+--- a/io_uring/nop.c
++++ b/io_uring/nop.c
+@@ -12,6 +12,8 @@
+ 
+ int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
++	if (READ_ONCE(sqe->rw_flags))
++		return -EINVAL;
+ 	return 0;
+ }
+ 
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 3983708cef5b4..158ab09c605ba 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -238,11 +238,13 @@ static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
+ 	if (*retry_list) {
+ 		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+ 		if (count >= max_entries)
+-			return count;
++			goto out;
+ 		max_entries -= count;
+ 	}
+-
+ 	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
++out:
++	if (task_work_pending(current))
++		task_work_run();
+ 	return count;
+ }
+ 
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index c287925471f68..cb61d8880dbe0 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3985,6 +3985,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ 			 * check permissions at attach time.
+ 			 */
+ 			return -EPERM;
++
++		ptype = attach_type_to_prog_type(attach_type);
++		if (prog->type != ptype)
++			return -EINVAL;
++
+ 		return prog->enforce_expected_attach_type &&
+ 			prog->expected_attach_type != attach_type ?
+ 			-EINVAL : 0;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index cb7ad1f795e18..2c90b1eb12e2c 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2359,6 +2359,8 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
+ 	regs[regno].type = PTR_TO_BTF_ID | flag;
+ 	regs[regno].btf = btf;
+ 	regs[regno].btf_id = btf_id;
++	if (type_may_be_null(flag))
++		regs[regno].id = ++env->id_gen;
+ }
+ 
+ #define DEF_NOT_SUBREG	(0)
+@@ -3615,7 +3617,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ 				 * sreg needs precision before this insn
+ 				 */
+ 				bt_clear_reg(bt, dreg);
+-				bt_set_reg(bt, sreg);
++				if (sreg != BPF_REG_FP)
++					bt_set_reg(bt, sreg);
+ 			} else {
+ 				/* dreg = K
+ 				 * dreg needs precision after this insn.
+@@ -3631,7 +3634,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ 				 * both dreg and sreg need precision
+ 				 * before this insn
+ 				 */
+-				bt_set_reg(bt, sreg);
++				if (sreg != BPF_REG_FP)
++					bt_set_reg(bt, sreg);
+ 			} /* else dreg += K
+ 			   * dreg still needs precision before this insn
+ 			   */
+@@ -5386,8 +5390,6 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
+ 		 */
+ 		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
+ 				kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field));
+-		/* For mark_ptr_or_null_reg */
+-		val_reg->id = ++env->id_gen;
+ 	} else if (class == BPF_STX) {
+ 		val_reg = reg_state(env, value_regno);
+ 		if (!register_is_null(val_reg) &&
+@@ -5705,7 +5707,8 @@ static bool is_trusted_reg(const struct bpf_reg_state *reg)
+ 		return true;
+ 
+ 	/* Types listed in the reg2btf_ids are always trusted */
+-	if (reg2btf_ids[base_type(reg->type)])
++	if (reg2btf_ids[base_type(reg->type)] &&
++	    !bpf_type_has_unsafe_modifiers(reg->type))
+ 		return true;
+ 
+ 	/* If a register is not referenced, it is trusted if it has the
+@@ -6325,6 +6328,7 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
+ #define BTF_TYPE_SAFE_RCU(__type)  __PASTE(__type, __safe_rcu)
+ #define BTF_TYPE_SAFE_RCU_OR_NULL(__type)  __PASTE(__type, __safe_rcu_or_null)
+ #define BTF_TYPE_SAFE_TRUSTED(__type)  __PASTE(__type, __safe_trusted)
++#define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type)  __PASTE(__type, __safe_trusted_or_null)
+ 
+ /*
+  * Allow list few fields as RCU trusted or full trusted.
+@@ -6388,7 +6392,7 @@ BTF_TYPE_SAFE_TRUSTED(struct dentry) {
+ 	struct inode *d_inode;
+ };
+ 
+-BTF_TYPE_SAFE_TRUSTED(struct socket) {
++BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) {
+ 	struct sock *sk;
+ };
+ 
+@@ -6423,11 +6427,20 @@ static bool type_is_trusted(struct bpf_verifier_env *env,
+ 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
+ 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
+ 	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
+-	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
+ 
+ 	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
+ }
+ 
++static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
++				    struct bpf_reg_state *reg,
++				    const char *field_name, u32 btf_id)
++{
++	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket));
++
++	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id,
++					  "__safe_trusted_or_null");
++}
++
+ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 				   struct bpf_reg_state *regs,
+ 				   int regno, int off, int size,
+@@ -6536,6 +6549,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 		 */
+ 		if (type_is_trusted(env, reg, field_name, btf_id)) {
+ 			flag |= PTR_TRUSTED;
++		} else if (type_is_trusted_or_null(env, reg, field_name, btf_id)) {
++			flag |= PTR_TRUSTED | PTR_MAYBE_NULL;
+ 		} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
+ 			if (type_is_rcu(env, reg, field_name, btf_id)) {
+ 				/* ignore __rcu tag and mark it MEM_RCU */
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 4237c8748715d..da24187c4e025 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2948,7 +2948,7 @@ bool current_cpuset_is_being_rebound(void)
+ static int update_relax_domain_level(struct cpuset *cs, s64 val)
+ {
+ #ifdef CONFIG_SMP
+-	if (val < -1 || val >= sched_domain_level_max)
++	if (val < -1 || val > sched_domain_level_max + 1)
+ 		return -EINVAL;
+ #endif
+ 
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 147b5945d67a0..2a453de9f3d95 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1994,7 +1994,7 @@ void show_rcu_tasks_trace_gp_kthread(void)
+ {
+ 	char buf[64];
+ 
+-	sprintf(buf, "N%lu h:%lu/%lu/%lu",
++	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
+ 		data_race(n_trc_holdouts),
+ 		data_race(n_heavy_reader_ofl_updates),
+ 		data_race(n_heavy_reader_updates),
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index 5d666428546b0..b5ec62b2d850a 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -504,7 +504,8 @@ static void print_cpu_stall_info(int cpu)
+ 			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
+ 	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
+ 	if (rcuc_starved)
+-		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
++		// Print signed value, as negative values indicate a probable bug.
++		snprintf(buf, sizeof(buf), " rcuc=%ld jiffies(starved)", j);
+ 	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
+ 	       cpu,
+ 	       "O."[!!cpu_online(cpu)],
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 7019a40457a6d..d211d40a2edc9 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -11402,7 +11402,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
+ {
+ 	struct task_group *tg = css_tg(of_css(of));
+ 	u64 period = tg_get_cfs_period(tg);
+-	u64 burst = tg_get_cfs_burst(tg);
++	u64 burst = tg->cfs_bandwidth.burst;
+ 	u64 quota;
+ 	int ret;
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c62805dbd6088..213c94d027a4c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6675,22 +6675,42 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+-	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+-	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++	unsigned long  rq_util_min, rq_util_max;
++
++	if (!sched_energy_enabled())
++		return false;
++
++	rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++	rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ 
+ 	/* Return true only if the utilization doesn't fit CPU's capacity */
+ 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
+-static inline void update_overutilized_status(struct rq *rq)
++static inline void set_rd_overutilized_status(struct root_domain *rd,
++					      unsigned int status)
+ {
+-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
+-		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
+-	}
++	if (!sched_energy_enabled())
++		return;
++
++	WRITE_ONCE(rd->overutilized, status);
++	trace_sched_overutilized_tp(rd, !!status);
++}
++
++static inline void check_update_overutilized_status(struct rq *rq)
++{
++	/*
++	 * overutilized field is used for load balancing decisions only
++	 * if energy aware scheduler is being used
++	 */
++	if (!sched_energy_enabled())
++		return;
++
++	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
++		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
+ }
+ #else
+-static inline void update_overutilized_status(struct rq *rq) { }
++static inline void check_update_overutilized_status(struct rq *rq) { }
+ #endif
+ 
+ /* Runqueue only has SCHED_IDLE tasks enqueued */
+@@ -6791,7 +6811,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	 * and the following generally works well enough in practice.
+ 	 */
+ 	if (!task_new)
+-		update_overutilized_status(rq);
++		check_update_overutilized_status(rq);
+ 
+ enqueue_throttle:
+ 	assert_list_leaf_cfs_rq(rq);
+@@ -10608,19 +10628,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+ 
+ 	if (!env->sd->parent) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+ 		/* update overload indicator if we are at root domain */
+-		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
++		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
+ 
+ 		/* Update over-utilization (tipping point, U >= 0) indicator */
+-		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd,
++					   sg_status & SG_OVERUTILIZED);
+ 	} else if (sg_status & SG_OVERUTILIZED) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+-		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
+ 	}
+ 
+ 	update_idle_cpu_scan(env, sum_util);
+@@ -12621,7 +12636,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ 		task_tick_numa(rq, curr);
+ 
+ 	update_misfit_status(curr, rq);
+-	update_overutilized_status(task_rq(curr));
++	check_update_overutilized_status(task_rq(curr));
+ 
+ 	task_tick_core(rq, curr);
+ }
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 99ea5986038ce..3127c9b30af18 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1468,7 +1468,7 @@ static void set_domain_attribute(struct sched_domain *sd,
+ 	} else
+ 		request = attr->relax_domain_level;
+ 
+-	if (sd->level > request) {
++	if (sd->level >= request) {
+ 		/* Turn off idle balance on this domain: */
+ 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+ 	}
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index da1710499698b..1e12df9bb531b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1595,12 +1595,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+ {
+ 	struct dyn_ftrace *rec;
++	unsigned long ip = 0;
+ 
++	rcu_read_lock();
+ 	rec = lookup_rec(start, end);
+ 	if (rec)
+-		return rec->ip;
++		ip = rec->ip;
++	rcu_read_unlock();
+ 
+-	return 0;
++	return ip;
+ }
+ 
+ /**
+@@ -1614,25 +1617,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+  */
+ unsigned long ftrace_location(unsigned long ip)
+ {
+-	struct dyn_ftrace *rec;
++	unsigned long loc;
+ 	unsigned long offset;
+ 	unsigned long size;
+ 
+-	rec = lookup_rec(ip, ip);
+-	if (!rec) {
++	loc = ftrace_location_range(ip, ip);
++	if (!loc) {
+ 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+ 			goto out;
+ 
+ 		/* map sym+0 to __fentry__ */
+ 		if (!offset)
+-			rec = lookup_rec(ip, ip + size - 1);
++			loc = ftrace_location_range(ip, ip + size - 1);
+ 	}
+ 
+-	if (rec)
+-		return rec->ip;
+-
+ out:
+-	return 0;
++	return loc;
+ }
+ 
+ /**
+@@ -6596,6 +6596,8 @@ static int ftrace_process_locs(struct module *mod,
+ 	/* We should have used all pages unless we skipped some */
+ 	if (pg_unuse) {
+ 		WARN_ON(!skipped);
++		/* Need to synchronize with ftrace_location_range() */
++		synchronize_rcu();
+ 		ftrace_free_pages(pg_unuse);
+ 	}
+ 	return ret;
+@@ -6809,6 +6811,9 @@ void ftrace_release_mod(struct module *mod)
+  out_unlock:
+ 	mutex_unlock(&ftrace_lock);
+ 
++	/* Need to synchronize with ftrace_location_range() */
++	if (tmp_page)
++		synchronize_rcu();
+ 	for (pg = tmp_page; pg; pg = tmp_page) {
+ 
+ 		/* Needs to be called outside of ftrace_lock */
+@@ -7142,6 +7147,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 	unsigned long start = (unsigned long)(start_ptr);
+ 	unsigned long end = (unsigned long)(end_ptr);
+ 	struct ftrace_page **last_pg = &ftrace_pages_start;
++	struct ftrace_page *tmp_page = NULL;
+ 	struct ftrace_page *pg;
+ 	struct dyn_ftrace *rec;
+ 	struct dyn_ftrace key;
+@@ -7183,12 +7189,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 		ftrace_update_tot_cnt--;
+ 		if (!pg->index) {
+ 			*last_pg = pg->next;
+-			if (pg->records) {
+-				free_pages((unsigned long)pg->records, pg->order);
+-				ftrace_number_of_pages -= 1 << pg->order;
+-			}
+-			ftrace_number_of_groups--;
+-			kfree(pg);
++			pg->next = tmp_page;
++			tmp_page = pg;
+ 			pg = container_of(last_pg, struct ftrace_page, next);
+ 			if (!(*last_pg))
+ 				ftrace_pages = pg;
+@@ -7205,6 +7207,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 		clear_func_from_hashes(func);
+ 		kfree(func);
+ 	}
++	/* Need to synchronize with ftrace_location_range() */
++	if (tmp_page) {
++		synchronize_rcu();
++		ftrace_free_pages(tmp_page);
++	}
+ }
+ 
+ void __init ftrace_free_init_mem(void)
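[The ftrace hunks above adopt the standard RCU deletion scheme: lookups walk the page list under rcu_read_lock(), and every unlink path now calls synchronize_rcu() before the pages reach ftrace_free_pages(). In outline, using the file's own helpers:

	static unsigned long reader_side(unsigned long start, unsigned long end)
	{
		struct dyn_ftrace *rec;
		unsigned long ip = 0;

		rcu_read_lock();
		rec = lookup_rec(start, end);	/* pages stay valid in here */
		if (rec)
			ip = rec->ip;
		rcu_read_unlock();
		return ip;
	}

	static void writer_side(struct ftrace_page *tmp_page)
	{
		/* pages already unlinked under ftrace_lock */
		synchronize_rcu();		/* wait out in-flight lookups */
		ftrace_free_pages(tmp_page);
	}
]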
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6511dc3a00da8..54887f4c35b2e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1449,6 +1449,11 @@ static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+  *
+  * As a safety measure we check to make sure the data pages have not
+  * been corrupted.
++ *
++ * Callers of this function need to guarantee that the list of pages doesn't get
++ * modified during the check. In particular, if it's possible that the function
++ * is invoked with concurrent readers which can swap in a new reader page then
++ * the caller should take cpu_buffer->reader_lock.
+  */
+ static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+@@ -2200,8 +2205,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 		 */
+ 		synchronize_rcu();
+ 		for_each_buffer_cpu(buffer, cpu) {
++			unsigned long flags;
++
+ 			cpu_buffer = buffer->buffers[cpu];
++			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 			rb_check_pages(cpu_buffer);
++			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 		}
+ 		atomic_dec(&buffer->record_disabled);
+ 	}
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 70d428c394b68..82b191f33a28f 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1989,6 +1989,80 @@ static int user_event_set_tp_name(struct user_event *user)
+ 	return 0;
+ }
+ 
++/*
++ * Counts how many ';' without a trailing space are in the args.
++ */
++static int count_semis_no_space(char *args)
++{
++	int count = 0;
++
++	while ((args = strchr(args, ';'))) {
++		args++;
++
++		if (!isspace(*args))
++			count++;
++	}
++
++	return count;
++}
++
++/*
++ * Copies the arguments while ensuring all ';' have a trailing space.
++ */
++static char *insert_space_after_semis(char *args, int count)
++{
++	char *fixed, *pos;
++	int len;
++
++	len = strlen(args) + count;
++	fixed = kmalloc(len + 1, GFP_KERNEL);
++
++	if (!fixed)
++		return NULL;
++
++	pos = fixed;
++
++	/* Insert a space after ';' if there is no trailing space. */
++	while (*args) {
++		*pos = *args++;
++
++		if (*pos++ == ';' && !isspace(*args))
++			*pos++ = ' ';
++	}
++
++	*pos = '\0';
++
++	return fixed;
++}
++
++static char **user_event_argv_split(char *args, int *argc)
++{
++	char **split;
++	char *fixed;
++	int count;
++
++	/* Count how many ';' without a trailing space */
++	count = count_semis_no_space(args);
++
++	/* No fixup is required */
++	if (!count)
++		return argv_split(GFP_KERNEL, args, argc);
++
++	/* We must fixup 'field;field' to 'field; field' */
++	fixed = insert_space_after_semis(args, count);
++
++	if (!fixed)
++		return NULL;
++
++	/* We do a normal split afterwards */
++	split = argv_split(GFP_KERNEL, fixed, argc);
++
++	/* We can free since argv_split makes a copy */
++	kfree(fixed);
++
++	return split;
++}
++
+ /*
+  * Parses the event name, arguments and flags then registers if successful.
+  * The name buffer lifetime is owned by this method for success cases only.
+@@ -2012,7 +2086,7 @@ static int user_event_parse(struct user_event_group *group, char *name,
+ 		return -EPERM;
+ 
+ 	if (args) {
+-		argv = argv_split(GFP_KERNEL, args, &argc);
++		argv = user_event_argv_split(args, &argc);
+ 
+ 		if (!argv)
+ 			return -ENOMEM;
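[A worked example of the fixup path added above, assuming a caller-owned string:

	int argc;
	char *args = kstrdup("char name[20];int id", GFP_KERNEL);
	char **argv = args ? user_event_argv_split(args, &argc) : NULL;

	/* count_semis_no_space() finds 1, the copy becomes
	 * "char name[20]; int id", and argv_split() then yields
	 * argv = { "char", "name[20];", "int", "id" }, argc = 4
	 */
	if (argv)
		argv_free(argv);
	kfree(args);
]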
+diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
+index 493ec02dd5b32..fdba0eaf19a59 100644
+--- a/lib/fortify_kunit.c
++++ b/lib/fortify_kunit.c
+@@ -267,28 +267,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+ 									\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
+-		vfree(p));						\
++		kvfree(p));						\
+ 	checker((expected_pages) * PAGE_SIZE,				\
+ 		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
+-		vfree(p));						\
++		kvfree(p));						\
+ 									\
+ 	prev_size = (expected_pages) * PAGE_SIZE;			\
+ 	orig = kvmalloc(prev_size, gfp);				\
+@@ -917,19 +917,19 @@ static void kmemdup_test(struct kunit *test)
+ 
+ 	/* Out of bounds by 1 byte. */
+ 	copy = kmemdup(src, len + 1, GFP_KERNEL);
+-	KUNIT_EXPECT_NULL(test, copy);
++	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ 	kfree(copy);
+ 
+ 	/* Way out of bounds. */
+ 	copy = kmemdup(src, len * 2, GFP_KERNEL);
+-	KUNIT_EXPECT_NULL(test, copy);
++	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ 	kfree(copy);
+ 
+ 	/* Starting offset causing out of bounds. */
+ 	copy = kmemdup(src + 1, len, GFP_KERNEL);
+-	KUNIT_EXPECT_NULL(test, copy);
++	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ 	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ 	kfree(copy);
+ }
+diff --git a/lib/kunit/device.c b/lib/kunit/device.c
+index abc603730b8ea..25c81ed465fb7 100644
+--- a/lib/kunit/device.c
++++ b/lib/kunit/device.c
+@@ -51,7 +51,7 @@ int kunit_bus_init(void)
+ 
+ 	error = bus_register(&kunit_bus_type);
+ 	if (error)
+-		bus_unregister(&kunit_bus_type);
++		root_device_unregister(kunit_bus_device);
+ 	return error;
+ }
+ 
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 1d1475578515c..b8514dbb337c0 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
+ {
+ 	unsigned int i;
+ 
++	if (num_suites == 0)
++		return 0;
++
+ 	if (!kunit_enabled() && num_suites > 0) {
+ 		pr_info("kunit: disabled\n");
+ 		return 0;
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index f7825991d576a..d9d1df28cc52e 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -11,6 +11,7 @@
+ #include <linux/completion.h>
+ #include <linux/kernel.h>
+ #include <linux/kthread.h>
++#include <linux/sched/task.h>
+ 
+ #include "try-catch-impl.h"
+ 
+@@ -65,13 +66,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 	try_catch->context = context;
+ 	try_catch->try_completion = &try_completion;
+ 	try_catch->try_result = 0;
+-	task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+-				  try_catch,
+-				  "kunit_try_catch_thread");
++	task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
++				     try_catch, "kunit_try_catch_thread");
+ 	if (IS_ERR(task_struct)) {
+ 		try_catch->catch(try_catch->context);
+ 		return;
+ 	}
++	get_task_struct(task_struct);
++	wake_up_process(task_struct);
+ 
+ 	time_remaining = wait_for_completion_timeout(&try_completion,
+ 						     kunit_test_timeout());
+@@ -81,6 +83,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 		kthread_stop(task_struct);
+ 	}
+ 
++	put_task_struct(task_struct);
+ 	exit_code = try_catch->try_result;
+ 
+ 	if (!exit_code)
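[The try-catch change splits kthread_run() into kthread_create() plus wake_up_process() with a get_task_struct() in between, so the task_struct stays valid even if the thread exits on its own before the timeout path reaches kthread_stop(). The general pattern, with illustrative names:

	static int run_pinned(int (*threadfn)(void *), void *data)
	{
		struct task_struct *t;

		t = kthread_create(threadfn, data, "worker");
		if (IS_ERR(t))
			return PTR_ERR(t);

		get_task_struct(t);	/* pin across the wait/stop window */
		wake_up_process(t);

		/* ... wait for completion; kthread_stop(t) on timeout ... */

		put_task_struct(t);	/* safe even if the thread already exited */
		return 0;
	}
]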
+diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
+index d4a3730b08fa7..4ce9604388069 100644
+--- a/lib/slub_kunit.c
++++ b/lib/slub_kunit.c
+@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
+ 
+ 	ptr_addr = (unsigned long *)(p + s->offset);
+ 	tmp = *ptr_addr;
+-	p[s->offset] = 0x12;
++	p[s->offset] = ~p[s->offset];
+ 
+ 	/*
+ 	 * Expecting three errors.
+diff --git a/lib/test_hmm.c b/lib/test_hmm.c
+index 717dcb8301273..b823ba7cb6a15 100644
+--- a/lib/test_hmm.c
++++ b/lib/test_hmm.c
+@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ 	unsigned long *src_pfns;
+ 	unsigned long *dst_pfns;
+ 
+-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
++	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
++	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	migrate_device_range(src_pfns, start_pfn, npages);
+ 	for (i = 0; i < npages; i++) {
+@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+ 	}
+ 	migrate_device_pages(src_pfns, dst_pfns, npages);
+ 	migrate_device_finalize(src_pfns, dst_pfns, npages);
+-	kfree(src_pfns);
+-	kfree(dst_pfns);
++	kvfree(src_pfns);
++	kvfree(dst_pfns);
+ }
+ 
+ /* Removes free pages from the free list so they can't be re-allocated */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 94ab99b6b574a..1f84a41aeb850 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3467,8 +3467,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
+ 			return error;
+ 	}
+ 
+-	simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
+-	error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
++	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
+index 3c3539c573e7f..829f7b1089fc6 100644
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -316,6 +316,38 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
+ 	goto out;
+ }
+ 
++static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
++					 struct vm_area_struct *dst_vma,
++					 unsigned long dst_addr)
++{
++	struct folio *folio;
++	int ret = -ENOMEM;
++
++	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
++	if (!folio)
++		return ret;
++
++	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
++		goto out_put;
++
++	/*
++	 * The memory barrier inside __folio_mark_uptodate makes sure that
++	 * zeroing out the folio become visible before mapping the page
++	 * using set_pte_at(). See do_anonymous_page().
++	 */
++	__folio_mark_uptodate(folio);
++
++	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
++				       &folio->page, true, 0);
++	if (ret)
++		goto out_put;
++
++	return 0;
++out_put:
++	folio_put(folio);
++	return ret;
++}
++
+ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
+ 				     struct vm_area_struct *dst_vma,
+ 				     unsigned long dst_addr)
+@@ -324,6 +356,9 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
+ 	spinlock_t *ptl;
+ 	int ret;
+ 
++	if (mm_forbids_zeropage(dst_vma->vm_mm))
++		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
++
+ 	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+ 					 dst_vma->vm_page_prot));
+ 	ret = -EAGAIN;
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index 282ec581c0720..c9d55b99a7a57 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -22,11 +22,12 @@
+ #include <net/sock.h>
+ #include <linux/uaccess.h>
+ #include <linux/fcntl.h>
++#include <linux/list.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ 
+-ax25_dev *ax25_dev_list;
++static LIST_HEAD(ax25_dev_list);
+ DEFINE_SPINLOCK(ax25_dev_lock);
+ 
+ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+@@ -34,10 +35,11 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
+ 	ax25_dev *ax25_dev, *res = NULL;
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
++	list_for_each_entry(ax25_dev, &ax25_dev_list, list)
+ 		if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
+ 			res = ax25_dev;
+ 			ax25_dev_hold(ax25_dev);
++			break;
+ 		}
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 
+@@ -59,7 +61,6 @@ void ax25_dev_device_up(struct net_device *dev)
+ 	}
+ 
+ 	refcount_set(&ax25_dev->refcount, 1);
+-	dev->ax25_ptr     = ax25_dev;
+ 	ax25_dev->dev     = dev;
+ 	netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
+ 	ax25_dev->forward = NULL;
+@@ -85,10 +86,9 @@ void ax25_dev_device_up(struct net_device *dev)
+ #endif
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	ax25_dev->next = ax25_dev_list;
+-	ax25_dev_list  = ax25_dev;
++	list_add(&ax25_dev->list, &ax25_dev_list);
++	dev->ax25_ptr     = ax25_dev;
+ 	spin_unlock_bh(&ax25_dev_lock);
+-	ax25_dev_hold(ax25_dev);
+ 
+ 	ax25_register_dev_sysctl(ax25_dev);
+ }
+@@ -111,32 +111,19 @@ void ax25_dev_device_down(struct net_device *dev)
+ 	/*
+ 	 *	Remove any packet forwarding that points to this device.
+ 	 */
+-	for (s = ax25_dev_list; s != NULL; s = s->next)
++	list_for_each_entry(s, &ax25_dev_list, list)
+ 		if (s->forward == dev)
+ 			s->forward = NULL;
+ 
+-	if ((s = ax25_dev_list) == ax25_dev) {
+-		ax25_dev_list = s->next;
+-		goto unlock_put;
+-	}
+-
+-	while (s != NULL && s->next != NULL) {
+-		if (s->next == ax25_dev) {
+-			s->next = ax25_dev->next;
+-			goto unlock_put;
++	list_for_each_entry(s, &ax25_dev_list, list) {
++		if (s == ax25_dev) {
++			list_del(&s->list);
++			break;
+ 		}
+-
+-		s = s->next;
+ 	}
+-	spin_unlock_bh(&ax25_dev_lock);
+-	dev->ax25_ptr = NULL;
+-	ax25_dev_put(ax25_dev);
+-	return;
+ 
+-unlock_put:
+-	spin_unlock_bh(&ax25_dev_lock);
+-	ax25_dev_put(ax25_dev);
+ 	dev->ax25_ptr = NULL;
++	spin_unlock_bh(&ax25_dev_lock);
+ 	netdev_put(dev, &ax25_dev->dev_tracker);
+ 	ax25_dev_put(ax25_dev);
+ }
+@@ -200,16 +187,13 @@ struct net_device *ax25_fwd_dev(struct net_device *dev)
+  */
+ void __exit ax25_dev_free(void)
+ {
+-	ax25_dev *s, *ax25_dev;
++	ax25_dev *s, *n;
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+-	ax25_dev = ax25_dev_list;
+-	while (ax25_dev != NULL) {
+-		s        = ax25_dev;
+-		netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
+-		ax25_dev = ax25_dev->next;
++	list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
++		netdev_put(s->dev, &s->dev_tracker);
++		list_del(&s->list);
+ 		kfree(s);
+ 	}
+-	ax25_dev_list = NULL;
+ 	spin_unlock_bh(&ax25_dev_lock);
+ }
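
For readers following the conversion: the ax25 hunks replace a hand-rolled
singly linked list (ax25_dev->next) with the generic doubly linked list
from <linux/list.h>. A plain-C re-implementation of just the pieces used
above, mirroring the rewritten ax25_dev_free() (names copied for
readability; this is a sketch, not the kernel code):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* Safe variant: 'n' caches the next node so 'pos' may be freed. */
#define list_for_each_entry_safe(pos, n, head, member)			  \
	for (pos = list_entry((head)->next, typeof(*pos), member),	  \
	     n = list_entry(pos->member.next, typeof(*pos), member);	  \
	     &pos->member != (head);					  \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct ax25_dev_demo {
	int id;
	struct list_head list;	/* replaces the old ->next pointer */
};

static struct list_head dev_list = LIST_HEAD_INIT(dev_list);

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct ax25_dev_demo *d = malloc(sizeof(*d));
		d->id = i;
		list_add(&d->list, &dev_list);
	}

	/* Same shape as the rewritten ax25_dev_free(). */
	struct ax25_dev_demo *s, *n;
	list_for_each_entry_safe(s, n, &dev_list, list) {
		printf("freeing dev %d\n", s->id);
		list_del(&s->list);
		free(s);
	}
	return 0;
}
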
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 6ab404dda7949..08ae30fd31551 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1173,8 +1173,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
+ 
+ 	list_for_each_entry(d, &hci_dev_list, list) {
+ 		if (!test_bit(HCI_UP, &d->flags) ||
+-		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
+-		    d->dev_type != HCI_PRIMARY)
++		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
+ 			continue;
+ 
+ 		/* Simple routing:
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index bc5086423ab83..24f6b6a5c7721 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -395,11 +395,6 @@ int hci_inquiry(void __user *arg)
+ 		goto done;
+ 	}
+ 
+-	if (hdev->dev_type != HCI_PRIMARY) {
+-		err = -EOPNOTSUPP;
+-		goto done;
+-	}
+-
+ 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ 		err = -EOPNOTSUPP;
+ 		goto done;
+@@ -752,11 +747,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 		goto done;
+ 	}
+ 
+-	if (hdev->dev_type != HCI_PRIMARY) {
+-		err = -EOPNOTSUPP;
+-		goto done;
+-	}
+-
+ 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ 		err = -EOPNOTSUPP;
+ 		goto done;
+@@ -910,7 +900,7 @@ int hci_get_dev_info(void __user *arg)
+ 
+ 	strscpy(di.name, hdev->name, sizeof(di.name));
+ 	di.bdaddr   = hdev->bdaddr;
+-	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
++	di.type     = (hdev->bus & 0x0f);
+ 	di.flags    = flags;
+ 	di.pkt_type = hdev->pkt_type;
+ 	if (lmp_bredr_capable(hdev)) {
+@@ -1026,8 +1016,7 @@ static void hci_power_on(struct work_struct *work)
+ 	 */
+ 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+-	    (hdev->dev_type == HCI_PRIMARY &&
+-	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
++	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ 		hci_dev_do_close(hdev);
+@@ -1769,6 +1758,15 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
+ 
+ 		adv->pending = true;
+ 		adv->instance = instance;
++
++		/* If controller support only one set and the instance is set to
++		 * 1 then there is no option other than using handle 0x00.
++		 */
++		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
++			adv->handle = 0x00;
++		else
++			adv->handle = instance;
++
+ 		list_add(&adv->list, &hdev->adv_instances);
+ 		hdev->adv_instance_cnt++;
+ 	}
+@@ -2635,21 +2633,7 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	if (!hdev->open || !hdev->close || !hdev->send)
+ 		return -EINVAL;
+ 
+-	/* Do not allow HCI_AMP devices to register at index 0,
+-	 * so the index can be used as the AMP controller ID.
+-	 */
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
+-		break;
+-	case HCI_AMP:
+-		id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
+-				     GFP_KERNEL);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
++	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
+ 	if (id < 0)
+ 		return id;
+ 
+@@ -2701,12 +2685,10 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	hci_dev_set_flag(hdev, HCI_SETUP);
+ 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+ 
+-	if (hdev->dev_type == HCI_PRIMARY) {
+-		/* Assume BR/EDR support until proven otherwise (such as
+-		 * through reading supported features during init.
+-		 */
+-		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
+-	}
++	/* Assume BR/EDR support until proven otherwise (such as
++	 * through reading supported features during init.
++	 */
++	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
+ 
+ 	write_lock(&hci_dev_list_lock);
+ 	list_add(&hdev->list, &hci_dev_list);
+@@ -3242,17 +3224,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
+ 
+ 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
+ 
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		hci_add_acl_hdr(skb, conn->handle, flags);
+-		break;
+-	case HCI_AMP:
+-		hci_add_acl_hdr(skb, chan->handle, flags);
+-		break;
+-	default:
+-		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
+-		return;
+-	}
++	hci_add_acl_hdr(skb, conn->handle, flags);
+ 
+ 	list = skb_shinfo(skb)->frag_list;
+ 	if (!list) {
+@@ -3412,9 +3384,6 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
+ 	case ACL_LINK:
+ 		cnt = hdev->acl_cnt;
+ 		break;
+-	case AMP_LINK:
+-		cnt = hdev->block_cnt;
+-		break;
+ 	case SCO_LINK:
+ 	case ESCO_LINK:
+ 		cnt = hdev->sco_cnt;
+@@ -3612,12 +3581,6 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+ 
+ }
+ 
+-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+-{
+-	/* Calculate count of blocks used by this packet */
+-	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+-}
+-
+ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
+ {
+ 	unsigned long last_tx;
+@@ -3731,81 +3694,15 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
+ 		hci_prio_recalculate(hdev, ACL_LINK);
+ }
+ 
+-static void hci_sched_acl_blk(struct hci_dev *hdev)
+-{
+-	unsigned int cnt = hdev->block_cnt;
+-	struct hci_chan *chan;
+-	struct sk_buff *skb;
+-	int quote;
+-	u8 type;
+-
+-	BT_DBG("%s", hdev->name);
+-
+-	if (hdev->dev_type == HCI_AMP)
+-		type = AMP_LINK;
+-	else
+-		type = ACL_LINK;
+-
+-	__check_timeout(hdev, cnt, type);
+-
+-	while (hdev->block_cnt > 0 &&
+-	       (chan = hci_chan_sent(hdev, type, &quote))) {
+-		u32 priority = (skb_peek(&chan->data_q))->priority;
+-		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+-			int blocks;
+-
+-			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+-			       skb->len, skb->priority);
+-
+-			/* Stop if priority has changed */
+-			if (skb->priority < priority)
+-				break;
+-
+-			skb = skb_dequeue(&chan->data_q);
+-
+-			blocks = __get_blocks(hdev, skb);
+-			if (blocks > hdev->block_cnt)
+-				return;
+-
+-			hci_conn_enter_active_mode(chan->conn,
+-						   bt_cb(skb)->force_active);
+-
+-			hci_send_frame(hdev, skb);
+-			hdev->acl_last_tx = jiffies;
+-
+-			hdev->block_cnt -= blocks;
+-			quote -= blocks;
+-
+-			chan->sent += blocks;
+-			chan->conn->sent += blocks;
+-		}
+-	}
+-
+-	if (cnt != hdev->block_cnt)
+-		hci_prio_recalculate(hdev, type);
+-}
+-
+ static void hci_sched_acl(struct hci_dev *hdev)
+ {
+ 	BT_DBG("%s", hdev->name);
+ 
+ 	/* No ACL link over BR/EDR controller */
+-	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
+-		return;
+-
+-	/* No AMP link over AMP controller */
+-	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
++	if (!hci_conn_num(hdev, ACL_LINK))
+ 		return;
+ 
+-	switch (hdev->flow_ctl_mode) {
+-	case HCI_FLOW_CTL_MODE_PACKET_BASED:
+-		hci_sched_acl_pkt(hdev);
+-		break;
+-
+-	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+-		hci_sched_acl_blk(hdev);
+-		break;
+-	}
++	hci_sched_acl_pkt(hdev);
+ }
+ 
+ static void hci_sched_le(struct hci_dev *hdev)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 4de8f0dc1a523..cce73749f2dce 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -913,21 +913,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
+ 	return rp->status;
+ }
+ 
+-static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
+-					struct sk_buff *skb)
+-{
+-	struct hci_rp_read_flow_control_mode *rp = data;
+-
+-	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+-	if (rp->status)
+-		return rp->status;
+-
+-	hdev->flow_ctl_mode = rp->mode;
+-
+-	return rp->status;
+-}
+-
+ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ 				  struct sk_buff *skb)
+ {
+@@ -1071,28 +1056,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
+ 	return rp->status;
+ }
+ 
+-static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
+-				      struct sk_buff *skb)
+-{
+-	struct hci_rp_read_data_block_size *rp = data;
+-
+-	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+-	if (rp->status)
+-		return rp->status;
+-
+-	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+-	hdev->block_len = __le16_to_cpu(rp->block_len);
+-	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+-
+-	hdev->block_cnt = hdev->num_blocks;
+-
+-	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+-	       hdev->block_cnt, hdev->block_len);
+-
+-	return rp->status;
+-}
+-
+ static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+ 			    struct sk_buff *skb)
+ {
+@@ -1127,30 +1090,6 @@ static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+ 	return rp->status;
+ }
+ 
+-static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
+-				     struct sk_buff *skb)
+-{
+-	struct hci_rp_read_local_amp_info *rp = data;
+-
+-	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+-
+-	if (rp->status)
+-		return rp->status;
+-
+-	hdev->amp_status = rp->amp_status;
+-	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+-	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+-	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+-	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+-	hdev->amp_type = rp->amp_type;
+-	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+-	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+-	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+-	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+-
+-	return rp->status;
+-}
+-
+ static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
+ 				       struct sk_buff *skb)
+ {
+@@ -4121,12 +4060,6 @@ static const struct hci_cc {
+ 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
+ 	       sizeof(struct hci_rp_read_page_scan_type)),
+ 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
+-	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
+-	       sizeof(struct hci_rp_read_data_block_size)),
+-	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
+-	       sizeof(struct hci_rp_read_flow_control_mode)),
+-	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
+-	       sizeof(struct hci_rp_read_local_amp_info)),
+ 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
+ 	       sizeof(struct hci_rp_read_clock)),
+ 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
+@@ -4317,7 +4250,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
+ 	hci_dev_lock(hdev);
+ 
+ 	/* Remove connection if command failed */
+-	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
++	for (i = 0; i < cp->num_cis; i++) {
+ 		struct hci_conn *conn;
+ 		u16 handle;
+ 
+@@ -4333,6 +4266,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
+ 			hci_conn_del(conn);
+ 		}
+ 	}
++	cp->num_cis = 0;
+ 
+ 	if (pending)
+ 		hci_le_create_cis_pending(hdev);
+@@ -4461,11 +4395,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ 			     flex_array_size(ev, handles, ev->num)))
+ 		return;
+ 
+-	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+-		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
+-		return;
+-	}
+-
+ 	bt_dev_dbg(hdev, "num %d", ev->num);
+ 
+ 	for (i = 0; i < ev->num; i++) {
+@@ -4533,78 +4462,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ 	queue_work(hdev->workqueue, &hdev->tx_work);
+ }
+ 
+-static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
+-						 __u16 handle)
+-{
+-	struct hci_chan *chan;
+-
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		return hci_conn_hash_lookup_handle(hdev, handle);
+-	case HCI_AMP:
+-		chan = hci_chan_lookup_handle(hdev, handle);
+-		if (chan)
+-			return chan->conn;
+-		break;
+-	default:
+-		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
+-		break;
+-	}
+-
+-	return NULL;
+-}
+-
+-static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
+-				    struct sk_buff *skb)
+-{
+-	struct hci_ev_num_comp_blocks *ev = data;
+-	int i;
+-
+-	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
+-			     flex_array_size(ev, handles, ev->num_hndl)))
+-		return;
+-
+-	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+-		bt_dev_err(hdev, "wrong event for mode %d",
+-			   hdev->flow_ctl_mode);
+-		return;
+-	}
+-
+-	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
+-		   ev->num_hndl);
+-
+-	for (i = 0; i < ev->num_hndl; i++) {
+-		struct hci_comp_blocks_info *info = &ev->handles[i];
+-		struct hci_conn *conn = NULL;
+-		__u16  handle, block_count;
+-
+-		handle = __le16_to_cpu(info->handle);
+-		block_count = __le16_to_cpu(info->blocks);
+-
+-		conn = __hci_conn_lookup_handle(hdev, handle);
+-		if (!conn)
+-			continue;
+-
+-		conn->sent -= block_count;
+-
+-		switch (conn->type) {
+-		case ACL_LINK:
+-		case AMP_LINK:
+-			hdev->block_cnt += block_count;
+-			if (hdev->block_cnt > hdev->num_blocks)
+-				hdev->block_cnt = hdev->num_blocks;
+-			break;
+-
+-		default:
+-			bt_dev_err(hdev, "unknown type %d conn %p",
+-				   conn->type, conn);
+-			break;
+-		}
+-	}
+-
+-	queue_work(hdev->workqueue, &hdev->tx_work);
+-}
+-
+ static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
+ 				struct sk_buff *skb)
+ {
+@@ -7512,9 +7369,6 @@ static const struct hci_ev {
+ 	/* [0x3e = HCI_EV_LE_META] */
+ 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
+ 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
+-	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
+-	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
+-	       sizeof(struct hci_ev_num_comp_blocks)),
+ 	/* [0xff = HCI_EV_VENDOR] */
+ 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
+ };
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 703b84bd48d5b..69c2ba1e843eb 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -485,7 +485,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
+ 			return NULL;
+ 
+ 		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+-		ni->type = hdev->dev_type;
++		ni->type = 0x00; /* Old hdev->dev_type */
+ 		ni->bus = hdev->bus;
+ 		bacpy(&ni->bdaddr, &hdev->bdaddr);
+ 		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+@@ -1007,9 +1007,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ 		return -EOPNOTSUPP;
+ 
+-	if (hdev->dev_type != HCI_PRIMARY)
+-		return -EOPNOTSUPP;
+-
+ 	switch (cmd) {
+ 	case HCISETRAW:
+ 		if (!capable(CAP_NET_ADMIN))
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 4c707eb64e6f6..64f794d198cdc 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1043,11 +1043,10 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 	struct hci_cp_ext_adv_set *set;
+ 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
+ 	u8 size;
++	struct adv_info *adv = NULL;
+ 
+ 	/* If request specifies an instance that doesn't exist, fail */
+ 	if (instance > 0) {
+-		struct adv_info *adv;
+-
+ 		adv = hci_find_adv_instance(hdev, instance);
+ 		if (!adv)
+ 			return -EINVAL;
+@@ -1066,7 +1065,7 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 	cp->num_of_sets = !!instance;
+ 	cp->enable = 0x00;
+ 
+-	set->handle = instance;
++	set->handle = adv ? adv->handle : instance;
+ 
+ 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
+ 
+@@ -1235,31 +1234,27 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 
+ static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+ {
+-	struct {
+-		struct hci_cp_le_set_ext_scan_rsp_data cp;
+-		u8 data[HCI_MAX_EXT_AD_LENGTH];
+-	} pdu;
++	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
++		    HCI_MAX_EXT_AD_LENGTH);
+ 	u8 len;
+ 	struct adv_info *adv = NULL;
+ 	int err;
+ 
+-	memset(&pdu, 0, sizeof(pdu));
+-
+ 	if (instance) {
+ 		adv = hci_find_adv_instance(hdev, instance);
+ 		if (!adv || !adv->scan_rsp_changed)
+ 			return 0;
+ 	}
+ 
+-	len = eir_create_scan_rsp(hdev, instance, pdu.data);
++	len = eir_create_scan_rsp(hdev, instance, pdu->data);
+ 
+-	pdu.cp.handle = instance;
+-	pdu.cp.length = len;
+-	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+-	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
++	pdu->handle = adv ? adv->handle : instance;
++	pdu->length = len;
++	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
++	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ 
+ 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+-				    sizeof(pdu.cp) + len, &pdu.cp,
++				    struct_size(pdu, data, len), pdu,
+ 				    HCI_CMD_TIMEOUT);
+ 	if (err)
+ 		return err;
+@@ -1267,7 +1262,7 @@ static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+ 	if (adv) {
+ 		adv->scan_rsp_changed = false;
+ 	} else {
+-		memcpy(hdev->scan_rsp_data, pdu.data, len);
++		memcpy(hdev->scan_rsp_data, pdu->data, len);
+ 		hdev->scan_rsp_data_len = len;
+ 	}
+ 
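
The pdu rewrites in this file replace an open-coded struct-plus-buffer
with DEFINE_FLEX()/struct_size() from <linux/overflow.h>: bounded stack
storage for a flexible-array command, and a wire length derived from the
element count rather than hand-added sizeof() terms. A plain-C
approximation of what those helpers buy (the command layout and names
here are illustrative, not the real HCI structures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct adv_data_cmd {
	uint8_t handle;
	uint8_t operation;
	uint8_t frag_pref;
	uint8_t length;
	uint8_t data[];			/* flexible array member */
};

/* struct_size(p, member, count), minus the kernel's overflow checks. */
#define STRUCT_SIZE(type, member, count) \
	(sizeof(type) + (count) * sizeof(((type *)0)->member[0]))

#define MAX_AD_LENGTH 251

int main(void)
{
	/* Roughly what DEFINE_FLEX(struct adv_data_cmd, pdu, data,
	 * length, MAX_AD_LENGTH) provides: zeroed storage sized for the
	 * largest payload, addressed through a typed pointer. */
	union {
		struct adv_data_cmd obj;
		uint8_t storage[STRUCT_SIZE(struct adv_data_cmd,
					    data, MAX_AD_LENGTH)];
	} buf;
	struct adv_data_cmd *pdu = &buf.obj;

	memset(&buf, 0, sizeof(buf));
	uint8_t len = 3;
	memcpy(pdu->data, "\x02\x01\x06", len);	/* AD: flags */
	pdu->length = len;

	/* Only header + used payload goes on the wire. */
	printf("wire size: %zu of %zu reserved bytes\n",
	       STRUCT_SIZE(struct adv_data_cmd, data, len),
	       sizeof(buf));
	return 0;
}
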
+@@ -1335,7 +1330,7 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
+ 
+ 	memset(set, 0, sizeof(*set));
+ 
+-	set->handle = instance;
++	set->handle = adv ? adv->handle : instance;
+ 
+ 	/* Set duration per instance since controller is responsible for
+ 	 * scheduling it.
+@@ -1411,29 +1406,25 @@ static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
+ 
+ static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
+ {
+-	struct {
+-		struct hci_cp_le_set_per_adv_data cp;
+-		u8 data[HCI_MAX_PER_AD_LENGTH];
+-	} pdu;
++	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
++		    HCI_MAX_PER_AD_LENGTH);
+ 	u8 len;
+-
+-	memset(&pdu, 0, sizeof(pdu));
++	struct adv_info *adv = NULL;
+ 
+ 	if (instance) {
+-		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
+-
++		adv = hci_find_adv_instance(hdev, instance);
+ 		if (!adv || !adv->periodic)
+ 			return 0;
+ 	}
+ 
+-	len = eir_create_per_adv_data(hdev, instance, pdu.data);
++	len = eir_create_per_adv_data(hdev, instance, pdu->data);
+ 
+-	pdu.cp.length = len;
+-	pdu.cp.handle = instance;
+-	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
++	pdu->length = len;
++	pdu->handle = adv ? adv->handle : instance;
++	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ 
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
+-				     sizeof(pdu.cp) + len, &pdu,
++				     struct_size(pdu, data, len), pdu,
+ 				     HCI_CMD_TIMEOUT);
+ }
+ 
+@@ -1727,31 +1718,27 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
+ 
+ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+ {
+-	struct {
+-		struct hci_cp_le_set_ext_adv_data cp;
+-		u8 data[HCI_MAX_EXT_AD_LENGTH];
+-	} pdu;
++	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
++		    HCI_MAX_EXT_AD_LENGTH);
+ 	u8 len;
+ 	struct adv_info *adv = NULL;
+ 	int err;
+ 
+-	memset(&pdu, 0, sizeof(pdu));
+-
+ 	if (instance) {
+ 		adv = hci_find_adv_instance(hdev, instance);
+ 		if (!adv || !adv->adv_data_changed)
+ 			return 0;
+ 	}
+ 
+-	len = eir_create_adv_data(hdev, instance, pdu.data);
++	len = eir_create_adv_data(hdev, instance, pdu->data);
+ 
+-	pdu.cp.length = len;
+-	pdu.cp.handle = instance;
+-	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+-	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
++	pdu->length = len;
++	pdu->handle = adv ? adv->handle : instance;
++	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
++	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ 
+ 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+-				    sizeof(pdu.cp) + len, &pdu.cp,
++				    struct_size(pdu, data, len), pdu,
+ 				    HCI_CMD_TIMEOUT);
+ 	if (err)
+ 		return err;
+@@ -1760,7 +1747,7 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+ 	if (adv) {
+ 		adv->adv_data_changed = false;
+ 	} else {
+-		memcpy(hdev->adv_data, pdu.data, len);
++		memcpy(hdev->adv_data, pdu->data, len);
+ 		hdev->adv_data_len = len;
+ 	}
+ 
+@@ -3523,10 +3510,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
+ /* Read Local Supported Features. */
+ static int hci_read_local_features_sync(struct hci_dev *hdev)
+ {
+-	 /* Not all AMP controllers support this command */
+-	if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
+-		return 0;
+-
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
+ 				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+@@ -3561,51 +3544,6 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
+ 	return 0;
+ }
+ 
+-/* Read Local AMP Info */
+-static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
+-{
+-	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
+-				     0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Data Blk size */
+-static int hci_read_data_block_size_sync(struct hci_dev *hdev)
+-{
+-	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
+-				     0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Flow Control Mode */
+-static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
+-{
+-	return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
+-				     0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* Read Location Data */
+-static int hci_read_location_data_sync(struct hci_dev *hdev)
+-{
+-	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
+-				     0, NULL, HCI_CMD_TIMEOUT);
+-}
+-
+-/* AMP Controller init stage 1 command sequence */
+-static const struct hci_init_stage amp_init1[] = {
+-	/* HCI_OP_READ_LOCAL_VERSION */
+-	HCI_INIT(hci_read_local_version_sync),
+-	/* HCI_OP_READ_LOCAL_COMMANDS */
+-	HCI_INIT(hci_read_local_cmds_sync),
+-	/* HCI_OP_READ_LOCAL_AMP_INFO */
+-	HCI_INIT(hci_read_local_amp_info_sync),
+-	/* HCI_OP_READ_DATA_BLOCK_SIZE */
+-	HCI_INIT(hci_read_data_block_size_sync),
+-	/* HCI_OP_READ_FLOW_CONTROL_MODE */
+-	HCI_INIT(hci_read_flow_control_mode_sync),
+-	/* HCI_OP_READ_LOCATION_DATA */
+-	HCI_INIT(hci_read_location_data_sync),
+-	{}
+-};
+-
+ static int hci_init1_sync(struct hci_dev *hdev)
+ {
+ 	int err;
+@@ -3619,28 +3557,9 @@ static int hci_init1_sync(struct hci_dev *hdev)
+ 			return err;
+ 	}
+ 
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+-		return hci_init_stage_sync(hdev, br_init1);
+-	case HCI_AMP:
+-		hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+-		return hci_init_stage_sync(hdev, amp_init1);
+-	default:
+-		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
+-		break;
+-	}
+-
+-	return 0;
++	return hci_init_stage_sync(hdev, br_init1);
+ }
+ 
+-/* AMP Controller init stage 2 command sequence */
+-static const struct hci_init_stage amp_init2[] = {
+-	/* HCI_OP_READ_LOCAL_FEATURES */
+-	HCI_INIT(hci_read_local_features_sync),
+-	{}
+-};
+-
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+ static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+ {
+@@ -3898,9 +3817,6 @@ static int hci_init2_sync(struct hci_dev *hdev)
+ 
+ 	bt_dev_dbg(hdev, "");
+ 
+-	if (hdev->dev_type == HCI_AMP)
+-		return hci_init_stage_sync(hdev, amp_init2);
+-
+ 	err = hci_init_stage_sync(hdev, hci_init2);
+ 	if (err)
+ 		return err;
+@@ -4728,13 +4644,6 @@ static int hci_init_sync(struct hci_dev *hdev)
+ 	if (err < 0)
+ 		return err;
+ 
+-	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
+-	 * BR/EDR/LE type controllers. AMP controllers only need the
+-	 * first two stages of init.
+-	 */
+-	if (hdev->dev_type != HCI_PRIMARY)
+-		return 0;
+-
+ 	err = hci_init3_sync(hdev);
+ 	if (err < 0)
+ 		return err;
+@@ -4963,12 +4872,8 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ 		 * In case of user channel usage, it is not important
+ 		 * if a public address or static random address is
+ 		 * available.
+-		 *
+-		 * This check is only valid for BR/EDR controllers
+-		 * since AMP controllers do not have an address.
+ 		 */
+ 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+-		    hdev->dev_type == HCI_PRIMARY &&
+ 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ 			ret = -EADDRNOTAVAIL;
+@@ -5003,8 +4908,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
+ 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+ 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+-		    hci_dev_test_flag(hdev, HCI_MGMT) &&
+-		    hdev->dev_type == HCI_PRIMARY) {
++		    hci_dev_test_flag(hdev, HCI_MGMT)) {
+ 			ret = hci_powered_update_sync(hdev);
+ 			mgmt_power_on(hdev, ret);
+ 		}
+@@ -5149,8 +5053,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ 
+ 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+ 
+-	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+-	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
++	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ 	    hci_dev_test_flag(hdev, HCI_MGMT))
+ 		__mgmt_power_off(hdev);
+ 
+@@ -5212,9 +5115,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
+ 	hdev->flags &= BIT(HCI_RAW);
+ 	hci_dev_clear_volatile_flags(hdev);
+ 
+-	/* Controller radio is available but is currently powered down */
+-	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+-
+ 	memset(hdev->eir, 0, sizeof(hdev->eir));
+ 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ 	bacpy(&hdev->random_addr, BDADDR_ANY);
+@@ -5251,8 +5151,7 @@ static int hci_power_on_sync(struct hci_dev *hdev)
+ 	 */
+ 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+-	    (hdev->dev_type == HCI_PRIMARY &&
+-	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
++	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ 		hci_dev_close_sync(hdev);
+@@ -5354,27 +5253,11 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
+ 	return 0;
+ }
+ 
+-static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
+-					u8 reason)
+-{
+-	struct hci_cp_disconn_phy_link cp;
+-
+-	memset(&cp, 0, sizeof(cp));
+-	cp.phy_handle = HCI_PHY_HANDLE(handle);
+-	cp.reason = reason;
+-
+-	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
+-				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+-}
+-
+ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ 			       u8 reason)
+ {
+ 	struct hci_cp_disconnect cp;
+ 
+-	if (conn->type == AMP_LINK)
+-		return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
+-
+ 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
+ 		/* This is a BIS connection, hci_conn_del will
+ 		 * do the necessary cleanup.
+@@ -6493,10 +6376,8 @@ static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
+ 
+ int hci_le_create_cis_sync(struct hci_dev *hdev)
+ {
+-	struct {
+-		struct hci_cp_le_create_cis cp;
+-		struct hci_cis cis[0x1f];
+-	} cmd;
++	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
++	size_t aux_num_cis = 0;
+ 	struct hci_conn *conn;
+ 	u8 cig = BT_ISO_QOS_CIG_UNSET;
+ 
+@@ -6523,8 +6404,6 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
+ 	 * remains pending.
+ 	 */
+ 
+-	memset(&cmd, 0, sizeof(cmd));
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	rcu_read_lock();
+@@ -6561,7 +6440,7 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
+ 		goto done;
+ 
+ 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+-		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
++		struct hci_cis *cis = &cmd->cis[aux_num_cis];
+ 
+ 		if (hci_conn_check_create_cis(conn) ||
+ 		    conn->iso_qos.ucast.cig != cig)
+@@ -6570,25 +6449,25 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
+ 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+ 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
+ 		cis->cis_handle = cpu_to_le16(conn->handle);
+-		cmd.cp.num_cis++;
++		aux_num_cis++;
+ 
+-		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
++		if (aux_num_cis >= 0x1f)
+ 			break;
+ 	}
++	cmd->num_cis = aux_num_cis;
+ 
+ done:
+ 	rcu_read_unlock();
+ 
+ 	hci_dev_unlock(hdev);
+ 
+-	if (!cmd.cp.num_cis)
++	if (!aux_num_cis)
+ 		return 0;
+ 
+ 	/* Wait for HCI_LE_CIS_Established */
+ 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
+-					sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
+-					cmd.cp.num_cis, &cmd,
+-					HCI_EVT_LE_CIS_ESTABLISHED,
++					struct_size(cmd, cis, cmd->num_cis),
++					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
+ 					conn->conn_timeout, NULL);
+ }
+ 
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 6bed4aa8291de..6cb41f9d174e2 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -85,8 +85,9 @@ static void iso_sock_disconn(struct sock *sk);
+ 
+ typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
+ 
+-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
+-					iso_sock_match_t match, void *data);
++static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
++				 enum bt_sock_state state,
++				 iso_sock_match_t match, void *data);
+ 
+ /* ---- ISO timers ---- */
+ #define ISO_CONN_TIMEOUT	(HZ * 40)
+@@ -233,10 +234,11 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+ 		 * terminated are not processed anymore.
+ 		 */
+ 		if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+-			parent = iso_get_sock_listen(&hcon->src,
+-						     &hcon->dst,
+-						     iso_match_conn_sync_handle,
+-						     hcon);
++			parent = iso_get_sock(&hcon->src,
++					      &hcon->dst,
++					      BT_LISTEN,
++					      iso_match_conn_sync_handle,
++					      hcon);
+ 
+ 			if (parent) {
+ 				set_bit(BT_SK_PA_SYNC_TERM,
+@@ -581,22 +583,23 @@ static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc,
+ 	return NULL;
+ }
+ 
+-/* Find socket listening:
++/* Find socket in given state:
+  * source bdaddr (Unicast)
+  * destination bdaddr (Broadcast only)
+  * match func - pass NULL to ignore
+  * match func data - pass -1 to ignore
+  * Returns closest match.
+  */
+-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
+-					iso_sock_match_t match, void *data)
++static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
++				 enum bt_sock_state state,
++				 iso_sock_match_t match, void *data)
+ {
+ 	struct sock *sk = NULL, *sk1 = NULL;
+ 
+ 	read_lock(&iso_sk_list.lock);
+ 
+ 	sk_for_each(sk, &iso_sk_list.head) {
+-		if (sk->sk_state != BT_LISTEN)
++		if (sk->sk_state != state)
+ 			continue;
+ 
+ 		/* Match Broadcast destination */
+@@ -1777,32 +1780,37 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 						 HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
+ 
+ 			/* Get reference to PA sync parent socket, if it exists */
+-			parent = iso_get_sock_listen(&hcon->src,
+-						     &hcon->dst,
+-						     iso_match_pa_sync_flag, NULL);
++			parent = iso_get_sock(&hcon->src, &hcon->dst,
++					      BT_LISTEN,
++					      iso_match_pa_sync_flag,
++					      NULL);
+ 			if (!parent && ev)
+-				parent = iso_get_sock_listen(&hcon->src,
+-							     &hcon->dst,
+-							     iso_match_big, ev);
++				parent = iso_get_sock(&hcon->src,
++						      &hcon->dst,
++						      BT_LISTEN,
++						      iso_match_big, ev);
+ 		} else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+ 			ev2 = hci_recv_event_data(hcon->hdev,
+ 						  HCI_EV_LE_PA_SYNC_ESTABLISHED);
+ 			if (ev2)
+-				parent = iso_get_sock_listen(&hcon->src,
+-							     &hcon->dst,
+-							     iso_match_sid, ev2);
++				parent = iso_get_sock(&hcon->src,
++						      &hcon->dst,
++						      BT_LISTEN,
++						      iso_match_sid, ev2);
+ 		} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
+ 			ev3 = hci_recv_event_data(hcon->hdev,
+ 						  HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ 			if (ev3)
+-				parent = iso_get_sock_listen(&hcon->src,
+-							     &hcon->dst,
+-							     iso_match_sync_handle, ev3);
++				parent = iso_get_sock(&hcon->src,
++						      &hcon->dst,
++						      BT_LISTEN,
++						      iso_match_sync_handle,
++						      ev3);
+ 		}
+ 
+ 		if (!parent)
+-			parent = iso_get_sock_listen(&hcon->src,
+-							BDADDR_ANY, NULL, NULL);
++			parent = iso_get_sock(&hcon->src, BDADDR_ANY,
++					      BT_LISTEN, NULL, NULL);
+ 
+ 		if (!parent)
+ 			return;
+@@ -1923,8 +1931,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	 */
+ 	ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED);
+ 	if (ev1) {
+-		sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid,
+-					 ev1);
++		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
++				  iso_match_sid, ev1);
+ 		if (sk && !ev1->status)
+ 			iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle);
+ 
+@@ -1934,12 +1942,12 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 	ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ 	if (ev2) {
+ 		/* Try to get PA sync listening socket, if it exists */
+-		sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+-						iso_match_pa_sync_flag, NULL);
++		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
++				  iso_match_pa_sync_flag, NULL);
+ 
+ 		if (!sk) {
+-			sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+-						 iso_match_sync_handle, ev2);
++			sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
++					  iso_match_sync_handle, ev2);
+ 
+ 			/* If PA Sync is in process of terminating,
+ 			 * do not handle any more BIGInfo adv reports.
+@@ -1979,8 +1987,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 		u8 *base;
+ 		struct hci_conn *hcon;
+ 
+-		sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
+-					 iso_match_sync_handle_pa_report, ev3);
++		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
++				  iso_match_sync_handle_pa_report, ev3);
+ 		if (!sk)
+ 			goto done;
+ 
+@@ -2029,7 +2037,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 			hcon->le_per_adv_data_len = 0;
+ 		}
+ 	} else {
+-		sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
++		sk = iso_get_sock(&hdev->bdaddr, BDADDR_ANY,
++				  BT_LISTEN, NULL, NULL);
+ 	}
+ 
+ done:
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 3f7a82f10fe98..4a633c1b68825 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -457,6 +457,9 @@ struct l2cap_chan *l2cap_chan_create(void)
+ 	/* Set default lock nesting level */
+ 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+ 
++	/* Available receive buffer space is initially unknown */
++	chan->rx_avail = -1;
++
+ 	write_lock(&chan_list_lock);
+ 	list_add(&chan->global_l, &chan_list);
+ 	write_unlock(&chan_list_lock);
+@@ -538,6 +541,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+ }
+ EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
+ 
++static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
++{
++	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
++
++	if (chan->mps == 0)
++		return 0;
++
++	/* If we don't know the available space in the receive buffer, give
++	 * enough credits for a full packet.
++	 */
++	if (chan->rx_avail == -1)
++		return (chan->imtu / chan->mps) + 1;
++
++	/* If we know how much space is available in the receive buffer, give
++	 * out as many credits as would fill the buffer.
++	 */
++	if (chan->rx_avail <= sdu_len)
++		return 0;
++
++	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
++}
++
+ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ {
+ 	chan->sdu = NULL;
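
The new helper grants ceil((rx_avail - sdu_len) / mps) credits once the
receive buffer space is known, keeping the old "one full packet"
heuristic only for the unknown (-1) case. A worked example with
illustrative channel values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long rx_avail = 2048;	/* advertised receive space */
	long sdu_len = 512;	/* partially reassembled SDU */
	long mps = 230;		/* maximum PDU payload size */

	if (rx_avail <= sdu_len)
		printf("credits: 0 (buffer already committed)\n");
	else	/* ceil(1536 / 230) = 7 */
		printf("credits: %ld\n",
		       DIV_ROUND_UP(rx_avail - sdu_len, mps));
	return 0;
}
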
+@@ -546,8 +571,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
+ 	chan->tx_credits = tx_credits;
+ 	/* Derive MPS from connection MTU to stop HCI fragmentation */
+ 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
+-	/* Give enough credits for a full packet */
+-	chan->rx_credits = (chan->imtu / chan->mps) + 1;
++	chan->rx_credits = l2cap_le_rx_credits(chan);
+ 
+ 	skb_queue_head_init(&chan->tx_q);
+ }
+@@ -559,7 +583,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
+ 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
+ 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
+ 		chan->mps = L2CAP_ECRED_MIN_MPS;
+-		chan->rx_credits = (chan->imtu / chan->mps) + 1;
++		chan->rx_credits = l2cap_le_rx_credits(chan);
+ 	}
+ }
+ 
+@@ -3906,7 +3930,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn,
+ }
+ 
+ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+-			  u8 *data, u8 rsp_code, u8 amp_id)
++			  u8 *data, u8 rsp_code)
+ {
+ 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ 	struct l2cap_conn_rsp rsp;
+@@ -3985,17 +4009,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 				status = L2CAP_CS_AUTHOR_PEND;
+ 				chan->ops->defer(chan);
+ 			} else {
+-				/* Force pending result for AMP controllers.
+-				 * The connection will succeed after the
+-				 * physical link is up.
+-				 */
+-				if (amp_id == AMP_ID_BREDR) {
+-					l2cap_state_change(chan, BT_CONFIG);
+-					result = L2CAP_CR_SUCCESS;
+-				} else {
+-					l2cap_state_change(chan, BT_CONNECT2);
+-					result = L2CAP_CR_PEND;
+-				}
++				l2cap_state_change(chan, BT_CONNECT2);
++				result = L2CAP_CR_PEND;
+ 				status = L2CAP_CS_NO_INFO;
+ 			}
+ 		} else {
+@@ -4060,7 +4075,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
+ 		mgmt_device_connected(hdev, hcon, NULL, 0);
+ 	hci_dev_unlock(hdev);
+ 
+-	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
++	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
+ 	return 0;
+ }
+ 
+@@ -6513,9 +6528,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ {
+ 	struct l2cap_conn *conn = chan->conn;
+ 	struct l2cap_le_credits pkt;
+-	u16 return_credits;
+-
+-	return_credits = (chan->imtu / chan->mps) + 1;
++	u16 return_credits = l2cap_le_rx_credits(chan);
+ 
+ 	if (chan->rx_credits >= return_credits)
+ 		return;
+@@ -6534,6 +6547,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+ 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
+ }
+ 
++void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
++{
++	if (chan->rx_avail == rx_avail)
++		return;
++
++	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
++
++	chan->rx_avail = rx_avail;
++
++	if (chan->state == BT_CONNECTED)
++		l2cap_chan_le_send_credits(chan);
++}
++
+ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ 	int err;
+@@ -6543,6 +6569,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	/* Wait recv to confirm reception before updating the credits */
+ 	err = chan->ops->recv(chan, skb);
+ 
++	if (err < 0 && chan->rx_avail != -1) {
++		BT_ERR("Queueing received LE L2CAP data failed");
++		l2cap_send_disconn_req(chan, ECONNRESET);
++		return err;
++	}
++
+ 	/* Update credits whenever an SDU is received */
+ 	l2cap_chan_le_send_credits(chan);
+ 
+@@ -6565,7 +6597,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	}
+ 
+ 	chan->rx_credits--;
+-	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
++	BT_DBG("chan %p: rx_credits %u -> %u",
++	       chan, chan->rx_credits + 1, chan->rx_credits);
+ 
+ 	/* Update if remote had run out of credits; this should only happen
+ 	 * if the remote is not using the entire MPS.
+@@ -7453,10 +7486,6 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 	struct l2cap_conn *conn = hcon->l2cap_data;
+ 	int len;
+ 
+-	/* For AMP controller do not create l2cap conn */
+-	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
+-		goto drop;
+-
+ 	if (!conn)
+ 		conn = l2cap_conn_add(hcon);
+ 
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 5cc83f906c123..8645461d45e81 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1131,6 +1131,34 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	return err;
+ }
+ 
++static void l2cap_publish_rx_avail(struct l2cap_chan *chan)
++{
++	struct sock *sk = chan->data;
++	ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc);
++	int expected_skbs, skb_overhead;
++
++	if (avail <= 0) {
++		l2cap_chan_rx_avail(chan, 0);
++		return;
++	}
++
++	if (!chan->mps) {
++		l2cap_chan_rx_avail(chan, -1);
++		return;
++	}
++
++	/* Correct available memory by estimated sk_buff overhead.
++	 * This is significant due to small transfer sizes. However, accept
++	 * at least one full packet if receive space is non-zero.
++	 */
++	expected_skbs = DIV_ROUND_UP(avail, chan->mps);
++	skb_overhead = expected_skbs * sizeof(struct sk_buff);
++	if (skb_overhead < avail)
++		l2cap_chan_rx_avail(chan, avail - skb_overhead);
++	else
++		l2cap_chan_rx_avail(chan, -1);
++}
++
+ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			      size_t len, int flags)
+ {
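
l2cap_publish_rx_avail() above discounts the estimated per-skb metadata
cost before advertising receive space, and publishes -1 ("unknown", i.e.
fall back to one full packet of credits) when the estimate swallows the
whole budget. A worked example; the 232-byte sk_buff size is an
assumption for illustration, since sizeof(struct sk_buff) varies with
kernel configuration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SKB_OVERHEAD 232	/* assumed sizeof(struct sk_buff) */

int main(void)
{
	long avail = 65536;	/* sk_rcvbuf - sk_rmem_alloc */
	long mps = 1004;	/* channel MPS */

	long expected_skbs = DIV_ROUND_UP(avail, mps);	  /* 66 */
	long skb_overhead = expected_skbs * SKB_OVERHEAD; /* 15312 */

	if (skb_overhead < avail)
		printf("publish rx_avail = %ld\n", avail - skb_overhead);
	else
		printf("publish rx_avail = -1 (unknown)\n");
	return 0;
}
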
+@@ -1167,28 +1195,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	else
+ 		err = bt_sock_recvmsg(sock, msg, len, flags);
+ 
+-	if (pi->chan->mode != L2CAP_MODE_ERTM)
++	if (pi->chan->mode != L2CAP_MODE_ERTM &&
++	    pi->chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++	    pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ 		return err;
+ 
+-	/* Attempt to put pending rx data in the socket buffer */
+-
+ 	lock_sock(sk);
+ 
+-	if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+-		goto done;
++	l2cap_publish_rx_avail(pi->chan);
+ 
+-	if (pi->rx_busy_skb) {
+-		if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+-			pi->rx_busy_skb = NULL;
+-		else
++	/* Attempt to put pending rx data in the socket buffer */
++	while (!list_empty(&pi->rx_busy)) {
++		struct l2cap_rx_busy *rx_busy =
++			list_first_entry(&pi->rx_busy,
++					 struct l2cap_rx_busy,
++					 list);
++		if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0)
+ 			goto done;
++		list_del(&rx_busy->list);
++		kfree(rx_busy);
+ 	}
+ 
+ 	/* Restore data flow when half of the receive buffer is
+ 	 * available.  This avoids resending large numbers of
+ 	 * frames.
+ 	 */
+-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
++	if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) &&
++	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ 		l2cap_chan_busy(pi->chan, 0);
+ 
+ done:
+@@ -1449,17 +1482,20 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+ 	struct sock *sk = chan->data;
++	struct l2cap_pinfo *pi = l2cap_pi(sk);
+ 	int err;
+ 
+ 	lock_sock(sk);
+ 
+-	if (l2cap_pi(sk)->rx_busy_skb) {
++	if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
+ 		err = -ENOMEM;
+ 		goto done;
+ 	}
+ 
+ 	if (chan->mode != L2CAP_MODE_ERTM &&
+-	    chan->mode != L2CAP_MODE_STREAMING) {
++	    chan->mode != L2CAP_MODE_STREAMING &&
++	    chan->mode != L2CAP_MODE_LE_FLOWCTL &&
++	    chan->mode != L2CAP_MODE_EXT_FLOWCTL) {
+ 		/* Even if no filter is attached, we could potentially
+ 		 * get errors from security modules, etc.
+ 		 */
+@@ -1470,7 +1506,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ 
+ 	err = __sock_queue_rcv_skb(sk, skb);
+ 
+-	/* For ERTM, handle one skb that doesn't fit into the recv
++	l2cap_publish_rx_avail(chan);
++
++	/* For ERTM and LE, handle an skb that doesn't fit into the recv
+ 	 * buffer.  This is important to do because the data frames
+ 	 * have already been acked, so the skb cannot be discarded.
+ 	 *
+@@ -1479,8 +1517,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	 * acked and reassembled until there is buffer space
+ 	 * available.
+ 	 */
+-	if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+-		l2cap_pi(sk)->rx_busy_skb = skb;
++	if (err < 0 &&
++	    (chan->mode == L2CAP_MODE_ERTM ||
++	     chan->mode == L2CAP_MODE_LE_FLOWCTL ||
++	     chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
++		struct l2cap_rx_busy *rx_busy =
++			kmalloc(sizeof(*rx_busy), GFP_KERNEL);
++		if (!rx_busy) {
++			err = -ENOMEM;
++			goto done;
++		}
++		rx_busy->skb = skb;
++		list_add_tail(&rx_busy->list, &pi->rx_busy);
+ 		l2cap_chan_busy(chan, 1);
+ 		err = 0;
+ 	}
+@@ -1706,6 +1754,8 @@ static const struct l2cap_ops l2cap_chan_ops = {
+ 
+ static void l2cap_sock_destruct(struct sock *sk)
+ {
++	struct l2cap_rx_busy *rx_busy, *next;
++
+ 	BT_DBG("sk %p", sk);
+ 
+ 	if (l2cap_pi(sk)->chan) {
+@@ -1713,9 +1763,10 @@ static void l2cap_sock_destruct(struct sock *sk)
+ 		l2cap_chan_put(l2cap_pi(sk)->chan);
+ 	}
+ 
+-	if (l2cap_pi(sk)->rx_busy_skb) {
+-		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+-		l2cap_pi(sk)->rx_busy_skb = NULL;
++	list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) {
++		kfree_skb(rx_busy->skb);
++		list_del(&rx_busy->list);
++		kfree(rx_busy);
+ 	}
+ 
+ 	skb_queue_purge(&sk->sk_receive_queue);
+@@ -1799,6 +1850,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+ 
+ 	chan->data = sk;
+ 	chan->ops = &l2cap_chan_ops;
++
++	l2cap_publish_rx_avail(chan);
+ }
+ 
+ static struct proto l2cap_proto = {
+@@ -1820,6 +1873,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 	sk->sk_destruct = l2cap_sock_destruct;
+ 	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ 
++	INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy);
++
+ 	chan = l2cap_chan_create();
+ 	if (!chan) {
+ 		sk_free(sk);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 965f621ef865a..80f220b7e19d5 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -443,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ 
+ 	count = 0;
+ 	list_for_each_entry(d, &hci_dev_list, list) {
+-		if (d->dev_type == HCI_PRIMARY &&
+-		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
++		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ 			count++;
+ 	}
+ 
+@@ -468,8 +467,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ 			continue;
+ 
+-		if (d->dev_type == HCI_PRIMARY &&
+-		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
++		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ 			rp->index[count++] = cpu_to_le16(d->id);
+ 			bt_dev_dbg(hdev, "Added hci%u", d->id);
+ 		}
+@@ -503,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+ 
+ 	count = 0;
+ 	list_for_each_entry(d, &hci_dev_list, list) {
+-		if (d->dev_type == HCI_PRIMARY &&
+-		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
++		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ 			count++;
+ 	}
+ 
+@@ -528,8 +525,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+ 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ 			continue;
+ 
+-		if (d->dev_type == HCI_PRIMARY &&
+-		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
++		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ 			rp->index[count++] = cpu_to_le16(d->id);
+ 			bt_dev_dbg(hdev, "Added hci%u", d->id);
+ 		}
+@@ -561,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+ 	read_lock(&hci_dev_list_lock);
+ 
+ 	count = 0;
+-	list_for_each_entry(d, &hci_dev_list, list) {
+-		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
+-			count++;
+-	}
++	list_for_each_entry(d, &hci_dev_list, list)
++		count++;
+ 
+ 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
+ 	if (!rp) {
+@@ -585,16 +579,10 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
+ 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ 			continue;
+ 
+-		if (d->dev_type == HCI_PRIMARY) {
+-			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+-				rp->entry[count].type = 0x01;
+-			else
+-				rp->entry[count].type = 0x00;
+-		} else if (d->dev_type == HCI_AMP) {
+-			rp->entry[count].type = 0x02;
+-		} else {
+-			continue;
+-		}
++		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
++			rp->entry[count].type = 0x01;
++		else
++			rp->entry[count].type = 0x00;
+ 
+ 		rp->entry[count].bus = d->bus;
+ 		rp->entry[count++].index = cpu_to_le16(d->id);
+@@ -9331,23 +9319,14 @@ void mgmt_index_added(struct hci_dev *hdev)
+ 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ 		return;
+ 
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+-			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
+-					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+-			ev.type = 0x01;
+-		} else {
+-			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+-					 HCI_MGMT_INDEX_EVENTS);
+-			ev.type = 0x00;
+-		}
+-		break;
+-	case HCI_AMP:
+-		ev.type = 0x02;
+-		break;
+-	default:
+-		return;
++	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
++		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
++				 HCI_MGMT_UNCONF_INDEX_EVENTS);
++		ev.type = 0x01;
++	} else {
++		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
++				 HCI_MGMT_INDEX_EVENTS);
++		ev.type = 0x00;
+ 	}
+ 
+ 	ev.bus = hdev->bus;
+@@ -9364,25 +9343,16 @@ void mgmt_index_removed(struct hci_dev *hdev)
+ 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ 		return;
+ 
+-	switch (hdev->dev_type) {
+-	case HCI_PRIMARY:
+-		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
++	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+ 
+-		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+-			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
+-					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
+-			ev.type = 0x01;
+-		} else {
+-			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+-					 HCI_MGMT_INDEX_EVENTS);
+-			ev.type = 0x00;
+-		}
+-		break;
+-	case HCI_AMP:
+-		ev.type = 0x02;
+-		break;
+-	default:
+-		return;
++	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
++		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
++				 HCI_MGMT_UNCONF_INDEX_EVENTS);
++		ev.type = 0x01;
++	} else {
++		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
++				 HCI_MGMT_INDEX_EVENTS);
++		ev.type = 0x00;
+ 	}
+ 
+ 	ev.bus = hdev->bus;
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index c366ccc8b3db7..ecac7886988b1 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -27,6 +27,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops);
+ /* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++	enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
+ 	struct net_bridge_mcast_port *pmctx_null = NULL;
+ 	struct net_bridge *br = netdev_priv(dev);
+ 	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
+@@ -38,6 +39,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	const unsigned char *dest;
+ 	u16 vid = 0;
+ 
++	if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
++		kfree_skb_reason(skb, reason);
++		return NETDEV_TX_OK;
++	}
++
+ 	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
+ 	br_tc_skb_miss_set(skb, false);
+ 
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index ee680adcee179..3c66141d34d62 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -78,7 +78,7 @@ static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_v
+ {
+ 	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+ 
+-	if (v->state == state)
++	if (br_vlan_get_state(v) == state)
+ 		return;
+ 
+ 	br_vlan_set_state(v, state);
+@@ -100,11 +100,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	};
+ 	struct net_bridge_vlan_group *vg;
+ 	struct net_bridge_vlan *v;
+-	int err;
++	int err = 0;
+ 
++	rcu_read_lock();
+ 	vg = nbp_vlan_group(p);
+ 	if (!vg)
+-		return 0;
++		goto out;
+ 
+ 	/* MSTI 0 (CST) state changes are notified via the regular
+ 	 * SWITCHDEV_ATTR_ID_PORT_STP_STATE.
+@@ -112,17 +113,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	if (msti) {
+ 		err = switchdev_port_attr_set(p->dev, &attr, extack);
+ 		if (err && err != -EOPNOTSUPP)
+-			return err;
++			goto out;
+ 	}
+ 
+-	list_for_each_entry(v, &vg->vlan_list, vlist) {
++	err = 0;
++	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+ 		if (v->brvlan->msti != msti)
+ 			continue;
+ 
+ 		br_mst_vlan_set_state(p, v, state);
+ 	}
+ 
+-	return 0;
++out:
++	rcu_read_unlock();
++	return err;
+ }
+ 
+ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 331848eca7d31..e8fb4ef8a85f8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10488,8 +10488,9 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+ 			rebroadcast_time = jiffies;
+ 		}
+ 
++		rcu_barrier();
++
+ 		if (!wait) {
+-			rcu_barrier();
+ 			wait = WAIT_REFS_MIN_MSECS;
+ 		} else {
+ 			msleep(wait);
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 7a437f0d41905..7e45c34c8340a 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1683,6 +1683,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
+ 	struct nlmsghdr  *nlh;
+ 	unsigned long tstamp;
+ 	u32 preferred, valid;
++	u32 flags;
+ 
+ 	nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
+ 			args->flags);
+@@ -1692,7 +1693,13 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
+ 	ifm = nlmsg_data(nlh);
+ 	ifm->ifa_family = AF_INET;
+ 	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
+-	ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
++
++	flags = READ_ONCE(ifa->ifa_flags);
++	/* Warning: ifm->ifa_flags is a __u8; it holds only 8 bits.
++	 * The 32-bit value is given in the IFA_FLAGS attribute.
++	 */
++	ifm->ifa_flags = (__u8)flags;
++
+ 	ifm->ifa_scope = ifa->ifa_scope;
+ 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
+ 
+@@ -1701,7 +1708,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
+ 		goto nla_put_failure;
+ 
+ 	tstamp = READ_ONCE(ifa->ifa_tstamp);
+-	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
++	if (!(flags & IFA_F_PERMANENT)) {
+ 		preferred = READ_ONCE(ifa->ifa_preferred_lft);
+ 		valid = READ_ONCE(ifa->ifa_valid_lft);
+ 		if (preferred != INFINITY_LIFE_TIME) {
+@@ -1732,7 +1739,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
+ 	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
+ 	    (ifa->ifa_proto &&
+ 	     nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
+-	    nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
++	    nla_put_u32(skb, IFA_FLAGS, flags) ||
+ 	    (ifa->ifa_rt_priority &&
+ 	     nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
+ 	    put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index e0cef75f85fb9..92511b7fd5249 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2001,7 +2001,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 		     enum skb_drop_reason *reason)
+ {
+-	u32 limit, tail_gso_size, tail_gso_segs;
++	u32 tail_gso_size, tail_gso_segs;
+ 	struct skb_shared_info *shinfo;
+ 	const struct tcphdr *th;
+ 	struct tcphdr *thtail;
+@@ -2010,6 +2010,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	bool fragstolen;
+ 	u32 gso_segs;
+ 	u32 gso_size;
++	u64 limit;
+ 	int delta;
+ 
+ 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+@@ -2107,7 +2108,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	__skb_push(skb, hdrlen);
+ 
+ no_coalesce:
+-	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
++	/* sk->sk_backlog.len is reset only at the end of __release_sock().
++	 * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
++	 * sk_rcvbuf in normal conditions.
++	 */
++	limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
++
++	limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
+ 
+ 	/* Only socket owner can try to collapse/prune rx queues
+ 	 * to reduce memory overhead, so add a little headroom here.
+@@ -2115,6 +2122,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	 */
+ 	limit += 64 * 1024;
+ 
++	limit = min_t(u64, limit, UINT_MAX);
++
+ 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ 		bh_unlock_sock(sk);
+ 		*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
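
The new backlog limit is computed in 64 bits and clamped to UINT_MAX because doubling sk_rcvbuf in 32-bit arithmetic can wrap. A standalone sketch of the difference, using hypothetical buffer sizes:

#include <stdint.h>
#include <stdio.h>

#define SK_RCVBUF 0xC0000000u	/* hypothetical ~3 GiB receive buffer */
#define SK_SNDBUF 0x40000000u	/* hypothetical 1 GiB send buffer */

int main(void)
{
	/* 32-bit arithmetic wraps when sk_rcvbuf is doubled ... */
	uint32_t limit32 = SK_RCVBUF * 2u + (SK_SNDBUF >> 1) + 64u * 1024u;

	/* ... while 64-bit arithmetic plus an explicit clamp does not. */
	uint64_t limit64 = (uint64_t)SK_RCVBUF << 1;
	limit64 += SK_SNDBUF >> 1;
	limit64 += 64u * 1024u;
	if (limit64 > UINT32_MAX)
		limit64 = UINT32_MAX;

	printf("wrapped 32-bit limit: %u\n", limit32);
	printf("clamped 64-bit limit: %llu\n", (unsigned long long)limit64);
	return 0;
}
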
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b32cf2eeeb41d..b5ad0c527c521 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -427,15 +427,21 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ {
+ 	struct sock *sk, *result;
+ 	int score, badness;
++	bool need_rescore;
+ 
+ 	result = NULL;
+ 	badness = 0;
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+-		score = compute_score(sk, net, saddr, sport,
+-				      daddr, hnum, dif, sdif);
++		need_rescore = false;
++rescore:
++		score = compute_score(need_rescore ? result : sk, net, saddr,
++				      sport, daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+ 			badness = score;
+ 
++			if (need_rescore)
++				continue;
++
+ 			if (sk->sk_state == TCP_ESTABLISHED) {
+ 				result = sk;
+ 				continue;
+@@ -456,9 +462,14 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ 			if (IS_ERR(result))
+ 				continue;
+ 
+-			badness = compute_score(result, net, saddr, sport,
+-						daddr, hnum, dif, sdif);
+-
++			/* compute_score is too long a function to be
++			 * inlined, and calling it again here yields
++			 * measurable overhead for some
++			 * workloads. Work around it by jumping
++			 * backwards to rescore 'result'.
++			 */
++			need_rescore = true;
++			goto rescore;
+ 		}
+ 	}
+ 	return result;
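
The rescore label lets the loop re-evaluate a replacement candidate through the same single call site instead of adding a second, non-inlined call to the scoring function. A compact sketch of that goto-based shape; the scoring rule and the replacement step are invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for compute_score(). */
static int score_of(int value)
{
	return value % 7;
}

static int best_candidate(const int *items, int n)
{
	int best = -1, badness = -1;

	for (int i = 0; i < n; i++) {
		bool need_rescore = false;
		int cand = items[i], score;
rescore:
		score = score_of(need_rescore ? best : cand);
		if (score > badness) {
			badness = score;
			if (need_rescore)
				continue;
			if (cand % 2 == 0) {
				/* a side lookup picked a replacement;
				 * jump back to score it via the same call
				 */
				best = cand + 1;
				need_rescore = true;
				goto rescore;
			}
			best = cand;
		}
	}
	return best;
}

int main(void)
{
	int items[] = { 4, 9, 12, 20 };

	printf("best: %d\n", best_candidate(items, 4));
	return 0;
}
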
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index acb4f119e11f0..148bf9e3131a1 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -369,7 +369,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 	 * the source of the fragment, with the Pointer field set to zero.
+ 	 */
+ 	nexthdr = hdr->nexthdr;
+-	if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
++	if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
+ 		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
+ 				IPSTATS_MIB_INHDRERRORS);
+ 		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 35508abd76f43..a31521e270f78 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -551,6 +551,8 @@ int __init seg6_init(void)
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ out_unregister_genl:
++#endif
++#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC)
+ 	genl_unregister_family(&seg6_genl_family);
+ #endif
+ out_unregister_pernet:
+@@ -564,8 +566,9 @@ void seg6_exit(void)
+ 	seg6_hmac_exit();
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
++	seg6_local_exit();
+ 	seg6_iptunnel_exit();
+ #endif
+-	unregister_pernet_subsys(&ip6_segments_ops);
+ 	genl_unregister_family(&seg6_genl_family);
++	unregister_pernet_subsys(&ip6_segments_ops);
+ }
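
The exit path above now mirrors the init path in reverse: the most recently registered piece is torn down first, and the pernet subsystem goes away last. A toy sketch of that ordering discipline:

#include <stdio.h>

static void register_pernet(void)   { puts("register pernet"); }
static void register_genl(void)     { puts("register genl"); }
static void register_local(void)    { puts("register local"); }

static void unregister_pernet(void) { puts("unregister pernet"); }
static void unregister_genl(void)   { puts("unregister genl"); }
static void unregister_local(void)  { puts("unregister local"); }

static void subsys_init(void)
{
	register_pernet();
	register_genl();
	register_local();
}

/* Exit mirrors init in reverse, so nothing is torn down while a
 * later-registered user might still reach it.
 */
static void subsys_exit(void)
{
	unregister_local();
	unregister_genl();
	unregister_pernet();
}

int main(void)
{
	subsys_init();
	subsys_exit();
	return 0;
}
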
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8f7aa8bac1e7b..e0dd5bc2b30eb 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -168,15 +168,21 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ {
+ 	struct sock *sk, *result;
+ 	int score, badness;
++	bool need_rescore;
+ 
+ 	result = NULL;
+ 	badness = -1;
+ 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+-		score = compute_score(sk, net, saddr, sport,
+-				      daddr, hnum, dif, sdif);
++		need_rescore = false;
++rescore:
++		score = compute_score(need_rescore ? result : sk, net, saddr,
++				      sport, daddr, hnum, dif, sdif);
+ 		if (score > badness) {
+ 			badness = score;
+ 
++			if (need_rescore)
++				continue;
++
+ 			if (sk->sk_state == TCP_ESTABLISHED) {
+ 				result = sk;
+ 				continue;
+@@ -197,8 +203,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ 			if (IS_ERR(result))
+ 				continue;
+ 
+-			badness = compute_score(sk, net, saddr, sport,
+-						daddr, hnum, dif, sdif);
++			/* compute_score is too long a function to be
++			 * inlined, and calling it again here yields
++			 * measurable overhead for some
++			 * workloads. Work around it by jumping
++			 * backwards to rescore 'result'.
++			 */
++			need_rescore = true;
++			goto rescore;
+ 		}
+ 	}
+ 	return result;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 8d21ff25f1602..4a0fb8731eee9 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -887,22 +887,20 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 	return 1;
+ }
+ 
+-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+- * Return codes:
+- * 0 : success.
+- * <0: error
+- * >0: skb should be passed up to userspace as UDP.
++/* UDP encapsulation receive and error receive handlers.
++ * See net/ipv4/udp.c for details.
++ *
++ * Note that these functions are called from inside an
++ * RCU-protected region, but without the socket being locked.
++ *
++ * Hence we use rcu_dereference_sk_user_data to access the
++ * tunnel data structure rather than the usual l2tp_sk_to_tunnel
++ * accessor function.
+  */
+ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct l2tp_tunnel *tunnel;
+ 
+-	/* Note that this is called from the encap_rcv hook inside an
+-	 * RCU-protected region, but without the socket being locked.
+-	 * Hence we use rcu_dereference_sk_user_data to access the
+-	 * tunnel data structure rather the usual l2tp_sk_to_tunnel
+-	 * accessor function.
+-	 */
+ 	tunnel = rcu_dereference_sk_user_data(sk);
+ 	if (!tunnel)
+ 		goto pass_up;
+@@ -919,6 +917,29 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+ 
++static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
++				    __be16 port, u32 info, u8 *payload)
++{
++	struct l2tp_tunnel *tunnel;
++
++	tunnel = rcu_dereference_sk_user_data(sk);
++	if (!tunnel || tunnel->fd < 0)
++		return;
++
++	sk->sk_err = err;
++	sk_error_report(sk);
++
++	if (ip_hdr(skb)->version == IPVERSION) {
++		if (inet_test_bit(RECVERR, sk))
++			return ip_icmp_error(sk, skb, err, port, info, payload);
++#if IS_ENABLED(CONFIG_IPV6)
++	} else {
++		if (inet6_test_bit(RECVERR6, sk))
++			return ipv6_icmp_error(sk, skb, err, port, info, payload);
++#endif
++	}
++}
++
+ /************************************************************************
+  * Transmit handling
+  ***********************************************************************/
+@@ -1493,6 +1514,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ 			.sk_user_data = tunnel,
+ 			.encap_type = UDP_ENCAP_L2TPINUDP,
+ 			.encap_rcv = l2tp_udp_encap_recv,
++			.encap_err_rcv = l2tp_udp_encap_err_recv,
+ 			.encap_destroy = l2tp_udp_encap_destroy,
+ 		};
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index f67c1d0218121..07abaf7820c56 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1607,10 +1607,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
+ 	/* abort any running channel switch or color change */
+ 	link_conf->csa_active = false;
+ 	link_conf->color_change_active = false;
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	ieee80211_free_next_beacon(link);
+@@ -3648,7 +3648,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 	struct ieee80211_local *local = sdata->local;
+ 
+-	sdata->csa_blocked_tx = block_tx;
++	sdata->csa_blocked_queues = block_tx;
+ 	sdata_info(sdata, "channel switch failed, disconnecting\n");
+ 	wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
+ }
+@@ -3734,10 +3734,10 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
+ 
+ 	ieee80211_link_info_change_notify(sdata, link_data, changed);
+ 
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	err = drv_post_channel_switch(link_data);
+@@ -4019,7 +4019,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ 	    !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
+ 		ieee80211_stop_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = true;
++		sdata->csa_blocked_queues = true;
+ 	}
+ 
+ 	cfg80211_ch_switch_started_notify(sdata->dev,
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index bd507d6b65e3f..70c67c860e995 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -974,6 +974,7 @@ struct ieee80211_link_data_managed {
+ 
+ 	bool csa_waiting_bcn;
+ 	bool csa_ignored_same_chan;
++	bool csa_blocked_tx;
+ 	struct wiphy_delayed_work chswitch_work;
+ 
+ 	struct wiphy_work request_smps_work;
+@@ -1092,7 +1093,7 @@ struct ieee80211_sub_if_data {
+ 
+ 	unsigned long state;
+ 
+-	bool csa_blocked_tx;
++	bool csa_blocked_queues;
+ 
+ 	char name[IFNAMSIZ];
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 395de62d9cb2d..ef6b0fc82d022 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -544,10 +544,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 	sdata->vif.bss_conf.csa_active = false;
+ 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ 		sdata->deflink.u.mgd.csa_waiting_bcn = false;
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 3bbb216a0fc8c..497677e3d8b27 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1933,13 +1933,14 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
+ 
+ 	WARN_ON(!link->conf->csa_active);
+ 
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	link->conf->csa_active = false;
++	link->u.mgd.csa_blocked_tx = false;
+ 	link->u.mgd.csa_waiting_bcn = false;
+ 
+ 	ret = drv_post_channel_switch(link);
+@@ -1999,13 +2000,14 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
+ 
+ 	ieee80211_link_unreserve_chanctx(link);
+ 
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	link->conf->csa_active = false;
++	link->u.mgd.csa_blocked_tx = false;
+ 
+ 	drv_abort_channel_switch(link);
+ }
+@@ -2165,12 +2167,13 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
+ 	link->csa_chanreq = csa_ie.chanreq;
+ 	link->u.mgd.csa_ignored_same_chan = false;
+ 	link->u.mgd.beacon_crc_valid = false;
++	link->u.mgd.csa_blocked_tx = csa_ie.mode;
+ 
+ 	if (csa_ie.mode &&
+ 	    !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
+ 		ieee80211_stop_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = true;
++		sdata->csa_blocked_queues = true;
+ 	}
+ 
+ 	cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
+@@ -2199,7 +2202,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
+ 	 * reset when the disconnection worker runs.
+ 	 */
+ 	link->conf->csa_active = true;
+-	sdata->csa_blocked_tx =
++	link->u.mgd.csa_blocked_tx = csa_ie.mode;
++	sdata->csa_blocked_queues =
+ 		csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
+ 
+ 	wiphy_work_queue(sdata->local->hw.wiphy,
+@@ -3252,12 +3256,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	sdata->vif.bss_conf.csa_active = false;
++	sdata->deflink.u.mgd.csa_blocked_tx = false;
+ 	sdata->deflink.u.mgd.csa_waiting_bcn = false;
+ 	sdata->deflink.u.mgd.csa_ignored_same_chan = false;
+-	if (sdata->csa_blocked_tx) {
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	/* existing TX TSPEC sessions no longer exist */
+@@ -3563,19 +3568,32 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 	u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+-	bool tx;
++	bool tx = false;
+ 
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+ 
+ 	if (!ifmgd->associated)
+ 		return;
+ 
+-	/*
+-	 * MLO drivers should have HANDLES_QUIET_CSA, so that csa_blocked_tx
+-	 * is always false; if they don't then this may try to transmit the
+-	 * frame but queues will be stopped.
+-	 */
+-	tx = !sdata->csa_blocked_tx;
++	/* only transmit if we have a link that makes that worthwhile */
++	for (unsigned int link_id = 0;
++	     link_id < ARRAY_SIZE(sdata->link);
++	     link_id++) {
++		struct ieee80211_link_data *link;
++
++		if (!ieee80211_vif_link_active(&sdata->vif, link_id))
++			continue;
++
++		link = sdata_dereference(sdata->link[link_id], sdata);
++		if (WARN_ON_ONCE(!link))
++			continue;
++
++		if (link->u.mgd.csa_blocked_tx)
++			continue;
++
++		tx = true;
++		break;
++	}
+ 
+ 	if (!ifmgd->driver_disconnect) {
+ 		unsigned int link_id;
+@@ -3608,10 +3626,11 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
+ 	/* the other links will be destroyed */
+ 	sdata->vif.bss_conf.csa_active = false;
+ 	sdata->deflink.u.mgd.csa_waiting_bcn = false;
+-	if (sdata->csa_blocked_tx) {
++	sdata->deflink.u.mgd.csa_blocked_tx = false;
++	if (sdata->csa_blocked_queues) {
+ 		ieee80211_wake_vif_queues(local, sdata,
+ 					  IEEE80211_QUEUE_STOP_REASON_CSA);
+-		sdata->csa_blocked_tx = false;
++		sdata->csa_blocked_queues = false;
+ 	}
+ 
+ 	ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 73850312580f7..3da1c5c450358 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -708,19 +708,11 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ 		return -EBUSY;
+ 
+ 	/* For an MLO connection, if a link ID was specified, validate that it
+-	 * is indeed active. If no link ID was specified, select one of the
+-	 * active links.
++	 * is indeed active.
+ 	 */
+-	if (ieee80211_vif_is_mld(&sdata->vif)) {
+-		if (req->tsf_report_link_id >= 0) {
+-			if (!(sdata->vif.active_links &
+-			      BIT(req->tsf_report_link_id)))
+-				return -EINVAL;
+-		} else {
+-			req->tsf_report_link_id =
+-				__ffs(sdata->vif.active_links);
+-		}
+-	}
++	if (ieee80211_vif_is_mld(&sdata->vif) && req->tsf_report_link_id >= 0 &&
++	    !(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
++		return -EINVAL;
+ 
+ 	if (!__ieee80211_can_leave_ch(sdata))
+ 		return -EBUSY;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index a10ebf3ee10a1..9d1ee199490bb 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -308,6 +308,9 @@ struct mptcp_sock {
+ 			free_first:1,
+ 			rcvspace_init:1;
+ 	u32		notsent_lowat;
++	int		keepalive_cnt;
++	int		keepalive_idle;
++	int		keepalive_intvl;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+ 	struct rb_root  out_of_order_queue;
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 73fdf423de44e..19ee684f9e401 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -181,8 +181,6 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
+ 
+ 	switch (optname) {
+ 	case SO_KEEPALIVE:
+-		mptcp_sol_socket_sync_intval(msk, optname, val);
+-		return 0;
+ 	case SO_DEBUG:
+ 	case SO_MARK:
+ 	case SO_PRIORITY:
+@@ -624,6 +622,31 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
+ 	return ret;
+ }
+ 
++static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max,
++				      int (*set_val)(struct sock *, int),
++				      int *msk_val, int val)
++{
++	struct mptcp_subflow_context *subflow;
++	int err = 0;
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		int ret;
++
++		lock_sock(ssk);
++		ret = set_val(ssk, val);
++		err = err ? : ret;
++		release_sock(ssk);
++	}
++
++	if (!err) {
++		*msk_val = val;
++		sockopt_seq_inc(msk);
++	}
++
++	return err;
++}
++
+ static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
+ {
+ 	struct mptcp_subflow_context *subflow;
+@@ -820,6 +843,22 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 	case TCP_NODELAY:
+ 		ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
+ 		break;
++	case TCP_KEEPIDLE:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE,
++						 &tcp_sock_set_keepidle_locked,
++						 &msk->keepalive_idle, val);
++		break;
++	case TCP_KEEPINTVL:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL,
++						 &tcp_sock_set_keepintvl,
++						 &msk->keepalive_intvl, val);
++		break;
++	case TCP_KEEPCNT:
++		ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT,
++						 &tcp_sock_set_keepcnt,
++						 &msk->keepalive_cnt,
++						 val);
++		break;
+ 	default:
+ 		ret = -ENOPROTOOPT;
+ 	}
+@@ -1322,6 +1361,8 @@ static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
+ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 				    char __user *optval, int __user *optlen)
+ {
++	struct sock *sk = (void *)msk;
++
+ 	switch (optname) {
+ 	case TCP_ULP:
+ 	case TCP_CONGESTION:
+@@ -1340,6 +1381,18 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+ 		return mptcp_put_int_option(msk, optval, optlen, msk->cork);
+ 	case TCP_NODELAY:
+ 		return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
++	case TCP_KEEPIDLE:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_idle ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ);
++	case TCP_KEEPINTVL:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_intvl ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ);
++	case TCP_KEEPCNT:
++		return mptcp_put_int_option(msk, optval, optlen,
++					    msk->keepalive_cnt ? :
++					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
+ 	case TCP_NOTSENT_LOWAT:
+ 		return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
+ 	}
+@@ -1457,6 +1510,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ 		tcp_set_congestion_control(ssk, msk->ca_name, false, true);
+ 	__tcp_sock_set_cork(ssk, !!msk->cork);
+ 	__tcp_sock_set_nodelay(ssk, !!msk->nodelay);
++	tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
++	tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
++	tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
+ 
+ 	inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
+ 	inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
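
__mptcp_setsockopt_set_val() fans one value out to every subflow, remembers the first error, and caches the value at the MPTCP level only on full success. A standalone sketch of that fan-out-then-commit shape, with the subflows reduced to plain integers:

#include <stdio.h>

/* Invented per-subflow setter: returns 0 on success. */
static int set_keepidle(int *subflow, int val)
{
	if (val <= 0)
		return -1;
	*subflow = val;
	return 0;
}

static int fanout_set(int *subflows, int n,
		      int (*set_val)(int *, int),
		      int *cached, int val)
{
	int err = 0;

	for (int i = 0; i < n; i++) {
		int ret = set_val(&subflows[i], val);

		err = err ? err : ret;	/* first error wins */
	}
	if (!err)
		*cached = val;	/* commit only if every subflow accepted it */
	return err;
}

int main(void)
{
	int subflows[2] = { 0, 0 };
	int cached = 0;

	fanout_set(subflows, 2, set_keepidle, &cached, 600);
	printf("cached keepidle: %d\n", cached);
	return 0;
}
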
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index 70480869ad1c5..bd2b17b219ae9 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -285,22 +285,14 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+ 	return 0;
+ }
+ 
+-static inline void __nr_remove_node(struct nr_node *nr_node)
++static void nr_remove_node_locked(struct nr_node *nr_node)
+ {
++	lockdep_assert_held(&nr_node_list_lock);
++
+ 	hlist_del_init(&nr_node->node_node);
+ 	nr_node_put(nr_node);
+ }
+ 
+-#define nr_remove_node_locked(__node) \
+-	__nr_remove_node(__node)
+-
+-static void nr_remove_node(struct nr_node *nr_node)
+-{
+-	spin_lock_bh(&nr_node_list_lock);
+-	__nr_remove_node(nr_node);
+-	spin_unlock_bh(&nr_node_list_lock);
+-}
+-
+ static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
+ {
+ 	hlist_del_init(&nr_neigh->neigh_node);
+@@ -339,6 +331,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 		return -EINVAL;
+ 	}
+ 
++	spin_lock_bh(&nr_node_list_lock);
+ 	nr_node_lock(nr_node);
+ 	for (i = 0; i < nr_node->count; i++) {
+ 		if (nr_node->routes[i].neighbour == nr_neigh) {
+@@ -352,7 +345,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 			nr_node->count--;
+ 
+ 			if (nr_node->count == 0) {
+-				nr_remove_node(nr_node);
++				nr_remove_node_locked(nr_node);
+ 			} else {
+ 				switch (i) {
+ 				case 0:
+@@ -367,12 +360,14 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
+ 				nr_node_put(nr_node);
+ 			}
+ 			nr_node_unlock(nr_node);
++			spin_unlock_bh(&nr_node_list_lock);
+ 
+ 			return 0;
+ 		}
+ 	}
+ 	nr_neigh_put(nr_neigh);
+ 	nr_node_unlock(nr_node);
++	spin_unlock_bh(&nr_node_list_lock);
+ 	nr_node_put(nr_node);
+ 
+ 	return -EINVAL;
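
The macro wrapper is gone; the function now states its locking contract with lockdep_assert_held(), and nr_del_node() takes the list lock around the whole walk. A rough userspace approximation, where a failing trylock stands in for the lockdep assertion (an imperfect stand-in, since it only shows the lock is held by someone):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int node_count = 3;

/* Approximation of lockdep_assert_held(): trylock must fail if held. */
static void assert_held(pthread_mutex_t *lock)
{
	assert(pthread_mutex_trylock(lock) != 0);
}

/* Must be called with node_list_lock held by the caller. */
static void remove_node_locked(void)
{
	assert_held(&node_list_lock);
	node_count--;
}

static int del_node(void)
{
	pthread_mutex_lock(&node_list_lock);
	remove_node_locked();
	pthread_mutex_unlock(&node_list_lock);
	return node_count;
}

int main(void)
{
	printf("nodes left: %d\n", del_node());
	return 0;
}
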
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 33b21a0c05481..8a848ce72e291 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -561,7 +561,6 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ 	 */
+ 	key->tp.src = htons(icmp->icmp6_type);
+ 	key->tp.dst = htons(icmp->icmp6_code);
+-	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
+ 
+ 	if (icmp->icmp6_code == 0 &&
+ 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+@@ -570,6 +569,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ 		struct nd_msg *nd;
+ 		int offset;
+ 
++		memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
++
+ 		/* In order to process neighbor discovery options, we need the
+ 		 * entire packet.
+ 		 */
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 18f616f487eaa..150451ddd7553 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2522,8 +2522,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 		ts = __packet_set_timestamp(po, ph, skb);
+ 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
+ 
+-		if (!packet_read_pending(&po->tx_ring))
+-			complete(&po->skb_completion);
++		complete(&po->skb_completion);
+ 	}
+ 
+ 	sock_wfree(skb);
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index abb0c70ffc8b0..654a3cc0d3479 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -725,6 +725,24 @@ int qrtr_ns_init(void)
+ 	if (ret < 0)
+ 		goto err_wq;
+ 
++	/* As the qrtr ns socket's owner and creator are the same module, we
++	 * have to decrease the qrtr module reference count to guarantee that
++	 * it remains zero after the ns socket is created; otherwise, the
++	 * "rmmod" command cannot remove the qrtr module once it has been
++	 * inserted successfully.
++	 *
++	 * However, the reference count is increased twice in
++	 * sock_create_kern(): once for the owner of the qrtr socket's
++	 * proto_ops struct, and once for the owner of the qrtr proto
++	 * struct. Therefore, we must decrement the module reference count
++	 * twice to ensure that it stays at zero after the server's listening
++	 * socket is created. Of course, we must bump the module reference
++	 * count twice as well before the socket is closed.
++	 */
++	module_put(qrtr_ns.sock->ops->owner);
++	module_put(qrtr_ns.sock->sk->sk_prot_creator->owner);
++
+ 	return 0;
+ 
+ err_wq:
+@@ -739,6 +757,15 @@ void qrtr_ns_remove(void)
+ {
+ 	cancel_work_sync(&qrtr_ns.work);
+ 	destroy_workqueue(qrtr_ns.workqueue);
++
++	/* sock_release() expects the two references that were put during
++	 * qrtr_ns_init(). This function is only called during module remove,
++	 * so try_stop_module() has already set the refcnt to 0. Use
++	 * __module_get() instead of try_module_get() to successfully take two
++	 * references.
++	 */
++	__module_get(qrtr_ns.sock->ops->owner);
++	__module_get(qrtr_ns.sock->sk->sk_prot_creator->owner);
+ 	sock_release(qrtr_ns.sock);
+ }
+ EXPORT_SYMBOL_GPL(qrtr_ns_remove);
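
The two comments above describe dropping a pair of module references right after the socket is created and re-taking them just before sock_release() so its internal puts stay balanced. An abstract sketch of that get/put choreography, with a toy counter in place of the module refcount:

#include <assert.h>
#include <stdio.h>

static int refcnt;	/* toy stand-in for a module reference count */

static void ref_get(void) { refcnt++; }
static void ref_put(void) { assert(refcnt > 0); refcnt--; }

int main(void)
{
	/* socket creation takes two references ... */
	ref_get();
	ref_get();
	/* ... which the creator drops at init time so the observed
	 * count stays at zero while the socket lives ...
	 */
	ref_put();
	ref_put();

	/* ... and re-takes at remove time so the release path's own
	 * two puts do not underflow.
	 */
	ref_get();
	ref_get();
	ref_put();	/* done inside the release path */
	ref_put();

	printf("final refcnt: %d\n", refcnt);
	return 0;
}
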
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 24de941847003..96ab50eda9c2e 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1033,17 +1033,11 @@ svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
+ 
+ static void gss_free_in_token_pages(struct gssp_in_token *in_token)
+ {
+-	u32 inlen;
+ 	int i;
+ 
+ 	i = 0;
+-	inlen = in_token->page_len;
+-	while (inlen) {
+-		if (in_token->pages[i])
+-			put_page(in_token->pages[i]);
+-		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
+-	}
+-
++	while (in_token->pages[i])
++		put_page(in_token->pages[i++]);
+ 	kfree(in_token->pages);
+ 	in_token->pages = NULL;
+ }
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index b33e429336fb7..2b4b1276d4e86 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1265,8 +1265,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
+ 	if (rqstp->rq_proc >= versp->vs_nproc)
+ 		goto err_bad_proc;
+ 	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
+-	if (!procp)
+-		goto err_bad_proc;
+ 
+ 	/* Initialize storage for argp and resp */
+ 	memset(rqstp->rq_argp, 0, procp->pc_argzero);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9a6ad5974dff5..e94839d89b09d 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2270,7 +2270,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			goto out_err;
+ 	}
+ 
+-	if (sk->sk_shutdown & SEND_SHUTDOWN)
++	if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
+ 		goto pipe_err;
+ 
+ 	while (sent < len) {
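
sk_shutdown can be written by another CPU while the sender reads it without the socket lock, hence the READ_ONCE() annotation. A userspace analogue using a relaxed C11 atomic load, which likewise guarantees a single untorn read; SEND_SHUTDOWN is redefined locally for the sketch:

#include <stdatomic.h>
#include <stdio.h>

#define SEND_SHUTDOWN 0x2u

static _Atomic unsigned int sk_shutdown;

static int can_send(void)
{
	unsigned int shutdown =
		atomic_load_explicit(&sk_shutdown, memory_order_relaxed);

	return !(shutdown & SEND_SHUTDOWN);
}

int main(void)
{
	printf("can send: %d\n", can_send());
	atomic_store_explicit(&sk_shutdown, SEND_SHUTDOWN,
			      memory_order_relaxed);
	printf("can send: %d\n", can_send());
	return 0;
}
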
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 30ff9a4708134..65c416e8d25eb 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -9162,6 +9162,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ 	struct wiphy *wiphy;
+ 	int err, tmp, n_ssids = 0, n_channels, i;
+ 	size_t ie_len, size;
++	size_t ssids_offset, ie_offset;
+ 
+ 	wiphy = &rdev->wiphy;
+ 
+@@ -9207,21 +9208,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 
+ 	size = struct_size(request, channels, n_channels);
++	ssids_offset = size;
+ 	size = size_add(size, array_size(sizeof(*request->ssids), n_ssids));
++	ie_offset = size;
+ 	size = size_add(size, ie_len);
+ 	request = kzalloc(size, GFP_KERNEL);
+ 	if (!request)
+ 		return -ENOMEM;
++	request->n_channels = n_channels;
+ 
+ 	if (n_ssids)
+-		request->ssids = (void *)&request->channels[n_channels];
++		request->ssids = (void *)request + ssids_offset;
+ 	request->n_ssids = n_ssids;
+-	if (ie_len) {
+-		if (n_ssids)
+-			request->ie = (void *)(request->ssids + n_ssids);
+-		else
+-			request->ie = (void *)(request->channels + n_channels);
+-	}
++	if (ie_len)
++		request->ie = (void *)request + ie_offset;
+ 
+ 	i = 0;
+ 	if (scan_freqs) {
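
The allocation above records each trailing region's offset while summing the total size, then derives every pointer from the base address instead of chaining pointer arithmetic region by region. A standalone sketch of that layout scheme; plain calloc() is used here, whereas the kernel also goes through overflow-checking size helpers such as size_add():

#include <stdio.h>
#include <stdlib.h>

struct request {
	size_t n_channels;
	int *channels;
	char *ssids;
	char *ie;
};

int main(void)
{
	size_t n_channels = 4, n_ssids = 2, ie_len = 8;

	/* record each region's offset while summing the total size ... */
	size_t size = sizeof(struct request);
	size_t channels_offset = size;
	size += n_channels * sizeof(int);
	size_t ssids_offset = size;
	size += n_ssids;
	size_t ie_offset = size;
	size += ie_len;

	struct request *req = calloc(1, size);
	if (!req)
		return 1;

	/* ... then derive every pointer from base plus offset */
	req->n_channels = n_channels;
	req->channels = (int *)((char *)req + channels_offset);
	req->ssids = (char *)req + ssids_offset;
	req->ie = (char *)req + ie_offset;

	printf("total size: %zu\n", size);
	free(req);
	return 0;
}
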
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 5a5dd3ce497fc..9b0dbcd6cf79a 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2207,12 +2207,16 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ 	tmp.pub.use_for = data->use_for;
+ 	tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
+ 
+-	if (data->bss_source != BSS_SOURCE_DIRECT) {
++	switch (data->bss_source) {
++	case BSS_SOURCE_MBSSID:
+ 		tmp.pub.transmitted_bss = data->source_bss;
++		fallthrough;
++	case BSS_SOURCE_STA_PROFILE:
+ 		ts = bss_from_pub(data->source_bss)->ts;
+ 		tmp.pub.bssid_index = data->bssid_index;
+ 		tmp.pub.max_bssid_indicator = data->max_bssid_indicator;
+-	} else {
++		break;
++	case BSS_SOURCE_DIRECT:
+ 		ts = jiffies;
+ 
+ 		if (channel->band == NL80211_BAND_60GHZ) {
+@@ -2227,6 +2231,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ 				regulatory_hint_found_beacon(wiphy, channel,
+ 							     gfp);
+ 		}
++		break;
+ 	}
+ 
+ 	/*
+@@ -2655,6 +2660,7 @@ struct tbtt_info_iter_data {
+ 	u8 param_ch_count;
+ 	u32 use_for;
+ 	u8 mld_id, link_id;
++	bool non_tx;
+ };
+ 
+ static enum cfg80211_rnr_iter_ret
+@@ -2665,14 +2671,20 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
+ 	const struct ieee80211_rnr_mld_params *mld_params;
+ 	struct tbtt_info_iter_data *data = _data;
+ 	u8 link_id;
++	bool non_tx = false;
+ 
+ 	if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+ 	    tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+-					 mld_params))
+-		mld_params = (void *)(tbtt_info +
+-				      offsetof(struct ieee80211_tbtt_info_ge_11,
+-					       mld_params));
+-	else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
++					 mld_params)) {
++		const struct ieee80211_tbtt_info_ge_11 *tbtt_info_ge_11 =
++			(void *)tbtt_info;
++
++		non_tx = (tbtt_info_ge_11->bss_params &
++			  (IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID |
++			   IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID)) ==
++			 IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
++		mld_params = &tbtt_info_ge_11->mld_params;
++	} else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ 		 tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
+ 		mld_params = (void *)tbtt_info;
+ 	else
+@@ -2691,6 +2703,7 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
+ 	data->param_ch_count =
+ 		le16_get_bits(mld_params->params,
+ 			      IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
++	data->non_tx = non_tx;
+ 
+ 	if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
+ 		data->use_for = NL80211_BSS_USE_FOR_ALL;
+@@ -2702,7 +2715,7 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
+ static u8
+ cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ 			     const struct ieee80211_neighbor_ap_info **ap_info,
+-			     u8 *param_ch_count)
++			     u8 *param_ch_count, bool *non_tx)
+ {
+ 	struct tbtt_info_iter_data data = {
+ 		.mld_id = mld_id,
+@@ -2713,6 +2726,7 @@ cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ 
+ 	*ap_info = data.ap_info;
+ 	*param_ch_count = data.param_ch_count;
++	*non_tx = data.non_tx;
+ 
+ 	return data.use_for;
+ }
+@@ -2892,6 +2906,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
+ 		ssize_t profile_len;
+ 		u8 param_ch_count;
+ 		u8 link_id, use_for;
++		bool non_tx;
+ 
+ 		if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
+ 							  mle->sta_prof_len[i]))
+@@ -2937,10 +2952,24 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
+ 						       tx_data->ielen,
+ 						       mld_id, link_id,
+ 						       &ap_info,
+-						       &param_ch_count);
++						       &param_ch_count,
++						       &non_tx);
+ 		if (!use_for)
+ 			continue;
+ 
++		/*
++		 * As of 802.11be_D5.0, the specification does not give us any
++		 * way of discovering both the MaxBSSID and the Multiple-BSSID
++		 * Index. It does seem like the Multiple-BSSID Index element
++		 * may be provided, but section 9.4.2.45 explicitly forbids
++		 * including a Multiple-BSSID Element (in this case without any
++		 * subelements).
++		 * Without both pieces of information we cannot calculate the
++		 * reference BSSID, so simply ignore the BSS.
++		 */
++		if (non_tx)
++			continue;
++
+ 		/* We could sanity check the BSSID is included */
+ 
+ 		if (!ieee80211_operating_class_to_band(ap_info->op_class,
+diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
+index 32e930c853bba..8b8ecd65c28c4 100644
+--- a/samples/landlock/sandboxer.c
++++ b/samples/landlock/sandboxer.c
+@@ -153,7 +153,7 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
+ 				const __u64 allowed_access)
+ {
+ 	int ret = 1;
+-	char *env_port_name, *strport;
++	char *env_port_name, *env_port_name_next, *strport;
+ 	struct landlock_net_port_attr net_port = {
+ 		.allowed_access = allowed_access,
+ 		.port = 0,
+@@ -165,7 +165,8 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
+ 	env_port_name = strdup(env_port_name);
+ 	unsetenv(env_var);
+ 
+-	while ((strport = strsep(&env_port_name, ENV_DELIMITER))) {
++	env_port_name_next = env_port_name;
++	while ((strport = strsep(&env_port_name_next, ENV_DELIMITER))) {
+ 		net_port.port = atoi(strport);
+ 		if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
+ 				      &net_port, 0)) {
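
strsep() advances the pointer it is handed, so the fix walks a separate cursor and keeps the strdup()'d original intact for the later free(). A minimal demonstration (strsep() is a BSD/glibc extension rather than ISO C):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *ports = strdup("80:443:8080");
	char *cursor = ports;	/* strsep() will consume this copy */
	char *tok;

	if (!ports)
		return 1;

	while ((tok = strsep(&cursor, ":")))
		printf("port: %s\n", tok);

	free(ports);	/* still points at the start of the allocation */
	return 0;
}
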
+diff --git a/scripts/module.lds.S b/scripts/module.lds.S
+index bf5bcf2836d81..89ff01a22634f 100644
+--- a/scripts/module.lds.S
++++ b/scripts/module.lds.S
+@@ -13,6 +13,7 @@ SECTIONS {
+ 	/DISCARD/ : {
+ 		*(.discard)
+ 		*(.discard.*)
++		*(.export_symbol)
+ 	}
+ 
+ 	__ksymtab		0 : { *(SORT(___ksymtab+*)) }
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 4ed5037d8693b..66d7265fea920 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -313,8 +313,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
+ 	card->number = idx;
+ #ifdef MODULE
+ 	WARN_ON(!module);
+-	card->module = module;
+ #endif
++	card->module = module;
+ 	INIT_LIST_HEAD(&card->devices);
+ 	init_rwsem(&card->controls_rwsem);
+ 	rwlock_init(&card->ctl_files_rwlock);
+@@ -516,6 +516,14 @@ void snd_card_disconnect(struct snd_card *card)
+ 		}
+ 	}
+ 
++#ifdef CONFIG_PM
++	/* wake up sleepers here before other callbacks to avoid potential
++	 * deadlocks with other locks (e.g. in kctls);
++	 * this notifies the shutdown so that sleepers abort immediately
++	 */
++	wake_up_all(&card->power_sleep);
++#endif
++
+ 	/* notify all connected devices about disconnection */
+ 	/* at this point, they cannot respond to any calls except release() */
+ 
+@@ -543,7 +551,6 @@ void snd_card_disconnect(struct snd_card *card)
+ 	}
+ 
+ #ifdef CONFIG_PM
+-	wake_up(&card->power_sleep);
+ 	snd_power_sync_ref(card);
+ #endif
+ }
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 4d2ee99c12a3f..d104adc75a8b0 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -544,6 +544,14 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
+ 			     SNDRV_TIMER_IFLG_START))
+ 		return -EBUSY;
+ 
++	/* check the actual time for the start tick;
++	 * bail out with an error if it is way too low (< 100us)
++	 */
++	if (start) {
++		if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000)
++			return -EINVAL;
++	}
++
+ 	if (start)
+ 		timeri->ticks = timeri->cticks = ticks;
+ 	else if (!timeri->cticks)
+diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
+index 8fb688e414148..4f5e581cdd5ff 100644
+--- a/sound/pci/hda/cs35l41_hda_property.c
++++ b/sound/pci/hda/cs35l41_hda_property.c
+@@ -110,8 +110,8 @@ static const struct cs35l41_config cs35l41_config_table[] = {
+ 	{ "10431F62", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ 	{ "10433A60", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
+ 	{ "17AA386F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+-	{ "17AA3877", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+-	{ "17AA3878", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
++	{ "17AA3877", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
++	{ "17AA3878", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+ 	{ "17AA38A9", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
+ 	{ "17AA38AB", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
+ 	{ "17AA38B4", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b29739bd330b1..3b8b4ab488a61 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10180,8 +10180,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
+index d6f7f046c24e5..f634261e4f604 100644
+--- a/sound/soc/intel/avs/boards/ssm4567.c
++++ b/sound/soc/intel/avs/boards/ssm4567.c
+@@ -172,7 +172,6 @@ static int avs_ssm4567_probe(struct platform_device *pdev)
+ 	card->dapm_routes = card_base_routes;
+ 	card->num_dapm_routes = ARRAY_SIZE(card_base_routes);
+ 	card->fully_routed = true;
+-	card->disable_route_checks = true;
+ 
+ 	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
+ 	if (ret)
+diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
+index d7a9390b5e483..585579840b646 100644
+--- a/sound/soc/intel/avs/cldma.c
++++ b/sound/soc/intel/avs/cldma.c
+@@ -35,7 +35,7 @@ struct hda_cldma {
+ 
+ 	unsigned int buffer_size;
+ 	unsigned int num_periods;
+-	unsigned int stream_tag;
++	unsigned char stream_tag;
+ 	void __iomem *sd_addr;
+ 
+ 	struct snd_dma_buffer dmab_data;
+diff --git a/sound/soc/intel/avs/icl.c b/sound/soc/intel/avs/icl.c
+index d2554c8577326..284d38f3b1caf 100644
+--- a/sound/soc/intel/avs/icl.c
++++ b/sound/soc/intel/avs/icl.c
+@@ -66,7 +66,7 @@ struct avs_icl_memwnd2 {
+ 		struct avs_icl_memwnd2_desc slot_desc[AVS_ICL_MEMWND2_SLOTS_COUNT];
+ 		u8 rsvd[SZ_4K];
+ 	};
+-	u8 slot_array[AVS_ICL_MEMWND2_SLOTS_COUNT][PAGE_SIZE];
++	u8 slot_array[AVS_ICL_MEMWND2_SLOTS_COUNT][SZ_4K];
+ } __packed;
+ 
+ #define AVS_ICL_SLOT_UNUSED \
+@@ -89,8 +89,7 @@ static int avs_icl_slot_offset(struct avs_dev *adev, union avs_icl_memwnd2_slot_
+ 
+ 	for (i = 0; i < AVS_ICL_MEMWND2_SLOTS_COUNT; i++)
+ 		if (desc[i].slot_id.val == slot_type.val)
+-			return offsetof(struct avs_icl_memwnd2, slot_array) +
+-			       avs_skl_log_buffer_offset(adev, i);
++			return offsetof(struct avs_icl_memwnd2, slot_array) + i * SZ_4K;
+ 	return -ENXIO;
+ }
+ 
+diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
+index e785fc2a7008f..a44ed33b56080 100644
+--- a/sound/soc/intel/avs/path.c
++++ b/sound/soc/intel/avs/path.c
+@@ -367,6 +367,7 @@ static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
+ 	struct avs_tplg_module *t = mod->template;
+ 	struct avs_asrc_cfg cfg;
+ 
++	memset(&cfg, 0, sizeof(cfg));
+ 	cfg.base.cpc = t->cfg_base->cpc;
+ 	cfg.base.ibs = t->cfg_base->ibs;
+ 	cfg.base.obs = t->cfg_base->obs;
+diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
+index 2cafbc392cdbe..72f1bc3b7b1fe 100644
+--- a/sound/soc/intel/avs/pcm.c
++++ b/sound/soc/intel/avs/pcm.c
+@@ -356,6 +356,7 @@ static int avs_dai_hda_be_prepare(struct snd_pcm_substream *substream, struct sn
+ 					   stream_info->sig_bits);
+ 	format_val = snd_hdac_stream_format(runtime->channels, bits, runtime->rate);
+ 
++	snd_hdac_ext_stream_decouple(bus, link_stream, true);
+ 	snd_hdac_ext_stream_reset(link_stream);
+ 	snd_hdac_ext_stream_setup(link_stream, format_val);
+ 
+@@ -611,6 +612,7 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so
+ 	struct avs_dev *adev = to_avs_dev(dai->dev);
+ 	struct hdac_ext_stream *host_stream;
+ 	unsigned int format_val;
++	struct hdac_bus *bus;
+ 	unsigned int bits;
+ 	int ret;
+ 
+@@ -620,6 +622,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so
+ 	if (hdac_stream(host_stream)->prepared)
+ 		return 0;
+ 
++	bus = hdac_stream(host_stream)->bus;
++	snd_hdac_ext_stream_decouple(bus, data->host_stream, true);
+ 	snd_hdac_stream_reset(hdac_stream(host_stream));
+ 
+ 	stream_info = snd_soc_dai_get_pcm_stream(dai, substream->stream);
+diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c
+index 817e543036f29..7e781a3156909 100644
+--- a/sound/soc/intel/avs/probes.c
++++ b/sound/soc/intel/avs/probes.c
+@@ -19,8 +19,11 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
+ 	struct avs_probe_cfg cfg = {{0}};
+ 	struct avs_module_entry mentry;
+ 	u8 dummy;
++	int ret;
+ 
+-	avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++	ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++	if (ret)
++		return ret;
+ 
+ 	/*
+ 	 * Probe module uses no cycles, audio data format and input and output
+@@ -39,11 +42,12 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
+ static void avs_dsp_delete_probe(struct avs_dev *adev)
+ {
+ 	struct avs_module_entry mentry;
++	int ret;
+ 
+-	avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
+-
+-	/* There is only ever one probe module instance. */
+-	avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
++	ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
++	if (!ret)
++		/* There is only ever one probe module instance. */
++		avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
+ }
+ 
+ static inline struct hdac_ext_stream *avs_compr_get_host_stream(struct snd_compr_stream *cstream)
+diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+index 540f7a29310a9..3fe3f38c6cb69 100644
+--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
+@@ -768,6 +768,7 @@ static struct snd_soc_card broxton_audio_card = {
+ 	.dapm_routes = audio_map,
+ 	.num_dapm_routes = ARRAY_SIZE(audio_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = bxt_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index c0eb65c14aa97..afc499be8db26 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -574,6 +574,7 @@ static struct snd_soc_card broxton_rt298 = {
+ 	.dapm_routes = broxton_rt298_map,
+ 	.num_dapm_routes = ARRAY_SIZE(broxton_rt298_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = bxt_card_late_probe,
+ 
+ };
+diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+index 657e4658234ce..4098b2d32f9bc 100644
+--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
++++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
+@@ -649,6 +649,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
+ 	card = &glk_audio_card_rt5682_m98357a;
+ 	card->dev = &pdev->dev;
+ 	snd_soc_card_set_drvdata(card, ctx);
++	if (!snd_soc_acpi_sof_parent(&pdev->dev))
++		card->disable_route_checks = true;
+ 
+ 	/* override platform name, if required */
+ 	mach = pdev->dev.platform_data;
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+index a5d8965303a88..9dbc15f9d1c9b 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
+@@ -639,6 +639,7 @@ static struct snd_soc_card kabylake_audio_card_da7219_m98357a = {
+ 	.dapm_routes = kabylake_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
+index 98c11ec0adc01..e662da5af83b5 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
+@@ -1036,6 +1036,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1054,6 +1055,7 @@ static struct snd_soc_card kbl_audio_card_max98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1071,6 +1073,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
+ 	.codec_conf = max98373_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98373_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -1088,6 +1091,7 @@ static struct snd_soc_card kbl_audio_card_max98373 = {
+ 	.codec_conf = max98373_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98373_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
+index 30e0aca161cd5..894d127c482a3 100644
+--- a/sound/soc/intel/boards/kbl_rt5660.c
++++ b/sound/soc/intel/boards/kbl_rt5660.c
+@@ -518,6 +518,7 @@ static struct snd_soc_card kabylake_audio_card_rt5660 = {
+ 	.dapm_routes = kabylake_rt5660_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_rt5660_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+index 9071b1f1cbd00..646e8ff8e9619 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+@@ -966,6 +966,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663_m98927 = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+@@ -982,6 +983,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663 = {
+ 	.dapm_routes = kabylake_5663_map,
+ 	.num_dapm_routes = ARRAY_SIZE(kabylake_5663_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 178fe9c37df62..924d5d1de03ac 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -791,6 +791,7 @@ static struct snd_soc_card kabylake_audio_card = {
+ 	.codec_conf = max98927_codec_conf,
+ 	.num_configs = ARRAY_SIZE(max98927_codec_conf),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = kabylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index 6e172719c9795..4aa7fd2a05e46 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -227,6 +227,8 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
+ 	ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+ 
+ 	hda_soc_card.dev = &pdev->dev;
++	if (!snd_soc_acpi_sof_parent(&pdev->dev))
++		hda_soc_card.disable_route_checks = true;
+ 
+ 	if (mach->mach_params.dmic_num > 0) {
+ 		snprintf(hda_soc_components, sizeof(hda_soc_components),
+diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+index 0e7025834594a..e4630c33176e2 100644
+--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
++++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+@@ -654,6 +654,7 @@ static struct snd_soc_card skylake_audio_card = {
+ 	.dapm_routes = skylake_map,
+ 	.num_dapm_routes = ARRAY_SIZE(skylake_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = skylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
+index c59c60e280916..9a80442749081 100644
+--- a/sound/soc/intel/boards/skl_rt286.c
++++ b/sound/soc/intel/boards/skl_rt286.c
+@@ -523,6 +523,7 @@ static struct snd_soc_card skylake_rt286 = {
+ 	.dapm_routes = skylake_rt286_map,
+ 	.num_dapm_routes = ARRAY_SIZE(skylake_rt286_map),
+ 	.fully_routed = true,
++	.disable_route_checks = true,
+ 	.late_probe = skylake_card_late_probe,
+ };
+ 
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index dd2f806526c10..ef00792e1d49a 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -182,6 +182,9 @@ static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ 	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ 	unsigned long addr = substream->runtime->dma_addr;
+ 
++	if (!dram)
++		return 0;
++
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ 		kirkwood_dma_conf_mbus_windows(priv->io,
+ 			KIRKWOOD_PLAYBACK_WIN, addr, dram);
+diff --git a/sound/soc/mediatek/common/mtk-soundcard-driver.c b/sound/soc/mediatek/common/mtk-soundcard-driver.c
+index a58e1e3674dec..000a086a8cf44 100644
+--- a/sound/soc/mediatek/common/mtk-soundcard-driver.c
++++ b/sound/soc/mediatek/common/mtk-soundcard-driver.c
+@@ -22,7 +22,11 @@ static int set_card_codec_info(struct snd_soc_card *card,
+ 
+ 	codec_node = of_get_child_by_name(sub_node, "codec");
+ 	if (!codec_node) {
+-		dev_dbg(dev, "%s no specified codec\n", dai_link->name);
++		dev_dbg(dev, "%s no specified codec: setting dummy.\n", dai_link->name);
++
++		dai_link->codecs = &snd_soc_dummy_dlc;
++		dai_link->num_codecs = 1;
++		dai_link->dynamic = 1;
+ 		return 0;
+ 	}
+ 
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index c1682bcdb5a66..6a39ca632f55e 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -439,10 +439,17 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 			  int link_id)
+ {
+ 	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
++	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ 	const struct hda_dai_widget_dma_ops *ops;
++	struct snd_soc_dai_link_ch_map *ch_maps;
+ 	struct hdac_ext_stream *hext_stream;
++	struct snd_soc_dai *dai;
+ 	struct snd_sof_dev *sdev;
++	bool cpu_dai_found = false;
++	int cpu_dai_id;
++	int ch_mask;
+ 	int ret;
++	int j;
+ 
+ 	ret = non_hda_dai_hw_params(substream, params, cpu_dai);
+ 	if (ret < 0) {
+@@ -457,9 +464,29 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 	if (!hext_stream)
+ 		return -ENODEV;
+ 
+-	/* in the case of SoundWire we need to program the PCMSyCM registers */
++	/*
++	 * In the case of SoundWire we need to program the PCMSyCM registers. In the
++	 * case of aggregated devices, we need to define the channel mask for each
++	 * sublink by reconstructing the split done in soc-pcm.c.
++	 */
++	for_each_rtd_cpu_dais(rtd, cpu_dai_id, dai) {
++		if (dai == cpu_dai) {
++			cpu_dai_found = true;
++			break;
++		}
++	}
++
++	if (!cpu_dai_found)
++		return -ENODEV;
++
++	ch_mask = 0;
++	for_each_link_ch_maps(rtd->dai_link, j, ch_maps) {
++		if (ch_maps->cpu == cpu_dai_id)
++			ch_mask |= ch_maps->ch_mask;
++	}
++
+ 	ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
+-					     GENMASK(params_channels(params) - 1, 0),
++					     ch_mask,
+ 					     hdac_stream(hext_stream)->stream_tag,
+ 					     substream->stream);
+ 	if (ret < 0) {
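
For aggregated SoundWire devices the channel mask is rebuilt per sublink from the dai_link channel maps rather than assumed to cover every channel. A standalone sketch of that reconstruction, with invented channel maps:

#include <stdio.h>

struct ch_map {
	int cpu;
	unsigned int ch_mask;
};

/* OR together the mask of every map entry that targets this CPU DAI. */
static unsigned int mask_for_cpu(const struct ch_map *maps, int n, int cpu)
{
	unsigned int ch_mask = 0;

	for (int i = 0; i < n; i++)
		if (maps[i].cpu == cpu)
			ch_mask |= maps[i].ch_mask;
	return ch_mask;
}

int main(void)
{
	/* hypothetical 4-channel stream split across two sublinks */
	struct ch_map maps[] = {
		{ .cpu = 0, .ch_mask = 0x3 },	/* channels 0-1 */
		{ .cpu = 1, .ch_mask = 0xc },	/* channels 2-3 */
	};

	printf("cpu0 mask: %#x\n", mask_for_cpu(maps, 2, 0));
	printf("cpu1 mask: %#x\n", mask_for_cpu(maps, 2, 1));
	return 0;
}
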
+diff --git a/sound/soc/sof/intel/lnl.c b/sound/soc/sof/intel/lnl.c
+index aeb4350cce6bb..6055a33bb4bf6 100644
+--- a/sound/soc/sof/intel/lnl.c
++++ b/sound/soc/sof/intel/lnl.c
+@@ -16,6 +16,7 @@
+ #include "hda-ipc.h"
+ #include "../sof-audio.h"
+ #include "mtl.h"
++#include "lnl.h"
+ #include <sound/hda-mlink.h>
+ 
+ /* LunarLake ops */
+@@ -208,7 +209,7 @@ const struct sof_intel_dsp_desc lnl_chip_info = {
+ 	.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ 	.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ 	.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+-	.rom_status_reg = MTL_DSP_ROM_STS,
++	.rom_status_reg = LNL_DSP_REG_HFDSC,
+ 	.rom_init_timeout = 300,
+ 	.ssp_count = MTL_SSP_COUNT,
+ 	.d0i3_offset = MTL_HDA_VS_D0I3C,
+diff --git a/sound/soc/sof/intel/lnl.h b/sound/soc/sof/intel/lnl.h
+new file mode 100644
+index 0000000000000..4f4734fe7e089
+--- /dev/null
++++ b/sound/soc/sof/intel/lnl.h
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
++/*
++ * This file is provided under a dual BSD/GPLv2 license.  When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * Copyright(c) 2024 Intel Corporation. All rights reserved.
++ */
++
++#ifndef __SOF_INTEL_LNL_H
++#define __SOF_INTEL_LNL_H
++
++#define LNL_DSP_REG_HFDSC		0x160200 /* DSP core0 status */
++#define LNL_DSP_REG_HFDEC		0x160204 /* DSP core0 error */
++
++#endif /* __SOF_INTEL_LNL_H */
+diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
+index 060c34988e90d..05023763080d9 100644
+--- a/sound/soc/sof/intel/mtl.c
++++ b/sound/soc/sof/intel/mtl.c
+@@ -439,7 +439,7 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+ {
+ 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ 	const struct sof_intel_dsp_desc *chip = hda->desc;
+-	unsigned int status;
++	unsigned int status, target_status;
+ 	u32 ipc_hdr, flags;
+ 	char *dump_msg;
+ 	int ret;
+@@ -485,13 +485,40 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+ 
+ 	mtl_enable_ipc_interrupts(sdev);
+ 
++	if (chip->rom_status_reg == MTL_DSP_ROM_STS) {
++		/*
++		 * Workaround: when the ROM status register is pointing to
++		 * the SRAM window (MTL_DSP_ROM_STS) the platform cannot catch
++		 * ROM_INIT_DONE because of a very short timing window.
++		 * Follow the recommendations and skip target state waiting.
++		 */
++		return 0;
++	}
++
+ 	/*
+-	 * ACE workaround: don't wait for ROM INIT.
+-	 * The platform cannot catch ROM_INIT_DONE because of a very short
+-	 * timing window. Follow the recommendations and skip this part.
++	 * step 7:
++	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
++	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
+ 	 */
++	if (imr_boot)
++		target_status = FSR_STATE_FW_ENTERED;
++	else
++		target_status = FSR_STATE_INIT_DONE;
+ 
+-	return 0;
++	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
++					chip->rom_status_reg, status,
++					(FSR_TO_STATE_CODE(status) == target_status),
++					HDA_DSP_REG_POLL_INTERVAL_US,
++					chip->rom_init_timeout *
++					USEC_PER_MSEC);
++
++	if (!ret)
++		return 0;
++
++	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
++		dev_err(sdev->dev,
++			"%s: timeout with rom_status_reg (%#x) read\n",
++			__func__, chip->rom_status_reg);
+ 
+ err:
+ 	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
+@@ -503,6 +530,7 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
+ 	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
+ 			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
+ 	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
++	mtl_enable_interrupts(sdev, false);
+ 	mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
+ 
+ 	kfree(dump_msg);
+@@ -727,7 +755,7 @@ const struct sof_intel_dsp_desc mtl_chip_info = {
+ 	.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ 	.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ 	.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+-	.rom_status_reg = MTL_DSP_ROM_STS,
++	.rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
+ 	.rom_init_timeout	= 300,
+ 	.ssp_count = MTL_SSP_COUNT,
+ 	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
+@@ -755,7 +783,7 @@ const struct sof_intel_dsp_desc arl_s_chip_info = {
+ 	.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
+ 	.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
+ 	.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
+-	.rom_status_reg = MTL_DSP_ROM_STS,
++	.rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
+ 	.rom_init_timeout	= 300,
+ 	.ssp_count = MTL_SSP_COUNT,
+ 	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
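
The mtl.c hunks above replace the unconditional "skip ROM init" path with a bounded poll of the ROM status register, waiting for FSR_STATE_FW_ENTERED on IMR boot or FSR_STATE_INIT_DONE on a cold boot. The shape of that wait, as a generic sketch only: read_status(), sleep_us() and to_state() below are hypothetical stand-ins for the SOF register-read, delay and FSR_TO_STATE_CODE() helpers, not the real API.

    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t read_status(uint32_t reg);  /* assumed MMIO read helper */
    extern void sleep_us(unsigned int us);      /* assumed delay helper */
    extern uint32_t to_state(uint32_t status);  /* stands in for FSR_TO_STATE_CODE() */

    static bool wait_for_rom_state(uint32_t reg, uint32_t target,
                                   unsigned int step_us, unsigned int timeout_us)
    {
        unsigned int waited;

        for (waited = 0; waited < timeout_us; waited += step_us) {
            if (to_state(read_status(reg)) == target)
                return true;        /* ROM reached the target state */
            sleep_us(step_us);
        }
        return false;               /* timed out: caller dumps state and powers down */
    }
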
+diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
+index ea8c1b83f7127..3c56427a966bf 100644
+--- a/sound/soc/sof/intel/mtl.h
++++ b/sound/soc/sof/intel/mtl.h
+@@ -70,8 +70,8 @@
+ #define MTL_DSP_ROM_STS			MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
+ #define MTL_DSP_ROM_ERROR		(MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
+ 
+-#define MTL_DSP_REG_HFFLGPXQWY		0x163200 /* ROM debug status */
+-#define MTL_DSP_REG_HFFLGPXQWY_ERROR	0x163204 /* ROM debug error code */
++#define MTL_DSP_REG_HFFLGPXQWY		0x163200 /* DSP core0 status */
++#define MTL_DSP_REG_HFFLGPXQWY_ERROR	0x163204 /* DSP core0 error */
+ #define MTL_DSP_REG_HfIMRIS1		0x162088
+ #define MTL_DSP_REG_HfIMRIS1_IU_MASK	BIT(0)
+ 
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index 12af572201a29..da9347552be69 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -148,7 +148,7 @@ AVXcode:
+ 65: SEG=GS (Prefix)
+ 66: Operand-Size (Prefix)
+ 67: Address-Size (Prefix)
+-68: PUSH Iz (d64)
++68: PUSH Iz
+ 69: IMUL Gv,Ev,Iz
+ 6a: PUSH Ib (d64)
+ 6b: IMUL Gv,Ev,Ib
+@@ -698,10 +698,10 @@ AVXcode: 2
+ 4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+ 4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+ 4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+-50: vpdpbusd Vx,Hx,Wx (66),(ev)
+-51: vpdpbusds Vx,Hx,Wx (66),(ev)
+-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
++50: vpdpbusd Vx,Hx,Wx (66)
++51: vpdpbusds Vx,Hx,Wx (66)
++52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
++53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+ 54: vpopcntb/w Vx,Wx (66),(ev)
+ 55: vpopcntd/q Vx,Wx (66),(ev)
+ 58: vpbroadcastd Vx,Wx (66),(v)
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index cc6e6aae2447d..958e92acca8e2 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -244,29 +244,101 @@ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
+ 	return fd;
+ }
+ 
+-int mount_bpffs_for_pin(const char *name, bool is_dir)
++int create_and_mount_bpffs_dir(const char *dir_name)
+ {
+ 	char err_str[ERR_MAX_LEN];
+-	char *file;
+-	char *dir;
++	bool dir_exists;
+ 	int err = 0;
+ 
+-	if (is_dir && is_bpffs(name))
++	if (is_bpffs(dir_name))
+ 		return err;
+ 
+-	file = malloc(strlen(name) + 1);
+-	if (!file) {
++	dir_exists = access(dir_name, F_OK) == 0;
++
++	if (!dir_exists) {
++		char *temp_name;
++		char *parent_name;
++
++		temp_name = strdup(dir_name);
++		if (!temp_name) {
++			p_err("mem alloc failed");
++			return -1;
++		}
++
++		parent_name = dirname(temp_name);
++
++		if (is_bpffs(parent_name)) {
++			/* nothing to do if already mounted */
++			free(temp_name);
++			return err;
++		}
++
++		if (access(parent_name, F_OK) == -1) {
++			p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
++			      dir_name, parent_name);
++			free(temp_name);
++			return -1;
++		}
++
++		free(temp_name);
++	}
++
++	if (block_mount) {
++		p_err("no BPF file system found, not mounting it due to --nomount option");
++		return -1;
++	}
++
++	if (!dir_exists) {
++		err = mkdir(dir_name, S_IRWXU);
++		if (err) {
++			p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
++			return err;
++		}
++	}
++
++	err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
++	if (err) {
++		err_str[ERR_MAX_LEN - 1] = '\0';
++		p_err("can't mount BPF file system on given dir '%s': %s",
++		      dir_name, err_str);
++
++		if (!dir_exists)
++			rmdir(dir_name);
++	}
++
++	return err;
++}
++
++int mount_bpffs_for_file(const char *file_name)
++{
++	char err_str[ERR_MAX_LEN];
++	char *temp_name;
++	char *dir;
++	int err = 0;
++
++	if (access(file_name, F_OK) != -1) {
++		p_err("can't pin BPF object: path '%s' already exists", file_name);
++		return -1;
++	}
++
++	temp_name = strdup(file_name);
++	if (!temp_name) {
+ 		p_err("mem alloc failed");
+ 		return -1;
+ 	}
+ 
+-	strcpy(file, name);
+-	dir = dirname(file);
++	dir = dirname(temp_name);
+ 
+ 	if (is_bpffs(dir))
+ 		/* nothing to do if already mounted */
+ 		goto out_free;
+ 
++	if (access(dir, F_OK) == -1) {
++		p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
++		err = -1;
++		goto out_free;
++	}
++
+ 	if (block_mount) {
+ 		p_err("no BPF file system found, not mounting it due to --nomount option");
+ 		err = -1;
+@@ -276,12 +348,12 @@ int mount_bpffs_for_pin(const char *name, bool is_dir)
+ 	err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
+ 	if (err) {
+ 		err_str[ERR_MAX_LEN - 1] = '\0';
+-		p_err("can't mount BPF file system to pin the object (%s): %s",
+-		      name, err_str);
++		p_err("can't mount BPF file system to pin the object '%s': %s",
++		      file_name, err_str);
+ 	}
+ 
+ out_free:
+-	free(file);
++	free(temp_name);
+ 	return err;
+ }
+ 
+@@ -289,7 +361,7 @@ int do_pin_fd(int fd, const char *name)
+ {
+ 	int err;
+ 
+-	err = mount_bpffs_for_pin(name, false);
++	err = mount_bpffs_for_file(name);
+ 	if (err)
+ 		return err;
+ 
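
One detail worth noting in both helpers above: POSIX dirname() is permitted to modify the string passed to it, which is why the path is strdup()'d before the call. A minimal standalone illustration of the same pattern:

    #include <libgen.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *path = "/sys/fs/bpf/myprog";
        char *copy = strdup(path);      /* dirname() may write into this copy */

        if (!copy)
            return 1;
        printf("parent of %s is %s\n", path, dirname(copy));
        free(copy);
        return 0;
    }
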
+diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
+index 6b0e5202ca7a9..5c39c2ed36a2b 100644
+--- a/tools/bpf/bpftool/iter.c
++++ b/tools/bpf/bpftool/iter.c
+@@ -76,7 +76,7 @@ static int do_pin(int argc, char **argv)
+ 		goto close_obj;
+ 	}
+ 
+-	err = mount_bpffs_for_pin(path, false);
++	err = mount_bpffs_for_file(path);
+ 	if (err)
+ 		goto close_link;
+ 
+diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
+index b8bb08d10dec9..9eb764fe4cc8b 100644
+--- a/tools/bpf/bpftool/main.h
++++ b/tools/bpf/bpftool/main.h
+@@ -142,7 +142,8 @@ const char *get_fd_type_name(enum bpf_obj_type type);
+ char *get_fdinfo(int fd, const char *key);
+ int open_obj_pinned(const char *path, bool quiet);
+ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
+-int mount_bpffs_for_pin(const char *name, bool is_dir);
++int mount_bpffs_for_file(const char *file_name);
++int create_and_mount_bpffs_dir(const char *dir_name);
+ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
+ int do_pin_fd(int fd, const char *name);
+ 
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 9cb42a3366c07..4c4cf16a40ba7 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -1778,7 +1778,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
+ 		goto err_close_obj;
+ 	}
+ 
+-	err = mount_bpffs_for_pin(pinfile, !first_prog_only);
++	if (first_prog_only)
++		err = mount_bpffs_for_file(pinfile);
++	else
++		err = create_and_mount_bpffs_dir(pinfile);
+ 	if (err)
+ 		goto err_close_obj;
+ 
+diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+index 26004f0c5a6ae..7bdbcac3cf628 100644
+--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
++++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+@@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx)
+ 				       BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 		struct bpf_link *link = (struct bpf_link *) file->private_data;
+ 
+-		if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
+-						      BPF_LINK_TYPE_PERF_EVENT___local)) {
++		if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local,
++								     BPF_LINK_TYPE_PERF_EVENT___local)) {
+ 			e.has_bpf_cookie = true;
+ 			e.bpf_cookie = get_bpf_cookie(link);
+ 		}
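
The fix above swaps a direct dereference of link->type for BPF_CORE_READ(), the libbpf macro from <bpf/bpf_core_read.h> that emits a CO-RE relocatable read, so the member offset is adjusted against the running kernel's BTF rather than frozen at compile time. A rough sketch of the pattern, assuming the usual vmlinux.h tracing setup (the plain enum name is used here instead of the ___local flavors in the real program):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    /* relocatable read: the offset of 'type' is resolved against kernel BTF */
    static __always_inline bool link_is_perf_event(struct bpf_link *link)
    {
        return BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT;
    }
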
+diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
+index d573f2640d8e9..aa43dead249cb 100644
+--- a/tools/bpf/bpftool/struct_ops.c
++++ b/tools/bpf/bpftool/struct_ops.c
+@@ -515,7 +515,7 @@ static int do_register(int argc, char **argv)
+ 	if (argc == 1)
+ 		linkdir = GET_ARG();
+ 
+-	if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
++	if (linkdir && create_and_mount_bpffs_dir(linkdir)) {
+ 		p_err("can't mount bpffs for pinning");
+ 		return -1;
+ 	}
+diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
+index bacfd35c51565..5be9d3c7435a8 100644
+--- a/tools/include/nolibc/stdlib.h
++++ b/tools/include/nolibc/stdlib.h
+@@ -185,7 +185,7 @@ void *realloc(void *old_ptr, size_t new_size)
+ 	if (__builtin_expect(!ret, 0))
+ 		return NULL;
+ 
+-	memcpy(ret, heap->user_p, heap->len);
++	memcpy(ret, heap->user_p, user_p_len);
+ 	munmap(heap, heap->len);
+ 	return ret;
+ }
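
The one-liner above matters because heap->len is the allocator's internal block length, header included, so copying that many bytes over-copies into the new user buffer. The general rule behind the fix, as a standalone sketch (not the nolibc code itself): bound the copy by the old user-visible size, and by the new size when shrinking.

    #include <stdlib.h>
    #include <string.h>

    /* resize a buffer whose old user-visible size is known */
    void *resize(void *old, size_t old_size, size_t new_size)
    {
        void *p = malloc(new_size);

        if (!p)
            return NULL;
        memcpy(p, old, old_size < new_size ? old_size : new_size);
        free(old);
        return p;
    }
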
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 3c42b9f1bada3..bcd84985faf48 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -7150,7 +7150,7 @@ struct bpf_fib_lookup {
+ 
+ 		/* output: MTU value */
+ 		__u16	mtu_result;
+-	};
++	} __attribute__((packed, aligned(2)));
+ 	/* input: L3 device index for lookup
+ 	 * output: device index from FIB lookup
+ 	 */
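
For context on the attribute added above: an anonymous union containing a wider member would otherwise raise the union's alignment and shift the offsets of the surrounding fields; packed plus aligned(2) pins it to 2-byte alignment so the uapi layout stays put. A hypothetical layout check (a demo struct, not the real bpf_fib_lookup):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo {
        uint16_t a;
        union {
            uint8_t  b;
            uint32_t c;   /* would normally force 4-byte alignment */
        } __attribute__((packed, aligned(2))) u;
        uint16_t d;
    };

    int main(void)
    {
        /* with the attribute, u lands right after a at offset 2, not 4 */
        printf("offsetof(u) = %zu\n", offsetof(struct demo, u));
        return 0;
    }
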
+diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
+index 97ec005c3c47f..145b8bd1d5ff1 100644
+--- a/tools/lib/bpf/bpf.c
++++ b/tools/lib/bpf/bpf.c
+@@ -105,7 +105,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
+  */
+ int probe_memcg_account(int token_fd)
+ {
+-	const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
++	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+ 	struct bpf_insn insns[] = {
+ 		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
+ 		BPF_EXIT_INSN(),
+diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
+index 4e783cc7fc4b5..a336786a22a38 100644
+--- a/tools/lib/bpf/features.c
++++ b/tools/lib/bpf/features.c
+@@ -22,7 +22,7 @@ int probe_fd(int fd)
+ 
+ static int probe_kern_prog_name(int token_fd)
+ {
+-	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
++	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+ 	struct bpf_insn insns[] = {
+ 		BPF_MOV64_IMM(BPF_REG_0, 0),
+ 		BPF_EXIT_INSN(),
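
Both probes above size the bpf_attr they hand to the kernel with offsetofend(), which is simply offsetof() plus the member's size; extending it to prog_token_fd ensures the token field is actually covered by the bytes passed to the syscall. A standalone equivalent of the macro:

    #include <stddef.h>
    #include <stdio.h>

    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    struct attr { int fd; int flags; int token_fd; };

    int main(void)
    {
        printf("%zu\n", offsetofend(struct attr, token_fd));  /* 12 on typical ABIs */
        return 0;
    }
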
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index a2061fcd612d7..f515cf264a0a2 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -7321,9 +7321,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
+ 	char *cp, errmsg[STRERR_BUFSIZE];
+ 	size_t log_buf_size = 0;
+ 	char *log_buf = NULL, *tmp;
+-	int btf_fd, ret, err;
+ 	bool own_log_buf = true;
+ 	__u32 log_level = prog->log_level;
++	int ret, err;
+ 
+ 	if (prog->type == BPF_PROG_TYPE_UNSPEC) {
+ 		/*
+@@ -7347,9 +7347,8 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
+ 	load_attr.prog_ifindex = prog->prog_ifindex;
+ 
+ 	/* specify func_info/line_info only if kernel supports them */
+-	btf_fd = btf__fd(obj->btf);
+-	if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
+-		load_attr.prog_btf_fd = btf_fd;
++	if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
++		load_attr.prog_btf_fd = btf__fd(obj->btf);
+ 		load_attr.func_info = prog->func_info;
+ 		load_attr.func_info_rec_size = prog->func_info_rec_size;
+ 		load_attr.func_info_cnt = prog->func_info_cnt;
+@@ -11476,7 +11475,7 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
+ 
+ 	n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+ 	if (n < 1) {
+-		pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
++		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
+index 19be9c63d5e84..e812876d79c7e 100644
+--- a/tools/testing/selftests/bpf/cgroup_helpers.c
++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
+@@ -508,6 +508,9 @@ int cgroup_setup_and_join(const char *path) {
+ /**
+  * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+  *
++ * This function should only be called in a custom mount namespace, e.g.
++ * created by running setup_cgroup_environment.
++ *
+  * After calling this function, cleanup_classid_environment should be called
+  * once testing is complete.
+  *
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 6db27a9088e97..be96bf022316f 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -461,6 +461,8 @@ struct nstoken *open_netns(const char *name)
+ 
+ 	return token;
+ fail:
++	if (token->orig_netns_fd != -1)
++		close(token->orig_netns_fd);
+ 	free(token);
+ 	return NULL;
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
+index 74d6d7546f40f..25332e596750f 100644
+--- a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
++++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
+@@ -87,9 +87,12 @@ void test_cgroup1_hierarchy(void)
+ 		goto destroy;
+ 
+ 	/* Setup cgroup1 hierarchy */
++	err = setup_cgroup_environment();
++	if (!ASSERT_OK(err, "setup_cgroup_environment"))
++		goto destroy;
+ 	err = setup_classid_environment();
+ 	if (!ASSERT_OK(err, "setup_classid_environment"))
+-		goto destroy;
++		goto cleanup_cgroup;
+ 
+ 	err = join_classid();
+ 	if (!ASSERT_OK(err, "join_cgroup1"))
+@@ -153,6 +156,8 @@ void test_cgroup1_hierarchy(void)
+ 
+ cleanup:
+ 	cleanup_classid_environment();
++cleanup_cgroup:
++	cleanup_cgroup_environment();
+ destroy:
+ 	test_cgroup1_hierarchy__destroy(skel);
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index 498d3bdaa4b0b..bad0ea167be70 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -107,8 +107,8 @@ void test_xdp_do_redirect(void)
+ 			    .attach_point = BPF_TC_INGRESS);
+ 
+ 	memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
+-	*((__u32 *)data) = 0x42; /* metadata test value */
+-	*((__u32 *)data + 4) = 0;
++	((__u32 *)data)[0] = 0x42; /* metadata test value */
++	((__u32 *)data)[1] = 0;
+ 
+ 	skel = test_xdp_do_redirect__open();
+ 	if (!ASSERT_OK_PTR(skel, "skel"))
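
The two-line fix above is a classic pointer-arithmetic scaling bug: (__u32 *)data + 4 advances four u32 elements (16 bytes), not 4 bytes, so the old code zeroed the wrong word. A self-contained illustration in standard C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t data[32] = {0};
        uint32_t *p = (uint32_t *)data;

        /* element 1 starts at byte 4 ... */
        printf("&p[1] - data = %td\n", (uint8_t *)&p[1] - data);        /* 4 */
        /* ... while p + 4 points at byte 16 */
        printf("(p + 4) as byte offset = %td\n", (uint8_t *)(p + 4) - data); /* 16 */
        return 0;
    }
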
+diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+index e4bfbba6c1936..c8ec0d0368e4a 100644
+--- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
++++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+@@ -61,14 +61,15 @@ SEC("lsm.s/socket_post_create")
+ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ 	     int protocol, int kern)
+ {
++	struct sock *sk = sock->sk;
+ 	struct storage *stg;
+ 	__u32 pid;
+ 
+ 	pid = bpf_get_current_pid_tgid() >> 32;
+-	if (pid != bench_pid)
++	if (pid != bench_pid || !sk)
+ 		return 0;
+ 
+-	stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL,
++	stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL,
+ 				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+ 
+ 	if (stg)
+diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
+index e5e3a8b8dd075..637e75df2e146 100644
+--- a/tools/testing/selftests/bpf/progs/local_storage.c
++++ b/tools/testing/selftests/bpf/progs/local_storage.c
+@@ -140,11 +140,12 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ {
+ 	__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ 	struct local_storage *storage;
++	struct sock *sk = sock->sk;
+ 
+-	if (pid != monitored_pid)
++	if (pid != monitored_pid || !sk)
+ 		return 0;
+ 
+-	storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0);
++	storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0);
+ 	if (!storage)
+ 		return 0;
+ 
+@@ -155,24 +156,24 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ 	/* This tests that we can associate multiple elements
+ 	 * with the local storage.
+ 	 */
+-	storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
++	storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ 				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+ 	if (!storage)
+ 		return 0;
+ 
+-	if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk))
++	if (bpf_sk_storage_delete(&sk_storage_map2, sk))
+ 		return 0;
+ 
+-	storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
++	storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
+ 				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+ 	if (!storage)
+ 		return 0;
+ 
+-	if (bpf_sk_storage_delete(&sk_storage_map, sock->sk))
++	if (bpf_sk_storage_delete(&sk_storage_map, sk))
+ 		return 0;
+ 
+ 	/* Ensure that the sk_storage_map is disconnected from the storage. */
+-	if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap)
++	if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)
+ 		return 0;
+ 
+ 	sk_storage_result = 0;
+@@ -185,11 +186,12 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ {
+ 	__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ 	struct local_storage *storage;
++	struct sock *sk = sock->sk;
+ 
+-	if (pid != monitored_pid)
++	if (pid != monitored_pid || !sk)
+ 		return 0;
+ 
+-	storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
++	storage = bpf_sk_storage_get(&sk_storage_map, sk, 0,
+ 				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+ 	if (!storage)
+ 		return 0;
+diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+index 02c11d16b692a..d7598538aa2da 100644
+--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
++++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+@@ -103,11 +103,15 @@ static __always_inline int real_bind(struct socket *sock,
+ 				     int addrlen)
+ {
+ 	struct sockaddr_ll sa = {};
++	struct sock *sk = sock->sk;
+ 
+-	if (sock->sk->__sk_common.skc_family != AF_PACKET)
++	if (!sk)
++		return 1;
++
++	if (sk->__sk_common.skc_family != AF_PACKET)
+ 		return 1;
+ 
+-	if (sock->sk->sk_kern_sock)
++	if (sk->sk_kern_sock)
+ 		return 1;
+ 
+ 	bpf_probe_read_kernel(&sa, sizeof(sa), address);
+diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
+index 024a0faafb3be..43612de44fbf5 100644
+--- a/tools/testing/selftests/bpf/test_sockmap.c
++++ b/tools/testing/selftests/bpf/test_sockmap.c
+@@ -2104,9 +2104,9 @@ int main(int argc, char **argv)
+ 		free(options.whitelist);
+ 	if (options.blacklist)
+ 		free(options.blacklist);
++	close(cg_fd);
+ 	if (cg_created)
+ 		cleanup_cgroup_environment();
+-	close(cg_fd);
+ 	return err;
+ }
+ 
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 0340d4ca8f51c..432db923bced0 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -195,10 +195,10 @@ int cg_write_numeric(const char *cgroup, const char *control, long value)
+ 	return cg_write(cgroup, control, buf);
+ }
+ 
+-int cg_find_unified_root(char *root, size_t len)
++int cg_find_unified_root(char *root, size_t len, bool *nsdelegate)
+ {
+ 	char buf[10 * PAGE_SIZE];
+-	char *fs, *mount, *type;
++	char *fs, *mount, *type, *options;
+ 	const char delim[] = "\n\t ";
+ 
+ 	if (read_text("/proc/self/mounts", buf, sizeof(buf)) <= 0)
+@@ -211,12 +211,14 @@ int cg_find_unified_root(char *root, size_t len)
+ 	for (fs = strtok(buf, delim); fs; fs = strtok(NULL, delim)) {
+ 		mount = strtok(NULL, delim);
+ 		type = strtok(NULL, delim);
+-		strtok(NULL, delim);
++		options = strtok(NULL, delim);
+ 		strtok(NULL, delim);
+ 		strtok(NULL, delim);
+ 
+ 		if (strcmp(type, "cgroup2") == 0) {
+ 			strncpy(root, mount, len);
++			if (nsdelegate)
++				*nsdelegate = !!strstr(options, "nsdelegate");
+ 			return 0;
+ 		}
+ 	}
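
/proc/self/mounts follows the fstab(5) field order: device, mount point, fstype, options, dump, pass. The updated helper simply keeps the fourth token and searches it for "nsdelegate". A standalone sketch of that parse:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[] = "cgroup2 /sys/fs/cgroup cgroup2 rw,nsdelegate 0 0";
        const char *delim = " \t\n";
        char *dev = strtok(line, delim);
        char *mnt = strtok(NULL, delim);
        char *type = strtok(NULL, delim);
        char *opts = strtok(NULL, delim);

        if (type && strcmp(type, "cgroup2") == 0)
            printf("root=%s nsdelegate=%d\n", mnt,
                   opts && strstr(opts, "nsdelegate") ? 1 : 0);
        (void)dev;
        return 0;
    }
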
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
+index 1df7f202214af..89e8519fb2719 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.h
++++ b/tools/testing/selftests/cgroup/cgroup_util.h
+@@ -21,7 +21,7 @@ static inline int values_close(long a, long b, int err)
+ 	return abs(a - b) <= (a + b) / 100 * err;
+ }
+ 
+-extern int cg_find_unified_root(char *root, size_t len);
++extern int cg_find_unified_root(char *root, size_t len, bool *nsdelegate);
+ extern char *cg_name(const char *root, const char *name);
+ extern char *cg_name_indexed(const char *root, const char *name, int index);
+ extern char *cg_control(const char *cgroup, const char *control);
+diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
+index 80aa6b2373b96..a5672a91d273c 100644
+--- a/tools/testing/selftests/cgroup/test_core.c
++++ b/tools/testing/selftests/cgroup/test_core.c
+@@ -18,6 +18,8 @@
+ #include "../kselftest.h"
+ #include "cgroup_util.h"
+ 
++static bool nsdelegate;
++
+ static int touch_anon(char *buf, size_t size)
+ {
+ 	int fd;
+@@ -775,6 +777,9 @@ static int test_cgcore_lesser_ns_open(const char *root)
+ 	pid_t pid;
+ 	int status;
+ 
++	if (!nsdelegate)
++		return KSFT_SKIP;
++
+ 	cg_test_a = cg_name(root, "cg_test_a");
+ 	cg_test_b = cg_name(root, "cg_test_b");
+ 
+@@ -862,7 +867,7 @@ int main(int argc, char *argv[])
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), &nsdelegate))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
+index 24020a2c68dcd..186bf96f6a284 100644
+--- a/tools/testing/selftests/cgroup/test_cpu.c
++++ b/tools/testing/selftests/cgroup/test_cpu.c
+@@ -700,7 +700,7 @@ int main(int argc, char *argv[])
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
+diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
+index b061ed1e05b4d..4034d14ba69ac 100644
+--- a/tools/testing/selftests/cgroup/test_cpuset.c
++++ b/tools/testing/selftests/cgroup/test_cpuset.c
+@@ -249,7 +249,7 @@ int main(int argc, char *argv[])
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	if (cg_read_strstr(root, "cgroup.subtree_control", "cpuset"))
+diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
+index 8845353aca53b..8730645d363a7 100644
+--- a/tools/testing/selftests/cgroup/test_freezer.c
++++ b/tools/testing/selftests/cgroup/test_freezer.c
+@@ -827,7 +827,7 @@ int main(int argc, char *argv[])
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ 		switch (tests[i].fn(root)) {
+diff --git a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
+index f0fefeb4cc24c..856f9508ea562 100644
+--- a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
++++ b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
+@@ -214,7 +214,7 @@ int main(int argc, char **argv)
+ 		return ret;
+ 	}
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	switch (test_hugetlb_memcg(root)) {
+diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c
+index 6153690319c9c..0e5bb6c7307a5 100644
+--- a/tools/testing/selftests/cgroup/test_kill.c
++++ b/tools/testing/selftests/cgroup/test_kill.c
+@@ -276,7 +276,7 @@ int main(int argc, char *argv[])
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ 		switch (tests[i].fn(root)) {
+diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
+index c82f974b85c94..137506db03127 100644
+--- a/tools/testing/selftests/cgroup/test_kmem.c
++++ b/tools/testing/selftests/cgroup/test_kmem.c
+@@ -420,7 +420,7 @@ int main(int argc, char **argv)
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	/*
+diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
+index c7c9572003a8c..b462416b38061 100644
+--- a/tools/testing/selftests/cgroup/test_memcontrol.c
++++ b/tools/testing/selftests/cgroup/test_memcontrol.c
+@@ -1314,7 +1314,7 @@ int main(int argc, char **argv)
+ 	char root[PATH_MAX];
+ 	int i, proc_status, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	/*
+diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
+index f0e488ed90d89..ef7f395453173 100644
+--- a/tools/testing/selftests/cgroup/test_zswap.c
++++ b/tools/testing/selftests/cgroup/test_zswap.c
+@@ -440,7 +440,7 @@ int main(int argc, char **argv)
+ 	char root[PATH_MAX];
+ 	int i, ret = EXIT_SUCCESS;
+ 
+-	if (cg_find_unified_root(root, sizeof(root)))
++	if (cg_find_unified_root(root, sizeof(root), NULL))
+ 		ksft_exit_skip("cgroup v2 isn't mounted\n");
+ 
+ 	if (!zswap_configured())
+diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
+index d23d7398a27a8..fe77d7e73a25b 100644
+--- a/tools/testing/selftests/damon/_damon_sysfs.py
++++ b/tools/testing/selftests/damon/_damon_sysfs.py
+@@ -287,6 +287,8 @@ class DamonCtx:
+         nr_schemes_file = os.path.join(
+                 self.sysfs_dir(), 'schemes', 'nr_schemes')
+         content, err = read_file(nr_schemes_file)
++        if err is not None:
++            return err
+         if int(content) != len(self.schemes):
+             err = write_file(nr_schemes_file, '%d' % len(self.schemes))
+             if err != None:
+diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
+index c2f7cef919c04..eb4c3b4119348 100644
+--- a/tools/testing/selftests/filesystems/binderfs/Makefile
++++ b/tools/testing/selftests/filesystems/binderfs/Makefile
+@@ -3,6 +3,4 @@
+ CFLAGS += $(KHDR_INCLUDES) -pthread
+ TEST_GEN_PROGS := binderfs_test
+ 
+-binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+-
+ include ../../lib.mk
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+index b9c21a81d2481..c0cdad4c400e8 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+@@ -53,7 +53,7 @@ fi
+ 
+ echo > dynamic_events
+ 
+-if [ "$FIELDS" ] ; then
++if [ "$FIELDS" -a "$FPROBES" ] ; then
+ echo "t:tpevent ${TP2} obj_size=s->object_size" >> dynamic_events
+ echo "f:fpevent ${TP3}%return path=\$retval->name:string" >> dynamic_events
+ echo "t:tpevent2 ${TP4} p->se.group_node.next->prev" >> dynamic_events
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
+index d183b8a8ecf82..1e251ce2998ea 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
+@@ -11,7 +11,7 @@ echo 1 > events/tests/enable
+ echo > trace
+ cat trace > /dev/null
+ 
+-function streq() {
++streq() {
+ 	test $1 = $2
+ }
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
+index 53b82f36a1d01..e50470b531648 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
+@@ -11,7 +11,7 @@ echo 1 > events/kprobes/enable
+ echo > trace
+ cat trace > /dev/null
+ 
+-function streq() {
++streq() {
+ 	test $1 = $2
+ }
+ 
+diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
+index 25110c7c0b3ed..d7a8e321bb16b 100644
+--- a/tools/testing/selftests/kcmp/kcmp_test.c
++++ b/tools/testing/selftests/kcmp/kcmp_test.c
+@@ -91,7 +91,7 @@ int main(int argc, char **argv)
+ 		ksft_print_header();
+ 		ksft_set_plan(3);
+ 
+-		fd2 = open(kpath, O_RDWR, 0644);
++		fd2 = open(kpath, O_RDWR);
+ 		if (fd2 < 0) {
+ 			perror("Can't open file");
+ 			ksft_exit_fail();
+diff --git a/tools/testing/selftests/kselftest/ktap_helpers.sh b/tools/testing/selftests/kselftest/ktap_helpers.sh
+index f2fbb914e058d..79a125eb24c2e 100644
+--- a/tools/testing/selftests/kselftest/ktap_helpers.sh
++++ b/tools/testing/selftests/kselftest/ktap_helpers.sh
+@@ -43,7 +43,7 @@ __ktap_test() {
+ 	directive="$3" # optional
+ 
+ 	local directive_str=
+-	[[ ! -z "$directive" ]] && directive_str="# $directive"
++	[ ! -z "$directive" ] && directive_str="# $directive"
+ 
+ 	echo $result $KTAP_TESTNO $description $directive_str
+ 
+@@ -99,7 +99,7 @@ ktap_exit_fail_msg() {
+ ktap_finished() {
+ 	ktap_print_totals
+ 
+-	if [ $(("$KTAP_CNT_PASS" + "$KTAP_CNT_SKIP")) -eq "$KSFT_NUM_TESTS" ]; then
++	if [ $((KTAP_CNT_PASS + KTAP_CNT_SKIP)) -eq "$KSFT_NUM_TESTS" ]; then
+ 		exit "$KSFT_PASS"
+ 	else
+ 		exit "$KSFT_FAIL"
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index da2cade3bab0e..8ae203d8ed7fa 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -7,6 +7,8 @@ else ifneq ($(filter -%,$(LLVM)),)
+ LLVM_SUFFIX := $(LLVM)
+ endif
+ 
++CLANG := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
++
+ CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
+@@ -18,7 +20,13 @@ CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS_x86_64       := x86_64-linux-gnu
+-CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
++
++# Default to host architecture if ARCH is not explicitly given.
++ifeq ($(ARCH),)
++CLANG_TARGET_FLAGS := $(shell $(CLANG) -print-target-triple)
++else
++CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
++endif
+ 
+ ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CLANG_TARGET_FLAGS),)
+@@ -30,7 +38,7 @@ else
+ CLANG_FLAGS     += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ endif # CROSS_COMPILE
+ 
+-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
++CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
+ else
+ CC := $(CROSS_COMPILE)gcc
+ endif # LLVM
+diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
+index 75528788cb95e..5175a42cbe8a2 100755
+--- a/tools/testing/selftests/net/amt.sh
++++ b/tools/testing/selftests/net/amt.sh
+@@ -210,8 +210,8 @@ check_features()
+ 
+ test_ipv4_forward()
+ {
+-	RESULT4=$(ip netns exec "${LISTENER}" nc -w 1 -l -u 239.0.0.1 4000)
+-	if [ "$RESULT4" == "172.17.0.2" ]; then
++	RESULT4=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP4-LISTEN:4000,readbytes=128 || true)
++	if echo "$RESULT4" | grep -q "172.17.0.2"; then
+ 		printf "TEST: %-60s  [ OK ]\n" "IPv4 amt multicast forwarding"
+ 		exit 0
+ 	else
+@@ -222,8 +222,8 @@ test_ipv4_forward()
+ 
+ test_ipv6_forward()
+ {
+-	RESULT6=$(ip netns exec "${LISTENER}" nc -w 1 -l -u ff0e::5:6 6000)
+-	if [ "$RESULT6" == "2001:db8:3::2" ]; then
++	RESULT6=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP6-LISTEN:6000,readbytes=128 || true)
++	if echo "$RESULT6" | grep -q "2001:db8:3::2"; then
+ 		printf "TEST: %-60s  [ OK ]\n" "IPv6 amt multicast forwarding"
+ 		exit 0
+ 	else
+@@ -236,14 +236,14 @@ send_mcast4()
+ {
+ 	sleep 2
+ 	ip netns exec "${SOURCE}" bash -c \
+-		'echo 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
++		'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
+ }
+ 
+ send_mcast6()
+ {
+ 	sleep 2
+ 	ip netns exec "${SOURCE}" bash -c \
+-		'echo 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
++		'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
+ }
+ 
+ check_features
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index 5e4390cac17ed..04de7a6ba6f31 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -30,6 +30,7 @@ CONFIG_IP_GRE=m
+ CONFIG_NETFILTER=y
+ CONFIG_NETFILTER_ADVANCED=y
+ CONFIG_NF_CONNTRACK=m
++CONFIG_IPV6_MROUTE=y
+ CONFIG_IPV6_SIT=y
+ CONFIG_IP_DCCP=m
+ CONFIG_NF_NAT=m
+diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+index 2aa66d2a1702b..e6a3e04fd83f3 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+@@ -478,10 +478,10 @@ v3exc_timeout_test()
+ 	RET=0
+ 	local X=("192.0.2.20" "192.0.2.30")
+ 
+-	# GMI should be 3 seconds
++	# GMI should be 5 seconds
+ 	ip link set dev br0 type bridge mcast_query_interval 100 \
+ 					mcast_query_response_interval 100 \
+-					mcast_membership_interval 300
++					mcast_membership_interval 500
+ 
+ 	v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP
+ 	ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -489,7 +489,7 @@ v3exc_timeout_test()
+ 					mcast_membership_interval 1500
+ 
+ 	$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q
+-	sleep 3
++	sleep 5
+ 	bridge -j -d -s mdb show dev br0 \
+ 		| jq -e ".[].mdb[] | \
+ 			 select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+index e2b9ff773c6b6..f84ab2e657547 100755
+--- a/tools/testing/selftests/net/forwarding/bridge_mld.sh
++++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh
+@@ -478,10 +478,10 @@ mldv2exc_timeout_test()
+ 	RET=0
+ 	local X=("2001:db8:1::20" "2001:db8:1::30")
+ 
+-	# GMI should be 3 seconds
++	# GMI should be 5 seconds
+ 	ip link set dev br0 type bridge mcast_query_interval 100 \
+ 					mcast_query_response_interval 100 \
+-					mcast_membership_interval 300
++					mcast_membership_interval 500
+ 
+ 	mldv2exclude_prepare $h1
+ 	ip link set dev br0 type bridge mcast_query_interval 500 \
+@@ -489,7 +489,7 @@ mldv2exc_timeout_test()
+ 					mcast_membership_interval 1500
+ 
+ 	$MZ $h1 -c 1 $MZPKT_ALLOW2 -q
+-	sleep 3
++	sleep 5
+ 	bridge -j -d -s mdb show dev br0 \
+ 		| jq -e ".[].mdb[] | \
+ 			 select(.grp == \"$TEST_GROUP\" and \
+diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
+index f9fe182dfbd44..56a9454b7ba35 100644
+--- a/tools/testing/selftests/net/lib.sh
++++ b/tools/testing/selftests/net/lib.sh
+@@ -73,15 +73,17 @@ setup_ns()
+ 	local ns=""
+ 	local ns_name=""
+ 	local ns_list=""
++	local ns_exist=
+ 	for ns_name in "$@"; do
+ 		# Some test may setup/remove same netns multi times
+ 		if unset ${ns_name} 2> /dev/null; then
+ 			ns="${ns_name,,}-$(mktemp -u XXXXXX)"
+ 			eval readonly ${ns_name}="$ns"
++			ns_exist=false
+ 		else
+ 			eval ns='$'${ns_name}
+ 			cleanup_ns "$ns"
+-
++			ns_exist=true
+ 		fi
+ 
+ 		if ! ip netns add "$ns"; then
+@@ -90,7 +92,7 @@ setup_ns()
+ 			return $ksft_skip
+ 		fi
+ 		ip -n "$ns" link set lo up
+-		ns_list="$ns_list $ns"
++		! $ns_exist && ns_list="$ns_list $ns"
+ 	done
+ 	NS_LIST="$NS_LIST $ns_list"
+ }
+diff --git a/tools/testing/selftests/power_supply/test_power_supply_properties.sh b/tools/testing/selftests/power_supply/test_power_supply_properties.sh
+index df272dfe1d2a9..a66b1313ed882 100755
+--- a/tools/testing/selftests/power_supply/test_power_supply_properties.sh
++++ b/tools/testing/selftests/power_supply/test_power_supply_properties.sh
+@@ -23,7 +23,7 @@ count_tests() {
+ 	total_tests=0
+ 
+ 	for i in $SUPPLIES; do
+-		total_tests=$(("$total_tests" + "$NUM_TESTS"))
++		total_tests=$((total_tests + NUM_TESTS))
+ 	done
+ 
+ 	echo "$total_tests"
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index 2deac2031de9e..021863f86053a 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -5,6 +5,8 @@ CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := resctrl_tests
+ 
++LOCAL_HDRS += $(wildcard *.h)
++
+ include ../lib.mk
+ 
+-$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
++$(OUTPUT)/resctrl_tests: $(wildcard *.c)
+diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c
+index 0fd9c747d396d..cf263fe9deaf4 100644
+--- a/tools/tracing/latency/latency-collector.c
++++ b/tools/tracing/latency/latency-collector.c
+@@ -935,12 +935,12 @@ static void show_available(void)
+ 	}
+ 
+ 	if (!tracers) {
+-		warnx(no_tracer_msg);
++		warnx("%s", no_tracer_msg);
+ 		return;
+ 	}
+ 
+ 	if (!found) {
+-		warnx(no_latency_tr_msg);
++		warnx("%s", no_latency_tr_msg);
+ 		tracefs_list_free(tracers);
+ 		return;
+ 	}
+@@ -983,7 +983,7 @@ static const char *find_default_tracer(void)
+ 	for (i = 0; relevant_tracers[i]; i++) {
+ 		valid = tracer_valid(relevant_tracers[i], &notracer);
+ 		if (notracer)
+-			errx(EXIT_FAILURE, no_tracer_msg);
++			errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ 		if (valid)
+ 			return relevant_tracers[i];
+ 	}
+@@ -1878,7 +1878,7 @@ static void scan_arguments(int argc, char *argv[])
+ 			}
+ 			valid = tracer_valid(current_tracer, &notracer);
+ 			if (notracer)
+-				errx(EXIT_FAILURE, no_tracer_msg);
++				errx(EXIT_FAILURE, "%s", no_tracer_msg);
+ 			if (!valid)
+ 				errx(EXIT_FAILURE,
+ "The tracer %s is not supported by your kernel!\n", current_tracer);



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-05-30 12:02 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-05-30 12:02 UTC (permalink / raw
  To: gentoo-commits

commit:     e9f56ab9ee755cdb0ac1635e3b7c3939049f79d0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 30 12:02:23 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 30 12:02:23 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e9f56ab9

Remove redundant patch

Removed:
2930_io-uring-sqpoll-ensure-task_work-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |   4 -
 2930_io-uring-sqpoll-ensure-task_work-fix.patch | 115 ------------------------
 2 files changed, 119 deletions(-)

diff --git a/0000_README b/0000_README
index 41e4fabf..83130556 100644
--- a/0000_README
+++ b/0000_README
@@ -83,10 +83,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2930_io-uring-sqpoll-ensure-task_work-fix.patch
-From:   https://bugs.gentoo.org/931942
-Desc:   sqpoll: ensure that normal task_work is also run timely
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_io-uring-sqpoll-ensure-task_work-fix.patch b/2930_io-uring-sqpoll-ensure-task_work-fix.patch
deleted file mode 100644
index 70e62a25..00000000
--- a/2930_io-uring-sqpoll-ensure-task_work-fix.patch
+++ /dev/null
@@ -1,115 +0,0 @@
-From mboxrd@z Thu Jan  1 00:00:00 1970
-Received: from mail-io1-f48.google.com (mail-io1-f48.google.com [209.85.166.48])
-	(using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))
-	(No client certificate requested)
-	by smtp.subspace.kernel.org (Postfix) with ESMTPS id C0BC614884F
-	for <io-uring@vger.kernel.org>; Tue, 21 May 2024 19:43:52 +0000 (UTC)
-Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=209.85.166.48
-ARC-Seal:i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
-	t=1716320634; cv=none; b=KU/mCJ7mBHO+iHipnVTh5iJc7FxW5xbo1S24b4Vfx1HWcIChj7ieZ6M0D4Dly+m6CHUZ/xGmrFxNLNl+hJyl1SruXRuZGd2zsG87whd+SMirOeAcxfkjgTN9YcSuuAs+cr6WBGo33TXA1wYYEdcKAp5+2MtFRlOEfEpneQZ1jRI=
-ARC-Message-Signature:i=1; a=rsa-sha256; d=subspace.kernel.org;
-	s=arc-20240116; t=1716320634; c=relaxed/simple;
-	bh=gaIkoP5Tt9ptNy9eqUxDwFHSVRdtXZdNQxS3gSxXieM=;
-	h=Message-ID:Date:MIME-Version:To:From:Subject:Cc:Content-Type; b=g/8jkVOwbG15NZZ75HTC/Dfc8RLdXo+ufUTtRf0leBrGhctRXfFOQcPJHuIp8HY9Wrch47B9oRjqZL6/m5CaK8aKCXZAQ7dCknHsT8yf8O7aMN+fNs+3QQ7EyZpc+3NjnHZ+NbtSEtGyK2eC5F75Apq4KjVZCYUl/lUQ5sCjIp0=
-ARC-Authentication-Results:i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=kernel.dk; spf=pass smtp.mailfrom=kernel.dk; dkim=pass (2048-bit key) header.d=kernel-dk.20230601.gappssmtp.com header.i=@kernel-dk.20230601.gappssmtp.com header.b=Q/x459tT; arc=none smtp.client-ip=209.85.166.48
-Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=kernel.dk
-Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=kernel.dk
-Authentication-Results: smtp.subspace.kernel.org;
-	dkim=pass (2048-bit key) header.d=kernel-dk.20230601.gappssmtp.com header.i=@kernel-dk.20230601.gappssmtp.com header.b="Q/x459tT"
-Received: by mail-io1-f48.google.com with SMTP id ca18e2360f4ac-7e1b8606bfdso19766539f.3
-        for <io-uring@vger.kernel.org>; Tue, 21 May 2024 12:43:52 -0700 (PDT)
-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
-        d=kernel-dk.20230601.gappssmtp.com; s=20230601; t=1716320631; x=1716925431; darn=vger.kernel.org;
-        h=content-transfer-encoding:cc:subject:from:to:content-language
-         :user-agent:mime-version:date:message-id:from:to:cc:subject:date
-         :message-id:reply-to;
-        bh=T4xIoHSMmGrzeSfhPivp04fPK+A8FmMUIxCHFcE1zPo=;
-        b=Q/x459tTR9ak5EYWL/Ygb8HtLydtfqDpakjjofFDUwlF24E3mxFim/Nnw4x9AEj/vO
-         Nw0e7YouxTiSj9kxnTdLpVz9LuTMJxvPohmoXfgI9ReSCIJ1I95Dn70CZ1CiQW8bsad1
-         /7LdSpIPqGK1OCnLphDgXdqqyBn/URkVCoVovoxwhWgmDm4DwKMePqCdecoZ/M/o9zr5
-         yEPrJag55yEmCVL6Rfezs07paFsHgHAiX55syf6xBBP2ghaH18+oB8oeeHfbCnHxunNc
-         cTL4mATn49cvERCj4GYxEZWnSB/KVSJw2TQbs8VyyLJauzMx4Jk5S/lrhsMzDolCajWj
-         /Tyw==
-X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
-        d=1e100.net; s=20230601; t=1716320631; x=1716925431;
-        h=content-transfer-encoding:cc:subject:from:to:content-language
-         :user-agent:mime-version:date:message-id:x-gm-message-state:from:to
-         :cc:subject:date:message-id:reply-to;
-        bh=T4xIoHSMmGrzeSfhPivp04fPK+A8FmMUIxCHFcE1zPo=;
-        b=W1PQkCljf2qgJy1vEyfE6GT8FahYvbyD7b8TGqRMKaqyAI6lt9kofryakDyC3RKSSQ
-         FZsB+Gx9RQEUu40SOMfUjZQegR+zKaojOx6wtx37pRW85eJG4oNto15sFFfJQSwyKuyN
-         p61QuElnpiAFyaT2QpK3M3NctjaTKRzT+DhJ4+cK1Py69C+ZCqQiaCMgtkIycVWtaUWF
-         evEF260Bry0bjSBbdVfuDaic9WhdvMo2p8c726hK/Bu1CkRs3pGoxkNEHWPQlMmyxzRw
-         zZLB6bwYwQjFAJ6/O0m04m/74Qx1TvUSmx++KafWS0Mn2iVq7rbg/2gPYjLdH/wOoIVf
-         637Q==
-X-Gm-Message-State: AOJu0YyFIIKlIrtLfYpArCPZCQ5Jzozh7A3dzTYrVMDbjr9u4nAs/Wp7
-	ixCpJUwEr1gVybpU68+EwAaRu/9iGdFdgduROcwRveqm10je+a40D0fqv3ilzfyy0QQWWxpTXCD
-	P
-X-Google-Smtp-Source: AGHT+IHIvD2vBkh0fv6wTvoSX5+gjdHhX2Vpo1oJeclj6etBfpIA8v5xmG1uQE1/CW5TRH1jaVRaqw==
-X-Received: by 2002:a05:6e02:148d:b0:36d:cdc1:d76c with SMTP id e9e14a558f8ab-371f617e0c6mr462975ab.0.1716320630872;
-        Tue, 21 May 2024 12:43:50 -0700 (PDT)
-Received: from [192.168.1.116] ([96.43.243.2])
-        by smtp.gmail.com with ESMTPSA id e9e14a558f8ab-36cb9d3f219sm66602285ab.12.2024.05.21.12.43.49
-        (version=TLS1_3 cipher=TLS_AES_128_GCM_SHA256 bits=128/128);
-        Tue, 21 May 2024 12:43:49 -0700 (PDT)
-Message-ID: <45f46362-7dc2-4ab5-ab49-0f3cac1d58fb@kernel.dk>
-Date: Tue, 21 May 2024 13:43:48 -0600
-Precedence: bulk
-X-Mailing-List: io-uring@vger.kernel.org
-List-Id: <io-uring.vger.kernel.org>
-List-Subscribe: <mailto:io-uring+subscribe@vger.kernel.org>
-List-Unsubscribe: <mailto:io-uring+unsubscribe@vger.kernel.org>
-MIME-Version: 1.0
-User-Agent: Mozilla Thunderbird
-Content-Language: en-US
-To: io-uring <io-uring@vger.kernel.org>
-From: Jens Axboe <axboe@kernel.dk>
-Subject: [PATCH v2] io_uring/sqpoll: ensure that normal task_work is also run
- timely
-Cc: Christian Heusel <christian@heusel.eu>, Andrew Udvare <audvare@gmail.com>
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 7bit
-
-With the move to private task_work, SQPOLL neglected to also run the
-normal task_work, if any is pending. This will eventually get run, but
-we should run it with the private task_work to ensure that things like
-a final fput() is processed in a timely fashion.
-
-Cc: stable@vger.kernel.org
-Link: https://lore.kernel.org/all/313824bc-799d-414f-96b7-e6de57c7e21d@gmail.com/
-Reported-by: Andrew Udvare <audvare@gmail.com>
-Fixes: af5d68f8892f ("io_uring/sqpoll: manage task_work privately")
-Tested-by: Christian Heusel <christian@heusel.eu>
-Tested-by: Andrew Udvare <audvare@gmail.com>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-
----
-
-V2: move the task_work_run() section so we're always guaranteed it
-    runs after any task_work. Ran the previous test cases again, both
-    the yarn based one and the liburing test case, and they still work
-    as they should. Previously, if we had a retry condition due to being
-    flooded with task_work, then we'd not run the kernel side task_work.
-
-diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
-index 554c7212aa46..b3722e5275e7 100644
---- a/io_uring/sqpoll.c
-+++ b/io_uring/sqpoll.c
-@@ -238,11 +238,13 @@ static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
- 	if (*retry_list) {
- 		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
- 		if (count >= max_entries)
--			return count;
-+			goto out;
- 		max_entries -= count;
- 	}
--
- 	*retry_list = tctx_task_work_run(tctx, max_entries, &count);
-+out:
-+	if (task_work_pending(current))
-+		task_work_run();
- 	return count;
- }
- 
--- 
-Jens Axboe



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-06-12 10:18 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-06-12 10:18 UTC (permalink / raw
  To: gentoo-commits

commit:     d7841cc5239438b2482d2a4d4e645706a7bfbe78
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 12 10:18:30 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 12 10:18:30 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d7841cc5

Linux patch 6.9.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1003_linux-6.9.4.patch | 15797 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 15801 insertions(+)

diff --git a/0000_README b/0000_README
index 83130556..f824f87c 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.9.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.3
 
+Patch:  1003_linux-6.9.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.4
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1003_linux-6.9.4.patch b/1003_linux-6.9.4.patch
new file mode 100644
index 00000000..3470f007
--- /dev/null
+++ b/1003_linux-6.9.4.patch
@@ -0,0 +1,15797 @@
+diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+index b6a7cb32f61e5..835b6db00c279 100644
+--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
++++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+@@ -77,6 +77,9 @@ properties:
+   vpcie12v-supply:
+     description: The 12v regulator to use for PCIe.
+ 
++  iommu-map: true
++  iommu-map-mask: true
++
+ required:
+   - compatible
+   - reg
+diff --git a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+index 531008f0b6ac3..002b728cbc718 100644
+--- a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+@@ -37,6 +37,7 @@ properties:
+     description: This property is needed if using 24MHz OSC for RC's PHY.
+ 
+   ep-gpios:
++    maxItems: 1
+     description: pre-reset GPIO
+ 
+   vpcie12v-supply:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+index ba966a78a1283..7543456862b80 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+@@ -198,7 +198,6 @@ allOf:
+             enum:
+               - qcom,sm8550-qmp-gen4x2-pcie-phy
+               - qcom,sm8650-qmp-gen4x2-pcie-phy
+-              - qcom,x1e80100-qmp-gen3x2-pcie-phy
+               - qcom,x1e80100-qmp-gen4x2-pcie-phy
+     then:
+       properties:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+index 91a6cc38ff7ff..c4c4fb38c51a9 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+@@ -71,7 +71,6 @@ required:
+   - reg
+   - clocks
+   - clock-names
+-  - power-domains
+   - resets
+   - reset-names
+   - vdda-phy-supply
+@@ -127,6 +126,21 @@ allOf:
+             - const: ref
+             - const: qref
+ 
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
++              - qcom,msm8996-qmp-ufs-phy
++              - qcom,msm8998-qmp-ufs-phy
++    then:
++      properties:
++        power-domains:
++          false
++    else:
++      required:
++        - power-domains
++
+ additionalProperties: false
+ 
+ examples:
+diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+index 0f200e3f97a9a..fce7f8a19e9c0 100644
+--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
++++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+@@ -15,9 +15,6 @@ description: |
+ properties:
+   compatible:
+     oneOf:
+-      - enum:
+-          - qcom,sc8180x-usb-hs-phy
+-          - qcom,usb-snps-femto-v2-phy
+       - items:
+           - enum:
+               - qcom,sa8775p-usb-hs-phy
+@@ -26,6 +23,7 @@ properties:
+       - items:
+           - enum:
+               - qcom,sc7280-usb-hs-phy
++              - qcom,sc8180x-usb-hs-phy
+               - qcom,sdx55-usb-hs-phy
+               - qcom,sdx65-usb-hs-phy
+               - qcom,sm6375-usb-hs-phy
+diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+index bd72a326e6e06..60f30a59f3853 100644
+--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+@@ -97,7 +97,8 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [emmc, emmc_rst]
++                  items:
++                    enum: [emmc, emmc_rst]
+           - if:
+               properties:
+                 function:
+@@ -105,8 +106,9 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
+-                         rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
++                  items:
++                    enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
++                           rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
+           - if:
+               properties:
+                 function:
+@@ -123,10 +125,11 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
+-                         i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
+-                         i2s1_out_data, i2s2_out_data, i2s3_out_data,
+-                         i2s4_out_data]
++                  items:
++                    enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
++                           i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
++                           i2s1_out_data, i2s2_out_data, i2s3_out_data,
++                           i2s4_out_data]
+           - if:
+               properties:
+                 function:
+@@ -159,10 +162,11 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
+-                         pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
+-                         pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
+-                         pcie_wake, pcie_clkreq]
++                  items:
++                    enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
++                           pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
++                           pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
++                           pcie_wake, pcie_clkreq]
+           - if:
+               properties:
+                 function:
+@@ -178,11 +182,12 @@ patternProperties:
+             then:
+               properties:
+                 groups:
+-                  enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
+-                         pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
+-                         pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
+-                         pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
+-                         pwm_ch7_0, pwm_0, pwm_1]
++                  items:
++                    enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
++                           pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
++                           pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
++                           pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
++                           pwm_ch7_0, pwm_0, pwm_1]
+           - if:
+               properties:
+                 function:
+@@ -260,33 +265,34 @@ patternProperties:
+           pins:
+             description:
+               An array of strings. Each string contains the name of a pin.
+-            enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
+-                   RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
+-                   I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
+-                   I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
+-                   G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
+-                   G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
+-                   NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
+-                   MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
+-                   MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
+-                   MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
+-                   MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
+-                   PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
+-                   GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
+-                   PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
+-                   AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
+-                   PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
+-                   WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
+-                   WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
+-                   EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
+-                   EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
+-                   WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
+-                   UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
+-                   UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
+-                   PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
+-                   GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
+-                   TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
+-                   WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
++            items:
++              enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
++                     RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
++                     I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
++                     I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
++                     G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
++                     G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
++                     NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
++                     MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
++                     MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
++                     MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
++                     MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
++                     PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
++                     GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
++                     PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
++                     AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
++                     PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
++                     WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
++                     WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
++                     EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
++                     EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
++                     WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
++                     UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
++                     UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
++                     PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
++                     GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
++                     TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
++                     WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
+ 
+           bias-disable: true
+ 
+diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
+index bb675c8ec220f..1b941b276b3f8 100644
+--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
+@@ -72,40 +72,24 @@ $defs:
+         description:
+           Specify the alternative function to be configured for the specified
+           pins.
+-        enum: [ gpio, atest_char, atest_char0, atest_char1, atest_char2,
+-                atest_char3, atest_usb0, atest_usb00, atest_usb01, atest_usb02,
+-                atest_usb03, audio_ref, cam_mclk, cci_async, cci_i2c,
+-                cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
+-                cmu_rng0, cmu_rng1, cmu_rng2, cmu_rng3, coex_uart1, cri_trng,
+-                cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+-                dp0_hot, gcc_gp1, gcc_gp2, gcc_gp3, host2wlan_sol, ibi_i3c,
+-                jitter_bist, mdp_vsync, mdp_vsync0, mdp_vsync1, mdp_vsync2,
+-                mdp_vsync3, mi2s0_data0, mi2s0_data1, mi2s0_sck, mi2s0_ws,
+-                mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, mi2s_mclk0,
+-                mi2s_mclk1, nav_gpio0, nav_gpio1, nav_gpio2, pcie0_clk,
+-                phase_flag0, phase_flag1, phase_flag10, phase_flag11,
+-                phase_flag12, phase_flag13, phase_flag14, phase_flag15,
+-                phase_flag16, phase_flag17, phase_flag18, phase_flag19,
+-                phase_flag2, phase_flag20, phase_flag21, phase_flag22,
+-                phase_flag23, phase_flag24, phase_flag25, phase_flag26,
+-                phase_flag27, phase_flag28, phase_flag29, phase_flag3,
+-                phase_flag30, phase_flag31, phase_flag4, phase_flag5,
+-                phase_flag6, phase_flag7, phase_flag8, phase_flag9,
+-                pll_bist, pll_clk, prng_rosc0, prng_rosc1, prng_rosc2,
+-                prng_rosc3, qdss_cti, qdss_gpio, qdss_gpio0, qdss_gpio1,
+-                qdss_gpio10, qdss_gpio11, qdss_gpio12, qdss_gpio13, qdss_gpio14,
+-                qdss_gpio15, qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5,
+-                qdss_gpio6, qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink0_enable,
+-                qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
+-                qlink1_wmss, qlink2_enable, qlink2_request, qlink2_wmss,
+-                qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4, qup0_se5,
+-                qup0_se6, qup0_se7, qup1_se0, qup1_se1, qup1_se2, qup1_se3,
+-                qup1_se4, qup1_se5, qup1_se6, sd_write, tb_trig, tgu_ch0,
+-                tgu_ch1, tgu_ch2, tgu_ch3, tmess_prng0, tmess_prng1,
+-                tmess_prng2, tmess_prng3, tsense_pwm1, tsense_pwm2, uim0_clk,
+-                uim0_data, uim0_present, uim0_reset, uim1_clk, uim1_data,
+-                uim1_present, uim1_reset, usb0_hs, usb0_phy, vfr_0, vfr_1,
+-                vsense_trigger ]
++        enum: [ gpio, atest_char, atest_usb0, audio_ref_clk, cam_mclk,
++                cci_async_in0, cci_i2c, cci, cmu_rng, coex_uart1_rx,
++                coex_uart1_tx, cri_trng, dbg_out_clk, ddr_bist,
++                ddr_pxi0_test, ddr_pxi1_test, gcc_gp1_clk, gcc_gp2_clk,
++                gcc_gp3_clk, host2wlan_sol, ibi_i3c_qup0, ibi_i3c_qup1,
++                jitter_bist_ref, mdp_vsync0_out, mdp_vsync1_out,
++                mdp_vsync2_out, mdp_vsync3_out, mdp_vsync, nav,
++                pcie0_clk_req, phase_flag, pll_bist_sync, pll_clk_aux,
++                prng_rosc, qdss_cti_trig0, qdss_cti_trig1, qdss_gpio,
++                qlink0_enable, qlink0_request, qlink0_wmss_reset,
++                qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4,
++                qup1_se0, qup1_se1, qup1_se2, qup1_se2_l2, qup1_se3,
++                qup1_se4, sd_write_protect, tb_trig_sdc1, tb_trig_sdc2,
++                tgu_ch0_trigout, tgu_ch1_trigout, tgu_ch2_trigout,
++                tgu_ch3_trigout, tmess_prng, tsense_pwm1_out,
++                tsense_pwm2_out, uim0, uim1, usb0_hs_ac, usb0_phy_ps,
++                vfr_0_mira, vfr_0_mirb, vfr_1, vsense_trigger_mirnat,
++                wlan1_adc_dtest0, wlan1_adc_dtest1 ]
+ 
+         required:
+           - pins
+diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
+index 6042085340953..833f68fb07008 100644
+--- a/Documentation/driver-api/fpga/fpga-bridge.rst
++++ b/Documentation/driver-api/fpga/fpga-bridge.rst
+@@ -6,9 +6,12 @@ API to implement a new FPGA bridge
+ 
+ * struct fpga_bridge - The FPGA Bridge structure
+ * struct fpga_bridge_ops - Low level Bridge driver ops
+-* fpga_bridge_register() - Create and register a bridge
++* __fpga_bridge_register() - Create and register a bridge
+ * fpga_bridge_unregister() - Unregister a bridge
+ 
++The helper macro ``fpga_bridge_register()`` automatically sets
++the module that registers the FPGA bridge as the owner.
++
+ .. kernel-doc:: include/linux/fpga/fpga-bridge.h
+    :functions: fpga_bridge
+ 
+@@ -16,7 +19,7 @@ API to implement a new FPGA bridge
+    :functions: fpga_bridge_ops
+ 
+ .. kernel-doc:: drivers/fpga/fpga-bridge.c
+-   :functions: fpga_bridge_register
++   :functions: __fpga_bridge_register
+ 
+ .. kernel-doc:: drivers/fpga/fpga-bridge.c
+    :functions: fpga_bridge_unregister
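
The "__"-prefixed rename above reflects a common kernel pattern: a thin wrapper macro expands in the caller and passes THIS_MODULE, so the core knows which module registered the bridge and can pin it while the bridge is in use. A minimal sketch with illustrative demo_* names, not the exact fpga-bridge.h definition:

	/* the macro body runs in the caller, so THIS_MODULE is the caller's module */
	#define demo_bridge_register(parent, name, br_ops, priv) \
		__demo_bridge_register(parent, name, br_ops, priv, THIS_MODULE)

	struct fpga_bridge *__demo_bridge_register(struct device *parent,
						   const char *name,
						   const struct fpga_bridge_ops *br_ops,
						   void *priv, struct module *owner);
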
+diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
+index 49c0a95126532..8d2b79f696c1f 100644
+--- a/Documentation/driver-api/fpga/fpga-mgr.rst
++++ b/Documentation/driver-api/fpga/fpga-mgr.rst
+@@ -24,7 +24,8 @@ How to support a new FPGA device
+ --------------------------------
+ 
+ To add another FPGA manager, write a driver that implements a set of ops.  The
+-probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
++probe function calls ``fpga_mgr_register()`` or ``fpga_mgr_register_full()``,
++such as::
+ 
+ 	static const struct fpga_manager_ops socfpga_fpga_ops = {
+ 		.write_init = socfpga_fpga_ops_configure_init,
+@@ -69,10 +70,11 @@ probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
+ 	}
+ 
+ Alternatively, the probe function could call one of the resource managed
+-register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full().
+-When these functions are used, the parameter syntax is the same, but the call
+-to fpga_mgr_unregister() should be removed. In the above example, the
+-socfpga_fpga_remove() function would not be required.
++register functions, ``devm_fpga_mgr_register()`` or
++``devm_fpga_mgr_register_full()``.  When these functions are used, the
++parameter syntax is the same, but the call to ``fpga_mgr_unregister()`` should be
++removed. In the above example, the ``socfpga_fpga_remove()`` function would not be
++required.
+ 
+ The ops will implement whatever device specific register writes are needed to
+ do the programming sequence for this particular FPGA.  These ops return 0 for
+@@ -125,15 +127,19 @@ API for implementing a new FPGA Manager driver
+ * struct fpga_manager -  the FPGA manager struct
+ * struct fpga_manager_ops -  Low level FPGA manager driver ops
+ * struct fpga_manager_info -  Parameter structure for fpga_mgr_register_full()
+-* fpga_mgr_register_full() -  Create and register an FPGA manager using the
++* __fpga_mgr_register_full() -  Create and register an FPGA manager using the
+   fpga_mgr_info structure to provide the full flexibility of options
+-* fpga_mgr_register() -  Create and register an FPGA manager using standard
++* __fpga_mgr_register() -  Create and register an FPGA manager using standard
+   arguments
+-* devm_fpga_mgr_register_full() -  Resource managed version of
+-  fpga_mgr_register_full()
+-* devm_fpga_mgr_register() -  Resource managed version of fpga_mgr_register()
++* __devm_fpga_mgr_register_full() -  Resource managed version of
++  __fpga_mgr_register_full()
++* __devm_fpga_mgr_register() -  Resource managed version of __fpga_mgr_register()
+ * fpga_mgr_unregister() -  Unregister an FPGA manager
+ 
++Helper macros ``fpga_mgr_register_full()``, ``fpga_mgr_register()``,
++``devm_fpga_mgr_register_full()``, and ``devm_fpga_mgr_register()`` are available
++to ease registration.
++
+ .. kernel-doc:: include/linux/fpga/fpga-mgr.h
+    :functions: fpga_mgr_states
+ 
+@@ -147,16 +153,16 @@ API for implementing a new FPGA Manager driver
+    :functions: fpga_manager_info
+ 
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+-   :functions: fpga_mgr_register_full
++   :functions: __fpga_mgr_register_full
+ 
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+-   :functions: fpga_mgr_register
++   :functions: __fpga_mgr_register
+ 
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+-   :functions: devm_fpga_mgr_register_full
++   :functions: __devm_fpga_mgr_register_full
+ 
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+-   :functions: devm_fpga_mgr_register
++   :functions: __devm_fpga_mgr_register
+ 
+ .. kernel-doc:: drivers/fpga/fpga-mgr.c
+    :functions: fpga_mgr_unregister
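
As a hedged sketch of the devres variant described above (the demo_* names and NULL priv are placeholders; the current signature lives in include/linux/fpga/fpga-mgr.h):

	static const struct fpga_manager_ops demo_fpga_ops = {
		/* .write_init / .write / .write_complete elided */
	};

	static int demo_fpga_probe(struct platform_device *pdev)
	{
		struct fpga_manager *mgr;

		/* resource managed: no fpga_mgr_unregister() needed in remove() */
		mgr = devm_fpga_mgr_register(&pdev->dev, "demo FPGA manager",
					     &demo_fpga_ops, NULL);
		return PTR_ERR_OR_ZERO(mgr);
	}
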
+diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
+index dc55d60a0b4a5..2d03b5fb76575 100644
+--- a/Documentation/driver-api/fpga/fpga-region.rst
++++ b/Documentation/driver-api/fpga/fpga-region.rst
+@@ -46,13 +46,16 @@ API to add a new FPGA region
+ ----------------------------
+ 
+ * struct fpga_region - The FPGA region struct
+-* struct fpga_region_info - Parameter structure for fpga_region_register_full()
+-* fpga_region_register_full() -  Create and register an FPGA region using the
++* struct fpga_region_info - Parameter structure for __fpga_region_register_full()
++* __fpga_region_register_full() -  Create and register an FPGA region using the
+   fpga_region_info structure to provide the full flexibility of options
+-* fpga_region_register() -  Create and register an FPGA region using standard
++* __fpga_region_register() -  Create and register an FPGA region using standard
+   arguments
+ * fpga_region_unregister() -  Unregister an FPGA region
+ 
++Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()``
++automatically set the module that registers the FPGA region as the owner.
++
+ The FPGA region's probe function will need to get a reference to the FPGA
+ Manager it will be using to do the programming.  This usually would happen
+ during the region's probe function.
+@@ -82,10 +85,10 @@ following APIs to handle building or tearing down that list.
+    :functions: fpga_region_info
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+-   :functions: fpga_region_register_full
++   :functions: __fpga_region_register_full
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+-   :functions: fpga_region_register
++   :functions: __fpga_region_register
+ 
+ .. kernel-doc:: drivers/fpga/fpga-region.c
+    :functions: fpga_region_unregister
+diff --git a/Documentation/iio/adis16475.rst b/Documentation/iio/adis16475.rst
+index 91cabb7d8d057..130f9e97cc17c 100644
+--- a/Documentation/iio/adis16475.rst
++++ b/Documentation/iio/adis16475.rst
+@@ -66,11 +66,9 @@ specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+ +-------------------------------------------+----------------------------------------------------------+
+ | in_accel_x_calibbias                      | Calibration offset for the X-axis accelerometer channel. |
+ +-------------------------------------------+----------------------------------------------------------+
+-| in_accel_calibbias_x                      | x-axis acceleration offset correction                    |
+-+-------------------------------------------+----------------------------------------------------------+
+ | in_accel_x_raw                            | Raw X-axis accelerometer channel value.                  |
+ +-------------------------------------------+----------------------------------------------------------+
+-| in_accel_calibbias_y                      | y-axis acceleration offset correction                    |
++| in_accel_y_calibbias                      | Calibration offset for the Y-axis accelerometer channel. |
+ +-------------------------------------------+----------------------------------------------------------+
+ | in_accel_y_raw                            | Raw Y-axis accelerometer channel value.                  |
+ +-------------------------------------------+----------------------------------------------------------+
+@@ -94,11 +92,9 @@ specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+ +---------------------------------------+------------------------------------------------------+
+ | in_anglvel_x_calibbias                | Calibration offset for the X-axis gyroscope channel. |
+ +---------------------------------------+------------------------------------------------------+
+-| in_anglvel_calibbias_x                | x-axis gyroscope offset correction                   |
+-+---------------------------------------+------------------------------------------------------+
+ | in_anglvel_x_raw                      | Raw X-axis gyroscope channel value.                  |
+ +---------------------------------------+------------------------------------------------------+
+-| in_anglvel_calibbias_y                | y-axis gyroscope offset correction                   |
++| in_anglvel_y_calibbias                | Calibration offset for the Y-axis gyroscope channel. |
+ +---------------------------------------+------------------------------------------------------+
+ | in_anglvel_y_raw                      | Raw Y-axis gyroscope channel value.                  |
+ +---------------------------------------+------------------------------------------------------+
+diff --git a/Makefile b/Makefile
+index 8def0819eb55b..91f1d4d34e809 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+index ce90b35686a21..10896f9df682d 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+@@ -65,10 +65,15 @@ xtal: xtal-clk {
+ 		#clock-cells = <0>;
+ 	};
+ 
+-	pwrc: power-controller {
+-		compatible = "amlogic,meson-s4-pwrc";
+-		#power-domain-cells = <1>;
+-		status = "okay";
++	firmware {
++		sm: secure-monitor {
++			compatible = "amlogic,meson-gxbb-sm";
++
++			pwrc: power-controller {
++				compatible = "amlogic,meson-s4-pwrc";
++				#power-domain-cells = <1>;
++			};
++		};
+ 	};
+ 
+ 	soc {
+diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
+index c762038ba4009..6e73809f6492a 100644
+--- a/arch/arm64/include/asm/asm-bug.h
++++ b/arch/arm64/include/asm/asm-bug.h
+@@ -28,6 +28,7 @@
+ 	14470:	.long 14471f - .;			\
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+ 		.short flags; 				\
++		.align 2;				\
+ 		.popsection;				\
+ 	14471:
+ #else
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index c4a0a35e02c72..6cda738a41577 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -195,6 +195,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
+ 	kvm_sys_regs_create_debugfs(kvm);
+ }
+ 
++static void kvm_destroy_mpidr_data(struct kvm *kvm)
++{
++	struct kvm_mpidr_data *data;
++
++	mutex_lock(&kvm->arch.config_lock);
++
++	data = rcu_dereference_protected(kvm->arch.mpidr_data,
++					 lockdep_is_held(&kvm->arch.config_lock));
++	if (data) {
++		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
++		synchronize_rcu();
++		kfree(data);
++	}
++
++	mutex_unlock(&kvm->arch.config_lock);
++}
++
+ /**
+  * kvm_arch_destroy_vm - destroy the VM data structure
+  * @kvm:	pointer to the KVM struct
+@@ -209,7 +226,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 	if (is_protected_kvm_enabled())
+ 		pkvm_destroy_hyp_vm(kvm);
+ 
+-	kfree(kvm->arch.mpidr_data);
++	kvm_destroy_mpidr_data(kvm);
++
+ 	kfree(kvm->arch.sysreg_masks);
+ 	kvm_destroy_vcpus(kvm);
+ 
+@@ -395,6 +413,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ 
++	/*
++	 * This vCPU may have been created after mpidr_data was initialized.
++	 * Throw out the pre-computed mappings if that is the case, which forces
++	 * KVM to fall back to iteratively searching the vCPUs.
++	 */
++	kvm_destroy_mpidr_data(vcpu->kvm);
++
+ 	err = kvm_vgic_vcpu_init(vcpu);
+ 	if (err)
+ 		return err;
+@@ -594,7 +619,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
+ 
+ 	mutex_lock(&kvm->arch.config_lock);
+ 
+-	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
++	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
++	    atomic_read(&kvm->online_vcpus) == 1)
+ 		goto out;
+ 
+ 	kvm_for_each_vcpu(c, vcpu, kvm) {
+@@ -631,7 +657,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
+ 		data->cmpidr_to_idx[index] = c;
+ 	}
+ 
+-	kvm->arch.mpidr_data = data;
++	rcu_assign_pointer(kvm->arch.mpidr_data, data);
+ out:
+ 	mutex_unlock(&kvm->arch.config_lock);
+ }
+@@ -2470,21 +2496,27 @@ static int __init init_hyp_mode(void)
+ 
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+ {
+-	struct kvm_vcpu *vcpu;
++	struct kvm_vcpu *vcpu = NULL;
++	struct kvm_mpidr_data *data;
+ 	unsigned long i;
+ 
+ 	mpidr &= MPIDR_HWID_BITMASK;
+ 
+-	if (kvm->arch.mpidr_data) {
+-		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
++	rcu_read_lock();
++	data = rcu_dereference(kvm->arch.mpidr_data);
+ 
+-		vcpu = kvm_get_vcpu(kvm,
+-				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
++	if (data) {
++		u16 idx = kvm_mpidr_index(data, mpidr);
++
++		vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
+ 		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
+ 			vcpu = NULL;
++	}
+ 
++	rcu_read_unlock();
++
++	if (vcpu)
+ 		return vcpu;
+-	}
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
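
The mpidr_data changes above are an instance of the standard RCU publish/retire lifecycle: publish with rcu_assign_pointer(), read under rcu_read_lock()/rcu_dereference(), retire by unpublishing, waiting a grace period, then freeing. A generic sketch of that pattern (struct blob, gp and demo_lock are illustrative names, not from the patch):

	struct blob { int payload; };
	static struct blob __rcu *gp;
	static DEFINE_MUTEX(demo_lock);

	static void retire(void)
	{
		struct blob *b;

		mutex_lock(&demo_lock);
		b = rcu_dereference_protected(gp, lockdep_is_held(&demo_lock));
		if (b) {
			rcu_assign_pointer(gp, NULL);	/* unpublish */
			synchronize_rcu();		/* wait for existing readers */
			kfree(b);			/* now safe to free */
		}
		mutex_unlock(&demo_lock);
	}

	static int reader(void)
	{
		struct blob *b;
		int out = -1;

		rcu_read_lock();
		b = rcu_dereference(gp);
		if (b)
			out = b->payload;	/* b is valid only until unlock */
		rcu_read_unlock();
		return out;
	}
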
+diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
+index 52b638059e40b..f948a0676daf8 100644
+--- a/arch/loongarch/include/asm/perf_event.h
++++ b/arch/loongarch/include/asm/perf_event.h
+@@ -13,8 +13,7 @@
+ 
+ #define perf_arch_fetch_caller_regs(regs, __ip) { \
+ 	(regs)->csr_era = (__ip); \
+-	(regs)->regs[3] = current_stack_pointer; \
+-	(regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
++	(regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
+ }
+ 
+ #endif /* __LOONGARCH_PERF_EVENT_H__ */
+diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
+index 4393bee64eaf8..85c4d29ef43e9 100644
+--- a/arch/microblaze/kernel/Makefile
++++ b/arch/microblaze/kernel/Makefile
+@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code and low level code
+ CFLAGS_REMOVE_timer.o = -pg
+ CFLAGS_REMOVE_intc.o = -pg
+-CFLAGS_REMOVE_early_printk.o = -pg
+ CFLAGS_REMOVE_ftrace.o = -pg
+ CFLAGS_REMOVE_process.o = -pg
+ endif
+diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+index 85dbda4a08a81..03da36dc6d9c9 100644
+--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
++++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
+@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
+ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
+ 
+ #define err_printk(x) \
+-	early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
++	pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
+ 
+ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
+ {
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index a41e542ba94dd..51172625fa3a5 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -570,7 +570,7 @@ struct hvcall_mpp_data {
+ 	unsigned long backing_mem;
+ };
+ 
+-int h_get_mpp(struct hvcall_mpp_data *);
++long h_get_mpp(struct hvcall_mpp_data *mpp_data);
+ 
+ struct hvcall_mpp_x_data {
+ 	unsigned long coalesced_bytes;
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index 005601243dda4..076ae60b4a55d 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -510,6 +510,7 @@
+ #define PPC_RAW_STB(r, base, i)		(0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LBZ(r, base, i)		(0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LDX(r, base, b)		(0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
++#define PPC_RAW_LHA(r, base, i)		(0xa8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LHZ(r, base, i)		(0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+ #define PPC_RAW_LHBRX(r, base, b)	(0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+ #define PPC_RAW_LWBRX(r, base, b)	(0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+@@ -532,6 +533,7 @@
+ #define PPC_RAW_MULW(d, a, b)		(0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_MULHWU(d, a, b)		(0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_MULI(d, a, i)		(0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
++#define PPC_RAW_DIVW(d, a, b)		(0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_DIVWU(d, a, b)		(0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_DIVDU(d, a, b)		(0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+ #define PPC_RAW_DIVDE(t, a, b)		(0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+@@ -550,6 +552,8 @@
+ #define PPC_RAW_XOR(d, a, b)		(0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+ #define PPC_RAW_XORI(d, a, i)		(0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+ #define PPC_RAW_XORIS(d, a, i)		(0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
++#define PPC_RAW_EXTSB(d, a)		(0x7c000774 | ___PPC_RA(d) | ___PPC_RS(a))
++#define PPC_RAW_EXTSH(d, a)		(0x7c000734 | ___PPC_RA(d) | ___PPC_RS(a))
+ #define PPC_RAW_EXTSW(d, a)		(0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
+ #define PPC_RAW_SLW(d, a, s)		(0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+ #define PPC_RAW_SLD(d, a, s)		(0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 8e86eb577eb8e..692a7c6f5fd91 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4857,7 +4857,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	 * entering a nested guest in which case the decrementer is now owned
+ 	 * by L2 and the L1 decrementer is provided in hdec_expires
+ 	 */
+-	if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
++	if (kvmppc_core_pending_dec(vcpu) &&
+ 			((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
+ 			 (trap == BOOK3S_INTERRUPT_SYSCALL &&
+ 			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
+diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
+index 8e6f5355f08b5..1091f7a83b255 100644
+--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
++++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
+@@ -71,8 +71,8 @@ gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
+ 	}
+ 
+ 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
+-		kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+-					 cfg->vcpu_run_output_cfg);
++		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
++					      cfg->vcpu_run_output_cfg);
+ 		if (rc < 0)
+ 			return rc;
+ 	}
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index 2f39c50ca729e..43b97032a91c0 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -450,10 +450,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 			}
+ 			break;
+ 		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
+-			EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
++			if (off)
++				EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
++			else
++				EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
+ 			break;
+ 		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
+-			EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
++			if (off)
++				EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
++			else
++				EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
+ 			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
+ 			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
+ 			break;
+@@ -467,10 +473,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 			if (imm == 1) {
+ 				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
+ 			} else if (is_power_of_2((u32)imm)) {
+-				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
++				if (off)
++					EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
++				else
++					EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
+ 			} else {
+ 				PPC_LI32(_R0, imm);
+-				EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
++				if (off)
++					EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
++				else
++					EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
+ 			}
+ 			break;
+ 		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+@@ -480,11 +492,19 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 			if (!is_power_of_2((u32)imm)) {
+ 				bpf_set_seen_register(ctx, tmp_reg);
+ 				PPC_LI32(tmp_reg, imm);
+-				EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
++				if (off)
++					EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
++				else
++					EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
+ 				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
+ 				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
+ 			} else if (imm == 1) {
+ 				EMIT(PPC_RAW_LI(dst_reg, 0));
++			} else if (off) {
++				EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
++				EMIT(PPC_RAW_ADDZE(_R0, _R0));
++				EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
++				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
+ 			} else {
+ 				imm = ilog2((u32)imm);
+ 				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
+@@ -497,11 +517,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 				imm = -imm;
+ 			if (!is_power_of_2(imm))
+ 				return -EOPNOTSUPP;
+-			if (imm == 1)
++			if (imm == 1) {
+ 				EMIT(PPC_RAW_LI(dst_reg, 0));
+-			else
++				EMIT(PPC_RAW_LI(dst_reg_h, 0));
++			} else if (off) {
++				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
++				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
++				EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
++				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
++				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
++				EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
++				EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
++			} else {
+ 				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
+-			EMIT(PPC_RAW_LI(dst_reg_h, 0));
++				EMIT(PPC_RAW_LI(dst_reg_h, 0));
++			}
+ 			break;
+ 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ 			if (!imm)
+@@ -727,15 +757,30 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 		 * MOV
+ 		 */
+ 		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+-			if (dst_reg == src_reg)
+-				break;
+-			EMIT(PPC_RAW_MR(dst_reg, src_reg));
+-			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
++			if (off == 8) {
++				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
++				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
++			} else if (off == 16) {
++				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
++				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
++			} else if (off == 32 && dst_reg == src_reg) {
++				EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
++			} else if (off == 32) {
++				EMIT(PPC_RAW_MR(dst_reg, src_reg));
++				EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
++			} else if (dst_reg != src_reg) {
++				EMIT(PPC_RAW_MR(dst_reg, src_reg));
++				EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
++			}
+ 			break;
+ 		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
+ 			/* special mov32 for zext */
+ 			if (imm == 1)
+ 				EMIT(PPC_RAW_LI(dst_reg_h, 0));
++			else if (off == 8)
++				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
++			else if (off == 16)
++				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
+ 			else if (dst_reg != src_reg)
+ 				EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ 			break;
+@@ -751,6 +796,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 		 * BPF_FROM_BE/LE
+ 		 */
+ 		case BPF_ALU | BPF_END | BPF_FROM_LE:
++		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
+ 			switch (imm) {
+ 			case 16:
+ 				/* Copy 16 bits to upper part */
+@@ -785,6 +831,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
+ 				break;
+ 			}
++			if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
++				EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ 			break;
+ 		case BPF_ALU | BPF_END | BPF_FROM_BE:
+ 			switch (imm) {
+@@ -918,11 +966,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 		 * BPF_LDX
+ 		 */
+ 		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
++		case BPF_LDX | BPF_MEMSX | BPF_B:
+ 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
++		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+ 		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
++		case BPF_LDX | BPF_MEMSX | BPF_H:
+ 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
++		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+ 		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
++		case BPF_LDX | BPF_MEMSX | BPF_W:
+ 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
++		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ 		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
+ 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ 			/*
+@@ -931,7 +985,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
+ 			 * set dst_reg=0 and move on.
+ 			 */
+-			if (BPF_MODE(code) == BPF_PROBE_MEM) {
++			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
+ 				PPC_LI32(_R0, TASK_SIZE - off);
+ 				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
+ 				PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
+@@ -953,30 +1007,48 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 				 * as there are two load instructions for dst_reg_h & dst_reg
+ 				 * respectively.
+ 				 */
+-				if (size == BPF_DW)
++				if (size == BPF_DW ||
++				    (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
+ 					PPC_JMP((ctx->idx + 3) * 4);
+ 				else
+ 					PPC_JMP((ctx->idx + 2) * 4);
+ 			}
+ 
+-			switch (size) {
+-			case BPF_B:
+-				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+-				break;
+-			case BPF_H:
+-				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+-				break;
+-			case BPF_W:
+-				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+-				break;
+-			case BPF_DW:
+-				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+-				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+-				break;
+-			}
++			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
++				switch (size) {
++				case BPF_B:
++					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
++					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
++					break;
++				case BPF_H:
++					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
++					break;
++				case BPF_W:
++					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
++					break;
++				}
++				if (!fp->aux->verifier_zext)
++					EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ 
+-			if (size != BPF_DW && !fp->aux->verifier_zext)
+-				EMIT(PPC_RAW_LI(dst_reg_h, 0));
++			} else {
++				switch (size) {
++				case BPF_B:
++					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
++					break;
++				case BPF_H:
++					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
++					break;
++				case BPF_W:
++					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
++					break;
++				case BPF_DW:
++					EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
++					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
++					break;
++				}
++				if (size != BPF_DW && !fp->aux->verifier_zext)
++					EMIT(PPC_RAW_LI(dst_reg_h, 0));
++			}
+ 
+ 			if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ 				int insn_idx = ctx->idx - 1;
+@@ -1068,6 +1140,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 		case BPF_JMP | BPF_JA:
+ 			PPC_JMP(addrs[i + 1 + off]);
+ 			break;
++		case BPF_JMP32 | BPF_JA:
++			PPC_JMP(addrs[i + 1 + imm]);
++			break;
+ 
+ 		case BPF_JMP | BPF_JGT | BPF_K:
+ 		case BPF_JMP | BPF_JGT | BPF_X:
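
The new off-keyed cases above implement BPF ISA v4 semantics in the 32-bit JIT: sign-extending moves and loads, signed division and modulo selected by a non-zero off, byte swaps on ALU64, and JMP32|JA taking its target from imm. Distilled into plain C as a sketch (s8/s16/s32/s64/u64 are the kernel's fixed-width types):

	/* BPF_MOV with off = 8/16/32: move with sign extension */
	static inline s64 movsx(u64 src, int off)
	{
		switch (off) {
		case 8:	 return (s8)src;
		case 16: return (s16)src;
		case 32: return (s32)src;
		default: return src;	/* off == 0: plain move */
		}
	}

	/* BPF_DIV/BPF_MOD with off = 1: signed instead of unsigned */
	static inline s32 sdiv32(s32 a, s32 b) { return a / b; }
	static inline s32 smod32(s32 a, s32 b) { return a % b; }
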
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 4e9916bb03d71..c1d8bee8f7018 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -1886,10 +1886,10 @@ notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+  * h_get_mpp
+  * H_GET_MPP hcall returns info in 7 parms
+  */
+-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
++long h_get_mpp(struct hvcall_mpp_data *mpp_data)
+ {
+-	int rc;
+-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++	long rc;
+ 
+ 	rc = plpar_hcall9(H_GET_MPP, retbuf);
+ 
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index f73c4d1c26af9..0ed56e56271fe 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -113,8 +113,8 @@ struct hvcall_ppp_data {
+  */
+ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
+ {
+-	unsigned long rc;
+-	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
++	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
++	long rc;
+ 
+ 	rc = plpar_hcall9(H_GET_PPP, retbuf);
+ 
+@@ -193,7 +193,7 @@ static void parse_ppp_data(struct seq_file *m)
+ 	struct hvcall_ppp_data ppp_data;
+ 	struct device_node *root;
+ 	const __be32 *perf_level;
+-	int rc;
++	long rc;
+ 
+ 	rc = h_get_ppp(&ppp_data);
+ 	if (rc)
+@@ -361,8 +361,8 @@ static int read_dt_lpar_name(struct seq_file *m)
+ 
+ static void read_lpar_name(struct seq_file *m)
+ {
+-	if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
+-		pr_err_once("Error can't get the LPAR name");
++	if (read_rtas_lpar_name(m))
++		read_dt_lpar_name(m);
+ }
+ 
+ #define SPLPAR_MAXLENGTH 1026*(sizeof(char))
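
Widening rc from int to long in the two files above matters because plpar_hcall9() returns a long status; narrowing that status can corrupt it before the error check. A contrived illustration of the hazard (the constant is invented, not a real hcall code):

	static void demo_narrowing(void)
	{
		long rc = 0x100000000L;	/* hypothetical wide status */
		int narrowed = rc;	/* truncates to 0: failure silently lost */

		if (!narrowed)
			pr_info("looks like success, but rc was %ld\n", rc);
	}
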
+diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+index 45b58b6f3df88..2b3e952513e44 100644
+--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
++++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+@@ -279,24 +279,6 @@ &i2c6 {
+ 	status = "okay";
+ };
+ 
+-&i2srx {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&i2srx_pins>;
+-	status = "okay";
+-};
+-
+-&i2stx0 {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&mclk_ext_pins>;
+-	status = "okay";
+-};
+-
+-&i2stx1 {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&i2stx1_pins>;
+-	status = "okay";
+-};
+-
+ &mmc0 {
+ 	max-frequency = <100000000>;
+ 	assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
+@@ -447,46 +429,6 @@ GPOEN_SYS_I2C6_DATA,
+ 		};
+ 	};
+ 
+-	i2srx_pins: i2srx-0 {
+-		clk-sd-pins {
+-			pinmux = <GPIOMUX(38, GPOUT_LOW,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_I2SRX_BCLK)>,
+-				 <GPIOMUX(63, GPOUT_LOW,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_I2SRX_LRCK)>,
+-				 <GPIOMUX(38, GPOUT_LOW,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_I2STX1_BCLK)>,
+-				 <GPIOMUX(63, GPOUT_LOW,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_I2STX1_LRCK)>,
+-				 <GPIOMUX(61, GPOUT_LOW,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_I2SRX_SDIN0)>;
+-			input-enable;
+-		};
+-	};
+-
+-	i2stx1_pins: i2stx1-0 {
+-		sd-pins {
+-			pinmux = <GPIOMUX(44, GPOUT_SYS_I2STX1_SDO0,
+-					      GPOEN_ENABLE,
+-					      GPI_NONE)>;
+-			bias-disable;
+-			input-disable;
+-		};
+-	};
+-
+-	mclk_ext_pins: mclk-ext-0 {
+-		mclk-ext-pins {
+-			pinmux = <GPIOMUX(4, GPOUT_LOW,
+-					     GPOEN_DISABLE,
+-					     GPI_SYS_MCLK_EXT)>;
+-			input-enable;
+-		};
+-	};
+-
+ 	mmc0_pins: mmc0-0 {
+ 		 rst-pins {
+ 			pinmux = <GPIOMUX(62, GPOUT_SYS_SDIO0_RST,
+@@ -622,40 +564,6 @@ GPOEN_ENABLE,
+ 		};
+ 	};
+ 
+-	tdm_pins: tdm-0 {
+-		tx-pins {
+-			pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
+-					      GPOEN_ENABLE,
+-					      GPI_NONE)>;
+-			bias-pull-up;
+-			drive-strength = <2>;
+-			input-disable;
+-			input-schmitt-disable;
+-			slew-rate = <0>;
+-		};
+-
+-		rx-pins {
+-			pinmux = <GPIOMUX(61, GPOUT_HIGH,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_TDM_RXD)>;
+-			input-enable;
+-		};
+-
+-		sync-pins {
+-			pinmux = <GPIOMUX(63, GPOUT_HIGH,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_TDM_SYNC)>;
+-			input-enable;
+-		};
+-
+-		pcmclk-pins {
+-			pinmux = <GPIOMUX(38, GPOUT_HIGH,
+-					      GPOEN_DISABLE,
+-					      GPI_SYS_TDM_CLK)>;
+-			input-enable;
+-		};
+-	};
+-
+ 	uart0_pins: uart0-0 {
+ 		tx-pins {
+ 			pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
+@@ -681,12 +589,6 @@ GPOEN_DISABLE,
+ 	};
+ };
+ 
+-&tdm {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&tdm_pins>;
+-	status = "okay";
+-};
+-
+ &uart0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&uart0_pins>;
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 6e68f8dff76bc..0fab508a65b3c 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -370,6 +370,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
+ static inline void sbi_init(void) {}
+ #endif /* CONFIG_RISCV_SBI */
+ 
++unsigned long riscv_get_mvendorid(void);
++unsigned long riscv_get_marchid(void);
+ unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+ unsigned long riscv_cached_marchid(unsigned int cpu_id);
+ unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
+index d11d6320fb0d2..c1f3655238fd2 100644
+--- a/arch/riscv/kernel/cpu.c
++++ b/arch/riscv/kernel/cpu.c
+@@ -139,6 +139,34 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
+ 	return -1;
+ }
+ 
++unsigned long __init riscv_get_marchid(void)
++{
++	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
++
++#if IS_ENABLED(CONFIG_RISCV_SBI)
++	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
++#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
++	ci->marchid = csr_read(CSR_MARCHID);
++#else
++	ci->marchid = 0;
++#endif
++	return ci->marchid;
++}
++
++unsigned long __init riscv_get_mvendorid(void)
++{
++	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
++
++#if IS_ENABLED(CONFIG_RISCV_SBI)
++	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
++#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
++	ci->mvendorid = csr_read(CSR_MVENDORID);
++#else
++	ci->mvendorid = 0;
++#endif
++	return ci->mvendorid;
++}
++
+ DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+ 
+ unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+@@ -170,12 +198,16 @@ static int riscv_cpuinfo_starting(unsigned int cpu)
+ 	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+ 
+ #if IS_ENABLED(CONFIG_RISCV_SBI)
+-	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+-	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
++	if (!ci->mvendorid)
++		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
++	if (!ci->marchid)
++		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+ 	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
+ #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+-	ci->mvendorid = csr_read(CSR_MVENDORID);
+-	ci->marchid = csr_read(CSR_MARCHID);
++	if (!ci->mvendorid)
++		ci->mvendorid = csr_read(CSR_MVENDORID);
++	if (!ci->marchid)
++		ci->marchid = csr_read(CSR_MARCHID);
+ 	ci->mimpid = csr_read(CSR_MIMPID);
+ #else
+ 	ci->mvendorid = 0;
+diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
+index 1cc7df740eddc..e6fbaaf549562 100644
+--- a/arch/riscv/kernel/cpu_ops_sbi.c
++++ b/arch/riscv/kernel/cpu_ops_sbi.c
+@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+ 	/* Make sure tidle is updated */
+ 	smp_mb();
+ 	bdata->task_ptr = tidle;
+-	bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
++	bdata->stack_ptr = task_pt_regs(tidle);
+ 	/* Make sure boot data is updated */
+ 	smp_mb();
+ 	hsm_data = __pa(bdata);
+diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
+index 613872b0a21ac..24869eb889085 100644
+--- a/arch/riscv/kernel/cpu_ops_spinwait.c
++++ b/arch/riscv/kernel/cpu_ops_spinwait.c
+@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
+ 
+ 	/* Make sure tidle is updated */
+ 	smp_mb();
+-	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
+-		   task_stack_page(tidle) + THREAD_SIZE);
++	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
+ 	WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
+ }
+ 
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index 3ed2359eae353..5ef48cb20ee11 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -490,6 +490,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ 	struct acpi_table_header *rhct;
+ 	acpi_status status;
+ 	unsigned int cpu;
++	u64 boot_vendorid;
++	u64 boot_archid;
+ 
+ 	if (!acpi_disabled) {
+ 		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+@@ -497,6 +499,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ 			return;
+ 	}
+ 
++	boot_vendorid = riscv_get_mvendorid();
++	boot_archid = riscv_get_marchid();
++
+ 	for_each_possible_cpu(cpu) {
+ 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
+ 		unsigned long this_hwcap = 0;
+@@ -544,8 +549,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
+ 		 * CPU cores with the ratified spec will contain non-zero
+ 		 * marchid.
+ 		 */
+-		if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
+-		    riscv_cached_marchid(cpu) == 0x0) {
++		if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
+ 			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
+ 			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
+ 		}
+@@ -599,7 +603,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
+ 
+ 			if (ext->subset_ext_size) {
+ 				for (int j = 0; j < ext->subset_ext_size; j++) {
+-					if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
++					if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
+ 						set_bit(ext->subset_ext_ids[j], isainfo->isa);
+ 				}
+ 			}
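
The final cpufeature.c hunk is a one-character fix: the inner loop's validity check indexed subset_ext_ids with the outer variable i while set_bit() used j, so the wrong extension id was being checked. The bug class, distilled with generic names:

	struct ext { const int *sub; int nsub; };

	static void enable_subsets(const struct ext *exts, int n_exts,
				   void (*enable)(int))
	{
		for (int i = 0; i < n_exts; i++)
			for (int j = 0; j < exts[i].nsub; j++)
				enable(exts[i].sub[j]);	/* sub[i] here was the bug */
	}
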
+diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
+index d41090fc32035..4b3c50da48ba1 100644
+--- a/arch/riscv/kernel/smpboot.c
++++ b/arch/riscv/kernel/smpboot.c
+@@ -26,7 +26,7 @@
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/mm.h>
+ 
+-#include <asm/cpufeature.h>
++#include <asm/cacheflush.h>
+ #include <asm/cpu_ops.h>
+ #include <asm/irq.h>
+ #include <asm/mmu_context.h>
+@@ -234,9 +234,10 @@ asmlinkage __visible void smp_callin(void)
+ 	riscv_user_isa_enable();
+ 
+ 	/*
+-	 * Remote TLB flushes are ignored while the CPU is offline, so emit
+-	 * a local TLB flush right now just in case.
++	 * Remote cache and TLB flushes are ignored while the CPU is offline,
++	 * so flush them both right now just in case.
+ 	 */
++	local_flush_icache_all();
+ 	local_flush_tlb_all();
+ 	complete(&cpu_running);
+ 	/*
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 64a9c093aef93..528ec7cc9a622 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -18,6 +18,16 @@
+ 
+ extern asmlinkage void ret_from_exception(void);
+ 
++static inline int fp_is_valid(unsigned long fp, unsigned long sp)
++{
++	unsigned long low, high;
++
++	low = sp + sizeof(struct stackframe);
++	high = ALIGN(sp, THREAD_SIZE);
++
++	return !(fp < low || fp > high || fp & 0x07);
++}
++
+ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			     bool (*fn)(void *, unsigned long), void *arg)
+ {
+@@ -41,21 +51,19 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 	}
+ 
+ 	for (;;) {
+-		unsigned long low, high;
+ 		struct stackframe *frame;
+ 
+ 		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
+ 			break;
+ 
+-		/* Validate frame pointer */
+-		low = sp + sizeof(struct stackframe);
+-		high = ALIGN(sp, THREAD_SIZE);
+-		if (unlikely(fp < low || fp > high || fp & 0x7))
++		if (unlikely(!fp_is_valid(fp, sp)))
+ 			break;
++
+ 		/* Unwind stack frame */
+ 		frame = (struct stackframe *)fp - 1;
+ 		sp = fp;
+-		if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
++		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
++			/* We hit a function where ra is not saved on the stack */
+ 			fp = frame->ra;
+ 			pc = regs->ra;
+ 		} else {
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 6cf89314209a6..9e2c9027e986a 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -32,7 +32,6 @@ unsigned long __bootdata_preserved(max_mappable);
+ unsigned long __bootdata(ident_map_size);
+ 
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+ 
+ struct machine_info machine;
+diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
+index 621f23d5ae30a..77e479d44f1e3 100644
+--- a/arch/s390/include/asm/ftrace.h
++++ b/arch/s390/include/asm/ftrace.h
+@@ -8,12 +8,8 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
+-#ifdef CONFIG_CC_IS_CLANG
+-/* https://llvm.org/pr41424 */
+-#define ftrace_return_address(n) 0UL
+-#else
+-#define ftrace_return_address(n) __builtin_return_address(n)
+-#endif
++unsigned long return_address(unsigned int n);
++#define ftrace_return_address(n) return_address(n)
+ 
+ void ftrace_caller(void);
+ 
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index db9982f0e8cd0..bbbdc5abe2b2c 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
+ 
+ extern const struct seq_operations cpuinfo_op;
+ extern void execve_tail(void);
++unsigned long vdso_text_size(void);
+ unsigned long vdso_size(void);
+ 
+ /*
+diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
+index 433fde85b14ea..85b6738b826af 100644
+--- a/arch/s390/include/asm/stacktrace.h
++++ b/arch/s390/include/asm/stacktrace.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_S390_STACKTRACE_H
+ #define _ASM_S390_STACKTRACE_H
+ 
++#include <linux/stacktrace.h>
+ #include <linux/uaccess.h>
+ #include <linux/ptrace.h>
+ 
+@@ -12,6 +13,17 @@ struct stack_frame_user {
+ 	unsigned long empty2[4];
+ };
+ 
++struct stack_frame_vdso_wrapper {
++	struct stack_frame_user sf;
++	unsigned long return_address;
++};
++
++struct perf_callchain_entry_ctx;
++
++void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
++				 struct perf_callchain_entry_ctx *entry,
++				 const struct pt_regs *regs, bool perf);
++
+ enum stack_type {
+ 	STACK_TYPE_UNKNOWN,
+ 	STACK_TYPE_TASK,
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index fa029d0dc28ff..db2d9ba5a86d2 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -11,6 +11,8 @@ CFLAGS_REMOVE_ftrace.o		= $(CC_FLAGS_FTRACE)
+ # Do not trace early setup code
+ CFLAGS_REMOVE_early.o		= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rethook.o		= $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_stacktrace.o	= $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_unwind_bc.o	= $(CC_FLAGS_FTRACE)
+ 
+ endif
+ 
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index fa5f6885c74aa..2f65bca2f3f1c 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -66,6 +66,11 @@ int main(void)
+ 	OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
+ 	DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
+ 	BLANK();
++	OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
++	DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
++	OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
++	DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
++	BLANK();
+ 	/* idle data offsets */
+ 	OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
+ 	OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 1486350a41775..469e8d3fbfbf3 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -962,8 +962,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
+ 		scpdata_len += padding;
+ 	}
+ 
+-	reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+-	reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
++	reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
++	reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
+ 	reipl_block_nvme->nvme.scp_data_len = scpdata_len;
+ 
+ 	return count;
+@@ -1858,9 +1858,9 @@ static int __init dump_nvme_init(void)
+ 	}
+ 	dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
+ 	dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
+-	dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
+-	dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
+-	dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
++	dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
++	dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
++	dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
+ 	dump_capabilities |= DUMP_TYPE_NVME;
+ 	return 0;
+ }
+diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
+index dfa77da2fd2ec..5fff629b1a898 100644
+--- a/arch/s390/kernel/perf_event.c
++++ b/arch/s390/kernel/perf_event.c
+@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ 			 struct pt_regs *regs)
+ {
+-	struct stack_frame_user __user *sf;
+-	unsigned long ip, sp;
+-	bool first = true;
+-
+-	if (is_compat_task())
+-		return;
+-	perf_callchain_store(entry, instruction_pointer(regs));
+-	sf = (void __user *)user_stack_pointer(regs);
+-	pagefault_disable();
+-	while (entry->nr < entry->max_stack) {
+-		if (__get_user(sp, &sf->back_chain))
+-			break;
+-		if (__get_user(ip, &sf->gprs[8]))
+-			break;
+-		if (ip & 0x1) {
+-			/*
+-			 * If the instruction address is invalid, and this
+-			 * is the first stack frame, assume r14 has not
+-			 * been written to the stack yet. Otherwise exit.
+-			 */
+-			if (first && !(regs->gprs[14] & 0x1))
+-				ip = regs->gprs[14];
+-			else
+-				break;
+-		}
+-		perf_callchain_store(entry, ip);
+-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
+-		if (!sp || sp & 0x7)
+-			break;
+-		sf = (void __user *)sp;
+-		first = false;
+-	}
+-	pagefault_enable();
++	arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
+ }
+ 
+ /* Perf definitions for PMU event attributes in sysfs */
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 24ed33f044ec3..7ecd27c62d564 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
+ EXPORT_SYMBOL(zlib_dfltcc_support);
+ u64 __bootdata_preserved(stfle_fac_list[16]);
+ EXPORT_SYMBOL(stfle_fac_list);
+-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
++u64 alt_stfle_fac_list[16];
+ struct oldmem_data __bootdata_preserved(oldmem_data);
+ 
+ unsigned long VMALLOC_START;
+diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
+index 94f440e383031..640363b2a1059 100644
+--- a/arch/s390/kernel/stacktrace.c
++++ b/arch/s390/kernel/stacktrace.c
+@@ -5,6 +5,7 @@
+  *  Copyright IBM Corp. 2006
+  */
+ 
++#include <linux/perf_event.h>
+ #include <linux/stacktrace.h>
+ #include <linux/uaccess.h>
+ #include <linux/compat.h>
+@@ -62,42 +63,121 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ 	return 0;
+ }
+ 
+-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+-			  const struct pt_regs *regs)
++static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
++			    struct perf_callchain_entry_ctx *entry, bool perf,
++			    unsigned long ip)
++{
++#ifdef CONFIG_PERF_EVENTS
++	if (perf) {
++		if (perf_callchain_store(entry, ip))
++			return false;
++		return true;
++	}
++#endif
++	return consume_entry(cookie, ip);
++}
++
++static inline bool ip_invalid(unsigned long ip)
++{
++	/*
++	 * Perform some basic checks to determine whether an instruction
++	 * address taken from an unreliable source is invalid.
++	 */
++	if (ip & 1)
++		return true;
++	if (ip < mmap_min_addr)
++		return true;
++	if (ip >= current->mm->context.asce_limit)
++		return true;
++	return false;
++}
++
++static inline bool ip_within_vdso(unsigned long ip)
+ {
++	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
++}
++
++void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
++				 struct perf_callchain_entry_ctx *entry,
++				 const struct pt_regs *regs, bool perf)
++{
++	struct stack_frame_vdso_wrapper __user *sf_vdso;
+ 	struct stack_frame_user __user *sf;
+ 	unsigned long ip, sp;
+ 	bool first = true;
+ 
+ 	if (is_compat_task())
+ 		return;
+-	if (!consume_entry(cookie, instruction_pointer(regs)))
++	if (!current->mm)
++		return;
++	ip = instruction_pointer(regs);
++	if (!store_ip(consume_entry, cookie, entry, perf, ip))
+ 		return;
+ 	sf = (void __user *)user_stack_pointer(regs);
+ 	pagefault_disable();
+ 	while (1) {
+ 		if (__get_user(sp, &sf->back_chain))
+ 			break;
+-		if (__get_user(ip, &sf->gprs[8]))
++		/*
++		 * VDSO entry code has a non-standard stack frame layout.
++		 * See VDSO user wrapper code for details.
++		 */
++		if (!sp && ip_within_vdso(ip)) {
++			sf_vdso = (void __user *)sf;
++			if (__get_user(ip, &sf_vdso->return_address))
++				break;
++			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
++			sf = (void __user *)sp;
++			if (__get_user(sp, &sf->back_chain))
++				break;
++		} else {
++			sf = (void __user *)sp;
++			if (__get_user(ip, &sf->gprs[8]))
++				break;
++		}
++		/* Sanity check: ABI requires SP to be 8 byte aligned. */
++		if (sp & 0x7)
+ 			break;
+-		if (ip & 0x1) {
++		if (ip_invalid(ip)) {
+ 			/*
+ 			 * If the instruction address is invalid, and this
+ 			 * is the first stack frame, assume r14 has not
+ 			 * been written to the stack yet. Otherwise exit.
+ 			 */
+-			if (first && !(regs->gprs[14] & 0x1))
+-				ip = regs->gprs[14];
+-			else
++			if (!first)
++				break;
++			ip = regs->gprs[14];
++			if (ip_invalid(ip))
+ 				break;
+ 		}
+-		if (!consume_entry(cookie, ip))
+-			break;
+-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
+-		if (!sp || sp & 0x7)
+-			break;
+-		sf = (void __user *)sp;
++		if (!store_ip(consume_entry, cookie, entry, perf, ip))
++			return;
+ 		first = false;
+ 	}
+ 	pagefault_enable();
+ }
++
++void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
++			  const struct pt_regs *regs)
++{
++	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
++}
++
++unsigned long return_address(unsigned int n)
++{
++	struct unwind_state state;
++	unsigned long addr;
++
++	/* Increment to skip current stack entry */
++	n++;
++
++	unwind_for_each_frame(&state, NULL, NULL, 0) {
++		addr = unwind_get_return_address(&state);
++		if (!addr)
++			break;
++		if (!n--)
++			return addr;
++	}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(return_address);
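The hunk above merges two nearly identical user-stack walkers (the perf one removed from perf_event.c earlier in this patch, and the generic one here) into a single frame walk whose per-entry sink is chosen by store_ip(). A minimal, self-contained C sketch of that dispatch pattern follows; every name in it is an illustrative stand-in, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*consume_fn)(void *cookie, unsigned long ip);

struct callchain { unsigned long ips[8]; unsigned int nr, max; };

/* perf-style sink: append until the entry buffer is full */
static bool callchain_store(struct callchain *c, unsigned long ip)
{
	if (c->nr >= c->max)
		return false;
	c->ips[c->nr++] = ip;
	return true;
}

/* One helper serves both consumers, so the frame walk exists once. */
static bool store_ip(consume_fn consume, void *cookie,
		     struct callchain *entry, bool perf, unsigned long ip)
{
	if (perf)
		return callchain_store(entry, ip);
	return consume(cookie, ip);
}

static bool print_entry(void *cookie, unsigned long ip)
{
	(void)cookie;
	printf("ip=%#lx\n", ip);
	return true;
}

int main(void)
{
	struct callchain cc = { .nr = 0, .max = 8 };

	store_ip(print_entry, NULL, NULL, false, 0x1000);	/* generic path */
	store_ip(NULL, NULL, &cc, true, 0x2000);		/* perf path */
	printf("callchain depth %u\n", cc.nr);
	return 0;
}

Returning false from either sink stops the walk, which is why the kernel loop above checks store_ip()'s return value on every frame.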
+diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
+index a45b3a4c91db0..2f967ac2b8e3e 100644
+--- a/arch/s390/kernel/vdso.c
++++ b/arch/s390/kernel/vdso.c
+@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
+ 	return addr;
+ }
+ 
+-unsigned long vdso_size(void)
++unsigned long vdso_text_size(void)
+ {
+-	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
++	unsigned long size;
+ 
+ 	if (is_compat_task())
+-		size += vdso32_end - vdso32_start;
++		size = vdso32_end - vdso32_start;
+ 	else
+-		size += vdso64_end - vdso64_start;
++		size = vdso64_end - vdso64_start;
+ 	return PAGE_ALIGN(size);
+ }
+ 
++unsigned long vdso_size(void)
++{
++	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
++}
++
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ 	unsigned long addr = VDSO_BASE;
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index b12a274cbb473..4800d80decee6 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -19,8 +19,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_32 += -m31 -s
+ 
+ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
++KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
+-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
++KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
++KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
+ 
+ LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
+ 	--hash-style=both --build-id=sha1 -melf_s390 -T
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index ef98327260972..2f2e4e997030c 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -24,9 +24,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_64 += -m64
+ 
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
++KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
+ KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
+ KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
+-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
++KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
++KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
+ ldflags-y := -shared -soname=linux-vdso64.so.1 \
+ 	     --hash-style=both --build-id=sha1 -T
+ 
+diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+index 85247ef5a41b8..e26e68675c08d 100644
+--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
++++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+@@ -6,8 +6,6 @@
+ #include <asm/dwarf.h>
+ #include <asm/ptrace.h>
+ 
+-#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
+-
+ /*
+  * Older glibc version called vdso without allocating a stackframe. This wrapper
+  * is just used to allocate a stackframe. See
+@@ -20,16 +18,17 @@
+ 	__ALIGN
+ __kernel_\func:
+ 	CFI_STARTPROC
+-	aghi	%r15,-WRAPPER_FRAME_SIZE
+-	CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
+-	CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
+-	stg	%r14,STACK_FRAME_OVERHEAD(%r15)
+-	CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
++	aghi	%r15,-STACK_FRAME_VDSO_OVERHEAD
++	CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
++	CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
++	stg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
++	CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
++	xc	__SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
+ 	brasl	%r14,__s390_vdso_\func
+-	lg	%r14,STACK_FRAME_OVERHEAD(%r15)
++	lg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
+ 	CFI_RESTORE 14
+-	aghi	%r15,WRAPPER_FRAME_SIZE
+-	CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
++	aghi	%r15,STACK_FRAME_VDSO_OVERHEAD
++	CFI_DEF_CFA_OFFSET STACK_FRAME_USER_OVERHEAD
+ 	CFI_RESTORE 15
+ 	br	%r14
+ 	CFI_ENDPROC
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index ffc5cb92fa367..d82bc3fdb86e7 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -676,24 +676,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
+ 		goto cleanup;
+ 	}
+ 
+-	*winch = ((struct winch) { .list  	= LIST_HEAD_INIT(winch->list),
+-				   .fd  	= fd,
++	*winch = ((struct winch) { .fd  	= fd,
+ 				   .tty_fd 	= tty_fd,
+ 				   .pid  	= pid,
+ 				   .port 	= port,
+ 				   .stack	= stack });
+ 
++	spin_lock(&winch_handler_lock);
++	list_add(&winch->list, &winch_handlers);
++	spin_unlock(&winch_handler_lock);
++
+ 	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
+ 			   IRQF_SHARED, "winch", winch) < 0) {
+ 		printk(KERN_ERR "register_winch_irq - failed to register "
+ 		       "IRQ\n");
++		spin_lock(&winch_handler_lock);
++		list_del(&winch->list);
++		spin_unlock(&winch_handler_lock);
+ 		goto out_free;
+ 	}
+ 
+-	spin_lock(&winch_handler_lock);
+-	list_add(&winch->list, &winch_handlers);
+-	spin_unlock(&winch_handler_lock);
+-
+ 	return;
+ 
+  out_free:
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 63fc062add708..ef805eaa9e013 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1092,7 +1092,7 @@ static int __init ubd_init(void)
+ 
+ 	if (irq_req_buffer == NULL) {
+ 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 	io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
+ 				      sizeof(struct io_thread_req *),
+@@ -1103,7 +1103,7 @@ static int __init ubd_init(void)
+ 
+ 	if (io_req_buffer == NULL) {
+ 		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 	platform_driver_register(&ubd_driver);
+ 	mutex_lock(&ubd_lock);
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index dc2feae789cbb..63e5f108a6b95 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -141,7 +141,7 @@ static bool get_bpf_flash(struct arglist *def)
+ 
+ 	if (allow != NULL) {
+ 		if (kstrtoul(allow, 10, &result) == 0)
+-			return (allow > 0);
++			return result > 0;
+ 	}
+ 	return false;
+ }
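The get_bpf_flash() fix above is a classic pointer-versus-value slip: the old code tested the string pointer `allow` instead of the parsed number `result`, so any non-NULL string counted as true. A self-contained userspace analogue of the corrected logic (strtoul standing in for kstrtoul):

#include <stdio.h>
#include <stdlib.h>

static int get_flag(const char *allow)
{
	char *end;
	unsigned long result;

	if (allow == NULL)
		return 0;
	result = strtoul(allow, &end, 10);
	if (end == allow)		/* nothing parsed */
		return 0;
	return result > 0;		/* NOT: allow > 0 (pointer compare) */
}

int main(void)
{
	printf("%d %d\n", get_flag("0"), get_flag("1"));	/* prints: 0 1 */
	return 0;
}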
+diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
+index 0d6547f4ec85c..f97bb1f7b8514 100644
+--- a/arch/um/include/asm/kasan.h
++++ b/arch/um/include/asm/kasan.h
+@@ -24,7 +24,6 @@
+ 
+ #ifdef CONFIG_KASAN
+ void kasan_init(void);
+-void kasan_map_memory(void *start, unsigned long len);
+ extern int kasan_um_is_ready;
+ 
+ #ifdef CONFIG_STATIC_LINK
+diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
+index a7555e43ed14a..f2923c767bb9a 100644
+--- a/arch/um/include/asm/mmu.h
++++ b/arch/um/include/asm/mmu.h
+@@ -14,8 +14,6 @@ typedef struct mm_context {
+ 	struct uml_arch_mm_context arch;
+ } mm_context_t;
+ 
+-extern void __switch_mm(struct mm_id * mm_idp);
+-
+ /* Avoid tangled inclusion with asm/ldt.h */
+ extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+ extern void free_ldt(struct mm_context *mm);
+diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
+index 6c3779541845b..5a7c05275aa74 100644
+--- a/arch/um/include/asm/processor-generic.h
++++ b/arch/um/include/asm/processor-generic.h
+@@ -94,7 +94,6 @@ extern struct cpuinfo_um boot_cpu_data;
+ #define current_cpu_data boot_cpu_data
+ #define cache_line_size()	(boot_cpu_data.cache_alignment)
+ 
+-extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
+ #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
+ extern unsigned long __get_wchan(struct task_struct *p);
+ 
+diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
+index 789b83013f355..af8cdfc75897e 100644
+--- a/arch/um/include/shared/kern_util.h
++++ b/arch/um/include/shared/kern_util.h
+@@ -66,4 +66,6 @@ extern void fatal_sigsegv(void) __attribute__ ((noreturn));
+ 
+ void um_idle_sleep(void);
+ 
++void kasan_map_memory(void *start, size_t len);
++
+ #endif
+diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
+index e82e203f5f419..92dbf727e3842 100644
+--- a/arch/um/include/shared/skas/mm_id.h
++++ b/arch/um/include/shared/skas/mm_id.h
+@@ -15,4 +15,6 @@ struct mm_id {
+ 	int kill;
+ };
+ 
++void __switch_mm(struct mm_id *mm_idp);
++
+ #endif
+diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
+index 8530b2e086049..c6c9495b14321 100644
+--- a/arch/um/os-Linux/mem.c
++++ b/arch/um/os-Linux/mem.c
+@@ -15,6 +15,7 @@
+ #include <sys/vfs.h>
+ #include <linux/magic.h>
+ #include <init.h>
++#include <kern_util.h>
+ #include <os.h>
+ 
+ /*
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index c5d614d28a759..74777a97e394a 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -248,6 +248,7 @@ config UNWINDER_ORC
+ 
+ config UNWINDER_FRAME_POINTER
+ 	bool "Frame pointer unwinder"
++	select ARCH_WANT_FRAME_POINTERS
+ 	select FRAME_POINTER
+ 	help
+ 	  This option enables the frame pointer unwinder for unwinding kernel
+@@ -271,7 +272,3 @@ config UNWINDER_GUESS
+ 	  overhead.
+ 
+ endchoice
+-
+-config FRAME_POINTER
+-	depends on !UNWINDER_ORC && !UNWINDER_GUESS
+-	bool
+diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
+index b77bbb67e77b0..ce5111cec36e2 100644
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -59,36 +59,30 @@
+ #define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"
+ #define __my_cpu_offset		this_cpu_read(this_cpu_off)
+ 
+-#ifdef CONFIG_USE_X86_SEG_SUPPORT
+-/*
+- * Efficient implementation for cases in which the compiler supports
+- * named address spaces.  Allows the compiler to perform additional
+- * optimizations that can save more instructions.
+- */
+-#define arch_raw_cpu_ptr(ptr)					\
+-({								\
+-	unsigned long tcp_ptr__;				\
+-	tcp_ptr__ = __raw_cpu_read(, this_cpu_off);		\
+-								\
+-	tcp_ptr__ += (__force unsigned long)(ptr);		\
+-	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;		\
+-})
+-#else /* CONFIG_USE_X86_SEG_SUPPORT */
++#ifdef CONFIG_X86_64
++#define __raw_my_cpu_offset	raw_cpu_read_8(this_cpu_off);
++#else
++#define __raw_my_cpu_offset	raw_cpu_read_4(this_cpu_off);
++#endif
++
+ /*
+  * Compared to the generic __my_cpu_offset version, the following
+  * saves one instruction and avoids clobbering a temp register.
++ *
++ * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
++ * kernel, because games are played with CONFIG_X86_64 there and
++ * sizeof(this_cpu_off) becomes 4.
+  */
+-#define arch_raw_cpu_ptr(ptr)					\
++#ifndef BUILD_VDSO32_64
++#define arch_raw_cpu_ptr(_ptr)					\
+ ({								\
+-	unsigned long tcp_ptr__;				\
+-	asm ("mov " __percpu_arg(1) ", %0"			\
+-	     : "=r" (tcp_ptr__)					\
+-	     : "m" (__my_cpu_var(this_cpu_off)));		\
+-								\
+-	tcp_ptr__ += (unsigned long)(ptr);			\
+-	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;		\
++	unsigned long tcp_ptr__ = __raw_my_cpu_offset;		\
++	tcp_ptr__ += (__force unsigned long)(_ptr);		\
++	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;		\
+ })
+-#endif /* CONFIG_USE_X86_SEG_SUPPORT */
++#else
++#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
++#endif
+ 
+ #define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel
+ 
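With the named-address-space variant gone, arch_raw_cpu_ptr() always builds a per-CPU pointer the same way: read this CPU's offset, add it to the variable's canonical address. A toy model of that offset-plus-address scheme (array indices stand in for the real segment-relative offsets; nothing here is the kernel's implementation):

#include <stdio.h>

#define NR_CPUS 2

static long pcpu_area[NR_CPUS][4];	/* each CPU owns one copy */
static long counter_idx = 1;		/* "address" of a per-cpu variable */

static long *per_cpu_ptr_model(int cpu, long idx)
{
	/* this CPU's base plus the variable's offset, plain addition */
	return &pcpu_area[cpu][0] + idx;
}

int main(void)
{
	*per_cpu_ptr_model(0, counter_idx) = 10;
	*per_cpu_ptr_model(1, counter_idx) = 20;
	printf("%ld %ld\n", *per_cpu_ptr_model(0, counter_idx),
	       *per_cpu_ptr_model(1, counter_idx));	/* prints: 10 20 */
	return 0;
}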
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 185738c727661..29cd0543fc2fb 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -1036,7 +1036,8 @@ static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
+ 			add_timer_on(&cl->timer, cpu);
+ 		}
+ 	} else {
+-		apicd->prev_vector = 0;
++		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
++		free_moved_vector(apicd);
+ 	}
+ 	raw_spin_unlock(&vector_lock);
+ }
+@@ -1073,6 +1074,7 @@ void irq_complete_move(struct irq_cfg *cfg)
+  */
+ void irq_force_complete_move(struct irq_desc *desc)
+ {
++	unsigned int cpu = smp_processor_id();
+ 	struct apic_chip_data *apicd;
+ 	struct irq_data *irqd;
+ 	unsigned int vector;
+@@ -1097,10 +1099,11 @@ void irq_force_complete_move(struct irq_desc *desc)
+ 		goto unlock;
+ 
+ 	/*
+-	 * If prev_vector is empty, no action required.
++	 * If prev_vector is empty or the descriptor is neither currently
++	 * nor previously on the outgoing CPU, no action is required.
+ 	 */
+ 	vector = apicd->prev_vector;
+-	if (!vector)
++	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
+ 		goto unlock;
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 605c26c009c8a..ae987a26f26e4 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1589,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ 	if (have_cpuid_p()) {
+ 		cpu_detect(c);
+ 		get_cpu_vendor(c);
++		intel_unlock_cpuid_leafs(c);
+ 		get_cpu_cap(c);
+ 		setup_force_cpu_cap(X86_FEATURE_CPUID);
+ 		get_cpu_address_sizes(c);
+@@ -1748,7 +1749,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
+ 	cpu_detect(c);
+ 
+ 	get_cpu_vendor(c);
+-
++	intel_unlock_cpuid_leafs(c);
+ 	get_cpu_cap(c);
+ 
+ 	get_cpu_address_sizes(c);
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index ea9e07d57c8dd..1beccefbaff9a 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+ 
+ extern void __init tsx_init(void);
+ void tsx_ap_init(void);
++void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
+ #else
+ static inline void tsx_init(void) { }
+ static inline void tsx_ap_init(void) { }
++static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
+ #endif /* CONFIG_CPU_SUP_INTEL */
+ 
+ extern void init_spectral_chicken(struct cpuinfo_x86 *c);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index be30d7fa2e66b..93efd9edc7242 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -268,19 +268,26 @@ static void detect_tme_early(struct cpuinfo_x86 *c)
+ 	c->x86_phys_bits -= keyid_bits;
+ }
+ 
++void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
++{
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
++		return;
++
++	if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
++		return;
++
++	/*
++	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
++	 * enumeration. Unlock it and update the maximum leaf info.
++	 */
++	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
++		c->cpuid_level = cpuid_eax(0);
++}
++
+ static void early_init_intel(struct cpuinfo_x86 *c)
+ {
+ 	u64 misc_enable;
+ 
+-	/* Unmask CPUID levels if masked: */
+-	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+-		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+-				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
+-			c->cpuid_level = cpuid_eax(0);
+-			get_cpu_cap(c);
+-		}
+-	}
+-
+ 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
+ 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index d17c9b71eb4a2..621a151ccf7d0 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -128,6 +128,9 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
+ 
+ static __init bool check_for_real_bsp(u32 apic_id)
+ {
++	bool is_bsp = false, has_apic_base = boot_cpu_data.x86 >= 6;
++	u64 msr;
++
+ 	/*
+ 	 * There is no real good way to detect whether this a kdump()
+ 	 * kernel, but except on the Voyager SMP monstrosity which is not
+@@ -144,17 +147,61 @@ static __init bool check_for_real_bsp(u32 apic_id)
+ 	if (topo_info.real_bsp_apic_id != BAD_APICID)
+ 		return false;
+ 
++	/*
++	 * Check whether the enumeration order is broken by evaluating the
++	 * BSP bit in the APICBASE MSR. If the CPU does not have the
++	 * APICBASE MSR then the BSP detection is not possible and the
++	 * kernel must rely on the firmware enumeration order.
++	 */
++	if (has_apic_base) {
++		rdmsrl(MSR_IA32_APICBASE, msr);
++		is_bsp = !!(msr & MSR_IA32_APICBASE_BSP);
++	}
++
+ 	if (apic_id == topo_info.boot_cpu_apic_id) {
+-		topo_info.real_bsp_apic_id = apic_id;
+-		return false;
++		/*
++		 * If the boot CPU has the APIC BSP bit set then the
++		 * firmware enumeration is agreeing. If the CPU does not
++		 * firmware enumeration agrees. If the CPU does not
++		 * the enumeration order.
++		 */
++		if (is_bsp || !has_apic_base) {
++			topo_info.real_bsp_apic_id = apic_id;
++			return false;
++		}
++		/*
++		 * If the boot APIC is enumerated first, but the APICBASE
++		 * MSR does not have the BSP bit set, then there is no way
++		 * to discover the real BSP here. Assume a crash kernel and
++		 * limit the number of CPUs to 1 as an INIT to the real BSP
++		 * would reset the machine.
++		 */
++		pr_warn("Enumerated BSP APIC %x is not marked in APICBASE MSR\n", apic_id);
++		pr_warn("Assuming crash kernel. Limiting to one CPU to prevent machine INIT\n");
++		set_nr_cpu_ids(1);
++		goto fwbug;
+ 	}
+ 
+-	pr_warn("Boot CPU APIC ID not the first enumerated APIC ID: %x > %x\n",
++	pr_warn("Boot CPU APIC ID not the first enumerated APIC ID: %x != %x\n",
+ 		topo_info.boot_cpu_apic_id, apic_id);
++
++	if (is_bsp) {
++		/*
++		 * The boot CPU has the APIC BSP bit set. Use it and complain
++		 * about the broken firmware enumeration.
++		 */
++		topo_info.real_bsp_apic_id = topo_info.boot_cpu_apic_id;
++		goto fwbug;
++	}
++
+ 	pr_warn("Crash kernel detected. Disabling real BSP to prevent machine INIT\n");
+ 
+ 	topo_info.real_bsp_apic_id = apic_id;
+ 	return true;
++
++fwbug:
++	pr_warn(FW_BUG "APIC enumeration order not specification compliant\n");
++	return false;
+ }
+ 
+ static unsigned int topo_unit_count(u32 lvlid, enum x86_topology_domains at_level,
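The rework above trusts the BSP flag in the APICBASE MSR (family 6 and newer) over the firmware's enumeration order. For reference, the bit test itself is a one-liner; the sketch below uses an illustrative MSR value, since reading the real MSR requires ring 0:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APICBASE_BSP	(1ULL << 8)	/* boot processor flag */

int main(void)
{
	uint64_t msr = 0xfee00900;	/* example APICBASE contents */
	bool is_bsp = msr & MSR_IA32_APICBASE_BSP;

	printf("BSP bit %s\n", is_bsp ? "set" : "clear");	/* set */
	return 0;
}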
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 77352a4abd87f..b1002b79886ab 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1232,9 +1232,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		entry->eax = entry->ebx = entry->ecx = 0;
+ 		break;
+ 	case 0x80000008: {
+-		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+-		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+-		unsigned phys_as = entry->eax & 0xff;
++		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
++		unsigned int phys_as;
+ 
+ 		/*
+ 		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
+@@ -1242,16 +1241,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+ 		 * reductions in MAXPHYADDR for memory encryption affect shadow
+ 		 * paging, too.
+ 		 *
+-		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
+-		 * provided, use the raw bare metal MAXPHYADDR as reductions to
+-		 * the HPAs do not affect GPAs.
++		 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
++		 * reductions to the HPAs do not affect GPAs.
+ 		 */
+-		if (!tdp_enabled)
+-			g_phys_as = boot_cpu_data.x86_phys_bits;
+-		else if (!g_phys_as)
+-			g_phys_as = phys_as;
++		if (!tdp_enabled) {
++			phys_as = boot_cpu_data.x86_phys_bits;
++		} else {
++			phys_as = entry->eax & 0xff;
++		}
+ 
+-		entry->eax = g_phys_as | (virt_as << 8);
++		entry->eax = phys_as | (virt_as << 8);
+ 		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
+ 		entry->edx = 0;
+ 		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
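For readers of the hunk above: CPUID leaf 0x80000008 EAX packs the physical address width into bits [7:0] and the virtual width into bits [15:8], and the patch only changes which phys_as value gets packed. A worked example of the packing, with illustrative widths:

#include <stdio.h>

int main(void)
{
	unsigned int phys_as = 46, virt_as = 48;
	unsigned int eax = phys_as | (virt_as << 8);

	printf("eax=%#x phys=%u virt=%u\n", eax,
	       eax & 0xff, (eax >> 8) & 0xff);	/* eax=0x302e phys=46 virt=48 */
	return 0;
}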
+diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
+index 0cc9520666efb..39255f0eb14df 100644
+--- a/arch/x86/pci/mmconfig-shared.c
++++ b/arch/x86/pci/mmconfig-shared.c
+@@ -518,7 +518,34 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
+ {
+ 	struct resource *conflict;
+ 
+-	if (!early && !acpi_disabled) {
++	if (early) {
++
++		/*
++		 * Don't try to do this check unless configuration type 1
++		 * is available.  How about type 2?
++		 */
++
++		/*
++		 * 946f2ee5c731 ("Check that MCFG points to an e820
++		 * reserved area") added this E820 check in 2006 to work
++		 * around BIOS defects.
++		 *
++		 * Per PCI Firmware r3.3, sec 4.1.2, ECAM space must be
++		 * reserved by a PNP0C02 resource, but it need not be
++		 * mentioned in E820.  Before the ACPI interpreter is
++		 * available, we can't check for PNP0C02 resources, so
++		 * there's no reliable way to verify the region in this
++		 * early check.  Keep it only for the old machines that
++		 * motivated 946f2ee5c731.
++		 */
++		if (dmi_get_bios_year() < 2016 && raw_pci_ops)
++			return is_mmconf_reserved(e820__mapped_all, cfg, dev,
++						  "E820 entry");
++
++		return true;
++	}
++
++	if (!acpi_disabled) {
+ 		if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
+ 				       "ACPI motherboard resource"))
+ 			return true;
+@@ -551,16 +578,7 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
+ 	 * For MCFG information constructed from hotpluggable host bridge's
+ 	 * _CBA method, just assume it's reserved.
+ 	 */
+-	if (pci_mmcfg_running_state)
+-		return true;
+-
+-	/* Don't try to do this check unless configuration
+-	   type 1 is available. how about type 2 ?*/
+-	if (raw_pci_ops)
+-		return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+-					  "E820 entry");
+-
+-	return false;
++	return pci_mmcfg_running_state;
+ }
+ 
+ static void __init pci_mmcfg_reject_broken(int early)
+diff --git a/arch/x86/um/shared/sysdep/archsetjmp.h b/arch/x86/um/shared/sysdep/archsetjmp.h
+index 166cedbab9266..8c81d1a604a94 100644
+--- a/arch/x86/um/shared/sysdep/archsetjmp.h
++++ b/arch/x86/um/shared/sysdep/archsetjmp.h
+@@ -1,6 +1,13 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __X86_UM_SYSDEP_ARCHSETJMP_H
++#define __X86_UM_SYSDEP_ARCHSETJMP_H
++
+ #ifdef __i386__
+ #include "archsetjmp_32.h"
+ #else
+ #include "archsetjmp_64.h"
+ #endif
++
++unsigned long get_thread_reg(int reg, jmp_buf *buf);
++
++#endif /* __X86_UM_SYSDEP_ARCHSETJMP_H */
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a01ca255b0c64..b88722dfc4f86 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -382,3 +382,36 @@ void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
+ 
+ 	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
+ }
++
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++int __init arch_xen_unpopulated_init(struct resource **res)
++{
++	unsigned int i;
++
++	if (!xen_domain())
++		return -ENODEV;
++
++	/* Must be set strictly before calling xen_free_unpopulated_pages(). */
++	*res = &iomem_resource;
++
++	/*
++	 * Initialize with pages from the extra memory regions (see
++	 * arch/x86/xen/setup.c).
++	 */
++	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
++		unsigned int j;
++
++		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
++			struct page *pg =
++				pfn_to_page(xen_extra_mem[i].start_pfn + j);
++
++			xen_free_unpopulated_pages(1, &pg);
++		}
++
++		/* Zero so region is not also added to the balloon driver. */
++		xen_extra_mem[i].n_pfns = 0;
++	}
++
++	return 0;
++}
++#endif
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 059467086b131..96af8224992e6 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -323,6 +323,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
+ 	blkg->q = disk->queue;
+ 	INIT_LIST_HEAD(&blkg->q_node);
+ 	blkg->blkcg = blkcg;
++	blkg->iostat.blkg = blkg;
+ #ifdef CONFIG_BLK_CGROUP_PUNT_BIO
+ 	spin_lock_init(&blkg->async_bio_lock);
+ 	bio_list_init(&blkg->async_bios);
+@@ -619,12 +620,45 @@ static void blkg_destroy_all(struct gendisk *disk)
+ 	spin_unlock_irq(&q->queue_lock);
+ }
+ 
++static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
++{
++	int i;
++
++	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
++		dst->bytes[i] = src->bytes[i];
++		dst->ios[i] = src->ios[i];
++	}
++}
++
++static void __blkg_clear_stat(struct blkg_iostat_set *bis)
++{
++	struct blkg_iostat cur = {0};
++	unsigned long flags;
++
++	flags = u64_stats_update_begin_irqsave(&bis->sync);
++	blkg_iostat_set(&bis->cur, &cur);
++	blkg_iostat_set(&bis->last, &cur);
++	u64_stats_update_end_irqrestore(&bis->sync, flags);
++}
++
++static void blkg_clear_stat(struct blkcg_gq *blkg)
++{
++	int cpu;
++
++	for_each_possible_cpu(cpu) {
++		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
++
++		__blkg_clear_stat(s);
++	}
++	__blkg_clear_stat(&blkg->iostat);
++}
++
+ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ 			     struct cftype *cftype, u64 val)
+ {
+ 	struct blkcg *blkcg = css_to_blkcg(css);
+ 	struct blkcg_gq *blkg;
+-	int i, cpu;
++	int i;
+ 
+ 	mutex_lock(&blkcg_pol_mutex);
+ 	spin_lock_irq(&blkcg->lock);
+@@ -635,18 +669,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ 	 * anyway.  If you get hit by a race, retry.
+ 	 */
+ 	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+-		for_each_possible_cpu(cpu) {
+-			struct blkg_iostat_set *bis =
+-				per_cpu_ptr(blkg->iostat_cpu, cpu);
+-			memset(bis, 0, sizeof(*bis));
+-
+-			/* Re-initialize the cleared blkg_iostat_set */
+-			u64_stats_init(&bis->sync);
+-			bis->blkg = blkg;
+-		}
+-		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
+-		u64_stats_init(&blkg->iostat.sync);
+-
++		blkg_clear_stat(blkg);
+ 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 			struct blkcg_policy *pol = blkcg_policy[i];
+ 
+@@ -949,16 +972,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
+ }
+ EXPORT_SYMBOL_GPL(blkg_conf_exit);
+ 
+-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+-{
+-	int i;
+-
+-	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+-		dst->bytes[i] = src->bytes[i];
+-		dst->ios[i] = src->ios[i];
+-	}
+-}
+-
+ static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
+ {
+ 	int i;
+@@ -1024,7 +1037,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
+ 		struct blkg_iostat cur;
+ 		unsigned int seq;
+ 
++		/*
++		 * Order the read of `bisc->lnode.next` in
++		 * llist_for_each_entry_safe against the clearing of
++		 * `bisc->lqueued`, so that `next_bisc` cannot be assigned a
++		 * new next pointer added by blk_cgroup_bio_start() if the
++		 * two accesses are re-ordered.
++		 *
++		 * The paired barrier is implied by llist_add() in blk_cgroup_bio_start().
++		 */
++		smp_mb();
++
+ 		WRITE_ONCE(bisc->lqueued, false);
++		if (bisc == &blkg->iostat)
++			goto propagate_up; /* propagate up to parent only */
+ 
+ 		/* fetch the current per-cpu values */
+ 		do {
+@@ -1034,10 +1059,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
+ 
+ 		blkcg_iostat_update(blkg, &cur, &bisc->last);
+ 
++propagate_up:
+ 		/* propagate global delta to parent (unless that's root) */
+-		if (parent && parent->parent)
++		if (parent && parent->parent) {
+ 			blkcg_iostat_update(parent, &blkg->iostat.cur,
+ 					    &blkg->iostat.last);
++			/*
++			 * Queue parent->iostat to its blkcg's lockless
++			 * list to propagate up to the grandparent if the
++			 * iostat hasn't been queued yet.
++			 */
++			if (!parent->iostat.lqueued) {
++				struct llist_head *plhead;
++
++				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
++				llist_add(&parent->iostat.lnode, plhead);
++				parent->iostat.lqueued = true;
++			}
++		}
+ 	}
+ 	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
+ out:
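The new smp_mb() keeps the read of `bisc->lnode.next` (done by llist_for_each_entry_safe) from being reordered after the clearing of `bisc->lqueued`; otherwise a concurrent llist_add() could slip in between and its node would be skipped. A C11-atomics analogue of that ordering requirement, with illustrative names rather than the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	atomic_bool lqueued;
};

/* Read the next pointer first, then publish "dequeued"; the fence
 * plays the role of smp_mb() between the two. */
static struct node *flush_one(struct node *n)
{
	struct node *next = atomic_load(&n->next);

	atomic_thread_fence(memory_order_seq_cst);
	atomic_store(&n->lqueued, false);
	return next;
}

int main(void)
{
	struct node b = { .next = NULL, .lqueued = true };
	struct node a = { .next = &b, .lqueued = true };

	printf("next=%p lqueued=%d\n", (void *)flush_one(&a),
	       (int)atomic_load(&a.lqueued));	/* lqueued=0 */
	return 0;
}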
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 9d6033e01f2e1..15319b217bf3f 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -751,6 +751,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 	unsigned int top, bottom, alignment, ret = 0;
+ 
+ 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
++	t->max_user_sectors = min_not_zero(t->max_user_sectors,
++			b->max_user_sectors);
+ 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
+ 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
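The stacking fix relies on min_not_zero() semantics: zero means "no limit set" and must never clamp the combined value, which is exactly why max_user_sectors has to be merged with min_not_zero() rather than min(). A self-contained sketch of the macro in kernel-style GNU C (statement expressions and typeof, so GCC or clang is assumed):

#include <stdio.h>

#define min_not_zero(x, y) ({				\
	typeof(x) __x = (x);				\
	typeof(y) __y = (y);				\
	__x == 0 ? __y :				\
		(__y == 0 ? __x :			\
			(__x < __y ? __x : __y)); })

int main(void)
{
	printf("%u\n", min_not_zero(0u, 512u));		/* 512: 0 is "unset" */
	printf("%u\n", min_not_zero(256u, 512u));	/* 256 */
	return 0;
}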
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 0738ccad08b2e..db4f910e8e36e 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -192,11 +192,14 @@ extern struct kset *devices_kset;
+ void devices_kset_move_last(struct device *dev);
+ 
+ #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
+-void module_add_driver(struct module *mod, struct device_driver *drv);
++int module_add_driver(struct module *mod, struct device_driver *drv);
+ void module_remove_driver(struct device_driver *drv);
+ #else
+-static inline void module_add_driver(struct module *mod,
+-				     struct device_driver *drv) { }
++static inline int module_add_driver(struct module *mod,
++				    struct device_driver *drv)
++{
++	return 0;
++}
+ static inline void module_remove_driver(struct device_driver *drv) { }
+ #endif
+ 
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index daee55c9b2d9e..ffea0728b8b2f 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -674,7 +674,12 @@ int bus_add_driver(struct device_driver *drv)
+ 		if (error)
+ 			goto out_del_list;
+ 	}
+-	module_add_driver(drv->owner, drv);
++	error = module_add_driver(drv->owner, drv);
++	if (error) {
++		printk(KERN_ERR "%s: failed to create module links for %s\n",
++			__func__, drv->name);
++		goto out_detach;
++	}
+ 
+ 	error = driver_create_file(drv, &driver_attr_uevent);
+ 	if (error) {
+@@ -699,6 +704,8 @@ int bus_add_driver(struct device_driver *drv)
+ 
+ 	return 0;
+ 
++out_detach:
++	driver_detach(drv);
+ out_del_list:
+ 	klist_del(&priv->knode_bus);
+ out_unregister:
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index 46ad4d636731d..a1b55da07127d 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -30,14 +30,14 @@ static void module_create_drivers_dir(struct module_kobject *mk)
+ 	mutex_unlock(&drivers_dir_mutex);
+ }
+ 
+-void module_add_driver(struct module *mod, struct device_driver *drv)
++int module_add_driver(struct module *mod, struct device_driver *drv)
+ {
+ 	char *driver_name;
+-	int no_warn;
+ 	struct module_kobject *mk = NULL;
++	int ret;
+ 
+ 	if (!drv)
+-		return;
++		return 0;
+ 
+ 	if (mod)
+ 		mk = &mod->mkobj;
+@@ -56,17 +56,37 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
+ 	}
+ 
+ 	if (!mk)
+-		return;
++		return 0;
++
++	ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
++	if (ret)
++		return ret;
+ 
+-	/* Don't check return codes; these calls are idempotent */
+-	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ 	driver_name = make_driver_name(drv);
+-	if (driver_name) {
+-		module_create_drivers_dir(mk);
+-		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
+-					    driver_name);
+-		kfree(driver_name);
++	if (!driver_name) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	module_create_drivers_dir(mk);
++	if (!mk->drivers_dir) {
++		ret = -EINVAL;
++		goto out;
+ 	}
++
++	ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
++	if (ret)
++		goto out;
++
++	kfree(driver_name);
++
++	return 0;
++out:
++	sysfs_remove_link(&drv->p->kobj, "module");
++	sysfs_remove_link(mk->drivers_dir, driver_name);
++	kfree(driver_name);
++
++	return ret;
+ }
+ 
+ void module_remove_driver(struct device_driver *drv)
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index eed63f95e89d0..620679a0ac381 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -404,13 +404,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ static int nullb_apply_submit_queues(struct nullb_device *dev,
+ 				     unsigned int submit_queues)
+ {
+-	return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++	int ret;
++
++	mutex_lock(&lock);
++	ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++	mutex_unlock(&lock);
++
++	return ret;
+ }
+ 
+ static int nullb_apply_poll_queues(struct nullb_device *dev,
+ 				   unsigned int poll_queues)
+ {
+-	return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++	int ret;
++
++	mutex_lock(&lock);
++	ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++	mutex_unlock(&lock);
++
++	return ret;
+ }
+ 
+ NULLB_DEVICE_ATTR(size, ulong, NULL);
+@@ -457,28 +469,32 @@ static ssize_t nullb_device_power_store(struct config_item *item,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = count;
++	mutex_lock(&lock);
+ 	if (!dev->power && newp) {
+ 		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+-			return count;
++			goto out;
++
+ 		ret = null_add_dev(dev);
+ 		if (ret) {
+ 			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+-			return ret;
++			goto out;
+ 		}
+ 
+ 		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ 		dev->power = newp;
++		ret = count;
+ 	} else if (dev->power && !newp) {
+ 		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+-			mutex_lock(&lock);
+ 			dev->power = newp;
+ 			null_del_dev(dev->nullb);
+-			mutex_unlock(&lock);
+ 		}
+ 		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ 	}
+ 
+-	return count;
++out:
++	mutex_unlock(&lock);
++	return ret;
+ }
+ 
+ CONFIGFS_ATTR(nullb_device_, power);
+@@ -1918,15 +1934,12 @@ static int null_add_dev(struct nullb_device *dev)
+ 	nullb->q->queuedata = nullb;
+ 	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+ 
+-	mutex_lock(&lock);
+ 	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
+-	if (rv < 0) {
+-		mutex_unlock(&lock);
++	if (rv < 0)
+ 		goto out_cleanup_disk;
+-	}
++
+ 	nullb->index = rv;
+ 	dev->index = rv;
+-	mutex_unlock(&lock);
+ 
+ 	if (config_item_name(&dev->group.cg_item)) {
+ 		/* Use configfs dir name as the device name */
+@@ -1955,9 +1968,7 @@ static int null_add_dev(struct nullb_device *dev)
+ 	if (rv)
+ 		goto out_ida_free;
+ 
+-	mutex_lock(&lock);
+ 	list_add_tail(&nullb->list, &nullb_list);
+-	mutex_unlock(&lock);
+ 
+ 	pr_info("disk %s created\n", nullb->disk_name);
+ 
+@@ -2006,7 +2017,9 @@ static int null_create_dev(void)
+ 	if (!dev)
+ 		return -ENOMEM;
+ 
++	mutex_lock(&lock);
+ 	ret = null_add_dev(dev);
++	mutex_unlock(&lock);
+ 	if (ret) {
+ 		null_free_dev(dev);
+ 		return ret;
+@@ -2121,4 +2134,5 @@ module_init(null_init);
+ module_exit(null_exit);
+ 
+ MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
++MODULE_DESCRIPTION("multi queue aware block test driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
+index ee951b265213f..58e9dcc2a3087 100644
+--- a/drivers/char/ppdev.c
++++ b/drivers/char/ppdev.c
+@@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
+ 	if (!port) {
+ 		pr_warn("%s: no associated port!\n", name);
+ 		rc = -ENXIO;
+-		goto err;
++		goto err_free_name;
+ 	}
+ 
+ 	index = ida_alloc(&ida_index, GFP_KERNEL);
++	if (index < 0) {
++		pr_warn("%s: failed to get index!\n", name);
++		rc = index;
++		goto err_put_port;
++	}
++
+ 	memset(&ppdev_cb, 0, sizeof(ppdev_cb));
+ 	ppdev_cb.irq_func = pp_irq;
+ 	ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
+ 	ppdev_cb.private = pp;
+ 	pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
+-	parport_put_port(port);
+ 
+ 	if (!pdev) {
+ 		pr_warn("%s: failed to register device!\n", name);
+ 		rc = -ENXIO;
+ 		ida_free(&ida_index, index);
+-		goto err;
++		goto err_put_port;
+ 	}
+ 
+ 	pp->pdev = pdev;
+ 	pp->index = index;
+ 	dev_dbg(&pdev->dev, "registered pardevice\n");
+-err:
++err_put_port:
++	parport_put_port(port);
++err_free_name:
+ 	kfree(name);
+ 	return rc;
+ }
+diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
+index 3f9eaf27b41b8..c9eca24bbad47 100644
+--- a/drivers/char/tpm/tpm_tis_spi_main.c
++++ b/drivers/char/tpm/tpm_tis_spi_main.c
+@@ -37,6 +37,7 @@
+ #include "tpm_tis_spi.h"
+ 
+ #define MAX_SPI_FRAMESIZE 64
++#define SPI_HDRSIZE 4
+ 
+ /*
+  * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
+ 		     int irq, const struct tpm_tis_phy_ops *phy_ops)
+ {
+-	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
++	phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ 	if (!phy->iobuf)
+ 		return -ENOMEM;
+ 
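The enlarged allocation makes room for the 4-byte TIS SPI transaction header in front of the up-to-64-byte payload, so header and data can live in one buffer. A layout-only sketch (the header byte shown is illustrative, not the driver's transfer code):

#include <stdint.h>
#include <stdio.h>

#define SPI_HDRSIZE		4
#define MAX_SPI_FRAMESIZE	64

int main(void)
{
	uint8_t iobuf[SPI_HDRSIZE + MAX_SPI_FRAMESIZE];
	uint8_t *hdr = iobuf;				/* bytes 0..3 */
	uint8_t *payload = iobuf + SPI_HDRSIZE;		/* bytes 4..67 */

	hdr[0] = 0x80;		/* e.g. direction/length byte */
	payload[0] = 0x00;
	printf("buffer %zu bytes, payload at offset %d\n",
	       sizeof(iobuf), SPI_HDRSIZE);
	return 0;
}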
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 5c186e0a39b96..812b2948b6c65 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -2719,6 +2719,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+ 		if (i == 0) {
+ 			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
+ 			if (!cxl_nvb) {
++				kfree(cxlr_pmem);
+ 				cxlr_pmem = ERR_PTR(-ENODEV);
+ 				goto out;
+ 			}
+diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
+index e5f13260fc524..7c5cd069f10cc 100644
+--- a/drivers/cxl/core/trace.h
++++ b/drivers/cxl/core/trace.h
+@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
+  * DRAM Event Record
+  * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+  */
+-#define CXL_DPA_FLAGS_MASK			0x3F
+-#define CXL_DPA_MASK				(~CXL_DPA_FLAGS_MASK)
++#define CXL_DPA_FLAGS_MASK			GENMASK(1, 0)
++#define CXL_DPA_MASK				GENMASK_ULL(63, 6)
+ 
+ #define CXL_DPA_VOLATILE			BIT(0)
+ #define CXL_DPA_NOT_REPAIRABLE			BIT(1)
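GENMASK(h, l) builds a mask covering bits h down to l, which makes the two defines above self-documenting. Simplified 32- and 64-bit versions of the macros (the kernel's real ones add build-time type checks) evaluate to the values used here:

#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define GENMASK_ULL(h, l)	(((~0ull) << (l)) & (~0ull >> (63 - (h))))

int main(void)
{
	printf("%#x\n", GENMASK(1, 0));		/* 0x3: the two flag bits */
	printf("%#llx\n", GENMASK_ULL(63, 6));	/* 0xffffffffffffffc0 */
	return 0;
}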
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 101394f16930f..237bce21d1e72 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+ 
+ 	seq_printf(s, "%s: %d\n", obj->name, obj->value);
+ 
+-	spin_lock_irq(&obj->lock);
++	spin_lock(&obj->lock); /* Caller already disabled IRQ. */
+ 	list_for_each(pos, &obj->pt_list) {
+ 		struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ 		sync_print_fence(s, &pt->base, false);
+ 	}
+-	spin_unlock_irq(&obj->lock);
++	spin_unlock(&obj->lock);
+ }
+ 
+ static void sync_print_sync_file(struct seq_file *s,
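The one-line lock change above matters because the _irq spinlock variants re-enable interrupts unconditionally on unlock; when the caller has already disabled interrupts, the inner section must use the plain variants. A toy model of the hazard (a global flag stands in for the CPU interrupt state):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static void spin_lock_irq(void)		{ irqs_enabled = false; }
static void spin_unlock_irq(void)	{ irqs_enabled = true; }	/* unconditional */
static void spin_lock(void)		{ }
static void spin_unlock(void)		{ }

static void inner_bad(void)	{ spin_lock_irq(); spin_unlock_irq(); }
static void inner_good(void)	{ spin_lock(); spin_unlock(); }

int main(void)
{
	spin_lock_irq();		/* outer caller disables IRQs */
	inner_bad();
	printf("after inner_bad:  irqs %s\n",
	       irqs_enabled ? "on (bug)" : "off");
	irqs_enabled = false;
	inner_good();
	printf("after inner_good: irqs %s\n",
	       irqs_enabled ? "on" : "off (correct)");
	spin_unlock_irq();
	return 0;
}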
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index 1398814d8fbb6..e3505e56784b1 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
+ 
+ 	idma64->dma.dev = chip->sysdev;
+ 
+-	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++	ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
++	if (ret)
++		return ret;
+ 
+ 	ret = dma_async_device_register(&idma64->dma);
+ 	if (ret)
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 39935071174a3..fd9bbee4cc42f 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ 	struct idxd_cdev *idxd_cdev;
+ 
+ 	idxd_cdev = wq->idxd_cdev;
+-	ida_destroy(&file_ida);
+ 	wq->idxd_cdev = NULL;
+ 	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
+ 	put_device(cdev_dev(idxd_cdev));
+diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
+index 5f869eacd19ab..3da94b3822923 100644
+--- a/drivers/extcon/Kconfig
++++ b/drivers/extcon/Kconfig
+@@ -116,7 +116,8 @@ config EXTCON_MAX77843
+ 
+ config EXTCON_MAX8997
+ 	tristate "Maxim MAX8997 EXTCON Support"
+-	depends on MFD_MAX8997 && IRQ_DOMAIN
++	depends on MFD_MAX8997
++	select IRQ_DOMAIN
+ 	help
+ 	  If you say yes here you get support for the MUIC device of
+ 	  Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 5f3a3e913d28f..d19c78a78ae3a 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
+ 	return 0;
+ }
+ 
++static void dmi_dev_release(struct device *dev)
++{
++	kfree(dev);
++}
++
+ static struct class dmi_class = {
+ 	.name = "dmi",
+-	.dev_release = (void(*)(struct device *)) kfree,
++	.dev_release = dmi_dev_release,
+ 	.dev_uevent = dmi_dev_uevent,
+ };
+ 
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 70e9789ff9de0..6a337f1f8787b 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -335,8 +335,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ 
+ fail:
+ 	efi_free(fdt_size, fdt_addr);
+-
+-	efi_bs_call(free_pool, priv.runtime_map);
++	if (!efi_novamap)
++		efi_bs_call(free_pool, priv.runtime_map);
+ 
+ 	return EFI_LOAD_ERROR;
+ }
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index d5a8182cf2e1c..1983fd3bf392e 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -776,6 +776,26 @@ static void error(char *str)
+ 	efi_warn("Decompression failed: %s\n", str);
+ }
+ 
++static const char *cmdline_memmap_override;
++
++static efi_status_t parse_options(const char *cmdline)
++{
++	static const char opts[][14] = {
++		"mem=", "memmap=", "efi_fake_mem=", "hugepages="
++	};
++
++	for (int i = 0; i < ARRAY_SIZE(opts); i++) {
++		const char *p = strstr(cmdline, opts[i]);
++
++		if (p == cmdline || (p > cmdline && isspace(p[-1]))) {
++			cmdline_memmap_override = opts[i];
++			break;
++		}
++	}
++
++	return efi_parse_options(cmdline);
++}
++
+ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ {
+ 	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+@@ -807,6 +827,10 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+ 		    !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
+ 			efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+ 			seed[0] = 0;
++		} else if (cmdline_memmap_override) {
++			efi_info("%s detected on the kernel command line - disabling physical KASLR\n",
++				 cmdline_memmap_override);
++			seed[0] = 0;
+ 		}
+ 
+ 		boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+@@ -883,7 +907,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ 	}
+ 
+ #ifdef CONFIG_CMDLINE_BOOL
+-	status = efi_parse_options(CONFIG_CMDLINE);
++	status = parse_options(CONFIG_CMDLINE);
+ 	if (status != EFI_SUCCESS) {
+ 		efi_err("Failed to parse options\n");
+ 		goto fail;
+@@ -892,7 +916,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
+ 	if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ 		unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ 					       ((u64)boot_params->ext_cmd_line_ptr << 32));
+-		status = efi_parse_options((char *)cmdline_paddr);
++		status = parse_options((char *)cmdline_paddr);
+ 		if (status != EFI_SUCCESS) {
+ 			efi_err("Failed to parse options\n");
+ 			goto fail;
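parse_options() above only treats an option as present when the match starts the command line or follows whitespace, so a substring such as "kmemmap=" cannot trigger the KASLR opt-out. A userspace analogue of that matching rule (same option table; the main() and function name are hypothetical):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *find_memmap_opt(const char *cmdline)
{
	static const char opts[][14] = {
		"mem=", "memmap=", "efi_fake_mem=", "hugepages="
	};

	for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
		const char *p = strstr(cmdline, opts[i]);

		if (p && (p == cmdline || isspace((unsigned char)p[-1])))
			return opts[i];
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", find_memmap_opt("ro memmap=4G!1G"));		/* memmap= */
	printf("%s\n", find_memmap_opt("kmemmap=1") ? "hit" : "miss");	/* miss */
	return 0;
}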
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 79c473b3c7c3d..8ef395b49bf8a 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
+ }
+ EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+ 
+-static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
++static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev,
+ 					     struct fpga_image_info *info)
+ {
+ 	struct fpga_bridge *bridge;
+-	int ret = -ENODEV;
+ 
+-	bridge = to_fpga_bridge(dev);
++	bridge = to_fpga_bridge(bridge_dev);
+ 
+ 	bridge->info = info;
+ 
+-	if (!mutex_trylock(&bridge->mutex)) {
+-		ret = -EBUSY;
+-		goto err_dev;
+-	}
++	if (!mutex_trylock(&bridge->mutex))
++		return ERR_PTR(-EBUSY);
+ 
+-	if (!try_module_get(dev->parent->driver->owner))
+-		goto err_ll_mod;
++	if (!try_module_get(bridge->br_ops_owner)) {
++		mutex_unlock(&bridge->mutex);
++		return ERR_PTR(-ENODEV);
++	}
+ 
+ 	dev_dbg(&bridge->dev, "get\n");
+ 
+ 	return bridge;
+-
+-err_ll_mod:
+-	mutex_unlock(&bridge->mutex);
+-err_dev:
+-	put_device(dev);
+-	return ERR_PTR(ret);
+ }
+ 
+ /**
+@@ -98,13 +91,18 @@ static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ 				       struct fpga_image_info *info)
+ {
+-	struct device *dev;
++	struct fpga_bridge *bridge;
++	struct device *bridge_dev;
+ 
+-	dev = class_find_device_by_of_node(&fpga_bridge_class, np);
+-	if (!dev)
++	bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np);
++	if (!bridge_dev)
+ 		return ERR_PTR(-ENODEV);
+ 
+-	return __fpga_bridge_get(dev, info);
++	bridge = __fpga_bridge_get(bridge_dev, info);
++	if (IS_ERR(bridge))
++		put_device(bridge_dev);
++
++	return bridge;
+ }
+ EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+ 
+@@ -125,6 +123,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
+ struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ 				    struct fpga_image_info *info)
+ {
++	struct fpga_bridge *bridge;
+ 	struct device *bridge_dev;
+ 
+ 	bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
+@@ -132,7 +131,11 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ 	if (!bridge_dev)
+ 		return ERR_PTR(-ENODEV);
+ 
+-	return __fpga_bridge_get(bridge_dev, info);
++	bridge = __fpga_bridge_get(bridge_dev, info);
++	if (IS_ERR(bridge))
++		put_device(bridge_dev);
++
++	return bridge;
+ }
+ EXPORT_SYMBOL_GPL(fpga_bridge_get);
+ 
+@@ -146,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge)
+ 	dev_dbg(&bridge->dev, "put\n");
+ 
+ 	bridge->info = NULL;
+-	module_put(bridge->dev.parent->driver->owner);
++	module_put(bridge->br_ops_owner);
+ 	mutex_unlock(&bridge->mutex);
+ 	put_device(&bridge->dev);
+ }
+@@ -316,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = {
+ ATTRIBUTE_GROUPS(fpga_bridge);
+ 
+ /**
+- * fpga_bridge_register - create and register an FPGA Bridge device
++ * __fpga_bridge_register - create and register an FPGA Bridge device
+  * @parent:	FPGA bridge device from pdev
+  * @name:	FPGA bridge name
+  * @br_ops:	pointer to structure of fpga bridge ops
+  * @priv:	FPGA bridge private data
++ * @owner:	owner module containing the br_ops
+  *
+  * Return: struct fpga_bridge pointer or ERR_PTR()
+  */
+ struct fpga_bridge *
+-fpga_bridge_register(struct device *parent, const char *name,
+-		     const struct fpga_bridge_ops *br_ops,
+-		     void *priv)
++__fpga_bridge_register(struct device *parent, const char *name,
++		       const struct fpga_bridge_ops *br_ops,
++		       void *priv, struct module *owner)
+ {
+ 	struct fpga_bridge *bridge;
+ 	int id, ret;
+@@ -357,6 +361,7 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 
+ 	bridge->name = name;
+ 	bridge->br_ops = br_ops;
++	bridge->br_ops_owner = owner;
+ 	bridge->priv = priv;
+ 
+ 	bridge->dev.groups = br_ops->groups;
+@@ -386,7 +391,7 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 
+ 	return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_bridge_register);
++EXPORT_SYMBOL_GPL(__fpga_bridge_register);
+ 
+ /**
+  * fpga_bridge_unregister - unregister an FPGA bridge
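All of these fpga registration changes replace the fragile dev->parent->driver->owner lookup with an owner module recorded at registration time; upstream pairs each __-prefixed function with a header macro that passes THIS_MODULE from the call site (the macros live outside this hunk). A toy model of capturing the caller's identity that way, with made-up names throughout:

#include <stdio.h>

struct module { const char *name; };

static const struct module *owner_recorded;

static void __register(const struct module *owner)
{
	owner_recorded = owner;		/* like bridge->br_ops_owner */
}

/* The macro expands at the call site, so each caller passes itself,
 * which is what THIS_MODULE does in the kernel headers. */
#define register_with_owner(self)	__register(self)

int main(void)
{
	static const struct module this_module = { "my_driver" };

	register_with_owner(&this_module);
	printf("owner: %s\n", owner_recorded->name);	/* my_driver */
	return 0;
}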
+diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
+index 06651389c5926..0f4035b089a2e 100644
+--- a/drivers/fpga/fpga-mgr.c
++++ b/drivers/fpga/fpga-mgr.c
+@@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = {
+ };
+ ATTRIBUTE_GROUPS(fpga_mgr);
+ 
+-static struct fpga_manager *__fpga_mgr_get(struct device *dev)
++static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev)
+ {
+ 	struct fpga_manager *mgr;
+ 
+-	mgr = to_fpga_manager(dev);
++	mgr = to_fpga_manager(mgr_dev);
+ 
+-	if (!try_module_get(dev->parent->driver->owner))
+-		goto err_dev;
++	if (!try_module_get(mgr->mops_owner))
++		mgr = ERR_PTR(-ENODEV);
+ 
+ 	return mgr;
+-
+-err_dev:
+-	put_device(dev);
+-	return ERR_PTR(-ENODEV);
+ }
+ 
+ static int fpga_mgr_dev_match(struct device *dev, const void *data)
+@@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
+  */
+ struct fpga_manager *fpga_mgr_get(struct device *dev)
+ {
+-	struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev,
+-						   fpga_mgr_dev_match);
++	struct fpga_manager *mgr;
++	struct device *mgr_dev;
++
++	mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match);
+ 	if (!mgr_dev)
+ 		return ERR_PTR(-ENODEV);
+ 
+-	return __fpga_mgr_get(mgr_dev);
++	mgr = __fpga_mgr_get(mgr_dev);
++	if (IS_ERR(mgr))
++		put_device(mgr_dev);
++
++	return mgr;
+ }
+ EXPORT_SYMBOL_GPL(fpga_mgr_get);
+ 
+@@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get);
+  */
+ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+ {
+-	struct device *dev;
++	struct fpga_manager *mgr;
++	struct device *mgr_dev;
+ 
+-	dev = class_find_device_by_of_node(&fpga_mgr_class, node);
+-	if (!dev)
++	mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node);
++	if (!mgr_dev)
+ 		return ERR_PTR(-ENODEV);
+ 
+-	return __fpga_mgr_get(dev);
++	mgr = __fpga_mgr_get(mgr_dev);
++	if (IS_ERR(mgr))
++		put_device(mgr_dev);
++
++	return mgr;
+ }
+ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
+ 
+@@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
+  */
+ void fpga_mgr_put(struct fpga_manager *mgr)
+ {
+-	module_put(mgr->dev.parent->driver->owner);
++	module_put(mgr->mops_owner);
+ 	put_device(&mgr->dev);
+ }
+ EXPORT_SYMBOL_GPL(fpga_mgr_put);
+@@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
+ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
+ 
+ /**
+- * fpga_mgr_register_full - create and register an FPGA Manager device
++ * __fpga_mgr_register_full - create and register an FPGA Manager device
+  * @parent:	fpga manager device from pdev
+  * @info:	parameters for fpga manager
++ * @owner:	owner module containing the ops
+  *
+  * The caller of this function is responsible for calling fpga_mgr_unregister().
+  * Using devm_fpga_mgr_register_full() instead is recommended.
+@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
+  * Return: pointer to struct fpga_manager pointer or ERR_PTR()
+  */
+ struct fpga_manager *
+-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
++__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++			 struct module *owner)
+ {
+ 	const struct fpga_manager_ops *mops = info->mops;
+ 	struct fpga_manager *mgr;
+@@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
+ 
+ 	mutex_init(&mgr->ref_mutex);
+ 
++	mgr->mops_owner = owner;
++
+ 	mgr->name = info->name;
+ 	mgr->mops = info->mops;
+ 	mgr->priv = info->priv;
+@@ -841,14 +852,15 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
+ 
+ 	return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
++EXPORT_SYMBOL_GPL(__fpga_mgr_register_full);
+ 
+ /**
+- * fpga_mgr_register - create and register an FPGA Manager device
++ * __fpga_mgr_register - create and register an FPGA Manager device
+  * @parent:	fpga manager device from pdev
+  * @name:	fpga manager name
+  * @mops:	pointer to structure of fpga manager ops
+  * @priv:	fpga manager private data
++ * @owner:	owner module containing the ops
+  *
+  * The caller of this function is responsible for calling fpga_mgr_unregister().
+  * Using devm_fpga_mgr_register() instead is recommended. This simple
+@@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
+  * Return: pointer to struct fpga_manager pointer or ERR_PTR()
+  */
+ struct fpga_manager *
+-fpga_mgr_register(struct device *parent, const char *name,
+-		  const struct fpga_manager_ops *mops, void *priv)
++__fpga_mgr_register(struct device *parent, const char *name,
++		    const struct fpga_manager_ops *mops, void *priv, struct module *owner)
+ {
+ 	struct fpga_manager_info info = { 0 };
+ 
+@@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name,
+ 	info.mops = mops;
+ 	info.priv = priv;
+ 
+-	return fpga_mgr_register_full(parent, &info);
++	return __fpga_mgr_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(fpga_mgr_register);
++EXPORT_SYMBOL_GPL(__fpga_mgr_register);
+ 
+ /**
+  * fpga_mgr_unregister - unregister an FPGA manager
+@@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+ }
+ 
+ /**
+- * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
++ * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
+  * @parent:	fpga manager device from pdev
+  * @info:	parameters for fpga manager
++ * @owner:	owner module containing the ops
+  *
+  * Return:  fpga manager pointer on success, negative error code otherwise.
+  *
+@@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+  * function will be called automatically when the managing device is detached.
+  */
+ struct fpga_manager *
+-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
++__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++			      struct module *owner)
+ {
+ 	struct fpga_mgr_devres *dr;
+ 	struct fpga_manager *mgr;
+@@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
+ 	if (!dr)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	mgr = fpga_mgr_register_full(parent, info);
++	mgr = __fpga_mgr_register_full(parent, info, owner);
+ 	if (IS_ERR(mgr)) {
+ 		devres_free(dr);
+ 		return mgr;
+@@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
+ 
+ 	return mgr;
+ }
+-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
++EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full);
+ 
+ /**
+- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
++ * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+  * @parent:	fpga manager device from pdev
+  * @name:	fpga manager name
+  * @mops:	pointer to structure of fpga manager ops
+  * @priv:	fpga manager private data
++ * @owner:	owner module containing the ops
+  *
+  * Return:  fpga manager pointer on success, negative error code otherwise.
+  *
+@@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+  * device is detached.
+  */
+ struct fpga_manager *
+-devm_fpga_mgr_register(struct device *parent, const char *name,
+-		       const struct fpga_manager_ops *mops, void *priv)
++__devm_fpga_mgr_register(struct device *parent, const char *name,
++			 const struct fpga_manager_ops *mops, void *priv,
++			 struct module *owner)
+ {
+ 	struct fpga_manager_info info = { 0 };
+ 
+@@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name,
+ 	info.mops = mops;
+ 	info.priv = priv;
+ 
+-	return devm_fpga_mgr_register_full(parent, &info);
++	return __devm_fpga_mgr_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
++EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register);
+ 
+ static void fpga_mgr_dev_release(struct device *dev)
+ {
+diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
+index b364a929425ce..753cd142503e0 100644
+--- a/drivers/fpga/fpga-region.c
++++ b/drivers/fpga/fpga-region.c
+@@ -53,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
+ 	}
+ 
+ 	get_device(dev);
+-	if (!try_module_get(dev->parent->driver->owner)) {
++	if (!try_module_get(region->ops_owner)) {
+ 		put_device(dev);
+ 		mutex_unlock(&region->mutex);
+ 		return ERR_PTR(-ENODEV);
+@@ -75,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region)
+ 
+ 	dev_dbg(dev, "put\n");
+ 
+-	module_put(dev->parent->driver->owner);
++	module_put(region->ops_owner);
+ 	put_device(dev);
+ 	mutex_unlock(&region->mutex);
+ }
+@@ -181,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = {
+ ATTRIBUTE_GROUPS(fpga_region);
+ 
+ /**
+- * fpga_region_register_full - create and register an FPGA Region device
++ * __fpga_region_register_full - create and register an FPGA Region device
+  * @parent: device parent
+  * @info: parameters for FPGA Region
++ * @owner: module containing the get_bridges function
+  *
+  * Return: struct fpga_region or ERR_PTR()
+  */
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++			    struct module *owner)
+ {
+ 	struct fpga_region *region;
+ 	int id, ret = 0;
+@@ -213,6 +215,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+ 	region->compat_id = info->compat_id;
+ 	region->priv = info->priv;
+ 	region->get_bridges = info->get_bridges;
++	region->ops_owner = owner;
+ 
+ 	mutex_init(&region->mutex);
+ 	INIT_LIST_HEAD(&region->bridge_list);
+@@ -241,13 +244,14 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
+ 
+ 	return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register_full);
++EXPORT_SYMBOL_GPL(__fpga_region_register_full);
+ 
+ /**
+- * fpga_region_register - create and register an FPGA Region device
++ * __fpga_region_register - create and register an FPGA Region device
+  * @parent: device parent
+  * @mgr: manager that programs this region
+  * @get_bridges: optional function to get bridges to a list
++ * @owner: module containing the get_bridges function
+  *
+  * This simple version of the register function should be sufficient for most users.
+  * The fpga_region_register_full() function is available for users that need to
+@@ -256,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full);
+  * Return: struct fpga_region or ERR_PTR()
+  */
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+-		     int (*get_bridges)(struct fpga_region *))
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++		       int (*get_bridges)(struct fpga_region *), struct module *owner)
+ {
+ 	struct fpga_region_info info = { 0 };
+ 
+ 	info.mgr = mgr;
+ 	info.get_bridges = get_bridges;
+ 
+-	return fpga_region_register_full(parent, &info);
++	return __fpga_region_register_full(parent, &info, owner);
+ }
+-EXPORT_SYMBOL_GPL(fpga_region_register);
++EXPORT_SYMBOL_GPL(__fpga_region_register);
+ 
+ /**
+  * fpga_region_unregister - unregister an FPGA region
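The fpga-mgr/fpga-region rename above follows a common kernel idiom: the
exported symbol gains a leading double underscore and an explicit
struct module *owner parameter, while the old public name becomes a
header macro that captures THIS_MODULE at the call site. That lets the
get/put paths pin the module that actually implements the ops instead of
dev->parent->driver->owner, which is wrong when registration is funneled
through a helper living in another module. A minimal sketch of the
implied header-side wrappers (illustrative only; the exact macro text in
the fpga headers is an assumption):

	/* Hypothetical wrappers showing the THIS_MODULE capture. */
	#define fpga_mgr_register_full(parent, info) \
		__fpga_mgr_register_full(parent, info, THIS_MODULE)

	#define fpga_mgr_register(parent, name, mops, priv) \
		__fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)

	#define fpga_region_register(parent, mgr, get_bridges) \
		__fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)

Because the macro expands in the low-level driver, the stored
mops_owner/ops_owner identifies the ops provider even when the wrapper
functions themselves are built into the fpga core.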
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 7f140df40f35b..c1e190d3ea244 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -128,7 +128,24 @@ static bool acpi_gpio_deferred_req_irqs_done;
+ 
+ static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
+ {
+-	return device_match_acpi_handle(&gc->gpiodev->dev, data);
++	/* First check the actual GPIO device */
++	if (device_match_acpi_handle(&gc->gpiodev->dev, data))
++		return true;
++
++	/*
++	 * When the ACPI device is artificially split to the banks of GPIOs,
++	 * where each of them is represented by a separate GPIO device,
++	 * the firmware node of the physical device may not be shared among
++	 * the banks as they may require different values for the same property,
++	 * e.g., number of GPIOs in a certain bank. In such a case the ACPI
++	 * handle of a GPIO device is NULL and cannot be used. Hence we have
++	 * to check the parent device to be sure that there is no match
++	 * before bailing out.
++	 */
++	if (gc->parent)
++		return device_match_acpi_handle(gc->parent, data);
++
++	return false;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 7753a2e64d411..941d6e379b8a6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5809,13 +5809,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
+ 	*speed = PCI_SPEED_UNKNOWN;
+ 	*width = PCIE_LNK_WIDTH_UNKNOWN;
+ 
+-	while ((parent = pci_upstream_bridge(parent))) {
+-		/* skip upstream/downstream switches internal to dGPU*/
+-		if (parent->vendor == PCI_VENDOR_ID_ATI)
+-			continue;
+-		*speed = pcie_get_speed_cap(parent);
+-		*width = pcie_get_width_cap(parent);
+-		break;
++	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
++		while ((parent = pci_upstream_bridge(parent))) {
++			/* skip upstream/downstream switches internal to dGPU */
++			if (parent->vendor == PCI_VENDOR_ID_ATI)
++				continue;
++			*speed = pcie_get_speed_cap(parent);
++			*width = pcie_get_width_cap(parent);
++			break;
++		}
++	} else {
++		/* use the current speeds rather than max if switching is not supported */
++		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
+ 	}
+ }
+ 
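The amdgpu hunk above falls back from bridge capability queries to the
bandwidth the link currently provides whenever dynamic PCIe switching is
unavailable, since in that case the trained speed is what the GPU will
actually get. The core helper walks the whole chain itself; a hedged
usage sketch (hypothetical function around the real
pcie_bandwidth_available() API):

	#include <linux/pci.h>

	/* Report the effective (currently negotiated) link for a device.
	 * The second argument may receive the limiting device; pass NULL
	 * when it is not needed. */
	static void report_effective_link(struct pci_dev *pdev)
	{
		enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
		enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

		pcie_bandwidth_available(pdev, NULL, &speed, &width);
		pci_info(pdev, "effective link: speed=%d width=%d\n",
			 speed, width);
	}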
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index b53c8fd4e8cf3..d89d6829f1df4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -431,16 +431,16 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+ 
+ static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
+ {
+-	const char *chip_name;
++	char ucode_prefix[15];
+ 	int r;
+ 
+-	chip_name = "gc_9_4_3";
++	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ 
+-	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
++	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
+ 	if (r)
+ 		return r;
+ 
+-	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
++	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
+ 	if (r)
+ 		return r;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index cb31a699c6622..1a269099f19f8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 		&connector->base,
+ 		dev->mode_config.tile_property,
+ 		0);
++	connector->colorspace_property = master->base.colorspace_property;
++	if (connector->colorspace_property)
++		drm_connector_attach_colorspace_property(connector);
+ 
+ 	drm_connector_set_path_property(connector, pathprop);
+ 
+diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
+index 5965e8027529a..8dd89efa8ea7d 100644
+--- a/drivers/gpu/drm/bridge/imx/Kconfig
++++ b/drivers/gpu/drm/bridge/imx/Kconfig
+@@ -8,8 +8,8 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
+ 	depends on OF
+ 	depends on COMMON_CLK
+ 	select DRM_DW_HDMI
+-	select DRM_IMX8MP_HDMI_PVI
+-	select PHY_FSL_SAMSUNG_HDMI_PHY
++	imply DRM_IMX8MP_HDMI_PVI
++	imply PHY_FSL_SAMSUNG_HDMI_PHY
+ 	help
+ 	  Choose this to enable support for the internal HDMI encoder found
+ 	  on the i.MX8MP SoC.
+diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
+index fea4f00a20f83..c737670631929 100644
+--- a/drivers/gpu/drm/bridge/tc358775.c
++++ b/drivers/gpu/drm/bridge/tc358775.c
+@@ -454,10 +454,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ 	dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
+ 		connector->display_info.bus_formats[0],
+ 		tc->bpc);
+-	/*
+-	 * Default hardware register settings of tc358775 configured
+-	 * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
+-	 */
+ 	if (connector->display_info.bus_formats[0] ==
+ 		MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
+ 		/* VESA-24 */
+@@ -468,14 +464,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
+ 		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ 		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ 		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+-	} else { /*  MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
+-		d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+-		d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
+-		d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
+-		d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+-		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
+-		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+-		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
++	} else {
++		/* JEIDA-18 and JEIDA-24 */
++		d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
++		d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
++		d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
++		d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
++		d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
++		d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
++		d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
+ 	}
+ 
+ 	d2l_write(tc->i2c, VFUEN, VFUEN_EN);
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 4814b7b6d1fd1..57a7ed13f9965 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
+ 		dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
+ 		/* On failure, disable PLL again and exit. */
+ 		regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
+-		regulator_disable(ctx->vcc);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 7a6dc371c384e..bc6209df0f680 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -919,6 +919,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
+ 	if (IS_DG2(gt->i915)) {
+ 		u8 first_ccs = __ffs(CCS_MASK(gt));
+ 
++		/*
++		 * Store the number of active cslices before
++		 * changing the CCS engine configuration
++		 */
++		gt->ccs.cslices = CCS_MASK(gt);
++
+ 		/* Mask off all the CCS engine */
+ 		info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ 		/* Put back in the first CCS engine */
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+index 99b71bb7da0a6..3c62a44e9106c 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
+ 
+ 	/* Build the value for the fixed CCS load balancing */
+ 	for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+-		if (CCS_MASK(gt) & BIT(cslice))
++		if (gt->ccs.cslices & BIT(cslice))
+ 			/*
+ 			 * If available, assign the cslice
+ 			 * to the first available engine...
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+index def7dd0eb6f19..cfdd2ad5e9549 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
+@@ -207,6 +207,14 @@ struct intel_gt {
+ 					    [MAX_ENGINE_INSTANCE + 1];
+ 	enum intel_submission_method submission_method;
+ 
++	struct {
++		/*
++		 * Mask of the non fused CCS slices
++		 * to be used for the load balancing
++		 */
++		intel_engine_mask_t cslices;
++	} ccs;
++
+ 	/*
+ 	 * Default address space (either GGTT or ppGTT depending on arch).
+ 	 *
+diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+index 58012edd4eb0e..4f4f53c42a9c5 100644
+--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
++++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+@@ -29,9 +29,9 @@
+  */
+ 
+ #define GUC_KLV_LEN_MIN				1u
+-#define GUC_KLV_0_KEY				(0xffff << 16)
+-#define GUC_KLV_0_LEN				(0xffff << 0)
+-#define GUC_KLV_n_VALUE				(0xffffffff << 0)
++#define GUC_KLV_0_KEY				(0xffffu << 16)
++#define GUC_KLV_0_LEN				(0xffffu << 0)
++#define GUC_KLV_n_VALUE				(0xffffffffu << 0)
+ 
+ /**
+  * DOC: GuC Self Config KLVs
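The u suffixes above are not cosmetic: an unsuffixed 0xffff is a signed
int, so 0xffff << 16 shifts into the sign bit (undefined behaviour for
signed types) and, in practice, yields a negative value that is
sign-extended when the mask is widened to 64 bits. A standalone
demonstration (plain userspace C, not i915 code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t bad  = 0xffff << 16;	/* int shifted into the sign
						 * bit, then sign-extended */
		uint64_t good = 0xffffu << 16;	/* unsigned: no extension */

		printf("bad  = %#llx\n", (unsigned long long)bad);
		/* typically 0xffffffffffff0000 */
		printf("good = %#llx\n", (unsigned long long)good);
		/* 0xffff0000 */
		return 0;
	}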
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 0ba72102636aa..536366956447a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -2104,7 +2104,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 
+ 	if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
+ 	    !mtk_dp->train_info.cable_plugged_in) {
+-		ret = -EAGAIN;
++		ret = -EIO;
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+index a6bc1bdb3d0d8..a10cff3ca1fef 100644
+--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
++++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+@@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
+ 		return ret;
+ 	}
+ 
++	clk_disable_unprepare(mipi_dsi->px_clk);
+ 	ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
+ 
+ 	if (ret) {
+@@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
+ 		return ret;
+ 	}
+ 
++	ret = clk_prepare_enable(mipi_dsi->px_clk);
++	if (ret) {
++		dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret);
++		return ret;
++	}
++
+ 	switch (mipi_dsi->dsi_device->format) {
+ 	case MIPI_DSI_FMT_RGB888:
+ 		dpi_data_format = DPI_COLOR_24BIT;
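The meson DSI fix brackets clk_set_rate() with a disable/re-enable pair:
on some clock providers, changing the pixel-clock rate while the clock
tree is running can be rejected or glitch downstream consumers, so the
safe sequence is stop, retune, restart, checking the enable's return
value. Condensed into a sketch (hypothetical helper, same clk API as
above):

	/* Sketch of the disable -> set_rate -> enable sequence. */
	static int foo_set_px_rate(struct clk *px_clk, unsigned long hz)
	{
		int ret;

		clk_disable_unprepare(px_clk);

		ret = clk_set_rate(px_clk, hz);
		if (ret)
			return ret;	/* left disabled on failure, as above */

		return clk_prepare_enable(px_clk);
	}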
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index cf0b1de1c0712..7b72327df7f3f 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -284,7 +284,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 
+ 	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+ 
+-	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
++	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
+ 		rbmemptr_stats(ring, index, cpcycles_start));
+ 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ 		rbmemptr_stats(ring, index, alwayson_start));
+@@ -330,7 +330,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	OUT_PKT7(ring, CP_SET_MARKER, 1);
+ 	OUT_RING(ring, 0x00e); /* IB1LIST end */
+ 
+-	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
++	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
+ 		rbmemptr_stats(ring, index, cpcycles_end));
+ 	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ 		rbmemptr_stats(ring, index, alwayson_end));
+@@ -3062,7 +3062,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ 
+ 	ret = a6xx_set_supported_hw(&pdev->dev, config->info);
+ 	if (ret) {
+-		a6xx_destroy(&(a6xx_gpu->base.base));
++		a6xx_llc_slices_destroy(a6xx_gpu);
++		kfree(a6xx_gpu);
+ 		return ERR_PTR(ret);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+index fc1d5736d7fcc..489be1c0c7046 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+@@ -448,9 +448,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
+ 
+ 	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+ 
+-	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+-		return;
+-
+ 	ctl = phys_enc->hw_ctl;
+ 	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+index a06f69d0b257d..2e50049f2f850 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+@@ -545,6 +545,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ {
+ 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ 	u32 intf_active = 0;
++	u32 dsc_active = 0;
+ 	u32 wb_active = 0;
+ 	u32 mode_sel = 0;
+ 
+@@ -560,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ 
+ 	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ 	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
++	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ 
+ 	if (cfg->intf)
+ 		intf_active |= BIT(cfg->intf - INTF_0);
+@@ -567,17 +569,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ 	if (cfg->wb)
+ 		wb_active |= BIT(cfg->wb - WB_0);
+ 
++	if (cfg->dsc)
++		dsc_active |= cfg->dsc;
++
+ 	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ 	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ 	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
++	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ 
+ 	if (cfg->merge_3d)
+ 		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ 			      BIT(cfg->merge_3d - MERGE_3D_0));
+ 
+-	if (cfg->dsc)
+-		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+-
+ 	if (cfg->cdm)
+ 		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
+ }
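The dpu_hw_ctl change converts CTL_DSC_ACTIVE to the same
read-modify-write handling already used for CTL_INTF_ACTIVE and
CTL_WB_ACTIVE, so programming a second datapath no longer wipes the DSC
bits set up by the first. The pattern, reduced to a sketch reusing the
file's own register accessors (illustrative helper, not in the patch):

	/* RMW so independent callers accumulate ACTIVE bits instead of
	 * overwriting each other. */
	static void ctl_or_active(struct dpu_hw_blk_reg_map *c, u32 reg, u32 bits)
	{
		u32 val = DPU_REG_READ(c, reg);

		DPU_REG_WRITE(c, reg, val | bits);
	}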
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+index 6a0a74832fb64..b85881aab0478 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+@@ -223,9 +223,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int
+ 
+ 	VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ 
+-	if (!irq_entry->cb)
++	if (!irq_entry->cb) {
+ 		DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
+ 			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
++		return;
++	}
+ 
+ 	atomic_inc(&irq_entry->count);
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 9d86a6aca6f2a..c80be74cf10b5 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -356,8 +356,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
+ {
+ 	int ret;
+ 
+-	DBG("Set clk rates: pclk=%d, byteclk=%lu",
+-		msm_host->mode->clock, msm_host->byte_clk_rate);
++	DBG("Set clk rates: pclk=%lu, byteclk=%lu",
++	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate);
+ 
+ 	ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+ 				  msm_host->byte_clk_rate);
+@@ -430,9 +430,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
+ {
+ 	int ret;
+ 
+-	DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+-		msm_host->mode->clock, msm_host->byte_clk_rate,
+-		msm_host->esc_clk_rate, msm_host->src_clk_rate);
++	DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
++	    msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
++	    msm_host->esc_clk_rate, msm_host->src_clk_rate);
+ 
+ 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+index 80f74ee0fc786..47e53e17b4e58 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
++++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+ 		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ 		break;
+ 	}
++	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
++		getparam->value = 1;
++		break;
+ 	default:
+ 		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index db8cbf6151129..186add400ea5f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ 	}
+ 
+ 	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+-	if (!nouveau_cli_uvmm(cli) || internal) {
+-		/* for BO noVM allocs, don't assign kinds */
+-		if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+-			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+-				kfree(nvbo);
+-				return ERR_PTR(-EINVAL);
+-			}
+ 
+-			nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+-		} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+-			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+-			nvbo->comp = (tile_flags & 0x00030000) >> 16;
+-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+-				kfree(nvbo);
+-				return ERR_PTR(-EINVAL);
+-			}
+-		} else {
+-			nvbo->zeta = (tile_flags & 0x00000007);
++	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
++		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
++		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
++			kfree(nvbo);
++			return ERR_PTR(-EINVAL);
++		}
++
++		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
++	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
++		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
++		nvbo->comp = (tile_flags & 0x00030000) >> 16;
++		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
++			kfree(nvbo);
++			return ERR_PTR(-EINVAL);
+ 		}
+-		nvbo->mode = tile_mode;
++	} else {
++		nvbo->zeta = (tile_flags & 0x00000007);
++	}
++	nvbo->mode = tile_mode;
+ 
++	if (!nouveau_cli_uvmm(cli) || internal) {
+ 		/* Determine the desirable target GPU page size for the buffer. */
+ 		for (i = 0; i < vmm->page_nr; i++) {
+ 			/* Because we cannot currently allow VMM maps to fail
+@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+ 		}
+ 		nvbo->page = vmm->page[pi].shift;
+ 	} else {
+-		/* reject other tile flags when in VM mode. */
+-		if (tile_mode)
+-			return ERR_PTR(-EINVAL);
+-		if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+-			return ERR_PTR(-EINVAL);
+-
+ 		/* Determine the desirable target GPU page size for the buffer. */
+ 		for (i = 0; i < vmm->page_nr; i++) {
+ 			/* Because we cannot currently allow VMM maps to fail
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+index 88e80fe98112d..e8f385b9c6182 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
+ static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
+ 	.clock = 6000,
+ 	.hdisplay = 240,
+-	.hsync_start = 240 + 28,
+-	.hsync_end = 240 + 28 + 10,
+-	.htotal = 240 + 28 + 10 + 10,
++	.hsync_start = 240 + 38,
++	.hsync_end = 240 + 38 + 10,
++	.htotal = 240 + 38 + 10 + 10,
+ 	.vdisplay = 280,
+-	.vsync_start = 280 + 8,
+-	.vsync_end = 280 + 8 + 4,
+-	.vtotal = 280 + 8 + 4 + 4,
+-	.width_mm = 43,
+-	.height_mm = 37,
++	.vsync_start = 280 + 48,
++	.vsync_end = 280 + 48 + 4,
++	.vtotal = 280 + 48 + 4 + 4,
++	.width_mm = 37,
++	.height_mm = 43,
+ 	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ };
+ 
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index d32ff3857e658..b3b37ed832ca0 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -389,8 +389,14 @@ static int xe_set_dma_info(struct xe_device *xe)
+ 	return err;
+ }
+ 
+-/*
+- * Initialize MMIO resources that don't require any knowledge about tile count.
++/**
++ * xe_device_probe_early - Device early probe
++ * @xe: xe device instance
++ *
++ * Initialize MMIO resources that don't require any
++ * knowledge about tile count. Also initializes pcode.
++ *
++ * Return: 0 on success, error code on failure
+  */
+ int xe_device_probe_early(struct xe_device *xe)
+ {
+@@ -404,6 +410,10 @@ int xe_device_probe_early(struct xe_device *xe)
+ 	if (err)
+ 		return err;
+ 
++	err = xe_pcode_probe_early(xe);
++	if (err)
++		return err;
++
+ 	return 0;
+ }
+ 
+@@ -482,11 +492,8 @@ int xe_device_probe(struct xe_device *xe)
+ 	if (err)
+ 		return err;
+ 
+-	for_each_gt(gt, xe, id) {
+-		err = xe_pcode_probe(gt);
+-		if (err)
+-			return err;
+-	}
++	for_each_gt(gt, xe, id)
++		xe_pcode_init(gt);
+ 
+ 	err = xe_display_init_noirq(xe);
+ 	if (err)
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index 2ba4fb9511f63..aca519f5b85d9 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -33,7 +33,6 @@
+ #include "xe_sync.h"
+ #include "xe_trace.h"
+ #include "xe_vm.h"
+-#include "xe_wa.h"
+ 
+ /**
+  * struct xe_migrate - migrate context.
+@@ -299,10 +298,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
+ }
+ 
+ /*
+- * Due to workaround 16017236439, odd instance hardware copy engines are
+- * faster than even instance ones.
+- * This function returns the mask involving all fast copy engines and the
+- * reserved copy engine to be used as logical mask for migrate engine.
+  * Including the reserved copy engine is required to avoid deadlocks due to
+  * migrate jobs servicing the faults gets stuck behind the job that faulted.
+  */
+@@ -316,8 +311,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
+ 		if (hwe->class != XE_ENGINE_CLASS_COPY)
+ 			continue;
+ 
+-		if (!XE_WA(gt, 16017236439) ||
+-		    xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
++		if (xe_gt_is_usm_hwe(gt, hwe))
+ 			logical_mask |= BIT(hwe->logical_instance);
+ 	}
+ 
+@@ -368,6 +362,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+ 		if (!hwe || !logical_mask)
+ 			return ERR_PTR(-EINVAL);
+ 
++		/*
++		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
++		 * PVC, may want to revisit if performance is needed.
++		 */
+ 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
+ 					    EXEC_QUEUE_FLAG_KERNEL |
+ 					    EXEC_QUEUE_FLAG_PERMANENT |
+diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
+index b324dc2a5debe..81f4ae2ea08f3 100644
+--- a/drivers/gpu/drm/xe/xe_pcode.c
++++ b/drivers/gpu/drm/xe/xe_pcode.c
+@@ -10,6 +10,7 @@
+ 
+ #include <drm/drm_managed.h>
+ 
++#include "xe_device.h"
+ #include "xe_gt.h"
+ #include "xe_mmio.h"
+ #include "xe_pcode_api.h"
+@@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt)
+ 		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
+ 	};
+ 
+-	lockdep_assert_held(&gt->pcode.lock);
+-
+ 	err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ 	if (err) {
+ 		drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
+@@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt)
+ 	return 0;
+ }
+ 
+-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+-			    unsigned int timeout_ms, bool return_data,
+-			    bool atomic)
++static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
++			      unsigned int timeout_ms, bool return_data,
++			      bool atomic)
+ {
+ 	int err;
+ 
+ 	if (gt_to_xe(gt)->info.skip_pcode)
+ 		return 0;
+ 
+-	lockdep_assert_held(&gt->pcode.lock);
+-
+ 	if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
+ 		return -EAGAIN;
+ 
+@@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ 	return pcode_mailbox_status(gt);
+ }
+ 
++static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
++			    unsigned int timeout_ms, bool return_data,
++			    bool atomic)
++{
++	if (gt_to_xe(gt)->info.skip_pcode)
++		return 0;
++
++	lockdep_assert_held(&gt->pcode.lock);
++
++	return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
++}
++
+ int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
+ {
+ 	int err;
+@@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
+ 	return err;
+ }
+ 
+-static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
+-				u32 request, u32 reply_mask, u32 reply,
+-				u32 *status, bool atomic, int timeout_us)
++static int pcode_try_request(struct xe_gt *gt, u32 mbox,
++			     u32 request, u32 reply_mask, u32 reply,
++			     u32 *status, bool atomic, int timeout_us, bool locked)
+ {
+ 	int slept, wait = 10;
+ 
+ 	for (slept = 0; slept < timeout_us; slept += wait) {
+-		*status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+-					   atomic);
++		if (locked)
++			*status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
++						   atomic);
++		else
++			*status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
++						     atomic);
+ 		if ((*status == 0) && ((request & reply_mask) == reply))
+ 			return 0;
+ 
+@@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+ 
+ 	mutex_lock(&gt->pcode.lock);
+ 
+-	ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+-				   false, timeout_base_ms * 1000);
++	ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
++				false, timeout_base_ms * 1000, true);
+ 	if (!ret)
+ 		goto out;
+ 
+@@ -177,8 +190,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+ 		"PCODE timeout, retrying with preemption disabled\n");
+ 	drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
+ 	preempt_disable();
+-	ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+-				   true, timeout_base_ms * 1000);
++	ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
++				true, 50 * 1000, true);
+ 	preempt_enable();
+ 
+ out:
+@@ -238,59 +251,71 @@ int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+ }
+ 
+ /**
+- * xe_pcode_init - Ensure PCODE is initialized
+- * @gt: gt instance
++ * xe_pcode_ready - Ensure PCODE is initialized
++ * @xe: xe instance
++ * @locked: true if lock held, false otherwise
+  *
+- * This function ensures that PCODE is properly initialized. To be called during
+- * probe and resume paths.
++ * PCODE init mailbox is polled only on the root GT of the root tile,
++ * since the root tile reports initialization complete only after all
++ * the tiles have completed their initialization.
++ * Called without locks on early probe, and with locks held in the
++ * resume path.
+  *
+- * It returns 0 on success, and -error number on failure.
++ * Returns 0 on success, and -error number on failure.
+  */
+-int xe_pcode_init(struct xe_gt *gt)
++int xe_pcode_ready(struct xe_device *xe, bool locked)
+ {
+ 	u32 status, request = DGFX_GET_INIT_STATUS;
++	struct xe_gt *gt = xe_root_mmio_gt(xe);
+ 	int timeout_us = 180000000; /* 3 min */
+ 	int ret;
+ 
+-	if (gt_to_xe(gt)->info.skip_pcode)
++	if (xe->info.skip_pcode)
+ 		return 0;
+ 
+-	if (!IS_DGFX(gt_to_xe(gt)))
++	if (!IS_DGFX(xe))
+ 		return 0;
+ 
+-	mutex_lock(&gt->pcode.lock);
+-	ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+-				   DGFX_INIT_STATUS_COMPLETE,
+-				   DGFX_INIT_STATUS_COMPLETE,
+-				   &status, false, timeout_us);
+-	mutex_unlock(&gt->pcode.lock);
++	if (locked)
++		mutex_lock(&gt->pcode.lock);
++
++	ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
++				DGFX_INIT_STATUS_COMPLETE,
++				DGFX_INIT_STATUS_COMPLETE,
++				&status, false, timeout_us, locked);
++
++	if (locked)
++		mutex_unlock(&gt->pcode.lock);
+ 
+ 	if (ret)
+-		drm_err(&gt_to_xe(gt)->drm,
++		drm_err(&xe->drm,
+ 			"PCODE initialization timedout after: 3 min\n");
+ 
+ 	return ret;
+ }
+ 
+ /**
+- * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
++ * xe_pcode_init - initialize components of PCODE
+  * @gt: gt instance
+  *
+- * This function initializes the xe_pcode component, and when needed, it ensures
+- * that PCODE has properly performed its initialization and it is really ready
+- * to go. To be called once only during probe.
+- *
+- * It returns 0 on success, and -error number on failure.
++ * This function initializes the xe_pcode component.
++ * To be called once only during probe.
+  */
+-int xe_pcode_probe(struct xe_gt *gt)
++void xe_pcode_init(struct xe_gt *gt)
+ {
+ 	drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
++}
+ 
+-	if (gt_to_xe(gt)->info.skip_pcode)
+-		return 0;
+-
+-	if (!IS_DGFX(gt_to_xe(gt)))
+-		return 0;
+-
+-	return xe_pcode_init(gt);
++/**
++ * xe_pcode_probe_early - initializes PCODE
++ * @xe: xe instance
++ *
++ * This function checks the initialization status of PCODE.
++ * To be called once only during early probe without locks.
++ *
++ * Returns 0 on success, error code otherwise
++ */
++int xe_pcode_probe_early(struct xe_device *xe)
++{
++	return xe_pcode_ready(xe, false);
+ }
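The pcode rework is a textbook locked/unlocked split: the raw
__pcode_mailbox_rw() carries no locking assumptions, and the original
name becomes a thin wrapper that keeps the lockdep_assert_held(). Early
probe must use the bare path because it runs before xe_pcode_init() has
set the mutex up; resume goes through the asserting wrapper. The shape,
stripped to a skeleton (hypothetical names, driver types kept for
readability):

	static int __do_mbox(struct xe_gt *gt, u32 mbox)
	{
		/* raw hardware access; caller handles serialization */
		return 0;
	}

	static int do_mbox(struct xe_gt *gt, u32 mbox)
	{
		lockdep_assert_held(&gt->pcode.lock);
		return __do_mbox(gt, mbox);
	}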
+diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
+index 08cb1d047cba2..3f54c6d2a57d2 100644
+--- a/drivers/gpu/drm/xe/xe_pcode.h
++++ b/drivers/gpu/drm/xe/xe_pcode.h
+@@ -8,9 +8,11 @@
+ 
+ #include <linux/types.h>
+ struct xe_gt;
++struct xe_device;
+ 
+-int xe_pcode_probe(struct xe_gt *gt);
+-int xe_pcode_init(struct xe_gt *gt);
++void xe_pcode_init(struct xe_gt *gt);
++int xe_pcode_probe_early(struct xe_device *xe);
++int xe_pcode_ready(struct xe_device *xe, bool locked);
+ int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+ 				 u32 max_gt_freq);
+ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
+diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
+index 53b3b0b019acd..944cf4d76099e 100644
+--- a/drivers/gpu/drm/xe/xe_pm.c
++++ b/drivers/gpu/drm/xe/xe_pm.c
+@@ -54,13 +54,15 @@ int xe_pm_suspend(struct xe_device *xe)
+ 	u8 id;
+ 	int err;
+ 
++	drm_dbg(&xe->drm, "Suspending device\n");
++
+ 	for_each_gt(gt, xe, id)
+ 		xe_gt_suspend_prepare(gt);
+ 
+ 	/* FIXME: Super racey... */
+ 	err = xe_bo_evict_all(xe);
+ 	if (err)
+-		return err;
++		goto err;
+ 
+ 	xe_display_pm_suspend(xe);
+ 
+@@ -68,7 +70,7 @@ int xe_pm_suspend(struct xe_device *xe)
+ 		err = xe_gt_suspend(gt);
+ 		if (err) {
+ 			xe_display_pm_resume(xe);
+-			return err;
++			goto err;
+ 		}
+ 	}
+ 
+@@ -76,7 +78,11 @@ int xe_pm_suspend(struct xe_device *xe)
+ 
+ 	xe_display_pm_suspend_late(xe);
+ 
++	drm_dbg(&xe->drm, "Device suspended\n");
+ 	return 0;
++err:
++	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
++	return err;
+ }
+ 
+ /**
+@@ -92,14 +98,14 @@ int xe_pm_resume(struct xe_device *xe)
+ 	u8 id;
+ 	int err;
+ 
++	drm_dbg(&xe->drm, "Resuming device\n");
++
+ 	for_each_tile(tile, xe, id)
+ 		xe_wa_apply_tile_workarounds(tile);
+ 
+-	for_each_gt(gt, xe, id) {
+-		err = xe_pcode_init(gt);
+-		if (err)
+-			return err;
+-	}
++	err = xe_pcode_ready(xe, true);
++	if (err)
++		return err;
+ 
+ 	xe_display_pm_resume_early(xe);
+ 
+@@ -109,7 +115,7 @@ int xe_pm_resume(struct xe_device *xe)
+ 	 */
+ 	err = xe_bo_restore_kernel(xe);
+ 	if (err)
+-		return err;
++		goto err;
+ 
+ 	xe_irq_resume(xe);
+ 
+@@ -120,9 +126,13 @@ int xe_pm_resume(struct xe_device *xe)
+ 
+ 	err = xe_bo_restore_user(xe);
+ 	if (err)
+-		return err;
++		goto err;
+ 
++	drm_dbg(&xe->drm, "Device resumed\n");
+ 	return 0;
++err:
++	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
++	return err;
+ }
+ 
+ static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
+@@ -310,11 +320,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
+ 	xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
+ 
+ 	if (xe->d3cold.allowed && xe->d3cold.power_lost) {
+-		for_each_gt(gt, xe, id) {
+-			err = xe_pcode_init(gt);
+-			if (err)
+-				goto out;
+-		}
++		err = xe_pcode_ready(xe, true);
++		if (err)
++			goto out;
+ 
+ 		/*
+ 		 * This only restores pinned memory which is the memory
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index 88eb33acd5f0d..face8d6b2a6fb 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -256,12 +256,12 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_dp;
+ 
++	drm_bridge_add(dpsub->bridge);
++
+ 	if (dpsub->dma_enabled) {
+ 		ret = zynqmp_dpsub_drm_init(dpsub);
+ 		if (ret)
+ 			goto err_disp;
+-	} else {
+-		drm_bridge_add(dpsub->bridge);
+ 	}
+ 
+ 	dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
+@@ -288,9 +288,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
+ 
+ 	if (dpsub->drm)
+ 		zynqmp_dpsub_drm_cleanup(dpsub);
+-	else
+-		drm_bridge_remove(dpsub->bridge);
+ 
++	drm_bridge_remove(dpsub->bridge);
+ 	zynqmp_disp_remove(dpsub);
+ 	zynqmp_dp_remove(dpsub);
+ 
+diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
+index 6500ca548f9c7..ca2dff1589251 100644
+--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
++++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
+@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
+ };
+ 
+ static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
+-	{ 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
++	{ 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
+ };
+ 
+ static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index 1f96e94967ee8..439dd3dba5fc8 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
+ 
+ 	if (np) {
+ 		data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+-		data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
++		data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+ 	} else {
+ 		if (client->dev.platform_data)
+ 			data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index c2ca4a02dfce1..a0bdfabddbc68 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -1240,6 +1240,8 @@ static void etm4_init_arch_data(void *info)
+ 	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
+ 	/* QSUPP, bits[16:15] Q element support field */
+ 	drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
++	if (drvdata->q_support)
++		drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
+ 	/* TSSIZE, bits[28:24] Global timestamp size field */
+ 	drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
+ 
+@@ -1732,16 +1734,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ 	state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ 	state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+-	state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
++	if (drvdata->q_filt)
++		state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+ 
+ 	state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ 	state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ 	state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
+ 	if (drvdata->nr_pe_cmp)
+ 		state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+-	state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+-	state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+-	state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
+ 
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
+@@ -1758,7 +1758,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 		state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
+ 	}
+ 
+-	for (i = 0; i < drvdata->nr_resource * 2; i++)
++	/* Resource selector pair 0 is reserved */
++	for (i = 2; i < drvdata->nr_resource * 2; i++)
+ 		state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
+ 
+ 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -1843,8 +1844,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ {
+ 	int i;
+ 	struct etmv4_save_state *state = drvdata->save_state;
+-	struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+-	struct csdev_access *csa = &tmp_csa;
++	struct csdev_access *csa = &drvdata->csdev->access;
++
++	if (WARN_ON(!drvdata->csdev))
++		return;
+ 
+ 	etm4_cs_unlock(drvdata, csa);
+ 	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
+@@ -1863,16 +1866,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ 	etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ 	etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+-	etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
++	if (drvdata->q_filt)
++		etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+ 
+ 	etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ 	etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ 	etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
+ 	if (drvdata->nr_pe_cmp)
+ 		etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+-	etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
+ 
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
+@@ -1889,7 +1890,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
+ 	}
+ 
+-	for (i = 0; i < drvdata->nr_resource * 2; i++)
++	/* Resource selector pair 0 is reserved */
++	for (i = 2; i < drvdata->nr_resource * 2; i++)
+ 		etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
+ 
+ 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+@@ -2213,6 +2215,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
+ 	ret = etm4_probe(&pdev->dev);
+ 
+ 	pm_runtime_put(&pdev->dev);
++	if (ret)
++		pm_runtime_disable(&pdev->dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 9ea678bc2e8e5..9e9165f62e81f 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -43,9 +43,6 @@
+ #define TRCVIIECTLR			0x084
+ #define TRCVISSCTLR			0x088
+ #define TRCVIPCSSCTLR			0x08C
+-#define TRCVDCTLR			0x0A0
+-#define TRCVDSACCTLR			0x0A4
+-#define TRCVDARCCTLR			0x0A8
+ /* Derived resources registers */
+ #define TRCSEQEVRn(n)			(0x100 + (n * 4)) /* n = 0-2 */
+ #define TRCSEQRSTEVR			0x118
+@@ -90,9 +87,6 @@
+ /* Address Comparator registers n = 0-15 */
+ #define TRCACVRn(n)			(0x400 + (n * 8))
+ #define TRCACATRn(n)			(0x480 + (n * 8))
+-/* Data Value Comparator Value registers, n = 0-7 */
+-#define TRCDVCVRn(n)			(0x500 + (n * 16))
+-#define TRCDVCMRn(n)			(0x580 + (n * 16))
+ /* ContextID/Virtual ContextID comparators, n = 0-7 */
+ #define TRCCIDCVRn(n)			(0x600 + (n * 8))
+ #define TRCVMIDCVRn(n)			(0x640 + (n * 8))
+@@ -141,6 +135,7 @@
+ #define TRCIDR0_TRCCCI				BIT(7)
+ #define TRCIDR0_RETSTACK			BIT(9)
+ #define TRCIDR0_NUMEVENT_MASK			GENMASK(11, 10)
++#define TRCIDR0_QFILT				BIT(14)
+ #define TRCIDR0_QSUPP_MASK			GENMASK(16, 15)
+ #define TRCIDR0_TSSIZE_MASK			GENMASK(28, 24)
+ 
+@@ -272,9 +267,6 @@
+ /* List of registers accessible via System instructions */
+ #define ETM4x_ONLY_SYSREG_LIST(op, val)		\
+ 	CASE_##op((val), TRCPROCSELR)		\
+-	CASE_##op((val), TRCVDCTLR)		\
+-	CASE_##op((val), TRCVDSACCTLR)		\
+-	CASE_##op((val), TRCVDARCCTLR)		\
+ 	CASE_##op((val), TRCOSLAR)
+ 
+ #define ETM_COMMON_SYSREG_LIST(op, val)		\
+@@ -422,22 +414,6 @@
+ 	CASE_##op((val), TRCACATRn(13))		\
+ 	CASE_##op((val), TRCACATRn(14))		\
+ 	CASE_##op((val), TRCACATRn(15))		\
+-	CASE_##op((val), TRCDVCVRn(0))		\
+-	CASE_##op((val), TRCDVCVRn(1))		\
+-	CASE_##op((val), TRCDVCVRn(2))		\
+-	CASE_##op((val), TRCDVCVRn(3))		\
+-	CASE_##op((val), TRCDVCVRn(4))		\
+-	CASE_##op((val), TRCDVCVRn(5))		\
+-	CASE_##op((val), TRCDVCVRn(6))		\
+-	CASE_##op((val), TRCDVCVRn(7))		\
+-	CASE_##op((val), TRCDVCMRn(0))		\
+-	CASE_##op((val), TRCDVCMRn(1))		\
+-	CASE_##op((val), TRCDVCMRn(2))		\
+-	CASE_##op((val), TRCDVCMRn(3))		\
+-	CASE_##op((val), TRCDVCMRn(4))		\
+-	CASE_##op((val), TRCDVCMRn(5))		\
+-	CASE_##op((val), TRCDVCMRn(6))		\
+-	CASE_##op((val), TRCDVCMRn(7))		\
+ 	CASE_##op((val), TRCCIDCVRn(0))		\
+ 	CASE_##op((val), TRCCIDCVRn(1))		\
+ 	CASE_##op((val), TRCCIDCVRn(2))		\
+@@ -907,9 +883,6 @@ struct etmv4_save_state {
+ 	u32	trcviiectlr;
+ 	u32	trcvissctlr;
+ 	u32	trcvipcssctlr;
+-	u32	trcvdctlr;
+-	u32	trcvdsacctlr;
+-	u32	trcvdarcctlr;
+ 
+ 	u32	trcseqevr[ETM_MAX_SEQ_STATES];
+ 	u32	trcseqrstevr;
+@@ -982,6 +955,7 @@ struct etmv4_save_state {
+  * @os_unlock:  True if access to management registers is allowed.
+  * @instrp0:	Tracing of load and store instructions
+  *		as P0 elements is supported.
++ * @q_filt:	Q element filtering support, if Q elements are supported.
+  * @trcbb:	Indicates if the trace unit supports branch broadcast tracing.
+  * @trccond:	If the trace unit supports conditional
+  *		instruction tracing.
+@@ -1044,6 +1018,7 @@ struct etmv4_drvdata {
+ 	bool				boot_enable;
+ 	bool				os_unlock;
+ 	bool				instrp0;
++	bool				q_filt;
+ 	bool				trcbb;
+ 	bool				trccond;
+ 	bool				retstack;
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index 534fbefc7f6aa..20895d3915623 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ 		return -ENOMEM;
+ 
+ 	stm->major = register_chrdev(0, stm_data->name, &stm_fops);
+-	if (stm->major < 0)
+-		goto err_free;
++	if (stm->major < 0) {
++		err = stm->major;
++		vfree(stm);
++		return err;
++	}
+ 
+ 	device_initialize(&stm->dev);
+ 	stm->dev.devt = MKDEV(stm->major, 0);
+@@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ err_device:
+ 	unregister_chrdev(stm->major, stm_data->name);
+ 
+-	/* matches device_initialize() above */
++	/* calls stm_device_release() */
+ 	put_device(&stm->dev);
+-err_free:
+-	vfree(stm);
+ 
+ 	return err;
+ }
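The stm error-path fix honours the device_initialize() contract: before
the call the allocation is plain memory and may be freed directly (hence
the inlined vfree()), but afterwards the embedded kobject owns the
lifetime and the only legal teardown is put_device(), whose final
reference drop invokes the release callback. The rule in miniature
(hypothetical foo object):

	struct foo {
		struct device dev;
	};

	static void foo_release(struct device *dev)
	{
		kfree(container_of(dev, struct foo, dev));
	}

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		/* Before device_initialize(): kfree(f) would be fine. */

		device_initialize(&f->dev);
		f->dev.release = foo_release;
		/* After device_initialize(): never free directly; on any
		 * error path, put_device(&f->dev) runs foo_release(). */
		return f;
	}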
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 4bb7d6756947c..2fce3e84ba646 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -633,6 +633,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
+ 
+ 	if (hold_clear) {
+ 		ctrl_reg &= ~CDNS_I2C_CR_HOLD;
++		ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO;
+ 		/*
+ 		 * In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size
+ 		 * register reaches '0'. This is an IP bug which causes transfer size
+diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
+index bbea521b05dda..a73f5bb9a1645 100644
+--- a/drivers/i2c/busses/i2c-synquacer.c
++++ b/drivers/i2c/busses/i2c-synquacer.c
+@@ -550,17 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
+ 	device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
+ 				 &i2c->pclkrate);
+ 
+-	i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
+-	if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
+-		return -EPROBE_DEFER;
+-	if (!IS_ERR_OR_NULL(i2c->pclk)) {
+-		dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
+-
+-		ret = clk_prepare_enable(i2c->pclk);
+-		if (ret)
+-			return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
+-		i2c->pclkrate = clk_get_rate(i2c->pclk);
+-	}
++	i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
++	if (IS_ERR(i2c->pclk))
++		return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
++				     "failed to get and enable clock\n");
++
++	dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
++	i2c->pclkrate = clk_get_rate(i2c->pclk);
+ 
+ 	if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
+ 	    i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
+@@ -615,8 +611,6 @@ static void synquacer_i2c_remove(struct platform_device *pdev)
+ 	struct synquacer_i2c *i2c = platform_get_drvdata(pdev);
+ 
+ 	i2c_del_adapter(&i2c->adapter);
+-	if (!IS_ERR(i2c->pclk))
+-		clk_disable_unprepare(i2c->pclk);
+ };
+ 
+ static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = {
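devm_clk_get_enabled() folds devm_clk_get(), clk_prepare_enable() and
the matching disable/unprepare-on-detach into one call, which is why the
synquacer conversion can delete the manual clk_disable_unprepare() from
.remove() entirely. -EPROBE_DEFER comes back as an ordinary ERR_PTR, so
a single dev_err_probe() covers both deferral and real failure. Minimal
usage sketch (hypothetical probe):

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *pclk;

		pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
		if (IS_ERR(pclk))
			return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
					     "failed to get and enable clock\n");

		/* pclk is prepared+enabled here and is torn down
		 * automatically when the driver unbinds. */
		return 0;
	}

Note the conversion also makes the clock mandatory, where the old code
tolerated its absence.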
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index 5ee4db68988e2..a2298ab460a37 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -1080,7 +1080,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
+ 	 * and yield the above events handler.
+ 	 */
+ 	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+-		ret = -ENXIO;
++		ret = -EAGAIN;
+ 		*actual_len = 0;
+ 		goto emit_stop;
+ 	}
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index 4156639b3c8bd..a543b91124b07 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -207,9 +207,9 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (*expected_ver > ver) {
++	if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
+ 		dev_err(&pdev->dev,
+-			"IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
++			"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ 			ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
+ 			ADI_AXI_PCORE_VER_MINOR(*expected_ver),
+ 			ADI_AXI_PCORE_VER_PATCH(*expected_ver),
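The adi-axi-adc check above implements the usual semantic-versioning
contract for the IP core: an equal major number means a compatible
register map, with minor/patch revisions being additive, whereas the old
full-value comparison also errored out on backwards-compatible
differences and accepted incompatible newer majors. As a sketch,
assuming the common ADI packing of (major << 16 | minor << 8 | patch):

	#define PCORE_VER_MAJOR(v)	(((v) >> 16) & 0xff)

	static bool pcore_compatible(u32 expected, u32 reported)
	{
		/* Same major: layout compatible; minor/patch additive. */
		return PCORE_VER_MAJOR(expected) == PCORE_VER_MAJOR(reported);
	}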
+diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
+index e0c2742da5236..8a0c357422121 100644
+--- a/drivers/iio/adc/pac1934.c
++++ b/drivers/iio/adc/pac1934.c
+@@ -787,6 +787,15 @@ static int pac1934_read_raw(struct iio_dev *indio_dev,
+ 	s64 curr_energy;
+ 	int ret, channel = chan->channel - 1;
+ 
++	/*
++	 * For AVG the index should be between 5 and 8.
++	 * To calculate the real index of PAC1934_CH_VOLTAGE_AVERAGE,
++	 * respectively PAC1934_CH_CURRENT, we need
++	 * to remove the added offset (PAC1934_MAX_NUM_CHANNELS).
++	 */
++	if (channel >= PAC1934_MAX_NUM_CHANNELS)
++		channel = channel - PAC1934_MAX_NUM_CHANNELS;
++
+ 	ret = pac1934_retrieve_data(info, PAC1934_MIN_UPDATE_WAIT_TIME_US);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index b5d3c9cea5c4e..283c207571064 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -2234,6 +2234,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
+ 			if (vin[0] != val || vin[1] >= adc_info->max_channels) {
+ 				dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
+ 					vin[0], vin[1]);
++				ret = -EINVAL;
+ 				goto err;
+ 			}
+ 		} else if (ret != -EINVAL) {
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 4302093b92c75..8684ba246969b 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1654,8 +1654,10 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
+ 		return NULL;
+ 
+ 	indio_dev = &iio_dev_opaque->indio_dev;
+-	indio_dev->priv = (char *)iio_dev_opaque +
+-		ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
++
++	if (sizeof_priv)
++		indio_dev->priv = (char *)iio_dev_opaque +
++			ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
+ 
+ 	indio_dev->dev.parent = parent;
+ 	indio_dev->dev.type = &iio_device_type;
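The iio_device_alloc() guard matters because when sizeof_priv is zero
the allocation is just sizeof(*iio_dev_opaque), with no alignment
padding, so the old unconditional pointer arithmetic produced a priv
pointer past the end of the allocation. Drivers that do pass a size pair
the call with iio_priv(); a typical sketch (hypothetical driver state):

	struct foo_state {
		int gain;
	};

	static int foo_probe(struct device *dev)
	{
		struct iio_dev *indio_dev;
		struct foo_state *st;

		indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
		if (!indio_dev)
			return -ENOMEM;

		st = iio_priv(indio_dev);	/* valid: sizeof_priv > 0 */
		st->gain = 1;
		return 0;
	}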
+diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
+index 1ff091b2f764d..d0a516d56da47 100644
+--- a/drivers/iio/pressure/dps310.c
++++ b/drivers/iio/pressure/dps310.c
+@@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2,
+ 	}
+ }
+ 
+-static int dps310_calculate_temp(struct dps310_data *data)
++static int dps310_calculate_temp(struct dps310_data *data, int *val)
+ {
+ 	s64 c0;
+ 	s64 t;
+@@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data)
+ 	t = c0 + ((s64)data->temp_raw * (s64)data->c1);
+ 
+ 	/* Convert to milliCelsius and scale the temperature */
+-	return (int)div_s64(t * 1000LL, kt);
++	*val = (int)div_s64(t * 1000LL, kt);
++
++	return 0;
+ }
+ 
+ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+@@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
+ 		if (rc)
+ 			return rc;
+ 
+-		rc = dps310_calculate_temp(data);
+-		if (rc < 0)
++		rc = dps310_calculate_temp(data, val);
++		if (rc)
+ 			return rc;
+ 
+-		*val = rc;
+ 		return IIO_VAL_INT;
+ 
+ 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
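The dps310 refactor fixes a sign-folding bug: returning the computed
temperature through the function's return value makes every negative
reading indistinguishable from -errno, so the old "rc < 0 => return rc"
check threw away valid sub-zero temperatures (the value is in
milli-Celsius). Moving the data to an out-parameter leaves the return
channel meaning success or failure only. The anti-pattern and its fix,
in miniature:

	/* Anti-pattern: data and errno share the return value. */
	static int calc_temp_bad(void)
	{
		return -5000;	/* -5 C, misread as an error code */
	}

	/* Fix: errno in the return value, data via out-parameter. */
	static int calc_temp_good(int *val)
	{
		*val = -5000;	/* -5 C, unambiguous */
		return 0;
	}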
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index f253295795f0a..be0743dac3fff 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst,
+ 
+ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
+ {
+-	struct rtable *rt;
+-	struct rt6_info *rt6;
+-
+-	if (family == AF_INET) {
+-		rt = container_of(dst, struct rtable, dst);
+-		return rt->rt_uses_gateway;
+-	}
++	if (family == AF_INET)
++		return dst_rtable(dst)->rt_uses_gateway;
+ 
+-	rt6 = container_of(dst, struct rt6_info, dst);
+-	return rt6->rt6i_flags & RTF_GATEWAY;
++	return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
+ }
+ 
+ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index 6e8cc28debd97..80d16c92a08b3 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -42,8 +42,8 @@ struct ims_pcu_backlight {
+ #define IMS_PCU_PART_NUMBER_LEN		15
+ #define IMS_PCU_SERIAL_NUMBER_LEN	8
+ #define IMS_PCU_DOM_LEN			8
+-#define IMS_PCU_FW_VERSION_LEN		(9 + 1)
+-#define IMS_PCU_BL_VERSION_LEN		(9 + 1)
++#define IMS_PCU_FW_VERSION_LEN		16
++#define IMS_PCU_BL_VERSION_LEN		16
+ #define IMS_PCU_BL_RESET_REASON_LEN	(2 + 1)
+ 
+ #define IMS_PCU_PCU_B_DEVICE_ID		5
+diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
+index 5c288fe7accf1..79f478d3a9b37 100644
+--- a/drivers/input/misc/pm8xxx-vibrator.c
++++ b/drivers/input/misc/pm8xxx-vibrator.c
+@@ -13,7 +13,8 @@
+ 
+ #define VIB_MAX_LEVEL_mV	(3100)
+ #define VIB_MIN_LEVEL_mV	(1200)
+-#define VIB_MAX_LEVELS		(VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
++#define VIB_PER_STEP_mV		(100)
++#define VIB_MAX_LEVELS		(VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV)
+ 
+ #define MAX_FF_SPEED		0xff
+ 
+@@ -117,10 +118,10 @@ static void pm8xxx_work_handler(struct work_struct *work)
+ 		vib->active = true;
+ 		vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
+ 						VIB_MIN_LEVEL_mV;
+-		vib->level /= 100;
++		vib->level /= VIB_PER_STEP_mV;
+ 	} else {
+ 		vib->active = false;
+-		vib->level = VIB_MIN_LEVEL_mV / 100;
++		vib->level = VIB_MIN_LEVEL_mV / VIB_PER_STEP_mV;
+ 	}
+ 
+ 	pm8xxx_vib_set(vib, vib->active);
+diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
+index 5979deabe23d1..256f757a13267 100644
+--- a/drivers/input/mouse/cyapa.c
++++ b/drivers/input/mouse/cyapa.c
+@@ -1347,10 +1347,16 @@ static int cyapa_suspend(struct device *dev)
+ 	u8 power_mode;
+ 	int error;
+ 
+-	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++	error = mutex_lock_interruptible(&cyapa->input->mutex);
+ 	if (error)
+ 		return error;
+ 
++	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
++	if (error) {
++		mutex_unlock(&cyapa->input->mutex);
++		return error;
++	}
++
+ 	/*
+ 	 * Runtime PM is enable only when device is in operational mode and
+ 	 * users in use, so need check it before disable it to
+@@ -1385,6 +1391,8 @@ static int cyapa_suspend(struct device *dev)
+ 		cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
+ 
+ 	mutex_unlock(&cyapa->state_sync_lock);
++	mutex_unlock(&cyapa->input->mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1394,6 +1402,7 @@ static int cyapa_resume(struct device *dev)
+ 	struct cyapa *cyapa = i2c_get_clientdata(client);
+ 	int error;
+ 
++	mutex_lock(&cyapa->input->mutex);
+ 	mutex_lock(&cyapa->state_sync_lock);
+ 
+ 	if (device_may_wakeup(dev) && cyapa->irq_wake) {
+@@ -1412,6 +1421,7 @@ static int cyapa_resume(struct device *dev)
+ 	enable_irq(client->irq);
+ 
+ 	mutex_unlock(&cyapa->state_sync_lock);
++	mutex_unlock(&cyapa->input->mutex);
+ 	return 0;
+ }
+ 
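The cyapa change makes suspend and resume take input->mutex before state_sync_lock, so both paths acquire the two locks in the same order. A minimal pthread sketch of that lock-ordering rule (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t input_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_sync_lock = PTHREAD_MUTEX_INITIALIZER;

static void suspend_path(void)
{
	pthread_mutex_lock(&input_mutex);	/* outer lock first */
	pthread_mutex_lock(&state_sync_lock);	/* inner lock second */
	puts("suspend: both locks held");
	pthread_mutex_unlock(&state_sync_lock);
	pthread_mutex_unlock(&input_mutex);
}

static void resume_path(void)
{
	/* Same order as suspend_path(); two paths taking the same pair
	 * of locks in opposite orders is the classic AB-BA deadlock. */
	pthread_mutex_lock(&input_mutex);
	pthread_mutex_lock(&state_sync_lock);
	puts("resume: both locks held");
	pthread_mutex_unlock(&state_sync_lock);
	pthread_mutex_unlock(&input_mutex);
}

int main(void)
{
	suspend_path();
	resume_path();
	return 0;
}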
+diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
+index 50552dc7b4f5e..676b0bda3d720 100644
+--- a/drivers/input/serio/ioc3kbd.c
++++ b/drivers/input/serio/ioc3kbd.c
+@@ -200,9 +200,16 @@ static void ioc3kbd_remove(struct platform_device *pdev)
+ 	serio_unregister_port(d->aux);
+ }
+ 
++static const struct platform_device_id ioc3kbd_id_table[] = {
++	{ "ioc3-kbd", },
++	{ }
++};
++MODULE_DEVICE_TABLE(platform, ioc3kbd_id_table);
++
+ static struct platform_driver ioc3kbd_driver = {
+ 	.probe          = ioc3kbd_probe,
+ 	.remove_new     = ioc3kbd_remove,
++	.id_table	= ioc3kbd_id_table,
+ 	.driver = {
+ 		.name = "ioc3-kbd",
+ 	},
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index 96735800b13c0..ba4cc08684d63 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -164,7 +164,7 @@ static struct qcom_icc_node mas_snoc_bimc = {
+ 	.name = "mas_snoc_bimc",
+ 	.buswidth = 16,
+ 	.qos.ap_owned = true,
+-	.qos.qos_port = 2,
++	.qos.qos_port = 6,
+ 	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ 	.mas_rpm_id = 164,
+ 	.slv_rpm_id = -1,
+diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
+index 4e3936a39d0ed..e1b414b403534 100644
+--- a/drivers/leds/leds-pwm.c
++++ b/drivers/leds/leds-pwm.c
+@@ -53,7 +53,13 @@ static int led_pwm_set(struct led_classdev *led_cdev,
+ 		duty = led_dat->pwmstate.period - duty;
+ 
+ 	led_dat->pwmstate.duty_cycle = duty;
+-	led_dat->pwmstate.enabled = true;
++	/*
++	 * Disabling a PWM doesn't guarantee that it emits the inactive level.
++	 * So keep it on. The PWM should be disabled only for suspending, as
++	 * otherwise it refuses to suspend. The possible downside is that the
++	 * LED might stay (or even go) on.
++	 */
++	led_dat->pwmstate.enabled = !(led_cdev->flags & LED_SUSPENDED);
+ 	return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate);
+ }
+ 
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index ead2200f39ba0..033aff11f87cf 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -465,7 +465,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
+ 	struct cmdq_task *task, *tmp;
+ 	unsigned long flags;
+ 
+-	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
++	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);
+ 
+ 	spin_lock_irqsave(&thread->chan->lock, flags);
+ 	if (list_empty(&thread->task_busy_list))
+diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
+index 559a172ebc6cb..da09834990b87 100644
+--- a/drivers/media/cec/core/cec-adap.c
++++ b/drivers/media/cec/core/cec-adap.c
+@@ -490,6 +490,15 @@ int cec_thread_func(void *_adap)
+ 			goto unlock;
+ 		}
+ 
++		if (adap->transmit_in_progress &&
++		    adap->transmit_in_progress_aborted) {
++			if (adap->transmitting)
++				cec_data_cancel(adap->transmitting,
++						CEC_TX_STATUS_ABORTED, 0);
++			adap->transmit_in_progress = false;
++			adap->transmit_in_progress_aborted = false;
++			goto unlock;
++		}
+ 		if (adap->transmit_in_progress && timeout) {
+ 			/*
+ 			 * If we timeout, then log that. Normally this does
+@@ -771,6 +780,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ {
+ 	struct cec_data *data;
+ 	bool is_raw = msg_is_raw(msg);
++	int err;
+ 
+ 	if (adap->devnode.unregistered)
+ 		return -ENODEV;
+@@ -935,11 +945,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ 	 * Release the lock and wait, retake the lock afterwards.
+ 	 */
+ 	mutex_unlock(&adap->lock);
+-	wait_for_completion_killable(&data->c);
+-	if (!data->completed)
+-		cancel_delayed_work_sync(&data->work);
++	err = wait_for_completion_killable(&data->c);
++	cancel_delayed_work_sync(&data->work);
+ 	mutex_lock(&adap->lock);
+ 
++	if (err)
++		adap->transmit_in_progress_aborted = true;
++
+ 	/* Cancel the transmit if it was interrupted */
+ 	if (!data->completed) {
+ 		if (data->msg.tx_status & CEC_TX_STATUS_OK)
+@@ -1575,9 +1587,12 @@ static int cec_config_thread_func(void *arg)
+  */
+ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ {
+-	if (WARN_ON(adap->is_configuring || adap->is_configured))
++	if (WARN_ON(adap->is_claiming_log_addrs ||
++		    adap->is_configuring || adap->is_configured))
+ 		return;
+ 
++	adap->is_claiming_log_addrs = true;
++
+ 	init_completion(&adap->config_completion);
+ 
+ 	/* Ready to kick off the thread */
+@@ -1592,6 +1607,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+ 		wait_for_completion(&adap->config_completion);
+ 		mutex_lock(&adap->lock);
+ 	}
++	adap->is_claiming_log_addrs = false;
+ }
+ 
+ /*
+diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
+index 67dc79ef17050..3ef9153443044 100644
+--- a/drivers/media/cec/core/cec-api.c
++++ b/drivers/media/cec/core/cec-api.c
+@@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+ 			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+ 			   CEC_LOG_ADDRS_FL_CDC_ONLY;
+ 	mutex_lock(&adap->lock);
+-	if (!adap->is_configuring &&
++	if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
+ 	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
+ 	    !cec_is_busy(adap, fh)) {
+ 		err = __cec_s_log_addrs(adap, &log_addrs, block);
+@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
+ 		list_del_init(&data->xfer_list);
+ 	}
+ 	mutex_unlock(&adap->lock);
++
++	mutex_lock(&fh->lock);
+ 	while (!list_empty(&fh->msgs)) {
+ 		struct cec_msg_entry *entry =
+ 			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
+ 			kfree(entry);
+ 		}
+ 	}
++	mutex_unlock(&fh->lock);
+ 	kfree(fh);
+ 
+ 	cec_put_device(devnode);
+diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
+index 39d321e2b7f98..4577a8977c85a 100644
+--- a/drivers/media/i2c/ov2680.c
++++ b/drivers/media/i2c/ov2680.c
+@@ -1116,25 +1116,24 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
+ 	sensor->pixel_rate = sensor->link_freq[0] * 2;
+ 	do_div(sensor->pixel_rate, 10);
+ 
+-	/* Verify bus cfg */
+-	if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) {
+-		ret = dev_err_probe(dev, -EINVAL,
+-				    "only a 1-lane CSI2 config is supported");
+-		goto out_free_bus_cfg;
++	if (!bus_cfg.nr_of_link_frequencies) {
++		dev_warn(dev, "Consider passing 'link-frequencies' in DT\n");
++		goto skip_link_freq_validation;
+ 	}
+ 
+ 	for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ 		if (bus_cfg.link_frequencies[i] == sensor->link_freq[0])
+ 			break;
+ 
+-	if (bus_cfg.nr_of_link_frequencies == 0 ||
+-	    bus_cfg.nr_of_link_frequencies == i) {
++	if (bus_cfg.nr_of_link_frequencies == i) {
+ 		ret = dev_err_probe(dev, -EINVAL,
+ 				    "supported link freq %lld not found\n",
+ 				    sensor->link_freq[0]);
+ 		goto out_free_bus_cfg;
+ 	}
+ 
++skip_link_freq_validation:
++	ret = 0;
+ out_free_bus_cfg:
+ 	v4l2_fwnode_endpoint_free(&bus_cfg);
+ 	return ret;
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+index a22b7dfc656e1..1a2b14a3e219c 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+@@ -58,13 +58,15 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
+ 	return 0;
+ }
+ 
+-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
++int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
+ {
+ 	int ret;
+ 
+ 	ret = pm_runtime_resume_and_get(pm->dev);
+ 	if (ret)
+ 		dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
++
++	return ret;
+ }
+ 
+ void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+index 157ea08ba9e36..2e28f25e36cc4 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+@@ -10,7 +10,7 @@
+ #include "mtk_vcodec_enc_drv.h"
+ 
+ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
+-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
++int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
+ void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+index c402a686f3cb2..e83747b8d69ab 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+@@ -64,7 +64,9 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
+ 	ctx->dev->curr_ctx = ctx;
+ 	spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ 
+-	mtk_vcodec_enc_pw_on(&ctx->dev->pm);
++	ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm);
++	if (ret)
++		goto venc_if_encode_pw_on_err;
+ 	mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ 	ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
+ 				  bs_buf, result);
+@@ -75,6 +77,7 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
+ 	ctx->dev->curr_ctx = NULL;
+ 	spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+ 
++venc_if_encode_pw_on_err:
+ 	mtk_venc_unlock(ctx);
+ 	return ret;
+ }
+diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+index 47a8c0fb7eb9f..99c401e653bc4 100644
+--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
++++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+@@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select V4L2_FWNODE
+ 	select REGMAP_MMIO
++	select GENERIC_PHY
+ 	select GENERIC_PHY_MIPI_DPHY
+ 	help
+ 	   Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
+diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+index 6da83d0cffaae..22442fce76078 100644
+--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
++++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+@@ -786,15 +786,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
+ 			dev_warn(csi->dev,
+ 				 "Failed to drain DMA. Next frame might be bogus\n");
+ 
++		spin_lock_irqsave(&dma->lock, flags);
+ 		ret = ti_csi2rx_start_dma(csi, buf);
+ 		if (ret) {
+-			dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+-			spin_lock_irqsave(&dma->lock, flags);
+ 			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ 			dma->state = TI_CSI2RX_DMA_IDLE;
+ 			spin_unlock_irqrestore(&dma->lock, flags);
++			dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+ 		} else {
+-			spin_lock_irqsave(&dma->lock, flags);
+ 			list_add_tail(&buf->list, &dma->submitted);
+ 			spin_unlock_irqrestore(&dma->lock, flags);
+ 		}
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 790787f0eba84..bcb24d8964981 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -515,7 +515,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
+ 
+ 	alt = fc_usb->uintf->cur_altsetting;
+ 
+-	if (alt->desc.bNumEndpoints < 1)
++	if (alt->desc.bNumEndpoints < 2)
+ 		return -ENODEV;
+ 	if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
+ 		return -ENODEV;
+diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
+index 366f0e4a5dc0d..e79c45db60ab5 100644
+--- a/drivers/media/usb/stk1160/stk1160-video.c
++++ b/drivers/media/usb/stk1160/stk1160-video.c
+@@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
+ static inline
+ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ {
+-	int linesdone, lineoff, lencopy;
++	int linesdone, lineoff, lencopy, offset;
+ 	int bytesperline = dev->width * 2;
+ 	struct stk1160_buffer *buf = dev->isoc_ctl.buf;
+ 	u8 *dst = buf->mem;
+@@ -139,8 +139,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 	 * Check if we have enough space left in the buffer.
+ 	 * In that case, we force loop exit after copy.
+ 	 */
+-	if (lencopy > buf->bytesused - buf->length) {
+-		lencopy = buf->bytesused - buf->length;
++	offset = dst - (u8 *)buf->mem;
++	if (offset > buf->length) {
++		dev_warn_ratelimited(dev->dev, "out of bounds offset\n");
++		return;
++	}
++	if (lencopy > buf->length - offset) {
++		lencopy = buf->length - offset;
+ 		remain = lencopy;
+ 	}
+ 
+@@ -182,8 +187,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
+ 		 * Check if we have enough space left in the buffer.
+ 		 * In that case, we force loop exit after copy.
+ 		 */
+-		if (lencopy > buf->bytesused - buf->length) {
+-			lencopy = buf->bytesused - buf->length;
++		offset = dst - (u8 *)buf->mem;
++		if (offset > buf->length) {
++			dev_warn_ratelimited(dev->dev, "offset out of bounds\n");
++			return;
++		}
++		if (lencopy > buf->length - offset) {
++			lencopy = buf->length - offset;
+ 			remain = lencopy;
+ 		}
+ 
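The stk1160 hunks derive the remaining room from the destination pointer's offset into the buffer and bail out when the offset is already past the end, instead of trusting bytesused. A standalone sketch of that clamp-before-copy logic (names are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

static size_t copy_clamped(unsigned char *buf, size_t buf_len,
			   unsigned char *dst, const unsigned char *src,
			   size_t len)
{
	size_t offset = (size_t)(dst - buf);

	if (offset > buf_len)		/* already out of bounds: drop */
		return 0;
	if (len > buf_len - offset)	/* clamp to the space that is left */
		len = buf_len - offset;

	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	unsigned char buf[16], src[32];

	memset(src, 0xab, sizeof(src));
	/* 32 bytes requested at offset 10 of a 16-byte buffer: 6 copied. */
	printf("copied %zu bytes\n",
	       copy_clamped(buf, sizeof(buf), buf + 10, src, sizeof(src)));
	return 0;
}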
+diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
+index 45836f0a2b0a7..19d20871afefa 100644
+--- a/drivers/media/v4l2-core/v4l2-subdev.c
++++ b/drivers/media/v4l2-core/v4l2-subdev.c
+@@ -412,15 +412,6 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
+ 	if (WARN_ON(!!sd->enabled_streams == !!enable))
+ 		return 0;
+ 
+-#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+-	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+-		if (enable)
+-			led_set_brightness(sd->privacy_led,
+-					   sd->privacy_led->max_brightness);
+-		else
+-			led_set_brightness(sd->privacy_led, 0);
+-	}
+-#endif
+ 	ret = sd->ops->video->s_stream(sd, enable);
+ 
+ 	if (!enable && ret < 0) {
+@@ -428,9 +419,20 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
+ 		ret = 0;
+ 	}
+ 
+-	if (!ret)
++	if (!ret) {
+ 		sd->enabled_streams = enable ? BIT(0) : 0;
+ 
++#if IS_REACHABLE(CONFIG_LEDS_CLASS)
++		if (!IS_ERR_OR_NULL(sd->privacy_led)) {
++			if (enable)
++				led_set_brightness(sd->privacy_led,
++						   sd->privacy_led->max_brightness);
++			else
++				led_set_brightness(sd->privacy_led, 0);
++		}
++#endif
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index 4f8d962bb5b2a..1300ccab3d21b 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 	if (!vmci_dev) {
+ 		dev_err(&pdev->dev,
+ 			"Can't allocate memory for VMCI device\n");
+-		return -ENOMEM;
++		error = -ENOMEM;
++		goto err_unmap_mmio_base;
+ 	}
+ 
+ 	vmci_dev->dev = &pdev->dev;
+@@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ 		if (!vmci_dev->tx_buffer) {
+ 			dev_err(&pdev->dev,
+ 				"Can't allocate memory for datagram tx buffer\n");
+-			return -ENOMEM;
++			error = -ENOMEM;
++			goto err_unmap_mmio_base;
+ 		}
+ 
+ 		vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
+@@ -893,6 +895,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
+ err_free_data_buffers:
+ 	vmci_free_dg_buffers(vmci_dev);
+ 
++err_unmap_mmio_base:
++	if (mmio_base != NULL)
++		pci_iounmap(pdev, mmio_base);
++
+ 	/* The rest are managed resources and will be freed by PCI core */
+ 	return error;
+ }
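The vmci fix routes the new failure returns through an err_unmap_mmio_base label so the mapped BAR is released on every path. A compact sketch of that goto-unwind idiom (the resources below are stand-ins, not the VMCI driver's):

#include <stdio.h>
#include <stdlib.h>

static int probe_like(int fail_step)
{
	void *mmio, *dev;
	int error = 0;

	mmio = malloc(64);		/* stands in for pci_iomap() */
	if (!mmio)
		return -1;

	if (fail_step == 1) {
		error = -1;
		goto err_unmap_mmio;	/* later failures still unwind mmio */
	}

	dev = malloc(64);		/* stands in for the device alloc */
	if (!dev) {
		error = -1;
		goto err_unmap_mmio;
	}
	free(dev);

err_unmap_mmio:
	free(mmio);			/* stands in for pci_iounmap() */
	return error;
}

int main(void)
{
	printf("ok path: %d, failing path: %d\n", probe_like(0), probe_like(1));
	return 0;
}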
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index d659c59422e1e..562034af653eb 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -143,16 +143,24 @@ struct sdhci_am654_data {
+ 	struct regmap *base;
+ 	int otap_del_sel[ARRAY_SIZE(td)];
+ 	int itap_del_sel[ARRAY_SIZE(td)];
++	u32 itap_del_ena[ARRAY_SIZE(td)];
+ 	int clkbuf_sel;
+ 	int trm_icp;
+ 	int drv_strength;
+ 	int strb_sel;
+ 	u32 flags;
+ 	u32 quirks;
++	bool dll_enable;
+ 
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+ };
+ 
++struct window {
++	u8 start;
++	u8 end;
++	u8 length;
++};
++
+ struct sdhci_am654_driver_data {
+ 	const struct sdhci_pltfm_data *pdata;
+ 	u32 flags;
+@@ -232,11 +240,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
+ }
+ 
+ static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
+-				      u32 itapdly)
++				      u32 itapdly, u32 enable)
+ {
+ 	/* Set ITAPCHGWIN before writing to ITAPDLY */
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 			   1 << ITAPCHGWIN_SHIFT);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
++			   enable << ITAPDLYENA_SHIFT);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
+ 			   itapdly << ITAPDLYSEL_SHIFT);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+@@ -253,8 +263,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
+ 	mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
+ 
+-	sdhci_am654_write_itapdly(sdhci_am654,
+-				  sdhci_am654->itap_del_sel[timing]);
++	sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++				  sdhci_am654->itap_del_ena[timing]);
+ }
+ 
+ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+@@ -263,7 +273,6 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ 	unsigned char timing = host->mmc->ios.timing;
+ 	u32 otap_del_sel;
+-	u32 otap_del_ena;
+ 	u32 mask, val;
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
+@@ -272,10 +281,9 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ 	/* Setup DLL Output TAP delay */
+ 	otap_del_sel = sdhci_am654->otap_del_sel[timing];
+-	otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
+ 
+ 	mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+-	val = (otap_del_ena << OTAPDLYENA_SHIFT) |
++	val = (0x1 << OTAPDLYENA_SHIFT) |
+ 	      (otap_del_sel << OTAPDLYSEL_SHIFT);
+ 
+ 	/* Write to STRBSEL for HS400 speed mode */
+@@ -290,10 +298,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ 
+-	if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
++	if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
+ 		sdhci_am654_setup_dll(host, clock);
+-	else
++		sdhci_am654->dll_enable = true;
++
++		if (timing == MMC_TIMING_MMC_HS400) {
++			sdhci_am654->itap_del_ena[timing] = 0x1;
++			sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
++		}
++
++		sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
++					  sdhci_am654->itap_del_ena[timing]);
++	} else {
+ 		sdhci_am654_setup_delay_chain(sdhci_am654, timing);
++		sdhci_am654->dll_enable = false;
++	}
+ 
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ 			   sdhci_am654->clkbuf_sel);
+@@ -306,6 +325,8 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ 	unsigned char timing = host->mmc->ios.timing;
+ 	u32 otap_del_sel;
++	u32 itap_del_ena;
++	u32 itap_del_sel;
+ 	u32 mask, val;
+ 
+ 	/* Setup DLL Output TAP delay */
+@@ -314,8 +335,19 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
+ 	mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
+ 	val = (0x1 << OTAPDLYENA_SHIFT) |
+ 	      (otap_del_sel << OTAPDLYSEL_SHIFT);
+-	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ 
++	/* Setup Input TAP delay */
++	itap_del_ena = sdhci_am654->itap_del_ena[timing];
++	itap_del_sel = sdhci_am654->itap_del_sel[timing];
++
++	mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
++	val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
++	       (itap_del_sel << ITAPDLYSEL_SHIFT);
++
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
++			   1 << ITAPCHGWIN_SHIFT);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
++	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+ 	regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
+ 			   sdhci_am654->clkbuf_sel);
+ 
+@@ -408,40 +440,105 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+ 	return 0;
+ }
+ 
+-#define ITAP_MAX	32
++#define ITAPDLY_LENGTH 32
++#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
++
++static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
++			  *fail_window, u8 num_fails, bool circular_buffer)
++{
++	u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
++	u8 first_fail_start = 0, last_fail_end = 0;
++	struct device *dev = mmc_dev(host->mmc);
++	struct window pass_window = {0, 0, 0};
++	int prev_fail_end = -1;
++	u8 i;
++
++	if (!num_fails)
++		return ITAPDLY_LAST_INDEX >> 1;
++
++	if (fail_window->length == ITAPDLY_LENGTH) {
++		dev_err(dev, "No passing ITAPDLY, return 0\n");
++		return 0;
++	}
++
++	first_fail_start = fail_window->start;
++	last_fail_end = fail_window[num_fails - 1].end;
++
++	for (i = 0; i < num_fails; i++) {
++		start_fail = fail_window[i].start;
++		end_fail = fail_window[i].end;
++		pass_length = start_fail - (prev_fail_end + 1);
++
++		if (pass_length > pass_window.length) {
++			pass_window.start = prev_fail_end + 1;
++			pass_window.length = pass_length;
++		}
++		prev_fail_end = end_fail;
++	}
++
++	if (!circular_buffer)
++		pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
++	else
++		pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
++
++	if (pass_length > pass_window.length) {
++		pass_window.start = last_fail_end + 1;
++		pass_window.length = pass_length;
++	}
++
++	if (!circular_buffer)
++		itap = pass_window.start + (pass_window.length >> 1);
++	else
++		itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
++
++	return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
++}
++
+ static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ 					       u32 opcode)
+ {
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+-	int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
+-	u32 itap;
++	unsigned char timing = host->mmc->ios.timing;
++	struct window fail_window[ITAPDLY_LENGTH];
++	u8 curr_pass, itap;
++	u8 fail_index = 0;
++	u8 prev_pass = 1;
++
++	memset(fail_window, 0, sizeof(fail_window));
+ 
+ 	/* Enable ITAPDLY */
+-	regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+-			   1 << ITAPDLYENA_SHIFT);
++	sdhci_am654->itap_del_ena[timing] = 0x1;
++
++	for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
++		sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+ 
+-	for (itap = 0; itap < ITAP_MAX; itap++) {
+-		sdhci_am654_write_itapdly(sdhci_am654, itap);
++		curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
+ 
+-		cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
+-		if (cur_val && !prev_val)
+-			pass_window = itap;
++		if (!curr_pass && prev_pass)
++			fail_window[fail_index].start = itap;
+ 
+-		if (!cur_val)
+-			fail_len++;
++		if (!curr_pass) {
++			fail_window[fail_index].end = itap;
++			fail_window[fail_index].length++;
++		}
++
++		if (curr_pass && !prev_pass)
++			fail_index++;
+ 
+-		prev_val = cur_val;
++		prev_pass = curr_pass;
+ 	}
+-	/*
+-	 * Having determined the length of the failing window and start of
+-	 * the passing window calculate the length of the passing window and
+-	 * set the final value halfway through it considering the range as a
+-	 * circular buffer
+-	 */
+-	pass_len = ITAP_MAX - fail_len;
+-	itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
+-	sdhci_am654_write_itapdly(sdhci_am654, itap);
++
++	if (fail_window[fail_index].length != 0)
++		fail_index++;
++
++	itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
++					  sdhci_am654->dll_enable);
++
++	sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
++
++	/* Save ITAPDLY */
++	sdhci_am654->itap_del_sel[timing] = itap;
+ 
+ 	return 0;
+ }
+@@ -590,9 +687,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
+ 				host->mmc->caps2 &= ~td[i].capability;
+ 		}
+ 
+-		if (td[i].itap_binding)
+-			device_property_read_u32(dev, td[i].itap_binding,
+-						 &sdhci_am654->itap_del_sel[i]);
++		if (td[i].itap_binding) {
++			ret = device_property_read_u32(dev, td[i].itap_binding,
++						       &sdhci_am654->itap_del_sel[i]);
++			if (!ret)
++				sdhci_am654->itap_del_ena[i] = 0x1;
++		}
+ 	}
+ 
+ 	return 0;
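The rewritten tuning loop records every failing ITAPDLY range and then centers the final tap in the largest passing window, treating the 32-value range as circular when the DLL is enabled. A self-contained sketch of that selection over a pass/fail map (the map itself is made up; the real code derives it from mmc_send_tuning()):

#include <stdbool.h>
#include <stdio.h>

#define ITAPDLY_LENGTH		32
#define ITAPDLY_LAST_INDEX	(ITAPDLY_LENGTH - 1)

/* Pick the midpoint of the longest run of passing taps. */
static unsigned int pick_itap(const bool pass[ITAPDLY_LENGTH], bool circular)
{
	unsigned int best_start = 0, best_len = 0, start = 0, len = 0, i;

	for (i = 0; i < ITAPDLY_LENGTH; i++) {
		if (!pass[i]) {
			len = 0;
			continue;
		}
		if (!len)
			start = i;
		if (++len > best_len) {
			best_start = start;
			best_len = len;
		}
	}

	/* Circular mode: a passing window may wrap from tap 31 to tap 0. */
	if (circular && pass[0] && pass[ITAPDLY_LAST_INDEX]) {
		unsigned int head = 0, tail = 0;

		while (head < ITAPDLY_LENGTH && pass[head])
			head++;
		while (tail < ITAPDLY_LENGTH && pass[ITAPDLY_LAST_INDEX - tail])
			tail++;
		if (head + tail < ITAPDLY_LENGTH && head + tail > best_len) {
			best_start = ITAPDLY_LENGTH - tail;
			best_len = head + tail;
		}
	}

	if (!best_len)
		return 0;	/* no passing tap at all */

	return (best_start + best_len / 2) % ITAPDLY_LENGTH;
}

int main(void)
{
	bool pass[ITAPDLY_LENGTH] = { false };
	unsigned int i;

	for (i = 5; i <= 20; i++)	/* taps 5..20 pass */
		pass[i] = true;
	printf("chosen itap: %u\n", pick_itap(pass, false));	/* 13 */
	return 0;
}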
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 7cab36f94782e..db55ffdb5792d 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -48,7 +48,9 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
+ obj-$(CONFIG_ARCNET) += arcnet/
+ obj-$(CONFIG_CAIF) += caif/
+ obj-$(CONFIG_CAN) += can/
+-obj-$(CONFIG_NET_DSA) += dsa/
++ifdef CONFIG_NET_DSA
++obj-y += dsa/
++endif
+ obj-$(CONFIG_ETHERNET) += ethernet/
+ obj-$(CONFIG_FDDI) += fddi/
+ obj-$(CONFIG_HIPPI) += hippi/
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 2b510f150dd88..2a5861a88d0e6 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -3050,7 +3050,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+ 		else
+ 			interface = PHY_INTERFACE_MODE_MII;
+ 	} else if (val == bitval[P_RMII_SEL]) {
+-		interface = PHY_INTERFACE_MODE_RGMII;
++		interface = PHY_INTERFACE_MODE_RMII;
+ 	} else {
+ 		interface = PHY_INTERFACE_MODE_RGMII;
+ 		if (data8 & P_RGMII_ID_EG_ENABLE)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 2d8a66ea82fab..713a595370bff 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 			      struct ena_com_io_sq *io_sq)
+ {
+ 	size_t size;
+-	int dev_node = 0;
+ 
+ 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+ 
+@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 	size = io_sq->desc_entry_size * io_sq->q_depth;
+ 
+ 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+-		dev_node = dev_to_node(ena_dev->dmadev);
+-		set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ 		io_sq->desc_addr.virt_addr =
+ 			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+ 					   GFP_KERNEL);
+-		set_dev_node(ena_dev->dmadev, dev_node);
+ 		if (!io_sq->desc_addr.virt_addr) {
+ 			io_sq->desc_addr.virt_addr =
+ 				dma_alloc_coherent(ena_dev->dmadev, size,
+@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ 		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ 			io_sq->bounce_buf_ctrl.buffers_num;
+ 
+-		dev_node = dev_to_node(ena_dev->dmadev);
+-		set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ 		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+-		set_dev_node(ena_dev->dmadev, dev_node);
+ 		if (!io_sq->bounce_buf_ctrl.base_buffer)
+ 			io_sq->bounce_buf_ctrl.base_buffer =
+ 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ 			      struct ena_com_io_cq *io_cq)
+ {
+ 	size_t size;
+-	int prev_node = 0;
+ 
+ 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+ 
+@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ 
+ 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+ 
+-	prev_node = dev_to_node(ena_dev->dmadev);
+-	set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ 	io_cq->cdesc_addr.virt_addr =
+ 		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+-	set_dev_node(ena_dev->dmadev, prev_node);
+ 	if (!io_cq->cdesc_addr.virt_addr) {
+ 		io_cq->cdesc_addr.virt_addr =
+ 			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index d266a87297a5e..54798df8e2544 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
+ 	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+ 
+ 	if (port[IFLA_PORT_PROFILE]) {
++		if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_NAME;
+ 		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+ 			PORT_PROFILE_MAX);
+ 	}
+ 
+ 	if (port[IFLA_PORT_INSTANCE_UUID]) {
++		if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_INSTANCE;
+ 		memcpy(pp->instance_uuid,
+ 			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+ 	}
+ 
+ 	if (port[IFLA_PORT_HOST_UUID]) {
++		if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
++			memcpy(pp, &prev_pp, sizeof(*pp));
++			return -EINVAL;
++		}
+ 		pp->set |= ENIC_SET_HOST;
+ 		memcpy(pp->host_uuid,
+ 			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
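Each of the three enic hunks checks the netlink attribute's payload length before memcpy()ing a fixed-size field out of it, restoring the previous profile on mismatch. A generic sketch of the length-check rule (the TLV layout and sizes here are invented):

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 8

struct tlv {
	unsigned short len;		/* payload length */
	const unsigned char *data;	/* payload */
};

static int copy_name(char out[NAME_MAX_LEN], const struct tlv *attr)
{
	/* Reject a short (or long) payload instead of over-reading it. */
	if (attr->len != NAME_MAX_LEN)
		return -1;

	memcpy(out, attr->data, NAME_MAX_LEN);
	return 0;
}

int main(void)
{
	char out[NAME_MAX_LEN];
	struct tlv good = { NAME_MAX_LEN, (const unsigned char *)"profile0" };
	struct tlv bad = { 3, (const unsigned char *)"abc" };

	printf("good: %d, bad: %d\n", copy_name(out, &good), copy_name(out, &bad));
	return 0;
}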
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a72d8a2eb0b31..881ece735dcf1 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -4130,6 +4130,14 @@ static int fec_enet_init(struct net_device *ndev)
+ 	return ret;
+ }
+ 
++static void fec_enet_deinit(struct net_device *ndev)
++{
++	struct fec_enet_private *fep = netdev_priv(ndev);
++
++	netif_napi_del(&fep->napi);
++	fec_enet_free_queue(ndev);
++}
++
+ #ifdef CONFIG_OF
+ static int fec_reset_phy(struct platform_device *pdev)
+ {
+@@ -4524,6 +4532,7 @@ fec_probe(struct platform_device *pdev)
+ 	fec_enet_mii_remove(fep);
+ failed_mii_init:
+ failed_irq:
++	fec_enet_deinit(ndev);
+ failed_init:
+ 	fec_ptp_stop(pdev);
+ failed_reset:
+@@ -4587,6 +4596,7 @@ fec_drv_remove(struct platform_device *pdev)
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
++	fec_enet_deinit(ndev);
+ 	free_netdev(ndev);
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 181d9bfbee220..e32f6724f5681 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -104,14 +104,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+ 	struct timespec64 ts;
+ 	u64 ns;
+ 
+-	if (fep->pps_enable == enable)
+-		return 0;
+-
+-	fep->pps_channel = DEFAULT_PPS_CHANNEL;
+-	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+-
+ 	spin_lock_irqsave(&fep->tmreg_lock, flags);
+ 
++	if (fep->pps_enable == enable) {
++		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
++		return 0;
++	}
++
+ 	if (enable) {
+ 		/* clear capture or output compare interrupt status if have.
+ 		 */
+@@ -532,6 +531,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ 	int ret = 0;
+ 
+ 	if (rq->type == PTP_CLK_REQ_PPS) {
++		fep->pps_channel = DEFAULT_PPS_CHANNEL;
++		fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
++
+ 		ret = fec_ptp_enable_pps(fep, on);
+ 
+ 		return ret;
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index d9f6cc71d900a..e7d28432ba038 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -3135,6 +3135,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+ 	case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ 		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ 		break;
++	case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
++	case ICE_PHY_TYPE_HIGH_200G_SR4:
++	case ICE_PHY_TYPE_HIGH_200G_FR4:
++	case ICE_PHY_TYPE_HIGH_200G_LR4:
++	case ICE_PHY_TYPE_HIGH_200G_DR4:
++	case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
++	case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
++	case ICE_PHY_TYPE_HIGH_200G_AUI4:
++		speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
++		break;
+ 	default:
+ 		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ 		break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 78b833b3e1d7e..62c8205fcebae 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3593,7 +3593,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 	struct ice_pf *pf = vsi->back;
+ 	int new_rx = 0, new_tx = 0;
+ 	bool locked = false;
+-	u32 curr_combined;
+ 	int ret = 0;
+ 
+ 	/* do not support changing channels in Safe Mode */
+@@ -3615,22 +3614,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	curr_combined = ice_get_combined_cnt(vsi);
+-
+-	/* these checks are for cases where user didn't specify a particular
+-	 * value on cmd line but we get non-zero value anyway via
+-	 * get_channels(); look at ethtool.c in ethtool repository (the user
+-	 * space part), particularly, do_schannels() routine
+-	 */
+-	if (ch->rx_count == vsi->num_rxq - curr_combined)
+-		ch->rx_count = 0;
+-	if (ch->tx_count == vsi->num_txq - curr_combined)
+-		ch->tx_count = 0;
+-	if (ch->combined_count == curr_combined)
+-		ch->combined_count = 0;
+-
+-	if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
+-		netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
++	if (ch->rx_count && ch->tx_count) {
++		netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+index 2e9ad27cb9d13..6e8f2aab60801 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+ 		return -EINVAL;
+ 
+ 	err = ice_fltr_add_vlan(vsi, vlan);
+-	if (err && err != -EEXIST) {
++	if (!err)
++		vsi->num_vlan++;
++	else if (err == -EEXIST)
++		err = 0;
++	else
+ 		dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ 			vlan->vid, vsi->vsi_num, err);
+-		return err;
+-	}
+ 
+-	vsi->num_vlan++;
+-	return 0;
++	return err;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index 6972d728431cb..1885ba618981d 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -222,14 +222,19 @@ static int idpf_set_channels(struct net_device *netdev,
+ 			     struct ethtool_channels *ch)
+ {
+ 	struct idpf_vport_config *vport_config;
+-	u16 combined, num_txq, num_rxq;
+ 	unsigned int num_req_tx_q;
+ 	unsigned int num_req_rx_q;
+ 	struct idpf_vport *vport;
++	u16 num_txq, num_rxq;
+ 	struct device *dev;
+ 	int err = 0;
+ 	u16 idx;
+ 
++	if (ch->rx_count && ch->tx_count) {
++		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
++		return -EINVAL;
++	}
++
+ 	idpf_vport_ctrl_lock(netdev);
+ 	vport = idpf_netdev_to_vport(netdev);
+ 
+@@ -239,20 +244,6 @@ static int idpf_set_channels(struct net_device *netdev,
+ 	num_txq = vport_config->user_config.num_req_tx_qs;
+ 	num_rxq = vport_config->user_config.num_req_rx_qs;
+ 
+-	combined = min(num_txq, num_rxq);
+-
+-	/* these checks are for cases where user didn't specify a particular
+-	 * value on cmd line but we get non-zero value anyway via
+-	 * get_channels(); look at ethtool.c in ethtool repository (the user
+-	 * space part), particularly, do_schannels() routine
+-	 */
+-	if (ch->combined_count == combined)
+-		ch->combined_count = 0;
+-	if (ch->combined_count && ch->rx_count == num_rxq - combined)
+-		ch->rx_count = 0;
+-	if (ch->combined_count && ch->tx_count == num_txq - combined)
+-		ch->tx_count = 0;
+-
+ 	num_req_tx_q = ch->combined_count + ch->tx_count;
+ 	num_req_rx_q = ch->combined_count + ch->rx_count;
+ 
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index 5d3532c27d57f..ae8a48c480708 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -1394,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
+ 	}
+ 
+ 	idpf_rx_init_buf_tail(vport);
++	idpf_vport_intr_ena(vport);
+ 
+ 	err = idpf_send_config_queues_msg(vport);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index f5bc4a2780745..7fc77ed9d1232 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3747,9 +3747,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+  */
+ void idpf_vport_intr_deinit(struct idpf_vport *vport)
+ {
++	idpf_vport_intr_dis_irq_all(vport);
+ 	idpf_vport_intr_napi_dis_all(vport);
+ 	idpf_vport_intr_napi_del_all(vport);
+-	idpf_vport_intr_dis_irq_all(vport);
+ 	idpf_vport_intr_rel_irq(vport);
+ }
+ 
+@@ -4180,7 +4180,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
+ 
+ 	idpf_vport_intr_map_vector_to_qs(vport);
+ 	idpf_vport_intr_napi_add_all(vport);
+-	idpf_vport_intr_napi_ena_all(vport);
+ 
+ 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ 	if (err)
+@@ -4194,17 +4193,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
+ 	if (err)
+ 		goto unroll_vectors_alloc;
+ 
+-	idpf_vport_intr_ena_irq_all(vport);
+-
+ 	return 0;
+ 
+ unroll_vectors_alloc:
+-	idpf_vport_intr_napi_dis_all(vport);
+ 	idpf_vport_intr_napi_del_all(vport);
+ 
+ 	return err;
+ }
+ 
++void idpf_vport_intr_ena(struct idpf_vport *vport)
++{
++	idpf_vport_intr_napi_ena_all(vport);
++	idpf_vport_intr_ena_irq_all(vport);
++}
++
+ /**
+  * idpf_config_rss - Send virtchnl messages to configure RSS
+  * @vport: virtual port
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+index df76493faa756..85a1466890d43 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -988,6 +988,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
+ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
+ void idpf_vport_intr_deinit(struct idpf_vport *vport);
+ int idpf_vport_intr_init(struct idpf_vport *vport);
++void idpf_vport_intr_ena(struct idpf_vport *vport);
+ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+ int idpf_config_rss(struct idpf_vport *vport);
+ int idpf_init_rss(struct idpf_vport *vport);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index ed440dd0c4f9f..84fb6b8de2a12 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -3676,9 +3676,7 @@ struct ixgbe_info {
+ #define IXGBE_KRM_LINK_S1(P)		((P) ? 0x8200 : 0x4200)
+ #define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+ #define IXGBE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+-#define IXGBE_KRM_AN_CNTL_4(P)		((P) ? 0x8238 : 0x4238)
+ #define IXGBE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+-#define IXGBE_KRM_PCS_KX_AN(P)		((P) ? 0x9918 : 0x5918)
+ #define IXGBE_KRM_SGMII_CTRL(P)		((P) ? 0x82A0 : 0x42A0)
+ #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P)	((P) ? 0x836C : 0x436C)
+ #define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+@@ -3688,7 +3686,6 @@ struct ixgbe_info {
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20(P)	((P) ? 0x9054 : 0x5054)
+ #define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+ #define IXGBE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
+-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P)	((P) ? 0x9180 : 0x5180)
+ 
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA		~(0x3 << 20)
+ #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR		BIT(20)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index 2decb0710b6e3..a5f6449344450 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -1722,59 +1722,9 @@ static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+ 		return -EINVAL;
+ 	}
+ 
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* change mode enforcement rules to hybrid */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x0400;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* manually control the config */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x20002240;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* move the AN base page values */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x1;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* set the AN37 over CB mode */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= 0x20000000;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+-
+-	/* restart AN manually */
+-	(void)mac->ops.read_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+-
+-	(void)mac->ops.write_iosf_sb_reg(hw,
+-			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+-			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
++	status = mac->ops.write_iosf_sb_reg(hw,
++				IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
++				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ 
+ 	/* Toggle port SW reset by AN reset. */
+ 	status = ixgbe_restart_an_internal_phy_x550em(hw);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 1723e9912ae07..6cddb4da85b71 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -1407,7 +1407,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
+ 	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
+ 
+ 	/* delete the txschq nodes allocated for this node */
++	otx2_qos_disable_sq(pfvf, qid);
++	otx2_qos_free_hw_node_schq(pfvf, node);
+ 	otx2_qos_free_sw_node_schq(pfvf, node);
++	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+ 
+ 	/* mark this node as htb inner node */
+ 	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
+@@ -1554,6 +1557,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
+ 		dwrr_del_node = true;
+ 
+ 	/* destroy the leaf node */
++	otx2_qos_disable_sq(pfvf, qid);
+ 	otx2_qos_destroy_node(pfvf, node);
+ 	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index caa34b9c161e5..33e32584b07f5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -102,8 +102,14 @@ static inline void
+ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+ {
+ 	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
++	struct udphdr *udphdr;
+ 
+-	udp_hdr(skb)->len = htons(payload_len);
++	if (skb->encapsulation)
++		udphdr = (struct udphdr *)skb_inner_transport_header(skb);
++	else
++		udphdr = udp_hdr(skb);
++
++	udphdr->len = htons(payload_len);
+ }
+ 
+ struct mlx5e_accel_tx_state {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 41a2543a52cda..e51b03d4c717f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -750,8 +750,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ err_fs_ft:
+ 	if (rx->allow_tunnel_mode)
+ 		mlx5_eswitch_unblock_encap(mdev);
+-	mlx5_del_flow_rules(rx->status.rule);
+-	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
++	mlx5_ipsec_rx_status_destroy(ipsec, rx);
+ err_add:
+ 	mlx5_destroy_flow_table(rx->ft.status);
+ err_fs_ft_status:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+index 82064614846f5..359050f0b54dd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+ 		if (!x || !x->xso.offload_handle)
+ 			goto out_disable;
+ 
+-		if (xo->inner_ipproto) {
+-			/* Cannot support tunnel packet over IPsec tunnel mode
+-			 * because we cannot offload three IP header csum
+-			 */
+-			if (x->props.mode == XFRM_MODE_TUNNEL)
+-				goto out_disable;
+-
+-			/* Only support UDP or TCP L4 checksum */
+-			if (xo->inner_ipproto != IPPROTO_UDP &&
+-			    xo->inner_ipproto != IPPROTO_TCP)
+-				goto out_disable;
+-		}
++		/* Only support UDP or TCP L4 checksum */
++		if (xo->inner_ipproto &&
++		    xo->inner_ipproto != IPPROTO_UDP &&
++		    xo->inner_ipproto != IPPROTO_TCP)
++			goto out_disable;
+ 
+ 		return features;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 64497b6eebd36..47be07af214ff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3790,7 +3790,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 		mlx5e_fold_sw_stats64(priv, stats);
+ 	}
+ 
+-	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
++	stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ 
+ 	stats->rx_length_errors =
+ 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index e21a3b4128ce8..0964b16ca5619 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+ 
+ 	*hopbyhop = 0;
+ 	if (skb->encapsulation) {
+-		ihs = skb_inner_tcp_all_headers(skb);
++		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
++			ihs = skb_inner_transport_offset(skb) +
++			      sizeof(struct udphdr);
++		else
++			ihs = skb_inner_tcp_all_headers(skb);
+ 		stats->tso_inner_packets++;
+ 		stats->tso_inner_bytes += skb->len - ihs;
+ 	} else {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 37598d116f3b8..58a452d20daf7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -720,6 +720,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ 	struct mlx5_core_dev *dev;
+ 	u8 mode;
+ #endif
++	bool roce_support;
+ 	int i;
+ 
+ 	for (i = 0; i < ldev->ports; i++)
+@@ -746,6 +747,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+ 		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
+ 			return false;
+ #endif
++	roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
++	for (i = 1; i < ldev->ports; i++)
++		if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
++			return false;
++
+ 	return true;
+ }
+ 
+@@ -913,8 +919,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+ 		} else if (roce_lag) {
+ 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ 			mlx5_rescan_drivers_locked(dev0);
+-			for (i = 1; i < ldev->ports; i++)
+-				mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++			for (i = 1; i < ldev->ports; i++) {
++				if (mlx5_get_roce_state(ldev->pf[i].dev))
++					mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++			}
+ 		} else if (shared_fdb) {
+ 			int i;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+index dd5d186dc6148..f6deb5a3f8202 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
+ 
+ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
+ {
+-	/* Feature is currently implemented for PFs only */
+-	if (!mlx5_core_is_pf(dev))
+-		return false;
+-
+ 	/* Honor the SW implementation limit */
+ 	if (host_buses > MLX5_SD_MAX_GROUP_SZ)
+ 		return false;
+@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
+ 	bool sdm;
+ 	int err;
+ 
++	/* Feature is currently implemented for PFs only */
++	if (!mlx5_core_is_pf(dev))
++		return 0;
++
++	/* Block on embedded CPU PFs */
++	if (mlx5_core_is_ecpf(dev))
++		return 0;
++
+ 	if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ 		return 0;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+index af50ff9e5f267..ce49c9514f911 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+@@ -539,7 +539,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
+ 	if (!dst || dst->error)
+ 		goto out;
+ 
+-	rt6 = container_of(dst, struct rt6_info, dst);
++	rt6 = dst_rt6_info(dst);
+ 
+ 	dev = dst->dev;
+ 	*saddrp = fl6.saddr;
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 61d88207eed42..6695ed661ef83 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -474,14 +474,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev,
+ 	    cfg->source != HWTSTAMP_SOURCE_PHYLIB)
+ 		return -EOPNOTSUPP;
+ 
++	if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
++		return -EOPNOTSUPP;
++
+ 	err = lan966x_ptp_setup_traps(port, cfg);
+ 	if (err)
+ 		return err;
+ 
+ 	if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
+-		if (!port->lan966x->ptp)
+-			return -EOPNOTSUPP;
+-
+ 		err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
+ 		if (err) {
+ 			lan966x_ptp_del_traps(port);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+index 6df53ab17fbc5..902a2717785cb 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+@@ -360,7 +360,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
+ {
+ 	const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
+ 
+-	rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
++	rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
+ 	rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
+ 	rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
+ 	rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 2d5b021b4ea60..fef4eff7753a7 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 
+ 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 
+-	err = ip_local_out(net, skb->sk, skb);
++	err = ip_local_out(net, NULL, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 
+ 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ 
+-	err = ip6_local_out(dev_net(dev), skb->sk, skb);
++	err = ip6_local_out(dev_net(dev), NULL, skb);
+ 	if (unlikely(net_xmit_eval(err)))
+ 		DEV_STATS_INC(dev, tx_errors);
+ 	else
+diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
+index a4d2e76a8d587..16789cd446e9e 100644
+--- a/drivers/net/netkit.c
++++ b/drivers/net/netkit.c
+@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
+ 	skb_scrub_packet(skb, xnet);
+ 	skb->priority = 0;
+ 	nf_skip_egress(skb, true);
++	skb_reset_mac_header(skb);
+ }
+ 
+ static struct netkit *netkit_priv(const struct net_device *dev)
+@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		     skb_orphan_frags(skb, GFP_ATOMIC)))
+ 		goto drop;
+ 	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
++	eth_skb_pkt_type(skb, peer);
+ 	skb->dev = peer;
+ 	entry = rcu_dereference(nk->active);
+ 	if (entry)
+@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	switch (ret) {
+ 	case NETKIT_NEXT:
+ 	case NETKIT_PASS:
+-		skb->protocol = eth_type_trans(skb, skb->dev);
++		eth_skb_pull_mac(skb);
+ 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ 		if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
+ 			dev_sw_netstats_tx_add(dev, 1, len);
+@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
+ 	/* Nothing to do, we receive whatever gets pushed to us! */
+ }
+ 
++static int netkit_set_macaddr(struct net_device *dev, void *sa)
++{
++	struct netkit *nk = netkit_priv(dev);
++
++	if (nk->mode != NETKIT_L2)
++		return -EOPNOTSUPP;
++
++	return eth_mac_addr(dev, sa);
++}
++
+ static void netkit_set_headroom(struct net_device *dev, int headroom)
+ {
+ 	struct netkit *nk = netkit_priv(dev), *nk2;
+@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
+ 	.ndo_start_xmit		= netkit_xmit,
+ 	.ndo_set_rx_mode	= netkit_set_multicast,
+ 	.ndo_set_rx_headroom	= netkit_set_headroom,
++	.ndo_set_mac_address	= netkit_set_macaddr,
+ 	.ndo_get_iflink		= netkit_get_iflink,
+ 	.ndo_get_peer_dev	= netkit_peer_dev,
+ 	.ndo_get_stats64	= netkit_get_stats,
+@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
+ 
+ 	if (!attr)
+ 		return 0;
+-	NL_SET_ERR_MSG_ATTR(extack, attr,
+-			    "Setting Ethernet address is not supported");
+-	return -EOPNOTSUPP;
++	if (nla_len(attr) != ETH_ALEN)
++		return -EINVAL;
++	if (!is_valid_ether_addr(nla_data(attr)))
++		return -EADDRNOTAVAIL;
++	return 0;
+ }
+ 
+ static struct rtnl_link_ops netkit_link_ops;
+@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 		strscpy(ifname, "nk%d", IFNAMSIZ);
+ 		ifname_assign_type = NET_NAME_ENUM;
+ 	}
++	if (mode != NETKIT_L2 &&
++	    (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
++		return -EOPNOTSUPP;
+ 
+ 	net = rtnl_link_get_net(src_net, tbp);
+ 	if (IS_ERR(net))
+@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 
+ 	netif_inherit_tso_max(peer, dev);
+ 
+-	if (mode == NETKIT_L2)
++	if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
+ 		eth_hw_addr_random(peer);
+ 	if (ifmp && dev->ifindex)
+ 		peer->ifindex = ifmp->ifi_index;
+@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
+ 	if (err < 0)
+ 		goto err_configure_peer;
+ 
+-	if (mode == NETKIT_L2)
++	if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
+ 		eth_hw_addr_random(dev);
+ 	if (tb[IFLA_IFNAME])
+ 		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
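
The netkit_validate() change above drops the blanket -EOPNOTSUPP and instead performs the usual two-step Ethernet address check: the attribute must be exactly ETH_ALEN bytes, and those bytes must form a valid unicast address (not all-zero, multicast bit clear). A minimal userspace sketch of the same check, assuming a plain byte buffer in place of the netlink attribute:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Valid unicast MAC: not all-zero, and the multicast bit (LSB of byte 0) clear. */
static bool valid_ether_addr(const unsigned char *a)
{
	bool all_zero = true;

	for (size_t i = 0; i < ETH_ALEN; i++)
		if (a[i])
			all_zero = false;
	return !all_zero && !(a[0] & 0x01);
}

static int validate_addr(const unsigned char *buf, size_t len)
{
	if (len != ETH_ALEN)
		return -1;	/* -EINVAL in the kernel code */
	if (!valid_ether_addr(buf))
		return -2;	/* -EADDRNOTAVAIL */
	return 0;
}

int main(void)
{
	unsigned char good[ETH_ALEN]  = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", validate_addr(good, ETH_ALEN),
	       validate_addr(mcast, ETH_ALEN));	/* 0 -2 */
	return 0;
}
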
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 87780465cd0d5..13370439a7cae 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -3516,7 +3516,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ 		err = phy_read(phydev, LAN8814_INTS);
+-		if (err)
++		if (err < 0)
+ 			return err;
+ 
+ 		/* Enable / disable interrupts. It is OK to enable PTP interrupt
+@@ -3532,6 +3532,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
+ 			return err;
+ 
+ 		err = phy_read(phydev, LAN8814_INTS);
++		if (err < 0)
++			return err;
++
++	/* A positive return value is not an error; it just reports the
++	 * previous interrupt status. Clear it so that the function
++	 * returns success.
++	 */
++		err = 0;
+ 	}
+ 
+ 	return err;
+@@ -4814,6 +4822,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	/* PHY_BASIC_FEATURES */
+ 	.probe		= kszphy_probe,
+ 	.config_init	= ksz8061_config_init,
++	.soft_reset	= genphy_soft_reset,
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+ 	.suspend	= kszphy_suspend,
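
Both micrel fixes above rest on the same phylib convention: phy_read() returns a negative errno on failure and the (possibly non-zero) register contents on success, so only 'err < 0' is an error, and a leftover positive status value has to be cleared before being returned as a result code. The convention in isolation, with a stand-in for phy_read():

#include <stdio.h>

/* Stand-in for phy_read(): negative errno on failure, register value otherwise. */
static int fake_phy_read(int fail)
{
	return fail ? -5 /* -EIO */ : 0x0040;	/* pending interrupt status bits */
}

static int config_intr(int fail)
{
	int err = fake_phy_read(fail);	/* read to clear pending interrupts */

	if (err < 0)			/* only negative values are errors */
		return err;
	/* A positive value is just the old status; report success. */
	return 0;
}

int main(void)
{
	printf("%d %d\n", config_intr(0), config_intr(1));	/* 0 -5 */
	return 0;
}
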
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index cbea246664795..8e82184be5e7d 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
+ static int smsc95xx_reset(struct usbnet *dev)
+ {
+ 	struct smsc95xx_priv *pdata = dev->driver_priv;
+-	u32 read_buf, write_buf, burst_cap;
++	u32 read_buf, burst_cap;
+ 	int ret = 0, timeout;
+ 
+ 	netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
+@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
+ 		return ret;
+ 	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ 
++	ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
++	if (ret < 0)
++		return ret;
+ 	/* Configure GPIO pins as LED outputs */
+-	write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+-		LED_GPIO_CFG_FDX_LED;
+-	ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
++	read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
++		    LED_GPIO_CFG_FDX_LED;
++	ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
+ 	if (ret < 0)
+ 		return ret;
+ 
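
The smsc95xx hunk turns a blind write of LED_GPIO_CFG into a read-modify-write, so the three LED enable bits are OR-ed into whatever the register already holds instead of clobbering its other fields. The same pattern against a mock register (the bit positions here are illustrative, not the chip's):

#include <stdint.h>
#include <stdio.h>

#define LED_SPD (1u << 0)	/* illustrative bit positions, not the chip's */
#define LED_LNK (1u << 1)
#define LED_FDX (1u << 2)

static uint32_t reg = 0xA0;	/* pretend other fields are already configured */

static uint32_t read_reg(void)          { return reg; }
static void     write_reg(uint32_t val) { reg = val; }

int main(void)
{
	/* Read-modify-write: OR the LED bits in instead of overwriting. */
	uint32_t val = read_reg();

	val |= LED_SPD | LED_LNK | LED_FDX;
	write_reg(val);
	printf("0x%x\n", reg);	/* 0xa7: the original 0xa0 is preserved */
	return 0;
}
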
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index bb95ce43cd97d..c3af9ad5e1547 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -653,7 +653,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
+ 	skb->dev = dev;
+ 
+ 	rcu_read_lock();
+-	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
++	nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr);
+ 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+ 	if (unlikely(!neigh))
+ 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
+@@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev)
+ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+-	struct rtable *rt = (struct rtable *)dst;
++	struct rtable *rt = dst_rtable(dst);
+ 	struct net_device *dev = dst->dev;
+ 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ 	struct neighbour *neigh;
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 3a9148fb1422b..6b64f28a9174d 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2528,7 +2528,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 		}
+ 
+ 		if (!info) {
+-			u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
++			u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
+ 
+ 			err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6,
+ 						    dst_port, ifindex, vni,
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 095f59e7aa937..d513fd27589df 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req)
+ 	}
+ }
+ 
+-static inline void nvme_end_req(struct request *req)
++static inline void __nvme_end_req(struct request *req)
++{
++	nvme_end_req_zoned(req);
++	nvme_trace_bio_complete(req);
++	if (req->cmd_flags & REQ_NVME_MPATH)
++		nvme_mpath_end_request(req);
++}
++
++void nvme_end_req(struct request *req)
+ {
+ 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ 
+@@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req)
+ 		else
+ 			nvme_log_error(req);
+ 	}
+-	nvme_end_req_zoned(req);
+-	nvme_trace_bio_complete(req);
+-	if (req->cmd_flags & REQ_NVME_MPATH)
+-		nvme_mpath_end_request(req);
++	__nvme_end_req(req);
+ 	blk_mq_end_request(req, status);
+ }
+ 
+@@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req)
+ {
+ 	trace_nvme_complete_rq(req);
+ 	nvme_cleanup_cmd(req);
+-	nvme_end_req_zoned(req);
++	__nvme_end_req(req);
+ }
+ EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
+ 
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index d16e976ae1a47..a4e46eb20be63 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -118,7 +118,8 @@ void nvme_failover_req(struct request *req)
+ 	blk_steal_bios(&ns->head->requeue_list, req);
+ 	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ 
+-	blk_mq_end_request(req, 0);
++	nvme_req(req)->status = 0;
++	nvme_end_req(req);
+ 	kblockd_schedule_work(&ns->head->requeue_work);
+ }
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 05532c2811774..d7bcc6d51e84e 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -767,6 +767,7 @@ static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+ 	}
+ }
+ 
++void nvme_end_req(struct request *req);
+ void nvme_complete_rq(struct request *req);
+ void nvme_complete_batch_req(struct request *req);
+ 
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 7fda69395c1ef..dfdff6aba6953 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -676,10 +676,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ 	if (kstrtobool(page, &enable))
+ 		return -EINVAL;
+ 
++	/*
++	 * Take the global nvmet_config_sem because the disable routine has
++	 * a window where it releases the subsys lock, giving a parallel
++	 * enable a chance to run concurrently and leave the disable with a
++	 * miscounted ns percpu_ref.
++	 */
++	down_write(&nvmet_config_sem);
+ 	if (enable)
+ 		ret = nvmet_ns_enable(ns);
+ 	else
+ 		nvmet_ns_disable(ns);
++	up_write(&nvmet_config_sem);
+ 
+ 	return ret ? ret : count;
+ }
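
The configfs fix serializes namespace enable and disable under a single writer lock, closing the window where nvmet_ns_disable() drops the subsys lock and a parallel enable could sneak in and skew the ns percpu_ref accounting. The shape of the pattern as a kernel-style sketch; do_enable()/do_disable() are placeholders for nvmet_ns_enable()/nvmet_ns_disable():

static DECLARE_RWSEM(cfg_sem);

static ssize_t enable_store(bool enable)
{
	int ret = 0;

	down_write(&cfg_sem);	/* one enable/disable at a time, globally */
	if (enable)
		ret = do_enable();
	else
		do_disable();	/* may drop/retake finer-grained locks inside */
	up_write(&cfg_sem);

	return ret;
}
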
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 746a11dcb67f1..c43a1479de2ce 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -604,11 +604,16 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ {
+ 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++	struct dw_pcie_ep_func *ep_func;
++	struct device *dev = pci->dev;
++	struct pci_epc *epc = ep->epc;
+ 	unsigned int offset, ptm_cap_base;
+ 	unsigned int nbars;
+ 	u8 hdr_type;
++	u8 func_no;
++	int i, ret;
++	void *addr;
+ 	u32 reg;
+-	int i;
+ 
+ 	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ 		   PCI_HEADER_TYPE_MASK;
+@@ -619,6 +624,58 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ 		return -EIO;
+ 	}
+ 
++	dw_pcie_version_detect(pci);
++
++	dw_pcie_iatu_detect(pci);
++
++	ret = dw_pcie_edma_detect(pci);
++	if (ret)
++		return ret;
++
++	if (!ep->ib_window_map) {
++		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
++						       GFP_KERNEL);
++		if (!ep->ib_window_map)
++			goto err_remove_edma;
++	}
++
++	if (!ep->ob_window_map) {
++		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
++						       GFP_KERNEL);
++		if (!ep->ob_window_map)
++			goto err_remove_edma;
++	}
++
++	if (!ep->outbound_addr) {
++		addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
++				    GFP_KERNEL);
++		if (!addr)
++			goto err_remove_edma;
++		ep->outbound_addr = addr;
++	}
++
++	for (func_no = 0; func_no < epc->max_functions; func_no++) {
++
++		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
++		if (ep_func)
++			continue;
++
++		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
++		if (!ep_func)
++			goto err_remove_edma;
++
++		ep_func->func_no = func_no;
++		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
++							      PCI_CAP_ID_MSI);
++		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
++							       PCI_CAP_ID_MSIX);
++
++		list_add_tail(&ep_func->list, &ep->func_list);
++	}
++
++	if (ep->ops->init)
++		ep->ops->init(ep);
++
+ 	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ 	ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ 
+@@ -658,14 +715,17 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ 	dw_pcie_dbi_ro_wr_dis(pci);
+ 
+ 	return 0;
++
++err_remove_edma:
++	dw_pcie_edma_remove(pci);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+ 
+ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ {
+ 	int ret;
+-	void *addr;
+-	u8 func_no;
+ 	struct resource *res;
+ 	struct pci_epc *epc;
+ 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+@@ -673,7 +733,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct device_node *np = dev->of_node;
+ 	const struct pci_epc_features *epc_features;
+-	struct dw_pcie_ep_func *ep_func;
+ 
+ 	INIT_LIST_HEAD(&ep->func_list);
+ 
+@@ -691,26 +750,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 	if (ep->ops->pre_init)
+ 		ep->ops->pre_init(ep);
+ 
+-	dw_pcie_version_detect(pci);
+-
+-	dw_pcie_iatu_detect(pci);
+-
+-	ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+-					       GFP_KERNEL);
+-	if (!ep->ib_window_map)
+-		return -ENOMEM;
+-
+-	ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+-					       GFP_KERNEL);
+-	if (!ep->ob_window_map)
+-		return -ENOMEM;
+-
+-	addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+-			    GFP_KERNEL);
+-	if (!addr)
+-		return -ENOMEM;
+-	ep->outbound_addr = addr;
+-
+ 	epc = devm_pci_epc_create(dev, &epc_ops);
+ 	if (IS_ERR(epc)) {
+ 		dev_err(dev, "Failed to create epc device\n");
+@@ -724,23 +763,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 	if (ret < 0)
+ 		epc->max_functions = 1;
+ 
+-	for (func_no = 0; func_no < epc->max_functions; func_no++) {
+-		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+-		if (!ep_func)
+-			return -ENOMEM;
+-
+-		ep_func->func_no = func_no;
+-		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+-							      PCI_CAP_ID_MSI);
+-		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+-							       PCI_CAP_ID_MSIX);
+-
+-		list_add_tail(&ep_func->list, &ep->func_list);
+-	}
+-
+-	if (ep->ops->init)
+-		ep->ops->init(ep);
+-
+ 	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ 			       ep->page_size);
+ 	if (ret < 0) {
+@@ -756,25 +778,25 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 		goto err_exit_epc_mem;
+ 	}
+ 
+-	ret = dw_pcie_edma_detect(pci);
+-	if (ret)
+-		goto err_free_epc_mem;
+-
+ 	if (ep->ops->get_features) {
+ 		epc_features = ep->ops->get_features(ep);
+ 		if (epc_features->core_init_notifier)
+ 			return 0;
+ 	}
+ 
++	/*
++	 * NOTE: Avoid accessing the hardware (e.g. the DBI space) before
++	 * this step, as platforms that implement the 'core_init_notifier'
++	 * feature may not have the hardware ready (i.e. the core
++	 * initialized) for access (e.g. tegra194). Any hardware access on
++	 * such platforms results in a system hang.
++	 */
+ 	ret = dw_pcie_ep_init_complete(ep);
+ 	if (ret)
+-		goto err_remove_edma;
++		goto err_free_epc_mem;
+ 
+ 	return 0;
+ 
+-err_remove_edma:
+-	dw_pcie_edma_remove(pci);
+-
+ err_free_epc_mem:
+ 	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ 			      epc->mem->window.page_size);
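
Because the allocations now live in dw_pcie_ep_init_complete(), which may run more than once over the life of the device, each one is guarded by an 'if (!ptr)' check so it only happens on the first pass and is reused afterwards. A minimal userspace sketch of that idempotent-init pattern, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ep_state {
	unsigned long *window_map;	/* allocated once, reused on re-init */
};

static int ep_init_complete(struct ep_state *ep, size_t nwindows)
{
	if (!ep->window_map) {	/* skip if a previous pass already allocated it */
		ep->window_map = calloc(nwindows, sizeof(*ep->window_map));
		if (!ep->window_map)
			return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct ep_state ep = { 0 };

	printf("%d %d\n", ep_init_complete(&ep, 8), ep_init_complete(&ep, 8));
	free(ep.window_map);
	return 0;
}
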
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 1f7b662cb8e15..e440c09d1dc11 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -2273,11 +2273,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
+ 		ret = tegra_pcie_config_ep(pcie, pdev);
+ 		if (ret < 0)
+ 			goto fail;
++		else
++			return 0;
+ 		break;
+ 
+ 	default:
+ 		dev_err(dev, "Invalid PCIe device type %d\n",
+ 			pcie->of_data->mode);
++		ret = -EINVAL;
+ 	}
+ 
+ fail:
+diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
+index c2c7334152bc0..03539e5053720 100644
+--- a/drivers/pci/of_property.c
++++ b/drivers/pci/of_property.c
+@@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
+ 		return 0;
+ 
+ 	int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
++	if (!int_map)
++		return -ENOMEM;
+ 	mapp = int_map;
+ 
+ 	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e5f243dd42884..70b8c87055cb6 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4629,7 +4629,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
+ 	 * avoid LTSSM race as recommended in Implementation Note at the
+ 	 * end of PCIe r6.0.1 sec 7.5.3.7.
+ 	 */
+-	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
++	rc = pcie_wait_for_link_status(pdev, true, false);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
+index 5f4914d313a17..e86298dbbcff6 100644
+--- a/drivers/pci/pcie/edr.c
++++ b/drivers/pci/pcie/edr.c
+@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ 	int status = 0;
+ 
+ 	/*
+-	 * Behavior when calling unsupported _DSM functions is undefined,
+-	 * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++	 * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
++	 * optional. Return success if it's not implemented.
+ 	 */
+-	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ 			    1ULL << EDR_PORT_DPC_ENABLE_DSM))
+ 		return 0;
+ 
+@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
+ 	argv4.package.count = 1;
+ 	argv4.package.elements = &req;
+ 
+-	/*
+-	 * Per Downstream Port Containment Related Enhancements ECN to PCI
+-	 * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+-	 * optional.  Return success if it's not implemented.
+-	 */
+-	obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
++	obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
+ 				EDR_PORT_DPC_ENABLE_DSM, &argv4);
+ 	if (!obj)
+ 		return 0;
+@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ 	u16 port;
+ 
+ 	/*
+-	 * Behavior when calling unsupported _DSM functions is undefined,
+-	 * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
++	 * If EDR_PORT_LOCATE_DSM is not implemented under the target of
++	 * EDR, the target is the port that experienced the containment
++	 * event (PCI Firmware r3.3, sec 4.6.13).
+ 	 */
+ 	if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ 			    1ULL << EDR_PORT_LOCATE_DSM))
+@@ -103,6 +99,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
+ 		return NULL;
+ 	}
+ 
++	/*
++	 * Bit 31 represents the success/failure of the operation. If bit
++	 * 31 is set, the operation failed.
++	 */
++	if (obj->integer.value & BIT(31)) {
++		ACPI_FREE(obj);
++		pci_err(pdev, "Locate Port _DSM failed\n");
++		return NULL;
++	}
++
+ 	/*
+ 	 * Firmware returns DPC port BDF details in following format:
+ 	 *	15:8 = bus
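
Two things change in the EDR code: the _DSM evaluations now pass revision 6 to match PCI Firmware r3.3, and the Locate Port result is checked for the failure flag in bit 31 before its BDF fields are trusted. A small sketch of decoding such a status word, using only the bus field shown in the comment above:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Decode a Locate Port _DSM result: bit 31 = failure flag, bits 15:8 = bus. */
static int decode_locate_port(uint32_t val, uint8_t *bus)
{
	if (val & BIT(31))	/* operation failed, the BDF fields are invalid */
		return -1;
	*bus = (val >> 8) & 0xff;
	return 0;
}

int main(void)
{
	uint8_t bus;

	if (!decode_locate_port(0x00003a00, &bus))
		printf("bus %u\n", bus);			/* bus 58 */
	printf("%d\n", decode_locate_port(BIT(31), &bus));	/* -1 */
	return 0;
}
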
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 8a81be2dd5ecf..88c17c1d6d499 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ 	if (event->cpu < 0)
+ 		return -EINVAL;
+ 
++	hwc->idx = -1;
++
++	if (event->group_leader == event)
++		return 0;
++
+ 	/*
+ 	 * We can't atomically disable all HW counters so only one event allowed,
+ 	 * although software events are acceptable.
+ 	 */
+-	if (event->group_leader != event &&
+-			!is_software_event(event->group_leader))
++	if (!is_software_event(event->group_leader))
+ 		return -EINVAL;
+ 
+ 	for_each_sibling_event(sibling, event->group_leader) {
+@@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
+ 			return -EINVAL;
+ 	}
+ 
+-	hwc->idx = -1;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index c21cdb8dbfe74..acc2b5b9ea255 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -1382,6 +1382,13 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
+ 	{ 0x3f, 0xff, 0xff, 0xff }
+ };
+ 
++static const u8 qmp_dp_v6_voltage_swing_hbr_rbr[4][4] = {
++	{ 0x27, 0x2f, 0x36, 0x3f },
++	{ 0x31, 0x3e, 0x3f, 0xff },
++	{ 0x36, 0x3f, 0xff, 0xff },
++	{ 0x3f, 0xff, 0xff, 0xff }
++};
++
+ static const u8 qmp_dp_v6_pre_emphasis_hbr_rbr[4][4] = {
+ 	{ 0x20, 0x2d, 0x34, 0x3a },
+ 	{ 0x20, 0x2e, 0x35, 0xff },
+@@ -2001,6 +2008,51 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+ };
+ 
++static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = {
++	.offsets		= &qmp_combo_offsets_v3,
++
++	.serdes_tbl		= sm8550_usb3_serdes_tbl,
++	.serdes_tbl_num		= ARRAY_SIZE(sm8550_usb3_serdes_tbl),
++	.tx_tbl			= sm8550_usb3_tx_tbl,
++	.tx_tbl_num		= ARRAY_SIZE(sm8550_usb3_tx_tbl),
++	.rx_tbl			= sm8550_usb3_rx_tbl,
++	.rx_tbl_num		= ARRAY_SIZE(sm8550_usb3_rx_tbl),
++	.pcs_tbl		= sm8550_usb3_pcs_tbl,
++	.pcs_tbl_num		= ARRAY_SIZE(sm8550_usb3_pcs_tbl),
++	.pcs_usb_tbl		= sm8550_usb3_pcs_usb_tbl,
++	.pcs_usb_tbl_num	= ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
++
++	.dp_serdes_tbl		= qmp_v6_dp_serdes_tbl,
++	.dp_serdes_tbl_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
++	.dp_tx_tbl		= qmp_v6_dp_tx_tbl,
++	.dp_tx_tbl_num		= ARRAY_SIZE(qmp_v6_dp_tx_tbl),
++
++	.serdes_tbl_rbr		= qmp_v6_dp_serdes_tbl_rbr,
++	.serdes_tbl_rbr_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
++	.serdes_tbl_hbr		= qmp_v6_dp_serdes_tbl_hbr,
++	.serdes_tbl_hbr_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
++	.serdes_tbl_hbr2	= qmp_v6_dp_serdes_tbl_hbr2,
++	.serdes_tbl_hbr2_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
++	.serdes_tbl_hbr3	= qmp_v6_dp_serdes_tbl_hbr3,
++	.serdes_tbl_hbr3_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
++
++	.swing_hbr_rbr		= &qmp_dp_v6_voltage_swing_hbr_rbr,
++	.pre_emphasis_hbr_rbr	= &qmp_dp_v6_pre_emphasis_hbr_rbr,
++	.swing_hbr3_hbr2	= &qmp_dp_v5_voltage_swing_hbr3_hbr2,
++	.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
++
++	.dp_aux_init		= qmp_v4_dp_aux_init,
++	.configure_dp_tx	= qmp_v4_configure_dp_tx,
++	.configure_dp_phy	= qmp_v4_configure_dp_phy,
++	.calibrate_dp_phy	= qmp_v4_calibrate_dp_phy,
++
++	.regs			= qmp_v6_usb3phy_regs_layout,
++	.reset_list		= msm8996_usb3phy_reset_l,
++	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
++	.vreg_list		= qmp_phy_vreg_l,
++	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
++};
++
+ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
+ {
+ 	const struct qmp_phy_cfg *cfg = qmp->cfg;
+@@ -3631,7 +3683,7 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
+ 	},
+ 	{
+ 		.compatible = "qcom,sm8650-qmp-usb3-dp-phy",
+-		.data = &sm8550_usb3dpphy_cfg,
++		.data = &sm8650_usb3dpphy_cfg,
+ 	},
+ 	{
+ 		.compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
+index c25357ca1963e..b9f067de8ef0e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
+@@ -65,7 +65,7 @@ enum {
+ 		.intr_detection_width = 2,		\
+ 	}
+ 
+-#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
++#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \
+ 	{						\
+ 		.grp = PINCTRL_PINGROUP(#pg_name, 	\
+ 			pg_name##_pins, 		\
+@@ -75,7 +75,7 @@ enum {
+ 		.intr_cfg_reg = 0,			\
+ 		.intr_status_reg = 0,			\
+ 		.intr_target_reg = 0,			\
+-		.tile = SOUTH,				\
++		.tile = _tile,				\
+ 		.mux_bit = -1,				\
+ 		.pull_bit = pull,			\
+ 		.drv_bit = drv,				\
+@@ -101,7 +101,7 @@ enum {
+ 		.intr_cfg_reg = 0,			\
+ 		.intr_status_reg = 0,			\
+ 		.intr_target_reg = 0,			\
+-		.tile = SOUTH,				\
++		.tile = WEST,				\
+ 		.mux_bit = -1,				\
+ 		.pull_bit = 3,				\
+ 		.drv_bit = 0,				\
+@@ -1199,13 +1199,13 @@ static const struct msm_pingroup sm7150_groups[] = {
+ 	[117] = PINGROUP(117, NORTH, _, _, _, _, _, _, _, _, _),
+ 	[118] = PINGROUP(118, NORTH, _, _, _, _, _, _, _, _, _),
+ 	[119] = UFS_RESET(ufs_reset, 0x9f000),
+-	[120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+-	[121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+-	[122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+-	[123] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+-	[124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6),
+-	[125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3),
+-	[126] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0),
++	[120] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0),
++	[121] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6),
++	[122] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3),
++	[123] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0),
++	[124] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6),
++	[125] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3),
++	[126] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0),
+ };
+ 
+ static const struct msm_gpio_wakeirq_map sm7150_pdc_map[] = {
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
+index afa8f06c85cf5..0cbfe7637fc97 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779h0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
+@@ -75,10 +75,10 @@
+ #define GPSR0_9		F_(MSIOF5_SYNC,		IP1SR0_7_4)
+ #define GPSR0_8		F_(MSIOF5_SS1,		IP1SR0_3_0)
+ #define GPSR0_7		F_(MSIOF5_SS2,		IP0SR0_31_28)
+-#define GPSR0_6		F_(IRQ0,		IP0SR0_27_24)
+-#define GPSR0_5		F_(IRQ1,		IP0SR0_23_20)
+-#define GPSR0_4		F_(IRQ2,		IP0SR0_19_16)
+-#define GPSR0_3		F_(IRQ3,		IP0SR0_15_12)
++#define GPSR0_6		F_(IRQ0_A,		IP0SR0_27_24)
++#define GPSR0_5		F_(IRQ1_A,		IP0SR0_23_20)
++#define GPSR0_4		F_(IRQ2_A,		IP0SR0_19_16)
++#define GPSR0_3		F_(IRQ3_A,		IP0SR0_15_12)
+ #define GPSR0_2		F_(GP0_02,		IP0SR0_11_8)
+ #define GPSR0_1		F_(GP0_01,		IP0SR0_7_4)
+ #define GPSR0_0		F_(GP0_00,		IP0SR0_3_0)
+@@ -265,10 +265,10 @@
+ #define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_N_B)	FM(TCLK2_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_15_12	FM(IRQ3_A)		FM(MSIOF3_SCK)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_19_16	FM(IRQ2_A)		FM(MSIOF3_TXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_23_20	FM(IRQ1_A)		FM(MSIOF3_RXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_27_24	FM(IRQ0_A)		FM(MSIOF3_SYNC)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ #define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+@@ -672,16 +672,16 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_11_8,	MSIOF3_SS2),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_15_12,	IRQ3),
++	PINMUX_IPSR_GPSR(IP0SR0_15_12,	IRQ3_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_15_12,	MSIOF3_SCK),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_19_16,	IRQ2),
++	PINMUX_IPSR_GPSR(IP0SR0_19_16,	IRQ2_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_19_16,	MSIOF3_TXD),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_23_20,	IRQ1),
++	PINMUX_IPSR_GPSR(IP0SR0_23_20,	IRQ1_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_23_20,	MSIOF3_RXD),
+ 
+-	PINMUX_IPSR_GPSR(IP0SR0_27_24,	IRQ0),
++	PINMUX_IPSR_GPSR(IP0SR0_27_24,	IRQ0_A),
+ 	PINMUX_IPSR_GPSR(IP0SR0_27_24,	MSIOF3_SYNC),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_31_28,	MSIOF5_SS2),
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 20425afc6b331..248ab71b9f9da 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -892,6 +892,8 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
+ 		val = PVDD_1800;
+ 		break;
+ 	case 2500:
++		if (!(caps & (PIN_CFG_IO_VMC_ETH0 | PIN_CFG_IO_VMC_ETH1)))
++			return -EINVAL;
+ 		val = PVDD_2500;
+ 		break;
+ 	case 3300:
+diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
+index 910df7c654f48..003e765dedea1 100644
+--- a/drivers/platform/x86/intel/tpmi.c
++++ b/drivers/platform/x86/intel/tpmi.c
+@@ -763,8 +763,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
+ 		 * when actual device nodes created outside this
+ 		 * loop via tpmi_create_devices().
+ 		 */
+-		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
+-			tpmi_process_info(tpmi_info, pfs);
++		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
++			ret = tpmi_process_info(tpmi_info, pfs);
++			if (ret)
++				return ret;
++		}
+ 
+ 		if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
+ 			tpmi_set_control_base(auxdev, tpmi_info, pfs);
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+index ef730200a04bd..bb8e72deb3542 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+@@ -240,6 +240,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
+ 	bool read_blocked = 0, write_blocked = 0;
+ 	struct intel_tpmi_plat_info *plat_info;
+ 	struct tpmi_uncore_struct *tpmi_uncore;
++	bool uncore_sysfs_added = false;
+ 	int ret, i, pkg = 0;
+ 	int num_resources;
+ 
+@@ -384,9 +385,15 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
+ 			}
+ 			/* Point to next cluster offset */
+ 			cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
++			uncore_sysfs_added = true;
+ 		}
+ 	}
+ 
++	if (!uncore_sysfs_added) {
++		ret = -ENODEV;
++		goto remove_clusters;
++	}
++
+ 	auxiliary_set_drvdata(auxdev, tpmi_uncore);
+ 
+ 	tpmi_uncore->root_cluster.root_domain = true;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 82429e59999da..87a4a381bd988 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3044,10 +3044,9 @@ static void tpacpi_send_radiosw_update(void)
+ 
+ static void hotkey_exit(void)
+ {
+-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ 	mutex_lock(&hotkey_mutex);
++#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ 	hotkey_poll_stop_sync();
+-	mutex_unlock(&hotkey_mutex);
+ #endif
+ 	dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
+ 		   "restoring original HKEY status and mask\n");
+@@ -3057,6 +3056,8 @@ static void hotkey_exit(void)
+ 	      hotkey_mask_set(hotkey_orig_mask)) |
+ 	     hotkey_status_set(false)) != 0)
+ 		pr_err("failed to restore hot key mask to BIOS defaults\n");
++
++	mutex_unlock(&hotkey_mutex);
+ }
+ 
+ static void __init hotkey_unmap(const unsigned int scancode)
+diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
+index 08d4ee369287e..dd871ffe979c3 100644
+--- a/drivers/regulator/bd71828-regulator.c
++++ b/drivers/regulator/bd71828-regulator.c
+@@ -206,14 +206,11 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 			.suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+ 			.suspend_mask = BD71828_MASK_BUCK1267_VOLT,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+-			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ 			/*
+ 			 * LPSR voltage is same as SUSPEND voltage. Allow
+-			 * setting it so that regulator can be set enabled at
+-			 * LPSR state
++			 * only enabling/disabling the regulator for the LPSR state.
+ 			 */
+-			.lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
++			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+ 		},
+ 		.reg_inits = buck1_inits,
+ 		.reg_init_amnt = ARRAY_SIZE(buck1_inits),
+@@ -288,13 +285,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK3_VOLT,
+-			.idle_reg = BD71828_REG_BUCK3_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK3_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK3_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK3_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK3_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK3_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK3_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -329,13 +320,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK4_VOLT,
+-			.idle_reg = BD71828_REG_BUCK4_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK4_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK4_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK4_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK4_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK4_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK4_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -370,13 +355,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_BUCK5_VOLT,
+-			.idle_reg = BD71828_REG_BUCK5_VOLT,
+-			.suspend_reg = BD71828_REG_BUCK5_VOLT,
+-			.lpsr_reg = BD71828_REG_BUCK5_VOLT,
+ 			.run_mask = BD71828_MASK_BUCK5_VOLT,
+-			.idle_mask = BD71828_MASK_BUCK5_VOLT,
+-			.suspend_mask = BD71828_MASK_BUCK5_VOLT,
+-			.lpsr_mask = BD71828_MASK_BUCK5_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -493,13 +472,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO1_VOLT,
+-			.idle_reg = BD71828_REG_LDO1_VOLT,
+-			.suspend_reg = BD71828_REG_LDO1_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO1_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -533,13 +506,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO2_VOLT,
+-			.idle_reg = BD71828_REG_LDO2_VOLT,
+-			.suspend_reg = BD71828_REG_LDO2_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO2_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -573,13 +540,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO3_VOLT,
+-			.idle_reg = BD71828_REG_LDO3_VOLT,
+-			.suspend_reg = BD71828_REG_LDO3_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO3_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -614,13 +575,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO4_VOLT,
+-			.idle_reg = BD71828_REG_LDO4_VOLT,
+-			.suspend_reg = BD71828_REG_LDO4_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO4_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -655,13 +610,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 				     ROHM_DVS_LEVEL_SUSPEND |
+ 				     ROHM_DVS_LEVEL_LPSR,
+ 			.run_reg = BD71828_REG_LDO5_VOLT,
+-			.idle_reg = BD71828_REG_LDO5_VOLT,
+-			.suspend_reg = BD71828_REG_LDO5_VOLT,
+-			.lpsr_reg = BD71828_REG_LDO5_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+@@ -720,9 +669,6 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
+ 			.suspend_reg = BD71828_REG_LDO7_VOLT,
+ 			.lpsr_reg = BD71828_REG_LDO7_VOLT,
+ 			.run_mask = BD71828_MASK_LDO_VOLT,
+-			.idle_mask = BD71828_MASK_LDO_VOLT,
+-			.suspend_mask = BD71828_MASK_LDO_VOLT,
+-			.lpsr_mask = BD71828_MASK_LDO_VOLT,
+ 			.idle_on_mask = BD71828_MASK_IDLE_EN,
+ 			.suspend_on_mask = BD71828_MASK_SUSP_EN,
+ 			.lpsr_on_mask = BD71828_MASK_LPSR_EN,
+diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
+index d492683365532..6e1ace660b8cf 100644
+--- a/drivers/regulator/helpers.c
++++ b/drivers/regulator/helpers.c
+@@ -161,6 +161,32 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
+ }
+ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap);
+ 
++static int write_separate_vsel_and_range(struct regulator_dev *rdev,
++					 unsigned int sel, unsigned int range)
++{
++	bool range_updated;
++	int ret;
++
++	ret = regmap_update_bits_base(rdev->regmap, rdev->desc->vsel_range_reg,
++				      rdev->desc->vsel_range_mask,
++				      range, &range_updated, false, false);
++	if (ret)
++		return ret;
++
++	/*
++	 * Some PMICs treat the vsel_reg the same as an apply bit. Force it
++	 * to be written if the range changed, even if the old selector is
++	 * the same as the new one.
++	 */
++	if (rdev->desc->range_applied_by_vsel && range_updated)
++		return regmap_write_bits(rdev->regmap,
++					rdev->desc->vsel_reg,
++					rdev->desc->vsel_mask, sel);
++
++	return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
++				  rdev->desc->vsel_mask, sel);
++}
++
+ /**
+  * regulator_set_voltage_sel_pickable_regmap - pickable range set_voltage_sel
+  *
+@@ -199,21 +225,12 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ 	range = rdev->desc->linear_range_selectors_bitfield[i];
+ 	range <<= ffs(rdev->desc->vsel_range_mask) - 1;
+ 
+-	if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
+-		ret = regmap_update_bits(rdev->regmap,
+-					 rdev->desc->vsel_reg,
++	if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg)
++		ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ 					 rdev->desc->vsel_range_mask |
+ 					 rdev->desc->vsel_mask, sel | range);
+-	} else {
+-		ret = regmap_update_bits(rdev->regmap,
+-					 rdev->desc->vsel_range_reg,
+-					 rdev->desc->vsel_range_mask, range);
+-		if (ret)
+-			return ret;
+-
+-		ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+-				  rdev->desc->vsel_mask, sel);
+-	}
++	else
++		ret = write_separate_vsel_and_range(rdev, sel, range);
+ 
+ 	if (ret)
+ 		return ret;
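
The new helper leans on regmap_update_bits_base() reporting, through its bool pointer argument, whether the range bits actually changed; only then is the selector force-written (regmap_write_bits() issues the bus write even for an unchanged value), so PMICs that latch the range on a vsel write pick up the new range. A mock-register model of the calling convention:

#include <stdbool.h>
#include <stdio.h>

static unsigned int regs[2];	/* [0] = range register, [1] = vsel register */

/* Read-modify-write a masked field; report whether the stored value changed
 * (the role regmap_update_bits_base() plays via its bool pointer argument).
 */
static void update_bits(int reg, unsigned int mask, unsigned int val, bool *changed)
{
	unsigned int old = regs[reg];

	regs[reg] = (old & ~mask) | (val & mask);
	if (changed)
		*changed = regs[reg] != old;
}

/* Unconditional masked write; in the kernel, regmap_write_bits() differs from
 * regmap_update_bits() by issuing the bus write even for an unchanged value.
 */
static void write_bits(int reg, unsigned int mask, unsigned int val)
{
	regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

static void set_voltage(unsigned int sel, unsigned int range)
{
	bool range_changed;

	update_bits(0, 0x80, range, &range_changed);
	if (range_changed)
		write_bits(1, 0xff, sel);	/* vsel doubles as the apply trigger */
	else
		update_bits(1, 0xff, sel, NULL);
}

int main(void)
{
	set_voltage(0x20, 0x80);
	printf("range=0x%x vsel=0x%x\n", regs[0], regs[1]);	/* range=0x80 vsel=0x20 */
	return 0;
}
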
+diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
+index 9b7c3d77789e3..3c9d79e003e4b 100644
+--- a/drivers/regulator/tps6287x-regulator.c
++++ b/drivers/regulator/tps6287x-regulator.c
+@@ -115,6 +115,7 @@ static struct regulator_desc tps6287x_reg = {
+ 	.vsel_mask = 0xFF,
+ 	.vsel_range_reg = TPS6287X_CTRL2,
+ 	.vsel_range_mask = TPS6287X_CTRL2_VRANGE,
++	.range_applied_by_vsel = true,
+ 	.ramp_reg = TPS6287X_CTRL1,
+ 	.ramp_mask = TPS6287X_CTRL1_VRAMP,
+ 	.ramp_delay_table = tps6287x_ramp_table,
+diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
+index b7f0c87797577..5fad61785e72f 100644
+--- a/drivers/regulator/tps6594-regulator.c
++++ b/drivers/regulator/tps6594-regulator.c
+@@ -287,30 +287,30 @@ static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = {
+ static const struct regulator_desc multi_regs[] = {
+ 	TPS6594_REGULATOR("BUCK12", "buck12", TPS6594_BUCK_1,
+ 			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_VOUT_1(1),
++			  TPS6594_REG_BUCKX_VOUT_1(0),
+ 			  TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_CTRL(1),
++			  TPS6594_REG_BUCKX_CTRL(0),
+ 			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 			  4, 4000, 0, NULL, 0, 0),
+ 	TPS6594_REGULATOR("BUCK34", "buck34", TPS6594_BUCK_3,
+ 			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_VOUT_1(3),
++			  TPS6594_REG_BUCKX_VOUT_1(2),
+ 			  TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_CTRL(3),
++			  TPS6594_REG_BUCKX_CTRL(2),
+ 			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 			  4, 0, 0, NULL, 0, 0),
+ 	TPS6594_REGULATOR("BUCK123", "buck123", TPS6594_BUCK_1,
+ 			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_VOUT_1(1),
++			  TPS6594_REG_BUCKX_VOUT_1(0),
+ 			  TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_CTRL(1),
++			  TPS6594_REG_BUCKX_CTRL(0),
+ 			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 			  4, 4000, 0, NULL, 0, 0),
+ 	TPS6594_REGULATOR("BUCK1234", "buck1234", TPS6594_BUCK_1,
+ 			  REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_VOUT_1(1),
++			  TPS6594_REG_BUCKX_VOUT_1(0),
+ 			  TPS6594_MASK_BUCKS_VSET,
+-			  TPS6594_REG_BUCKX_CTRL(1),
++			  TPS6594_REG_BUCKX_CTRL(0),
+ 			  TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
+ 			  4, 4000, 0, NULL, 0, 0),
+ };
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index cce0bafd4c926..f13837907bd5e 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -767,9 +767,9 @@ static void ap_check_bindings_complete(void)
+ 		if (bound == apqns) {
+ 			if (!completion_done(&ap_apqn_bindings_complete)) {
+ 				complete_all(&ap_apqn_bindings_complete);
++				ap_send_bindings_complete_uevent();
+ 				pr_debug("%s all apqn bindings complete\n", __func__);
+ 			}
+-			ap_send_bindings_complete_uevent();
+ 		}
+ 	}
+ }
+@@ -929,6 +929,12 @@ static int ap_device_probe(struct device *dev)
+ 			goto out;
+ 	}
+ 
++	/*
++	 * Rearm the bindings-complete completion so that it triggers
++	 * again once all devices are bound.
++	 */
++	reinit_completion(&ap_apqn_bindings_complete);
++
+ 	/* Add queue/card to list of active queues/cards */
+ 	spin_lock_bh(&ap_queues_lock);
+ 	if (is_queue_dev(dev))
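
complete_all() leaves a completion permanently 'done', so once a new AP device starts probing, the only way to make the bindings-complete event fire again later is to rearm it with reinit_completion(), which is what the probe path now does. The arm/fire cycle as a kernel-style sketch (not a compilable unit on its own):

static DECLARE_COMPLETION(bindings_complete);

/* Probe path: a new device appeared, so arm the completion again. */
static void on_device_probe(void)
{
	reinit_completion(&bindings_complete);
}

/* Scan path: fire the event at most once per arming, when all are bound. */
static void check_bindings(int bound, int total)
{
	if (bound == total && !completion_done(&bindings_complete)) {
		complete_all(&bindings_complete);
		/* ...send the bindings-complete uevent here... */
	}
}
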
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 613eab7297046..41fe8a043d61f 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -956,7 +956,7 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct rt6_info *rt;
+ 
+-	rt = (struct rt6_info *) dst;
++	rt = dst_rt6_info(dst);
+ 	if (dst) {
+ 		if (proto == htons(ETH_P_IPV6))
+ 			dst = dst_check(dst, rt6_get_cookie(rt));
+@@ -970,15 +970,14 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
+ static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
+ 					  struct dst_entry *dst)
+ {
+-	struct rtable *rt = (struct rtable *) dst;
+-
+-	return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
++	return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
++		       ip_hdr(skb)->daddr;
+ }
+ 
+ static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
+ 						    struct dst_entry *dst)
+ {
+-	struct rt6_info *rt = (struct rt6_info *) dst;
++	struct rt6_info *rt = dst_rt6_info(dst);
+ 
+ 	if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
+ 		return &rt->rt6i_gateway;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 65cdc8b77e358..caac482fff2ff 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3707,8 +3707,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	 */
+ 	if (sdkp->first_scan ||
+ 	    q->limits.max_sectors > q->limits.max_dev_sectors ||
+-	    q->limits.max_sectors > q->limits.max_hw_sectors)
++	    q->limits.max_sectors > q->limits.max_hw_sectors) {
+ 		q->limits.max_sectors = rw_max;
++		q->limits.max_user_sectors = rw_max;
++	}
+ 
+ 	sdkp->first_scan = 0;
+ 
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 0efc1c3bee5f5..3e7cf04aaf2a6 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1880,7 +1880,7 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ 
+ 	/* check if we found a PDI, else find in bi-directional */
+ 	if (!pdi)
+-		pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
++		pdi = cdns_find_pdi(cdns, 0, stream->num_bd, stream->bd,
+ 				    dai_id);
+ 
+ 	if (pdi) {
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 4a68abcdcc353..4c4ff074e3f6f 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1016,8 +1016,10 @@ static irqreturn_t stm32fx_spi_irq_event(int irq, void *dev_id)
+ static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
+ {
+ 	struct spi_controller *ctrl = dev_id;
++	struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
+ 
+ 	spi_finalize_current_transfer(ctrl);
++	stm32fx_spi_disable(spi);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ 		mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+ 
+ 	if (!(sr & mask)) {
+-		dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++		dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ 			 sr, ier);
+ 		spin_unlock_irqrestore(&spi->lock, flags);
+ 		return IRQ_NONE;
+@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
+ 			 ~clrb) | setb,
+ 			spi->base + spi->cfg->regs->cpol.reg);
+ 
+-	stm32_spi_enable(spi);
+-
+ 	spin_unlock_irqrestore(&spi->lock, flags);
+ 
+ 	return 0;
+@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
+ 
+ 	if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
+ 		spi_finalize_current_transfer(spi->ctrl);
++		stm32fx_spi_disable(spi);
+ 	}
+ }
+ 
+@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
+ 	struct stm32_spi *spi = data;
+ 
+ 	spi_finalize_current_transfer(spi->ctrl);
++	spi->cfg->disable(spi);
+ }
+ 
+ /**
+@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
+ 
+ 	stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
+ 
++	stm32_spi_enable(spi);
++
+ 	/* starting data transfer when buffer is loaded */
+ 	if (spi->tx_buf)
+ 		spi->cfg->write_tx(spi);
+@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
+ 
+ 	spin_lock_irqsave(&spi->lock, flags);
+ 
++	stm32_spi_enable(spi);
++
+ 	/* Be sure to have data in fifo before starting data transfer */
+ 	if (spi->tx_buf)
+ 		stm32h7_spi_write_txfifo(spi);
+@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
+ 		 */
+ 		stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
+ 	}
++
++	stm32_spi_enable(spi);
+ }
+ 
+ /**
+@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+ 
+ 	stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
+ 
++	stm32_spi_enable(spi);
++
+ 	if (STM32_SPI_HOST_MODE(spi))
+ 		stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
+ }
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index a2c467d9e92f5..2cea7aeb10f95 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1242,6 +1242,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ 	else
+ 		rx_dev = ctlr->dev.parent;
+ 
++	ret = -ENOMSG;
+ 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ 		/* The sync is done before each transfer. */
+ 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+@@ -1271,6 +1272,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ 			}
+ 		}
+ 	}
++	/* No transfer has been mapped, bail out with success */
++	if (ret)
++		return 0;
+ 
+ 	ctlr->cur_rx_dma_dev = rx_dev;
+ 	ctlr->cur_tx_dma_dev = tx_dev;
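
The __spi_map_msg() fix seeds ret with -ENOMSG before the loop; the sentinel survives only if no transfer was DMA-mapped, in which case the function now returns success early instead of recording stale DMA devices. The sentinel pattern in isolation:

#include <errno.h>
#include <stdio.h>

static int map_transfers(const int *lens, int n)
{
	int ret = -ENOMSG;	/* survives only if the loop maps nothing */

	for (int i = 0; i < n; i++) {
		if (lens[i] == 0)
			continue;	/* this transfer needs no mapping */
		ret = 0;		/* at least one transfer was mapped */
	}
	if (ret)
		return 0;	/* nothing mapped: skip the bookkeeping, succeed */

	/* ...record the DMA devices for later unmapping here... */
	return 0;
}

int main(void)
{
	int none[] = { 0, 0 }, some[] = { 0, 16 };

	printf("%d %d\n", map_transfers(none, 2), map_transfers(some, 2));	/* 0 0 */
	return 0;
}
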
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index 9ed1180fe31f1..937c15324513f 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1462,8 +1462,8 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
+ 	 */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ 	core = devm_ioremap(&ctrl->dev, res->start, resource_size(res));
+-	if (IS_ERR(core))
+-		return PTR_ERR(core);
++	if (!core)
++		return -ENOMEM;
+ 
+ 	pmic_arb->core_size = resource_size(res);
+ 
+@@ -1495,15 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
+ 						   "obsrvr");
+ 		pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start,
+ 						 resource_size(res));
+-		if (IS_ERR(pmic_arb->rd_base))
+-			return PTR_ERR(pmic_arb->rd_base);
++		if (!pmic_arb->rd_base)
++			return -ENOMEM;
+ 
+ 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ 						   "chnls");
+ 		pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start,
+ 						 resource_size(res));
+-		if (IS_ERR(pmic_arb->wr_base))
+-			return PTR_ERR(pmic_arb->wr_base);
++		if (!pmic_arb->wr_base)
++			return -ENOMEM;
+ 	}
+ 
+ 	pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS;
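
The spmi-pmic-arb fix corrects a classic mix-up between the kernel's two pointer-error conventions: devm_ioremap() reports failure with a NULL return, not an ERR_PTR()-encoded errno, so the old IS_ERR() checks could never fire and mapping failures were silently ignored. The two conventions side by side, as a kernel-style fragment:

/* NULL-on-failure convention (devm_ioremap and friends): */
base = devm_ioremap(dev, res->start, resource_size(res));
if (!base)
	return -ENOMEM;

/* ERR_PTR convention (e.g. devm_ioremap_resource()): */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);
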
+diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
+index 8541995008da8..aa6f266b62a14 100644
+--- a/drivers/staging/greybus/arche-apb-ctrl.c
++++ b/drivers/staging/greybus/arche-apb-ctrl.c
+@@ -466,6 +466,7 @@ static const struct of_device_id arche_apb_ctrl_of_match[] = {
+ 	{ .compatible = "usbffff,2", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match);
+ 
+ static struct platform_driver arche_apb_ctrl_device_driver = {
+ 	.probe		= arche_apb_ctrl_probe,
+diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
+index 891b75327d7f7..b33977ccd5271 100644
+--- a/drivers/staging/greybus/arche-platform.c
++++ b/drivers/staging/greybus/arche-platform.c
+@@ -619,14 +619,7 @@ static const struct of_device_id arche_platform_of_match[] = {
+ 	{ .compatible = "google,arche-platform", },
+ 	{ },
+ };
+-
+-static const struct of_device_id arche_combined_id[] = {
+-	/* Use PID/VID of SVC device */
+-	{ .compatible = "google,arche-platform", },
+-	{ .compatible = "usbffff,2", },
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(of, arche_combined_id);
++MODULE_DEVICE_TABLE(of, arche_platform_of_match);
+ 
+ static struct platform_driver arche_platform_device_driver = {
+ 	.probe		= arche_platform_probe,
+diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
+index a5c2fe963866d..00360f4a04854 100644
+--- a/drivers/staging/greybus/light.c
++++ b/drivers/staging/greybus/light.c
+@@ -142,6 +142,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
+ 		channel = get_channel_from_mode(channel->light,
+ 						GB_CHANNEL_MODE_TORCH);
+ 
++	if (!channel)
++		return -EINVAL;
++
+ 	/* For not flash we need to convert brightness to intensity */
+ 	intensity = channel->intensity_uA.min +
+ 			(channel->intensity_uA.step * channel->led->brightness);
+@@ -528,7 +531,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light)
+ 	}
+ 
+ 	channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
+-	WARN_ON(!channel_flash);
++	if (!channel_flash) {
++		dev_err(dev, "failed to get flash channel from mode\n");
++		return -EINVAL;
++	}
+ 
+ 	fled = &channel_flash->fled;
+ 
+diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
+index 5efb2b593be33..3d2b83d6ab51a 100644
+--- a/drivers/tty/serial/max3100.c
++++ b/drivers/tty/serial/max3100.c
+@@ -45,6 +45,9 @@
+ #include <linux/freezer.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
++#include <linux/types.h>
++
++#include <asm/unaligned.h>
+ 
+ #include <linux/serial_max3100.h>
+ 
+@@ -191,7 +194,7 @@ static void max3100_timeout(struct timer_list *t)
+ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ {
+ 	struct spi_message message;
+-	u16 etx, erx;
++	__be16 etx, erx;
+ 	int status;
+ 	struct spi_transfer tran = {
+ 		.tx_buf = &etx,
+@@ -213,7 +216,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
+ 	return 0;
+ }
+ 
+-static int max3100_handlerx(struct max3100_port *s, u16 rx)
++static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx)
+ {
+ 	unsigned int status = 0;
+ 	int ret = 0, cts;
+@@ -254,6 +257,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx)
+ 	return ret;
+ }
+ 
++static int max3100_handlerx(struct max3100_port *s, u16 rx)
++{
++	unsigned long flags;
++	int ret;
++
++	uart_port_lock_irqsave(&s->port, &flags);
++	ret = max3100_handlerx_unlocked(s, rx);
++	uart_port_unlock_irqrestore(&s->port, flags);
++	return ret;
++}
++
+ static void max3100_work(struct work_struct *w)
+ {
+ 	struct max3100_port *s = container_of(w, struct max3100_port, work);
+@@ -738,13 +752,14 @@ static int max3100_probe(struct spi_device *spi)
+ 	mutex_lock(&max3100s_lock);
+ 
+ 	if (!uart_driver_registered) {
+-		uart_driver_registered = 1;
+ 		retval = uart_register_driver(&max3100_uart_driver);
+ 		if (retval) {
+ 			printk(KERN_ERR "Couldn't register max3100 uart driver\n");
+ 			mutex_unlock(&max3100s_lock);
+ 			return retval;
+ 		}
++
++		uart_driver_registered = 1;
+ 	}
+ 
+ 	for (i = 0; i < MAX_MAX3100; i++)
+@@ -830,6 +845,7 @@ static void max3100_remove(struct spi_device *spi)
+ 		}
+ 	pr_debug("removing max3100 driver\n");
+ 	uart_unregister_driver(&max3100_uart_driver);
++	uart_driver_registered = 0;
+ 
+ 	mutex_unlock(&max3100s_lock);
+ }
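
Two independent max3100 fixes land here: the receive path gains a locked wrapper so the port state touched in max3100_handlerx_unlocked() is always reached under the uart port lock, and the SPI shift buffers become __be16 because the chip clocks its 16-bit word out MSB first. The endianness half, sketched in userspace with the POSIX big-endian helpers:

#include <arpa/inet.h>	/* htons()/ntohs(): host to/from big-endian 16-bit */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tx = 0xC001;		/* a 16-bit command word */
	uint16_t wire = htons(tx);	/* big-endian, as shifted out MSB first */
	const uint8_t *b = (const uint8_t *)&wire;

	printf("first byte on the wire: 0x%02x\n", b[0]);	/* 0xc0 on any host */
	printf("round trip: 0x%04x\n", ntohs(wire));		/* 0xc001 */
	return 0;
}
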
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 12915fffac279..ace2c4b333acc 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/property.h>
+ #include <linux/regmap.h>
++#include <linux/sched.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+ #include <linux/tty.h>
+@@ -25,7 +26,6 @@
+ #include <linux/spi/spi.h>
+ #include <linux/uaccess.h>
+ #include <linux/units.h>
+-#include <uapi/linux/sched/types.h>
+ 
+ #define SC16IS7XX_NAME			"sc16is7xx"
+ #define SC16IS7XX_MAX_DEVS		8
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index e512eaa57ed56..a6f3517dce749 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1271,9 +1271,14 @@ static void sci_dma_rx_chan_invalidate(struct sci_port *s)
+ static void sci_dma_rx_release(struct sci_port *s)
+ {
+ 	struct dma_chan *chan = s->chan_rx_saved;
++	struct uart_port *port = &s->port;
++	unsigned long flags;
+ 
++	uart_port_lock_irqsave(port, &flags);
+ 	s->chan_rx_saved = NULL;
+ 	sci_dma_rx_chan_invalidate(s);
++	uart_port_unlock_irqrestore(port, flags);
++
+ 	dmaengine_terminate_sync(chan);
+ 	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
+ 			  sg_dma_address(&s->sg_rx[0]));
+diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
+index 958fc40eae86b..0655afe7f9779 100644
+--- a/drivers/usb/fotg210/fotg210-core.c
++++ b/drivers/usb/fotg210/fotg210-core.c
+@@ -95,6 +95,7 @@ static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
+ 
+ /**
+  * fotg210_vbus() - Called by gadget driver to enable/disable VBUS
++ * @fotg: pointer to a private fotg210 object
+  * @enable: true to enable VBUS, false to disable VBUS
+  */
+ void fotg210_vbus(struct fotg210 *fotg, bool enable)
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 4a42574b4a7fe..ec1dceb087293 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -57,13 +57,13 @@ struct uac_rtd_params {
+ 
+   /* Volume/Mute controls and their state */
+   int fu_id; /* Feature Unit ID */
+-  struct snd_kcontrol *snd_kctl_volume;
+-  struct snd_kcontrol *snd_kctl_mute;
++  struct snd_ctl_elem_id snd_kctl_volume_id;
++  struct snd_ctl_elem_id snd_kctl_mute_id;
+   s16 volume_min, volume_max, volume_res;
+   s16 volume;
+   int mute;
+ 
+-	struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */
++	struct snd_ctl_elem_id snd_kctl_rate_id; /* read-only current rate */
+ 	int srate; /* selected samplerate */
+ 	int active; /* playback/capture running */
+ 
+@@ -494,14 +494,13 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
+ static void set_active(struct uac_rtd_params *prm, bool active)
+ {
+ 	// notifying through the Rate ctrl
+-	struct snd_kcontrol *kctl = prm->snd_kctl_rate;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&prm->lock, flags);
+ 	if (prm->active != active) {
+ 		prm->active = active;
+ 		snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				&kctl->id);
++				&prm->snd_kctl_rate_id);
+ 	}
+ 	spin_unlock_irqrestore(&prm->lock, flags);
+ }
+@@ -807,7 +806,7 @@ int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val)
+ 
+ 	if (change)
+ 		snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-				&prm->snd_kctl_volume->id);
++				&prm->snd_kctl_volume_id);
+ 
+ 	return 0;
+ }
+@@ -856,7 +855,7 @@ int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val)
+ 
+ 	if (change)
+ 		snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
+-			       &prm->snd_kctl_mute->id);
++			       &prm->snd_kctl_mute_id);
+ 
+ 	return 0;
+ }
+@@ -1331,7 +1330,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				goto snd_fail;
+-			prm->snd_kctl_mute = kctl;
++			prm->snd_kctl_mute_id = kctl->id;
+ 			prm->mute = 0;
+ 		}
+ 
+@@ -1359,7 +1358,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 			err = snd_ctl_add(card, kctl);
+ 			if (err < 0)
+ 				goto snd_fail;
+-			prm->snd_kctl_volume = kctl;
++			prm->snd_kctl_volume_id = kctl->id;
+ 			prm->volume = fu->volume_max;
+ 			prm->volume_max = fu->volume_max;
+ 			prm->volume_min = fu->volume_min;
+@@ -1383,7 +1382,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+ 		err = snd_ctl_add(card, kctl);
+ 		if (err < 0)
+ 			goto snd_fail;
+-		prm->snd_kctl_rate = kctl;
++		prm->snd_kctl_rate_id = kctl->id;
+ 	}
+ 
+ 	strscpy(card->driver, card_name, sizeof(card->driver));
+@@ -1420,6 +1419,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
+ 		return;
+ 
+ 	uac = g_audio->uac;
++	g_audio->uac = NULL;
++
+ 	card = uac->card;
+ 	if (card)
+ 		snd_card_free_when_closed(card);
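
The u_audio hunks above swap cached struct snd_kcontrol pointers for
by-value copies of struct snd_ctl_elem_id, so a later snd_ctl_notify()
never dereferences a control that the card may already have freed. A
minimal userspace sketch of the same ownership pattern; the struct and
function names below are illustrative, not ALSA API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem_id { int numid; char name[32]; };
struct kctl    { struct elem_id id; };
struct params  { struct elem_id rate_id; };	/* id cached by value */

static void notify(const struct elem_id *id)
{
	printf("notify ctl %d (%s)\n", id->numid, id->name);
}

int main(void)
{
	struct kctl *k = malloc(sizeof(*k));
	struct params prm;

	if (!k)
		return 1;
	k->id.numid = 7;
	strcpy(k->id.name, "Capture Rate");
	prm.rate_id = k->id;	/* copy the id, not a pointer to the control */
	free(k);		/* control object goes away at card teardown */
	notify(&prm.rate_id);	/* still safe: we own our copy */
	return 0;
}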
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 69dd866698833..990008aebe8fd 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2269,24 +2269,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+ }
+ 
+ static struct xhci_interrupter *
+-xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
++xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
+ {
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	struct xhci_interrupter *ir;
+-	unsigned int num_segs = segs;
++	unsigned int max_segs;
+ 	int ret;
+ 
++	if (!segs)
++		segs = ERST_DEFAULT_SEGS;
++
++	max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
++	segs = min(segs, max_segs);
++
+ 	ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
+ 	if (!ir)
+ 		return NULL;
+ 
+-	/* number of ring segments should be greater than 0 */
+-	if (segs <= 0)
+-		num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
+-			 ERST_MAX_SEGS);
+-
+-	ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
+-					 flags);
++	ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+ 	if (!ir->event_ring) {
+ 		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ 		kfree(ir);
+@@ -2344,7 +2344,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ }
+ 
+ struct xhci_interrupter *
+-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
++xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
+ {
+ 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ 	struct xhci_interrupter *ir;
+@@ -2354,7 +2354,7 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
+ 	if (!xhci->interrupters || xhci->max_interrupters <= 1)
+ 		return NULL;
+ 
+-	ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
++	ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
+ 	if (!ir)
+ 		return NULL;
+ 
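
The reworked xhci_alloc_interrupter() validates the requested segment
count up front: zero selects ERST_DEFAULT_SEGS, and anything above the
controller's limit is clamped to BIT(HCS_ERST_MAX(hcs_params2)). A
standalone sketch of that clamp; the HCS_ERST_MAX field extraction
matches xhci.h, while the sample hcs_params2 value is made up:

#include <stdio.h>

#define ERST_DEFAULT_SEGS 2
#define HCS_ERST_MAX(p)   (((p) >> 4) & 0xf)	/* 2^n max ERST segments */

static unsigned int clamp_segs(unsigned int segs, unsigned int hcs_params2)
{
	unsigned int max_segs = 1u << HCS_ERST_MAX(hcs_params2);

	if (!segs)
		segs = ERST_DEFAULT_SEGS;
	return segs < max_segs ? segs : max_segs;
}

int main(void)
{
	unsigned int p2 = 0x20;	/* ERST Max field = 2, so at most 4 segments */

	/* prints "2 3 4": default, pass-through, clamped */
	printf("%u %u %u\n", clamp_segs(0, p2), clamp_segs(3, p2),
	       clamp_segs(64, p2));
	return 0;
}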
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6f4bf98a62824..31566e82bbd39 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1392,8 +1392,8 @@ struct urb_priv {
+ 	struct	xhci_td	td[] __counted_by(num_tds);
+ };
+ 
+-/* Reasonable limit for number of Event Ring segments (spec allows 32k) */
+-#define	ERST_MAX_SEGS	2
++/* Number of Event Ring segments to allocate, when amount is not specified. (spec allows 32k) */
++#define	ERST_DEFAULT_SEGS	2
+ /* Poll every 60 seconds */
+ #define	POLL_TIMEOUT	60
+ /* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
+@@ -1833,7 +1833,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+ void xhci_free_container_ctx(struct xhci_hcd *xhci,
+ 		struct xhci_container_ctx *ctx);
+ struct xhci_interrupter *
+-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg);
++xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs);
+ void xhci_remove_secondary_interrupter(struct usb_hcd
+ 				       *hcd, struct xhci_interrupter *ir);
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index bd6ae92aa39e7..7801501837b69 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -619,7 +619,8 @@ static int ucsi_read_pdos(struct ucsi_connector *con,
+ 	u64 command;
+ 	int ret;
+ 
+-	if (ucsi->quirks & UCSI_NO_PARTNER_PDOS)
++	if (is_partner &&
++	    ucsi->quirks & UCSI_NO_PARTNER_PDOS)
+ 		return 0;
+ 
+ 	command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
+@@ -823,12 +824,6 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+ 			return PTR_ERR(cap);
+ 
+ 		con->partner_source_caps = cap;
+-
+-		ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+-		if (ret) {
+-			usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+-			return ret;
+-		}
+ 	}
+ 
+ 	ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
+@@ -843,15 +838,9 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+ 			return PTR_ERR(cap);
+ 
+ 		con->partner_sink_caps = cap;
+-
+-		ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+-		if (ret) {
+-			usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+-			return ret;
+-		}
+ 	}
+ 
+-	return 0;
++	return typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ }
+ 
+ static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
+@@ -1572,7 +1561,6 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ 		}
+ 
+ 		con->port_source_caps = pd_cap;
+-		typec_port_set_usb_power_delivery(con->port, con->pd);
+ 	}
+ 
+ 	memset(&pd_caps, 0, sizeof(pd_caps));
+@@ -1589,9 +1577,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ 		}
+ 
+ 		con->port_sink_caps = pd_cap;
+-		typec_port_set_usb_power_delivery(con->port, con->pd);
+ 	}
+ 
++	typec_port_set_usb_power_delivery(con->port, con->pd);
++
+ 	/* Alternate modes */
+ 	ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
+ 	if (ret) {
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index fb5392b749fff..e80c5d75b5419 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -277,8 +277,10 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
+ 		return -ENOMEM;
+ 
+ 	ctx = vfio_irq_ctx_alloc(vdev, 0);
+-	if (!ctx)
++	if (!ctx) {
++		kfree(name);
+ 		return -ENOMEM;
++	}
+ 
+ 	ctx->name = name;
+ 	ctx->trigger = trigger;
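
The vfio_pci_intrs hunk is a classic error-path leak fix: name is
allocated before the ctx, so a failed ctx allocation has to release it
on the way out. The general shape as a plain-C sketch; setup() and its
types are invented for illustration:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct ctx { char *name; };

static int setup(struct ctx **out, const char *dev)
{
	char *name = strdup(dev);
	struct ctx *ctx;

	if (!name)
		return -ENOMEM;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		free(name);	/* the fix: release what we already own */
		return -ENOMEM;
	}

	ctx->name = name;	/* ownership transferred only on success */
	*out = ctx;
	return 0;
}

int main(void)
{
	struct ctx *c;

	if (!setup(&c, "vfio-intx(0000:00:01.0)")) {
		free(c->name);
		free(c);
	}
	return 0;
}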
+diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
+index c80a1481e742b..4e98e60417d23 100644
+--- a/drivers/video/backlight/mp3309c.c
++++ b/drivers/video/backlight/mp3309c.c
+@@ -205,8 +205,9 @@ static int mp3309c_parse_fwnode(struct mp3309c_chip *chip,
+ 				struct mp3309c_platform_data *pdata)
+ {
+ 	int ret, i;
+-	unsigned int num_levels, tmp_value;
++	unsigned int tmp_value;
+ 	struct device *dev = chip->dev;
++	int num_levels;
+ 
+ 	if (!dev_fwnode(dev))
+ 		return dev_err_probe(dev, -ENODEV, "failed to get firmware node\n");
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 1f5b3dd31fcfc..89bc8da80519f 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -450,7 +450,7 @@ static void start_update_balloon_size(struct virtio_balloon *vb)
+ 	vb->adjustment_signal_pending = true;
+ 	if (!vb->adjustment_in_progress) {
+ 		vb->adjustment_in_progress = true;
+-		pm_stay_awake(vb->vdev->dev.parent);
++		pm_stay_awake(&vb->vdev->dev);
+ 	}
+ 	spin_unlock_irqrestore(&vb->adjustment_lock, flags);
+ 
+@@ -462,7 +462,7 @@ static void end_update_balloon_size(struct virtio_balloon *vb)
+ 	spin_lock_irq(&vb->adjustment_lock);
+ 	if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
+ 		vb->adjustment_in_progress = false;
+-		pm_relax(vb->vdev->dev.parent);
++		pm_relax(&vb->vdev->dev);
+ 	}
+ 	spin_unlock_irq(&vb->adjustment_lock);
+ }
+@@ -1029,6 +1029,15 @@ static int virtballoon_probe(struct virtio_device *vdev)
+ 
+ 	spin_lock_init(&vb->adjustment_lock);
+ 
++	/*
++	 * The virtio balloon itself can't wake up the device, but it is
++	 * responsible for processing wakeup events passed up from the transport
++	 * layer. Wakeup sources don't support nesting/chaining calls, so we use
++	 * our own wakeup source to ensure wakeup events are properly handled
++	 * without trampling on the transport layer's wakeup source.
++	 */
++	device_set_wakeup_capable(&vb->vdev->dev, true);
++
+ 	virtio_device_ready(vdev);
+ 
+ 	if (towards_target(vb))
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index b655fccaf7733..584af7816532b 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -348,8 +348,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
+ 				  vring_interrupt, 0,
+ 				  vp_dev->msix_names[msix_vec],
+ 				  vqs[i]);
+-		if (err)
++		if (err) {
++			vp_del_vq(vqs[i]);
+ 			goto error_find;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
+index 4a20e07fbb699..f00ea1b4e40b6 100644
+--- a/drivers/watchdog/bd9576_wdt.c
++++ b/drivers/watchdog/bd9576_wdt.c
+@@ -29,7 +29,6 @@ struct bd9576_wdt_priv {
+ 	struct gpio_desc	*gpiod_en;
+ 	struct device		*dev;
+ 	struct regmap		*regmap;
+-	bool			always_running;
+ 	struct watchdog_device	wdd;
+ };
+ 
+@@ -62,10 +61,7 @@ static int bd9576_wdt_stop(struct watchdog_device *wdd)
+ {
+ 	struct bd9576_wdt_priv *priv = watchdog_get_drvdata(wdd);
+ 
+-	if (!priv->always_running)
+-		bd9576_wdt_disable(priv);
+-	else
+-		set_bit(WDOG_HW_RUNNING, &wdd->status);
++	bd9576_wdt_disable(priv);
+ 
+ 	return 0;
+ }
+@@ -264,9 +260,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	priv->always_running = device_property_read_bool(dev->parent,
+-							 "always-running");
+-
+ 	watchdog_set_drvdata(&priv->wdd, priv);
+ 
+ 	priv->wdd.info			= &bd957x_wdt_ident;
+@@ -281,9 +274,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
+ 
+ 	watchdog_stop_on_reboot(&priv->wdd);
+ 
+-	if (priv->always_running)
+-		bd9576_wdt_start(&priv->wdd);
+-
+ 	return devm_watchdog_register_device(dev, &priv->wdd);
+ }
+ 
+diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
+index 688b112e712ba..9f279c0e13a66 100644
+--- a/drivers/watchdog/cpu5wdt.c
++++ b/drivers/watchdog/cpu5wdt.c
+@@ -252,7 +252,7 @@ static void cpu5wdt_exit(void)
+ 	if (cpu5wdt_device.queue) {
+ 		cpu5wdt_device.queue = 0;
+ 		wait_for_completion(&cpu5wdt_device.stop);
+-		del_timer(&cpu5wdt_device.timer);
++		timer_shutdown_sync(&cpu5wdt_device.timer);
+ 	}
+ 
+ 	misc_deregister(&cpu5wdt_misc);
+diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
+index 5d2df008b92a5..34a917221e316 100644
+--- a/drivers/watchdog/sa1100_wdt.c
++++ b/drivers/watchdog/sa1100_wdt.c
+@@ -191,9 +191,8 @@ static int sa1100dog_probe(struct platform_device *pdev)
+ 	if (!res)
+ 		return -ENXIO;
+ 	reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+-	ret = PTR_ERR_OR_ZERO(reg_base);
+-	if (ret)
+-		return ret;
++	if (!reg_base)
++		return -ENOMEM;
+ 
+ 	clk = clk_get(NULL, "OSTIMER0");
+ 	if (IS_ERR(clk)) {
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 3205e5d724c8c..1a9ded0cddcb0 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -65,13 +65,17 @@
+ #include "xenbus.h"
+ 
+ 
+-static int xs_init_irq;
++static int xs_init_irq = -1;
+ int xen_store_evtchn;
+ EXPORT_SYMBOL_GPL(xen_store_evtchn);
+ 
+ struct xenstore_domain_interface *xen_store_interface;
+ EXPORT_SYMBOL_GPL(xen_store_interface);
+ 
++#define XS_INTERFACE_READY \
++	((xen_store_interface != NULL) && \
++	 (xen_store_interface->connection == XENSTORE_CONNECTED))
++
+ enum xenstore_init xen_store_domain_type;
+ EXPORT_SYMBOL_GPL(xen_store_domain_type);
+ 
+@@ -751,19 +755,19 @@ static void xenbus_probe(void)
+ {
+ 	xenstored_ready = 1;
+ 
+-	if (!xen_store_interface) {
++	if (!xen_store_interface)
+ 		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ 					       XEN_PAGE_SIZE, MEMREMAP_WB);
+-		/*
+-		 * Now it is safe to free the IRQ used for xenstore late
+-		 * initialization. No need to unbind: it is about to be
+-		 * bound again from xb_init_comms. Note that calling
+-		 * unbind_from_irqhandler now would result in xen_evtchn_close()
+-		 * being called and the event channel not being enabled again
+-		 * afterwards, resulting in missed event notifications.
+-		 */
++	/*
++	 * Now it is safe to free the IRQ used for xenstore late
++	 * initialization. No need to unbind: it is about to be
++	 * bound again from xb_init_comms. Note that calling
++	 * unbind_from_irqhandler now would result in xen_evtchn_close()
++	 * being called and the event channel not being enabled again
++	 * afterwards, resulting in missed event notifications.
++	 */
++	if (xs_init_irq >= 0)
+ 		free_irq(xs_init_irq, &xb_waitq);
+-	}
+ 
+ 	/*
+ 	 * In the HVM case, xenbus_init() deferred its call to
+@@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void)
+ 	if (xen_store_domain_type == XS_PV ||
+ 	    (xen_store_domain_type == XS_HVM &&
+ 	     !xs_hvm_defer_init_for_callback() &&
+-	     xen_store_interface != NULL))
++	     XS_INTERFACE_READY))
+ 		xenbus_probe();
+ 
+ 	/*
+@@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void)
+ 	 * started, then probe.  It will be triggered when communication
+ 	 * starts happening, by waiting on xb_waitq.
+ 	 */
+-	if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
++	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
+ 		struct task_struct *probe_task;
+ 
+ 		probe_task = kthread_run(xenbus_probe_thread, NULL,
+@@ -1014,6 +1018,12 @@ static int __init xenbus_init(void)
+ 			xen_store_interface =
+ 				memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+ 					 XEN_PAGE_SIZE, MEMREMAP_WB);
++			if (!xen_store_interface) {
++				pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
++				       __func__, v);
++				err = -EINVAL;
++				goto out_error;
++			}
+ 			if (xen_store_interface->connection != XENSTORE_CONNECTED)
+ 				wait = true;
+ 		}
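
The xenbus change stops treating IRQ 0 as "nothing bound": xs_init_irq
now starts at -1 and free_irq() only runs once an interrupt was really
requested. A generic sketch of the sentinel pattern; bind() and
release() are stand-ins, not Xen APIs:

#include <stdio.h>

static int init_irq = -1;	/* -1 = never bound; 0 can be a valid IRQ */

static void bind(int irq)
{
	init_irq = irq;
}

static void release(void)
{
	if (init_irq >= 0) {	/* guard: only free what was bound */
		printf("free_irq(%d)\n", init_irq);
		init_irq = -1;
	}
}

int main(void)
{
	release();	/* no-op: nothing bound yet */
	bind(0);
	release();	/* frees IRQ 0, which a plain "if (irq)" would skip */
	return 0;
}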
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index d9494b5fc7c18..5f77f9df24760 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -3896,15 +3896,14 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ 	struct address_space *mapping = swap_file->f_mapping;
+ 	struct inode *inode = mapping->host;
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	sector_t cur_lblock;
+-	sector_t last_lblock;
+-	sector_t pblock;
+-	sector_t lowest_pblock = -1;
+-	sector_t highest_pblock = 0;
++	block_t cur_lblock;
++	block_t last_lblock;
++	block_t pblock;
++	block_t lowest_pblock = -1;
++	block_t highest_pblock = 0;
+ 	int nr_extents = 0;
+-	unsigned long nr_pblocks;
++	unsigned int nr_pblocks;
+ 	unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
+-	unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
+ 	unsigned int not_aligned = 0;
+ 	int ret = 0;
+ 
+@@ -3942,8 +3941,8 @@ static int check_swap_activate(struct swap_info_struct *sis,
+ 		pblock = map.m_pblk;
+ 		nr_pblocks = map.m_len;
+ 
+-		if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
+-				nr_pblocks & sec_blks_mask ||
++		if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
++				nr_pblocks % blks_per_sec ||
+ 				!f2fs_valid_pinned_area(sbi, pblock)) {
+ 			bool last_extent = false;
+ 
+@@ -4185,7 +4184,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+ 		return -EINVAL;
+ 
+-	if (map.m_pblk != NULL_ADDR) {
++	if (map.m_flags & F2FS_MAP_MAPPED) {
+ 		iomap->length = blks_to_bytes(inode, map.m_len);
+ 		iomap->type = IOMAP_MAPPED;
+ 		iomap->flags |= IOMAP_F_MERGED;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index fced2b7652f40..07b3675ea1694 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2309,7 +2309,7 @@ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
+ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ 				 struct inode *inode, blkcnt_t *count, bool partial)
+ {
+-	blkcnt_t diff = 0, release = 0;
++	long long diff = 0, release = 0;
+ 	block_t avail_user_block_count;
+ 	int ret;
+ 
+@@ -2329,26 +2329,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+ 
+ 	spin_lock(&sbi->stat_lock);
+-	sbi->total_valid_block_count += (block_t)(*count);
+-	avail_user_block_count = get_available_block_count(sbi, inode, true);
+ 
+-	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
++	avail_user_block_count = get_available_block_count(sbi, inode, true);
++	diff = (long long)sbi->total_valid_block_count + *count -
++						avail_user_block_count;
++	if (unlikely(diff > 0)) {
+ 		if (!partial) {
+ 			spin_unlock(&sbi->stat_lock);
++			release = *count;
+ 			goto enospc;
+ 		}
+-
+-		diff = sbi->total_valid_block_count - avail_user_block_count;
+ 		if (diff > *count)
+ 			diff = *count;
+ 		*count -= diff;
+ 		release = diff;
+-		sbi->total_valid_block_count -= diff;
+ 		if (!*count) {
+ 			spin_unlock(&sbi->stat_lock);
+ 			goto enospc;
+ 		}
+ 	}
++	sbi->total_valid_block_count += (block_t)(*count);
++
+ 	spin_unlock(&sbi->stat_lock);
+ 
+ 	if (unlikely(release)) {
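
The inc_valid_block_count() rework computes the prospective overshoot
in a signed 64-bit temporary before committing anything to
total_valid_block_count, instead of adding first and then comparing
unsigned values that may already have wrapped. A small sketch of the
pattern; the partial-release handling of the real function is omitted
and the quota numbers are arbitrary:

#include <stdio.h>

typedef unsigned int block_t;

static int reserve(block_t *total, block_t avail, block_t count)
{
	long long diff = (long long)*total + count - avail;

	if (diff > 0)
		return -1;	/* would exceed the quota: reject */
	*total += count;	/* commit only once we know it fits */
	return 0;
}

int main(void)
{
	block_t total = 0xfffffff0u, avail = 0xfffffff8u;

	/* add-then-check would wrap total to 0x10 and wrongly pass */
	printf("%d\n", reserve(&total, avail, 0x20));	/* prints -1 */
	return 0;
}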
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 1761ad125f97a..208dedc161d53 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -952,9 +952,14 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ 				  ATTR_GID | ATTR_TIMES_SET))))
+ 		return -EPERM;
+ 
+-	if ((attr->ia_valid & ATTR_SIZE) &&
+-		!f2fs_is_compress_backend_ready(inode))
+-		return -EOPNOTSUPP;
++	if ((attr->ia_valid & ATTR_SIZE)) {
++		if (!f2fs_is_compress_backend_ready(inode))
++			return -EOPNOTSUPP;
++		if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
++			!IS_ALIGNED(attr->ia_size,
++			F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
++			return -EINVAL;
++	}
+ 
+ 	err = setattr_prepare(idmap, dentry, attr);
+ 	if (err)
+@@ -1325,6 +1330,9 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ 				f2fs_put_page(psrc, 1);
+ 				return PTR_ERR(pdst);
+ 			}
++
++			f2fs_wait_on_page_writeback(pdst, DATA, true, true);
++
+ 			memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
+ 			set_page_dirty(pdst);
+ 			set_page_private_gcing(pdst);
+@@ -1817,15 +1825,6 @@ static long f2fs_fallocate(struct file *file, int mode,
+ 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
+ 		return -EOPNOTSUPP;
+ 
+-	/*
+-	 * Pinned file should not support partial truncation since the block
+-	 * can be used by applications.
+-	 */
+-	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
+-		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+-			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
+-		return -EOPNOTSUPP;
+-
+ 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
+ 			FALLOC_FL_INSERT_RANGE))
+@@ -1833,6 +1832,17 @@ static long f2fs_fallocate(struct file *file, int mode,
+ 
+ 	inode_lock(inode);
+ 
++	/*
++	 * Pinned file should not support partial truncation since the block
++	 * can be used by applications.
++	 */
++	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
++		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
++			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	ret = file_modified(file);
+ 	if (ret)
+ 		goto out;
+@@ -2837,7 +2847,8 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ 			goto out;
+ 	}
+ 
+-	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
++	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
++		f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
+ 		ret = -EOPNOTSUPP;
+ 		goto out_unlock;
+ 	}
+@@ -3522,9 +3533,6 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	if (f2fs_readonly(sbi->sb))
+ 		return -EROFS;
+ 
+@@ -3543,7 +3551,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3570,9 +3579,12 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 		struct dnode_of_data dn;
+ 		pgoff_t end_offset, count;
+ 
++		f2fs_lock_op(sbi);
++
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ 		if (ret) {
++			f2fs_unlock_op(sbi);
+ 			if (ret == -ENOENT) {
+ 				page_idx = f2fs_get_next_page_offset(&dn,
+ 								page_idx);
+@@ -3590,6 +3602,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 		f2fs_put_dnode(&dn);
+ 
++		f2fs_unlock_op(sbi);
++
+ 		if (ret < 0)
+ 			break;
+ 
+@@ -3641,7 +3655,8 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 
+ 	while (count) {
+ 		int compr_blocks = 0;
+-		blkcnt_t reserved;
++		blkcnt_t reserved = 0;
++		blkcnt_t to_reserved;
+ 		int ret;
+ 
+ 		for (i = 0; i < cluster_size; i++) {
+@@ -3661,20 +3676,26 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 			 * fails in release_compress_blocks(), so NEW_ADDR
+ 			 * is a possible case.
+ 			 */
+-			if (blkaddr == NEW_ADDR ||
+-				__is_valid_data_blkaddr(blkaddr)) {
++			if (blkaddr == NEW_ADDR) {
++				reserved++;
++				continue;
++			}
++			if (__is_valid_data_blkaddr(blkaddr)) {
+ 				compr_blocks++;
+ 				continue;
+ 			}
+ 		}
+ 
+-		reserved = cluster_size - compr_blocks;
++		to_reserved = cluster_size - compr_blocks - reserved;
+ 
+ 		/* for the case all blocks in cluster were reserved */
+-		if (reserved == 1)
++		if (to_reserved == 1) {
++			dn->ofs_in_node += cluster_size;
+ 			goto next;
++		}
+ 
+-		ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
++		ret = inc_valid_block_count(sbi, dn->inode,
++						&to_reserved, false);
+ 		if (unlikely(ret))
+ 			return ret;
+ 
+@@ -3685,7 +3706,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ 
+ 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+ 
+-		*reserved_blocks += reserved;
++		*reserved_blocks += to_reserved;
+ next:
+ 		count -= cluster_size;
+ 	}
+@@ -3704,9 +3725,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 	if (!f2fs_sb_has_compression(sbi))
+ 		return -EOPNOTSUPP;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	if (f2fs_readonly(sbi->sb))
+ 		return -EROFS;
+ 
+@@ -3718,7 +3736,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 	inode_lock(inode);
+ 
+-	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto unlock_inode;
+ 	}
+@@ -3735,9 +3754,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 		struct dnode_of_data dn;
+ 		pgoff_t end_offset, count;
+ 
++		f2fs_lock_op(sbi);
++
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ 		if (ret) {
++			f2fs_unlock_op(sbi);
+ 			if (ret == -ENOENT) {
+ 				page_idx = f2fs_get_next_page_offset(&dn,
+ 								page_idx);
+@@ -3755,6 +3777,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+ 
+ 		f2fs_put_dnode(&dn);
+ 
++		f2fs_unlock_op(sbi);
++
+ 		if (ret < 0)
+ 			break;
+ 
+@@ -4119,9 +4143,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ 	if (!(filp->f_mode & FMODE_WRITE))
+ 		return -EBADF;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+@@ -4132,7 +4153,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -4197,9 +4219,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ 	if (!(filp->f_mode & FMODE_WRITE))
+ 		return -EBADF;
+ 
+-	if (!f2fs_compressed_file(inode))
+-		return -EINVAL;
+-
+ 	f2fs_balance_fs(sbi, true);
+ 
+ 	file_start_write(filp);
+@@ -4210,7 +4229,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
+ 		goto out;
+ 	}
+ 
+-	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
++	if (!f2fs_compressed_file(inode) ||
++		is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 8852814dab7f6..e86c7f01539a7 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1554,10 +1554,15 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ 			int err;
+ 
+ 			inode = f2fs_iget(sb, dni.ino);
+-			if (IS_ERR(inode) || is_bad_inode(inode) ||
+-					special_file(inode->i_mode))
++			if (IS_ERR(inode))
+ 				continue;
+ 
++			if (is_bad_inode(inode) ||
++					special_file(inode->i_mode)) {
++				iput(inode);
++				continue;
++			}
++
+ 			err = f2fs_gc_pinned_control(inode, gc_type, segno);
+ 			if (err == -EAGAIN) {
+ 				iput(inode);
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index b3de6d6cdb021..7df5ad84cb5ea 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1319,6 +1319,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ 	}
+ 	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
+ 		err = -EFSCORRUPTED;
++		dec_valid_node_count(sbi, dn->inode, !ofs);
+ 		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ 		goto fail;
+@@ -1345,7 +1346,6 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+ 	if (ofs == 0)
+ 		inc_valid_inode_count(sbi);
+ 	return page;
+-
+ fail:
+ 	clear_node_page_dirty(page);
+ 	f2fs_put_page(page, 1);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 4fd76e867e0a2..6474b7338e811 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3559,6 +3559,8 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 	if (segment_full) {
+ 		if (type == CURSEG_COLD_DATA_PINNED &&
+ 		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
++			write_sum_page(sbi, curseg->sum_blk,
++					GET_SUM_BLOCK(sbi, curseg->segno));
+ 			reset_curseg_fields(curseg);
+ 			goto skip_new_segment;
+ 		}
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 3ec8bb5e68ff5..9eb191b5c4de1 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1813,7 +1813,8 @@ static void fuse_resend(struct fuse_conn *fc)
+ 	spin_unlock(&fc->lock);
+ 
+ 	list_for_each_entry_safe(req, next, &to_queue, list) {
+-		__set_bit(FR_PENDING, &req->flags);
++		set_bit(FR_PENDING, &req->flags);
++		clear_bit(FR_SENT, &req->flags);
+ 		/* mark the request as resend request */
+ 		req->in.h.unique |= FUSE_UNIQUE_RESEND;
+ 	}
+diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
+index 267b622d923b1..912ad0a1df021 100644
+--- a/fs/netfs/buffered_write.c
++++ b/fs/netfs/buffered_write.c
+@@ -163,7 +163,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ 	struct folio *folio;
+ 	enum netfs_how_to_modify howto;
+ 	enum netfs_folio_trace trace;
+-	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
++	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
+ 	ssize_t written = 0, ret, ret2;
+ 	loff_t i_size, pos = iocb->ki_pos, from, to;
+ 	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index ce8f8934bca51..569ae4ec60845 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -883,7 +883,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 						      NFS4_MAX_UINT64,
+ 						      IOMODE_READ,
+ 						      false,
+-						      GFP_KERNEL);
++						      nfs_io_gfp_mask());
+ 		if (IS_ERR(pgio->pg_lseg)) {
+ 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ 			pgio->pg_lseg = NULL;
+@@ -907,7 +907,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ 						      NFS4_MAX_UINT64,
+ 						      IOMODE_RW,
+ 						      false,
+-						      GFP_NOFS);
++						      nfs_io_gfp_mask());
+ 		if (IS_ERR(pgio->pg_lseg)) {
+ 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ 			pgio->pg_lseg = NULL;
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index d0a0956f8a134..cac1157be2c29 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -1112,9 +1112,12 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
+ 		ctx->acdirmax	= data->acdirmax;
+ 		ctx->need_mount	= false;
+ 
+-		memcpy(sap, &data->addr, sizeof(data->addr));
+-		ctx->nfs_server.addrlen = sizeof(data->addr);
+-		ctx->nfs_server.port = ntohs(data->addr.sin_port);
++		if (!is_remount_fc(fc)) {
++			memcpy(sap, &data->addr, sizeof(data->addr));
++			ctx->nfs_server.addrlen = sizeof(data->addr);
++			ctx->nfs_server.port = ntohs(data->addr.sin_port);
++		}
++
+ 		if (sap->ss_family != AF_INET ||
+ 		    !nfs_verify_server_address(sap))
+ 			goto out_no_address;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 662e86ea3a2dd..5b452411e8fdf 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -2116,6 +2116,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ {
+ 	struct nfs_client *clp = server->nfs_client;
+ 	struct nfs4_fs_locations *locations = NULL;
++	struct nfs_fattr *fattr;
+ 	struct inode *inode;
+ 	struct page *page;
+ 	int status, result;
+@@ -2125,19 +2126,16 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
+ 			(unsigned long long)server->fsid.minor,
+ 			clp->cl_hostname);
+ 
+-	result = 0;
+ 	page = alloc_page(GFP_KERNEL);
+ 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+-	if (page == NULL || locations == NULL) {
+-		dprintk("<-- %s: no memory\n", __func__);
+-		goto out;
+-	}
+-	locations->fattr = nfs_alloc_fattr();
+-	if (locations->fattr == NULL) {
++	fattr = nfs_alloc_fattr();
++	if (page == NULL || locations == NULL || fattr == NULL) {
+ 		dprintk("<-- %s: no memory\n", __func__);
++		result = 0;
+ 		goto out;
+ 	}
+ 
++	locations->fattr = fattr;
+ 	inode = d_inode(server->super->s_root);
+ 	result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
+ 					 page, cred);
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 855519713bf79..4085fe30bf481 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -1184,7 +1184,8 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ static int log_read_rst(struct ntfs_log *log, bool first,
+ 			struct restart_info *info)
+ {
+-	u32 skip, vbo;
++	u32 skip;
++	u64 vbo;
+ 	struct RESTART_HDR *r_page = NULL;
+ 
+ 	/* Determine which restart area we are looking for. */
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index ba5a39a2f957d..90d4e9a9859e4 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -577,13 +577,18 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
+ 	clear_buffer_uptodate(bh);
+ 
+ 	if (is_resident(ni)) {
+-		ni_lock(ni);
+-		err = attr_data_read_resident(ni, &folio->page);
+-		ni_unlock(ni);
+-
+-		if (!err)
+-			set_buffer_uptodate(bh);
++		bh->b_blocknr = RESIDENT_LCN;
+ 		bh->b_size = block_size;
++		if (!folio) {
++			err = 0;
++		} else {
++			ni_lock(ni);
++			err = attr_data_read_resident(ni, &folio->page);
++			ni_unlock(ni);
++
++			if (!err)
++				set_buffer_uptodate(bh);
++		}
+ 		return err;
+ 	}
+ 
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 9c7478150a035..3d6143c7abc03 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -59,7 +59,7 @@ struct GUID {
+ struct cpu_str {
+ 	u8 len;
+ 	u8 unused;
+-	u16 name[10];
++	u16 name[];
+ };
+ 
+ struct le_str {
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index c803c10dd97ef..33aeaaa056d70 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -863,14 +863,8 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
+ 
+ 	numfound = bitoff = startoff = 0;
+ 	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
+-	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
+-		if (bitoff == left) {
+-			/* mlog(0, "bitoff (%d) == left", bitoff); */
+-			break;
+-		}
+-		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
+-		   "numfound = %d\n", bitoff, startoff, numfound);*/
+-
++	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) <
++	       left) {
+ 		/* Ok, we found a zero bit... is it contig. or do we
+ 		 * start over?*/
+ 		if (bitoff == startoff) {
+@@ -976,9 +970,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ 	start = count = 0;
+ 	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
+ 
+-	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
+-	       != -1) {
+-		if ((bit_off < left) && (bit_off == start)) {
++	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
++	       left) {
++		if (bit_off == start) {
+ 			count++;
+ 			start++;
+ 			continue;
+@@ -1002,8 +996,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ 				goto bail;
+ 			}
+ 		}
+-		if (bit_off >= left)
+-			break;
++
+ 		count = 1;
+ 		start = bit_off + 1;
+ 	}
+diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
+index a9d1296d736dc..1fe61974d9f02 100644
+--- a/fs/ocfs2/reservations.c
++++ b/fs/ocfs2/reservations.c
+@@ -414,7 +414,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
+ 
+ 	start = search_start;
+ 	while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
+-						 start)) != -1) {
++					start)) < resmap->m_bitmap_len) {
+ 		/* Search reached end of the region */
+ 		if (offset >= (search_start + search_len))
+ 			break;
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index 166c8918c825a..961998415308d 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -1290,10 +1290,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
+ 	found = start = best_offset = best_size = 0;
+ 	bitmap = bg->bg_bitmap;
+ 
+-	while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
+-		if (offset == total_bits)
+-			break;
+-
++	while ((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) <
++	       total_bits) {
+ 		if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
+ 			/* We found a zero, but we can't use it as it
+ 			 * hasn't been put to disk yet! */
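
All three ocfs2 loops were testing ocfs2_find_next_zero_bit() against
-1, but find_next_zero_bit() never returns -1: when no zero bit is
left it returns the size argument, so the terminating condition has to
be "< size". A self-contained illustration; this toy bitmap helper
mirrors the kernel contract, not its implementation:

#include <stdio.h>

/* Returns the index of the next zero bit at or after start,
 * or size when there is none. Never -1. */
static unsigned int find_next_zero_bit(const unsigned char *map,
				       unsigned int size, unsigned int start)
{
	for (; start < size; start++)
		if (!(map[start / 8] & (1u << (start % 8))))
			return start;
	return size;
}

int main(void)
{
	unsigned char map[] = { 0xff, 0xbf };	/* single zero at bit 14 */
	unsigned int size = 16, start = 0, off;

	while ((off = find_next_zero_bit(map, size, start)) < size) {
		printf("zero bit at %u\n", off);	/* prints 14 once */
		start = off + 1;
	}
	return 0;
}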
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 0f8b4a719237c..02d89a285d0dc 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -327,9 +327,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
+ 	struct dentry *newdentry;
+ 	int err;
+ 
+-	if (!attr->hardlink && !IS_POSIXACL(udir))
+-		attr->mode &= ~current_umask();
+-
+ 	inode_lock_nested(udir, I_MUTEX_PARENT);
+ 	newdentry = ovl_create_real(ofs, udir,
+ 				    ovl_lookup_upper(ofs, dentry->d_name.name,
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 39277c37185ca..4fb21affe4e11 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1277,7 +1277,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 	struct cifsFileInfo *smb_file_src = src_file->private_data;
+ 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
+ 	struct cifs_tcon *target_tcon, *src_tcon;
+-	unsigned long long destend, fstart, fend, new_size;
++	unsigned long long destend, fstart, fend, old_size, new_size;
+ 	unsigned int xid;
+ 	int rc;
+ 
+@@ -1342,6 +1342,9 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+ 	if (rc)
+ 		goto unlock;
++	if (fend > target_cifsi->netfs.zero_point)
++		target_cifsi->netfs.zero_point = fend + 1;
++	old_size = target_cifsi->netfs.remote_i_size;
+ 
+ 	/* Discard all the folios that overlap the destination region. */
+ 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
+@@ -1354,12 +1357,13 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+ 	if (target_tcon->ses->server->ops->duplicate_extents) {
+ 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
+ 			smb_file_src, smb_file_target, off, len, destoff);
+-		if (rc == 0 && new_size > i_size_read(target_inode)) {
++		if (rc == 0 && new_size > old_size) {
+ 			truncate_setsize(target_inode, new_size);
+-			netfs_resize_file(&target_cifsi->netfs, new_size, true);
+ 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
+ 					      new_size);
+ 		}
++		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
++			target_cifsi->netfs.zero_point = new_size;
+ 	}
+ 
+ 	/* force revalidate of size and timestamps of target file now
+@@ -1451,6 +1455,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+ 	if (rc)
+ 		goto unlock;
++	if (fend > target_cifsi->netfs.zero_point)
++		target_cifsi->netfs.zero_point = fend + 1;
+ 
+ 	/* Discard all the folios that overlap the destination region. */
+ 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 28f0b7d19d534..6fea0aed43461 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2028,6 +2028,7 @@ smb2_duplicate_extents(const unsigned int xid,
+ 		 * size will be queried on next revalidate, but it is important
+ 		 * to make sure that file's cached size is updated immediately
+ 		 */
++		netfs_resize_file(netfs_inode(inode), dest_off + len, true);
+ 		cifs_setsize(inode, dest_off + len);
+ 	}
+ 	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 2f831a3a91afe..bbf8918417fd8 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -341,7 +341,7 @@ const struct address_space_operations udf_aops = {
+  */
+ int udf_expand_file_adinicb(struct inode *inode)
+ {
+-	struct page *page;
++	struct folio *folio;
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
+ 	int err;
+ 
+@@ -357,12 +357,13 @@ int udf_expand_file_adinicb(struct inode *inode)
+ 		return 0;
+ 	}
+ 
+-	page = find_or_create_page(inode->i_mapping, 0, GFP_KERNEL);
+-	if (!page)
+-		return -ENOMEM;
++	folio = __filemap_get_folio(inode->i_mapping, 0,
++			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
++	if (IS_ERR(folio))
++		return PTR_ERR(folio);
+ 
+-	if (!PageUptodate(page))
+-		udf_adinicb_readpage(page);
++	if (!folio_test_uptodate(folio))
++		udf_adinicb_readpage(&folio->page);
+ 	down_write(&iinfo->i_data_sem);
+ 	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
+ 	       iinfo->i_lenAlloc);
+@@ -371,22 +372,22 @@ int udf_expand_file_adinicb(struct inode *inode)
+ 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
+ 	else
+ 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+-	set_page_dirty(page);
+-	unlock_page(page);
++	folio_mark_dirty(folio);
++	folio_unlock(folio);
+ 	up_write(&iinfo->i_data_sem);
+ 	err = filemap_fdatawrite(inode->i_mapping);
+ 	if (err) {
+ 		/* Restore everything back so that we don't lose data... */
+-		lock_page(page);
++		folio_lock(folio);
+ 		down_write(&iinfo->i_data_sem);
+-		memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr,
+-			       inode->i_size);
+-		unlock_page(page);
++		memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
++				folio, 0, inode->i_size);
++		folio_unlock(folio);
+ 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ 		iinfo->i_lenAlloc = inode->i_size;
+ 		up_write(&iinfo->i_data_sem);
+ 	}
+-	put_page(page);
++	folio_put(folio);
+ 	mark_inode_dirty(inode);
+ 
+ 	return err;
+diff --git a/include/linux/counter.h b/include/linux/counter.h
+index 702e9108bbb44..b767b5c821f58 100644
+--- a/include/linux/counter.h
++++ b/include/linux/counter.h
+@@ -359,7 +359,6 @@ struct counter_ops {
+  * @num_counts:		number of Counts specified in @counts
+  * @ext:		optional array of Counter device extensions
+  * @num_ext:		number of Counter device extensions specified in @ext
+- * @priv:		optional private data supplied by driver
+  * @dev:		internal device structure
+  * @chrdev:		internal character device structure
+  * @events_list:	list of current watching Counter events
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index 297231854ada5..e44913a8200fd 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -632,6 +632,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ 	}
+ }
+ 
++static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
++{
++	struct ethhdr *eth = (struct ethhdr *)skb->data;
++
++	skb_pull_inline(skb, ETH_HLEN);
++	return eth;
++}
++
+ /**
+  * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+  * @skb: Buffer to pad
+diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
+index 6eaa190d0083c..9754f97e71e52 100644
+--- a/include/linux/fortify-string.h
++++ b/include/linux/fortify-string.h
+@@ -71,17 +71,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
+ 	__ret;							\
+ })
+ 
+-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
++#if defined(__SANITIZE_ADDRESS__)
++
++#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
++#elif defined(CONFIG_KASAN_GENERIC)
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
++#else /* CONFIG_KASAN_SW_TAGS */
++extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
++extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
++extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
++#endif
++
+ extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+ extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+ extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+ extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+ extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+ extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
++
+ #else
+ 
+ #if defined(__SANITIZE_MEMORY__)
+@@ -106,6 +119,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
+ #define __underlying_strlen	__builtin_strlen
+ #define __underlying_strncat	__builtin_strncat
+ #define __underlying_strncpy	__builtin_strncpy
++
+ #endif
+ 
+ /**
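
The fortify-string change keys the #ifdef off __SANITIZE_ADDRESS__ and
uses __RENAME, an __asm__ symbol alias, to point the
__underlying_mem{set,move,cpy} declarations at the instrumented
__asan_/__hwasan_ entry points. The renaming mechanism itself in a
tiny compilable example; real_log and impl_v2 are invented names:

#include <stdio.h>

#define __RENAME(x) __asm__(#x)

/* The C name is real_log(); the linker resolves the symbol impl_v2. */
extern void real_log(const char *msg) __RENAME(impl_v2);

void impl_v2(const char *msg)
{
	printf("v2: %s\n", msg);
}

int main(void)
{
	real_log("hello");	/* prints "v2: hello" */
	return 0;
}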
+diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
+index 223da48a6d18b..94c4edd047e54 100644
+--- a/include/linux/fpga/fpga-bridge.h
++++ b/include/linux/fpga/fpga-bridge.h
+@@ -45,6 +45,7 @@ struct fpga_bridge_info {
+  * @dev: FPGA bridge device
+  * @mutex: enforces exclusive reference to bridge
+  * @br_ops: pointer to struct of FPGA bridge ops
++ * @br_ops_owner: module containing the br_ops
+  * @info: fpga image specific information
+  * @node: FPGA bridge list node
+  * @priv: low level driver private date
+@@ -54,6 +55,7 @@ struct fpga_bridge {
+ 	struct device dev;
+ 	struct mutex mutex; /* for exclusive reference to bridge */
+ 	const struct fpga_bridge_ops *br_ops;
++	struct module *br_ops_owner;
+ 	struct fpga_image_info *info;
+ 	struct list_head node;
+ 	void *priv;
+@@ -79,10 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
+ 			       struct fpga_image_info *info,
+ 			       struct list_head *bridge_list);
+ 
++#define fpga_bridge_register(parent, name, br_ops, priv) \
++	__fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)
+ struct fpga_bridge *
+-fpga_bridge_register(struct device *parent, const char *name,
+-		     const struct fpga_bridge_ops *br_ops,
+-		     void *priv);
++__fpga_bridge_register(struct device *parent, const char *name,
++		       const struct fpga_bridge_ops *br_ops, void *priv,
++		       struct module *owner);
+ void fpga_bridge_unregister(struct fpga_bridge *br);
+ 
+ #endif /* _LINUX_FPGA_BRIDGE_H */
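
This and the two fpga headers below adopt the same idiom: the public
*_register() name becomes a macro expanded at the call site, appending
THIS_MODULE so the core can pin the module that owns the ops for as
long as they are in use. A userspace approximation; THIS_MODULE is
kernel-only, so this_module below is a stand-in:

#include <stdio.h>

struct module { const char *name; };

/* stand-in for the kernel's per-module THIS_MODULE */
static struct module this_module = { "demo_driver" };
#define THIS_MODULE (&this_module)

static int __bridge_register(const char *name, struct module *owner)
{
	/* the core may now hold a reference on owner while ops are live */
	printf("bridge %s registered by %s\n", name, owner->name);
	return 0;
}

/* expands where the driver calls it, capturing the caller's module */
#define bridge_register(name) __bridge_register(name, THIS_MODULE)

int main(void)
{
	return bridge_register("soc-bridge0");
}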
+diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
+index 54f63459efd6e..0d4fe068f3d8a 100644
+--- a/include/linux/fpga/fpga-mgr.h
++++ b/include/linux/fpga/fpga-mgr.h
+@@ -201,6 +201,7 @@ struct fpga_manager_ops {
+  * @state: state of fpga manager
+  * @compat_id: FPGA manager id for compatibility check.
+  * @mops: pointer to struct of fpga manager ops
++ * @mops_owner: module containing the mops
+  * @priv: low level driver private date
+  */
+ struct fpga_manager {
+@@ -210,6 +211,7 @@ struct fpga_manager {
+ 	enum fpga_mgr_states state;
+ 	struct fpga_compat_id *compat_id;
+ 	const struct fpga_manager_ops *mops;
++	struct module *mops_owner;
+ 	void *priv;
+ };
+ 
+@@ -230,18 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
+ 
+ void fpga_mgr_put(struct fpga_manager *mgr);
+ 
++#define fpga_mgr_register_full(parent, info) \
++	__fpga_mgr_register_full(parent, info, THIS_MODULE)
+ struct fpga_manager *
+-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
++__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++			 struct module *owner);
+ 
++#define fpga_mgr_register(parent, name, mops, priv) \
++	__fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+ struct fpga_manager *
+-fpga_mgr_register(struct device *parent, const char *name,
+-		  const struct fpga_manager_ops *mops, void *priv);
++__fpga_mgr_register(struct device *parent, const char *name,
++		    const struct fpga_manager_ops *mops, void *priv, struct module *owner);
++
+ void fpga_mgr_unregister(struct fpga_manager *mgr);
+ 
++#define devm_fpga_mgr_register_full(parent, info) \
++	__devm_fpga_mgr_register_full(parent, info, THIS_MODULE)
+ struct fpga_manager *
+-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
++__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
++			      struct module *owner);
++#define devm_fpga_mgr_register(parent, name, mops, priv) \
++	__devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+ struct fpga_manager *
+-devm_fpga_mgr_register(struct device *parent, const char *name,
+-		       const struct fpga_manager_ops *mops, void *priv);
++__devm_fpga_mgr_register(struct device *parent, const char *name,
++			 const struct fpga_manager_ops *mops, void *priv,
++			 struct module *owner);
+ 
+ #endif /*_LINUX_FPGA_MGR_H */
+diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
+index 9d4d32909340a..5fbc05fe70a6b 100644
+--- a/include/linux/fpga/fpga-region.h
++++ b/include/linux/fpga/fpga-region.h
+@@ -36,6 +36,7 @@ struct fpga_region_info {
+  * @mgr: FPGA manager
+  * @info: FPGA image info
+  * @compat_id: FPGA region id for compatibility check.
++ * @ops_owner: module containing the get_bridges function
+  * @priv: private data
+  * @get_bridges: optional function to get bridges to a list
+  */
+@@ -46,6 +47,7 @@ struct fpga_region {
+ 	struct fpga_manager *mgr;
+ 	struct fpga_image_info *info;
+ 	struct fpga_compat_id *compat_id;
++	struct module *ops_owner;
+ 	void *priv;
+ 	int (*get_bridges)(struct fpga_region *region);
+ };
+@@ -58,12 +60,17 @@ fpga_region_class_find(struct device *start, const void *data,
+ 
+ int fpga_region_program_fpga(struct fpga_region *region);
+ 
++#define fpga_region_register_full(parent, info) \
++	__fpga_region_register_full(parent, info, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
++__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
++			    struct module *owner);
+ 
++#define fpga_region_register(parent, mgr, get_bridges) \
++	__fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
+ struct fpga_region *
+-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+-		     int (*get_bridges)(struct fpga_region *));
++__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
++		       int (*get_bridges)(struct fpga_region *), struct module *owner);
+ void fpga_region_unregister(struct fpga_region *region);
+ 
+ #endif /* _FPGA_REGION_H */
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index c940b329a475f..5e5a9c6774bde 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -10267,9 +10267,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
+ 	u8         mfrl[0x1];
+ 	u8         regs_39_to_32[0x8];
+ 
+-	u8         regs_31_to_10[0x16];
++	u8         regs_31_to_11[0x15];
+ 	u8         mtmp[0x1];
+-	u8         regs_8_to_0[0x9];
++	u8         regs_9_to_0[0xa];
+ };
+ 
+ struct mlx5_ifc_mcam_access_reg_bits1 {
+diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
+index 22a07c0900a41..f230a472ccd35 100644
+--- a/include/linux/regulator/driver.h
++++ b/include/linux/regulator/driver.h
+@@ -299,6 +299,8 @@ enum regulator_type {
+  * @vsel_range_reg: Register for range selector when using pickable ranges
+  *		    and ``regulator_map_*_voltage_*_pickable`` functions.
+  * @vsel_range_mask: Mask for register bitfield used for range selector
++ * @range_applied_by_vsel: A flag to indicate that changes to vsel_range_reg
++ *			   are only effective after vsel_reg is written
+  * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
+  * @vsel_mask: Mask for register bitfield used for selector
+  * @vsel_step: Specify the resolution of selector stepping when setting
+@@ -389,6 +391,7 @@ struct regulator_desc {
+ 
+ 	unsigned int vsel_range_reg;
+ 	unsigned int vsel_range_mask;
++	bool range_applied_by_vsel;
+ 	unsigned int vsel_reg;
+ 	unsigned int vsel_mask;
+ 	unsigned int vsel_step;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 4ff48eda3f642..5b1078c160f27 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1174,15 +1174,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
+ 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
+ }
+ 
+-/**
+- * skb_rtable - Returns the skb &rtable
+- * @skb: buffer
+- */
+-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+-{
+-	return (struct rtable *)skb_dst(skb);
+-}
+-
+ /* For mangling skb->pkt_type from user space side from applications
+  * such as nft, tc, etc, we only allow a conservative subset of
+  * possible pkt_types to be set.
+diff --git a/include/media/cec.h b/include/media/cec.h
+index 10c9cf6058b7e..cc3fcd0496c36 100644
+--- a/include/media/cec.h
++++ b/include/media/cec.h
+@@ -258,6 +258,7 @@ struct cec_adapter {
+ 	u16 phys_addr;
+ 	bool needs_hpd;
+ 	bool is_enabled;
++	bool is_claiming_log_addrs;
+ 	bool is_configuring;
+ 	bool must_reconfigure;
+ 	bool is_configured;
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 5277c6d5134ca..970101184cf10 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1335,8 +1335,7 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+ 	rcu_read_lock();
+ 
+ 	list_for_each_entry_rcu(c, &h->list, list) {
+-		if (c->type != ISO_LINK ||
+-			!test_bit(HCI_CONN_PA_SYNC, &c->flags))
++		if (c->type != ISO_LINK)
+ 			continue;
+ 
+ 		if (c->sync_handle == sync_handle) {
+diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
+index 6d1c8541183db..3a9001a042a5c 100644
+--- a/include/net/dst_ops.h
++++ b/include/net/dst_ops.h
+@@ -24,7 +24,7 @@ struct dst_ops {
+ 	void			(*destroy)(struct dst_entry *);
+ 	void			(*ifdown)(struct dst_entry *,
+ 					  struct net_device *dev);
+-	struct dst_entry *	(*negative_advice)(struct dst_entry *);
++	void			(*negative_advice)(struct sock *sk, struct dst_entry *);
+ 	void			(*link_failure)(struct sk_buff *);
+ 	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ 					       struct sk_buff *skb, u32 mtu,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 25cb688bdc623..6d735e00d3f3e 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -423,7 +423,7 @@ int ip_decrease_ttl(struct iphdr *iph)
+ 
+ static inline int ip_mtu_locked(const struct dst_entry *dst)
+ {
+-	const struct rtable *rt = (const struct rtable *)dst;
++	const struct rtable *rt = dst_rtable(dst);
+ 
+ 	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+ }
+@@ -461,7 +461,7 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
+ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 						    bool forwarding)
+ {
+-	const struct rtable *rt = container_of(dst, struct rtable, dst);
++	const struct rtable *rt = dst_rtable(dst);
+ 	struct net *net = dev_net(dst->dev);
+ 	unsigned int mtu;
+ 
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 323c94f1845b9..73524fa0c064b 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -234,9 +234,11 @@ struct fib6_result {
+ 	for (rt = (w)->leaf; rt;					\
+ 	     rt = rcu_dereference_protected(rt->fib6_next, 1))
+ 
+-static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
++#define dst_rt6_info(_ptr) container_of_const(_ptr, struct rt6_info, dst)
++
++static inline struct inet6_dev *ip6_dst_idev(const struct dst_entry *dst)
+ {
+-	return ((struct rt6_info *)dst)->rt6i_idev;
++	return dst_rt6_info(dst)->rt6i_idev;
+ }
+ 
+ static inline bool fib6_requires_src(const struct fib6_info *rt)
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index a30c6aa9e5cf3..a18ed24fed948 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -210,12 +210,11 @@ void rt6_uncached_list_del(struct rt6_info *rt);
+ static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
+ {
+ 	const struct dst_entry *dst = skb_dst(skb);
+-	const struct rt6_info *rt6 = NULL;
+ 
+ 	if (dst)
+-		rt6 = container_of(dst, struct rt6_info, dst);
++		return dst_rt6_info(dst);
+ 
+-	return rt6;
++	return NULL;
+ }
+ 
+ /*
+@@ -227,7 +226,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+ {
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 
+-	np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
++	np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
+ 	sk_setup_caps(sk, dst);
+ 	np->daddr_cache = daddr;
+ #ifdef CONFIG_IPV6_SUBTREES
+@@ -240,7 +239,7 @@ void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
+ 
+ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
+ {
+-	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
++	const struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 
+ 	return rt->rt6i_flags & RTF_LOCAL;
+ }
+@@ -248,7 +247,7 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
+ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ 					    const struct in6_addr *daddr)
+ {
+-	struct rt6_info *rt = (struct rt6_info *)dst;
++	const struct rt6_info *rt = dst_rt6_info(dst);
+ 
+ 	return rt->rt6i_flags & RTF_ANYCAST ||
+ 		(rt->rt6i_dst.plen < 127 &&
+diff --git a/include/net/route.h b/include/net/route.h
+index d4a0147942f1a..af55401aa8f40 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -77,6 +77,17 @@ struct rtable {
+ 				rt_pmtu:31;
+ };
+ 
++#define dst_rtable(_ptr) container_of_const(_ptr, struct rtable, dst)
++
++/**
++ * skb_rtable - Returns the skb &rtable
++ * @skb: buffer
++ */
++static inline struct rtable *skb_rtable(const struct sk_buff *skb)
++{
++	return dst_rtable(skb_dst(skb));
++}
++
+ static inline bool rt_is_input_route(const struct rtable *rt)
+ {
+ 	return rt->rt_is_input != 0;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index b4b553df7870c..944f71a8ab223 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2134,17 +2134,10 @@ sk_dst_get(const struct sock *sk)
+ 
+ static inline void __dst_negative_advice(struct sock *sk)
+ {
+-	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
++	struct dst_entry *dst = __sk_dst_get(sk);
+ 
+-	if (dst && dst->ops->negative_advice) {
+-		ndst = dst->ops->negative_advice(dst);
+-
+-		if (ndst != dst) {
+-			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+-			sk_tx_queue_clear(sk);
+-			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+-		}
+-	}
++	if (dst && dst->ops->negative_advice)
++		dst->ops->negative_advice(sk, dst);
+ }
+ 
+ static inline void dst_negative_advice(struct sock *sk)
+diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
+index ea9af2726a53f..7fba7ea26a4b0 100644
+--- a/include/sound/tas2781-dsp.h
++++ b/include/sound/tas2781-dsp.h
+@@ -2,7 +2,7 @@
+ //
+ // ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+ //
+-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
++// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+ // https://www.ti.com
+ //
+ // The TAS2781 driver implements a flexible and configurable
+@@ -13,8 +13,8 @@
+ // Author: Kevin Lu <kevin-lu@ti.com>
+ //
+ 
+-#ifndef __TASDEVICE_DSP_H__
+-#define __TASDEVICE_DSP_H__
++#ifndef __TAS2781_DSP_H__
++#define __TAS2781_DSP_H__
+ 
+ #define MAIN_ALL_DEVICES			0x0d
+ #define MAIN_DEVICE_A				0x01
+@@ -180,7 +180,6 @@ void tasdevice_calbin_remove(void *context);
+ int tasdevice_select_tuningprm_cfg(void *context, int prm,
+ 	int cfg_no, int rca_conf_no);
+ int tasdevice_prmg_load(void *context, int prm_no);
+-int tasdevice_prmg_calibdata_load(void *context, int prm_no);
+ void tasdevice_tuning_switch(void *context, int state);
+ int tas2781_load_calibration(void *context, char *file_name,
+ 	unsigned short i);
+diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
+index cd84227f1b42f..5402f77ee8594 100644
+--- a/include/uapi/drm/nouveau_drm.h
++++ b/include/uapi/drm/nouveau_drm.h
+@@ -68,6 +68,13 @@ extern "C" {
+  */
+ #define NOUVEAU_GETPARAM_VRAM_USED 19
+ 
++/*
++ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
++ *
++ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
++ */
++#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
++
+ struct drm_nouveau_getparam {
+ 	__u64 param;
+ 	__u64 value;
+diff --git a/init/Kconfig b/init/Kconfig
+index 664bedb9a71fb..459f44ef7cc94 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -743,8 +743,8 @@ config LOG_CPU_MAX_BUF_SHIFT
+ 	int "CPU kernel log buffer size contribution (13 => 8 KB, 17 => 128KB)"
+ 	depends on SMP
+ 	range 0 21
+-	default 12 if !BASE_SMALL
+-	default 0 if BASE_SMALL
++	default 0 if BASE_SMALL != 0
++	default 12
+ 	depends on PRINTK
+ 	help
+ 	  This option allows to increase the default ring buffer size
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 2c90b1eb12e2c..8a29309db4245 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8845,7 +8845,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ 	enum bpf_attach_type eatype = env->prog->expected_attach_type;
+ 	enum bpf_prog_type type = resolve_prog_type(env->prog);
+ 
+-	if (func_id != BPF_FUNC_map_update_elem)
++	if (func_id != BPF_FUNC_map_update_elem &&
++	    func_id != BPF_FUNC_map_delete_elem)
+ 		return false;
+ 
+ 	/* It's not possible to get access to a locked struct sock in these
+@@ -8856,6 +8857,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ 		if (eatype == BPF_TRACE_ITER)
+ 			return true;
+ 		break;
++	case BPF_PROG_TYPE_SOCK_OPS:
++		/* map_update allowed only via dedicated helpers with event type checks */
++		if (func_id == BPF_FUNC_map_delete_elem)
++			return true;
++		break;
+ 	case BPF_PROG_TYPE_SOCKET_FILTER:
+ 	case BPF_PROG_TYPE_SCHED_CLS:
+ 	case BPF_PROG_TYPE_SCHED_ACT:
+@@ -8951,7 +8957,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ 	case BPF_MAP_TYPE_SOCKMAP:
+ 		if (func_id != BPF_FUNC_sk_redirect_map &&
+ 		    func_id != BPF_FUNC_sock_map_update &&
+-		    func_id != BPF_FUNC_map_delete_elem &&
+ 		    func_id != BPF_FUNC_msg_redirect_map &&
+ 		    func_id != BPF_FUNC_sk_select_reuseport &&
+ 		    func_id != BPF_FUNC_map_lookup_elem &&
+@@ -8961,7 +8966,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ 	case BPF_MAP_TYPE_SOCKHASH:
+ 		if (func_id != BPF_FUNC_sk_redirect_hash &&
+ 		    func_id != BPF_FUNC_sock_hash_update &&
+-		    func_id != BPF_FUNC_map_delete_elem &&
+ 		    func_id != BPF_FUNC_msg_redirect_hash &&
+ 		    func_id != BPF_FUNC_sk_select_reuseport &&
+ 		    func_id != BPF_FUNC_map_lookup_elem &&
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 02205ab53b7e9..f7f3d14fa69a7 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 	struct task_struct **tsk;
+ 	int threads = map->bparam.threads;
+ 	int node = map->bparam.node;
+-	const cpumask_t *cpu_mask = cpumask_of_node(node);
+ 	u64 loops;
+ 	int ret = 0;
+ 	int i;
+@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 		if (IS_ERR(tsk[i])) {
+ 			pr_err("create dma_map thread failed\n");
+ 			ret = PTR_ERR(tsk[i]);
++			while (--i >= 0)
++				kthread_stop(tsk[i]);
+ 			goto out;
+ 		}
+ 
+ 		if (node != NUMA_NO_NODE)
+-			kthread_bind_mask(tsk[i], cpu_mask);
++			kthread_bind_mask(tsk[i], cpumask_of_node(node));
+ 	}
+ 
+ 	/* clear the old value in the previous benchmark */
+@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 
+ 	msleep_interruptible(map->bparam.seconds * 1000);
+ 
+-	/* wait for the completion of benchmark threads */
++	/* wait for the completion of all started benchmark threads */
+ 	for (i = 0; i < threads; i++) {
+-		ret = kthread_stop(tsk[i]);
+-		if (ret)
+-			goto out;
++		int kthread_ret = kthread_stop_put(tsk[i]);
++
++		if (kthread_ret)
++			ret = kthread_ret;
+ 	}
+ 
++	if (ret)
++		goto out;
++
+ 	loops = atomic64_read(&map->loops);
+ 	if (likely(loops > 0)) {
+ 		u64 map_variance, unmap_variance;
+@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ 	}
+ 
+ out:
+-	for (i = 0; i < threads; i++)
+-		put_task_struct(tsk[i]);
+ 	put_device(map->dev);
+ 	kfree(tsk);
+ 	return ret;
+@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ 		}
+ 
+ 		if (map->bparam.node != NUMA_NO_NODE &&
+-		    !node_possible(map->bparam.node)) {
++		    (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
++		     !node_possible(map->bparam.node))) {
+ 			pr_err("invalid numa node\n");
+ 			return -EINVAL;
+ 		}
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 6d443ea22bb73..4ba5fd3d73ae2 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -14,7 +14,12 @@ include/
+ arch/$SRCARCH/include/
+ "
+ 
+-type cpio > /dev/null
++if ! command -v cpio >/dev/null; then
++	echo >&2 "***"
++	echo >&2 "*** 'cpio' could not be found."
++	echo >&2 "***"
++	exit 1
++fi
+ 
+ # Support incremental builds by skipping archive generation
+ # if timestamps of files being archived are not changed.
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 1ed2b1739363b..5ecd072a34fe7 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -69,6 +69,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ 		return false;
+ 	}
+ 
++	/*
++	 * Complete a possibly pending irq move cleanup. If this
++	 * interrupt was moved in hard irq context, then the vectors need
++	 * to be cleaned up. This cannot wait until the interrupt actually
++	 * fires on this CPU.
++	 */
++	irq_force_complete_move(desc);
++
+ 	/*
+ 	 * No move required, if:
+ 	 * - Interrupt is per cpu
+@@ -87,14 +95,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ 		return false;
+ 	}
+ 
+-	/*
+-	 * Complete an eventually pending irq move cleanup. If this
+-	 * interrupt was moved in hard irq context, then the vectors need
+-	 * to be cleaned up. It can't wait until this interrupt actually
+-	 * happens and this CPU was involved.
+-	 */
+-	irq_force_complete_move(desc);
+-
+ 	/*
+ 	 * If there is a setaffinity pending, then try to reuse the pending
+ 	 * mask, so the last change of the affinity does not get lost. If
+diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
+index 2f68e93fff0bc..df0745a42a3f3 100644
+--- a/kernel/trace/rv/rv.c
++++ b/kernel/trace/rv/rv.c
+@@ -245,6 +245,7 @@ static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
+ 
+ /**
+  * rv_disable_monitor - disable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+  *
+  * Returns 0 on success.
+  */
+@@ -256,6 +257,7 @@ int rv_disable_monitor(struct rv_monitor_def *mdef)
+ 
+ /**
+  * rv_enable_monitor - enable a given runtime monitor
++ * @mdef: Pointer to the monitor definition structure.
+  *
+  * Returns 0 on success, error otherwise.
+  */
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 42bc0f3622263..1a7e7cf944938 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -553,6 +553,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
+ 			anon_offs = 0;
+ 			field = btf_find_struct_member(ctx->btf, type, fieldname,
+ 						       &anon_offs);
++			if (IS_ERR(field)) {
++				trace_probe_log_err(ctx->offset, BAD_BTF_TID);
++				return PTR_ERR(field);
++			}
+ 			if (!field) {
+ 				trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
+ 				return -ENOENT;
+diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
+index e81e1ac4a919b..bdda600f8dfbe 100644
+--- a/lib/Kconfig.ubsan
++++ b/lib/Kconfig.ubsan
+@@ -4,6 +4,7 @@ config ARCH_HAS_UBSAN
+ 
+ menuconfig UBSAN
+ 	bool "Undefined behaviour sanity checker"
++	depends on ARCH_HAS_UBSAN
+ 	help
+ 	  This option enables the Undefined Behaviour sanity checker.
+ 	  Compile-time instrumentation is used to detect various undefined
+diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
+index e21be95514afa..ca09f7f0e6a26 100644
+--- a/lib/strcat_kunit.c
++++ b/lib/strcat_kunit.c
+@@ -10,7 +10,7 @@
+ 
+ static volatile int unconst;
+ 
+-static void strcat_test(struct kunit *test)
++static void test_strcat(struct kunit *test)
+ {
+ 	char dest[8];
+ 
+@@ -29,7 +29,7 @@ static void strcat_test(struct kunit *test)
+ 	KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ }
+ 
+-static void strncat_test(struct kunit *test)
++static void test_strncat(struct kunit *test)
+ {
+ 	char dest[8];
+ 
+@@ -56,7 +56,7 @@ static void strncat_test(struct kunit *test)
+ 	KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ }
+ 
+-static void strlcat_test(struct kunit *test)
++static void test_strlcat(struct kunit *test)
+ {
+ 	char dest[8] = "";
+ 	int len = sizeof(dest) + unconst;
+@@ -88,9 +88,9 @@ static void strlcat_test(struct kunit *test)
+ }
+ 
+ static struct kunit_case strcat_test_cases[] = {
+-	KUNIT_CASE(strcat_test),
+-	KUNIT_CASE(strncat_test),
+-	KUNIT_CASE(strlcat_test),
++	KUNIT_CASE(test_strcat),
++	KUNIT_CASE(test_strncat),
++	KUNIT_CASE(test_strlcat),
+ 	{}
+ };
+ 
+diff --git a/lib/string_kunit.c b/lib/string_kunit.c
+index eabf025cf77c9..dd19bd7748aa2 100644
+--- a/lib/string_kunit.c
++++ b/lib/string_kunit.c
+@@ -11,6 +11,12 @@
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ 
++#define STRCMP_LARGE_BUF_LEN 2048
++#define STRCMP_CHANGE_POINT 1337
++#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
++#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
++#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
++
+ static void test_memset16(struct kunit *test)
+ {
+ 	unsigned i, j, k;
+@@ -179,6 +185,147 @@ static void test_strspn(struct kunit *test)
+ 	}
+ }
+ 
++static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
++static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
++
++static void strcmp_fill_buffers(char fill1, char fill2)
++{
++	memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
++	memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
++	strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
++	strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
++}
++
++static void test_strcmp(struct kunit *test)
++{
++	/* Equal strings */
++	STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
++	/* First string is lexicographically less than the second */
++	STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
++	/* First string is lexicographically larger than the second */
++	STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
++	/* Empty string is always lexicographically less than any non-empty string */
++	STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
++	/* Two empty strings should be equal */
++	STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
++	/* Compare two strings which have only one char difference */
++	STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
++	/* Compare two strings which have the same prefix */
++	STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
++}
++
++static void test_strcmp_long_strings(struct kunit *test)
++{
++	strcmp_fill_buffers('B', 'B');
++	STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
++	STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
++	STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
++}
++
++static void test_strncmp(struct kunit *test)
++{
++	/* Equal strings */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
++	/* First string is lexicographically less than the second */
++	STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
++	/* Result is always 'equal' when count = 0 */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
++	/* Strings with common prefix are equal if count = length of prefix */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
++	/* Strings with common prefix are not equal when count = length of prefix + 1 */
++	STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
++	/* If one string is a prefix of another, the shorter string is lexicographically smaller */
++	STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
++				 strlen("Just a string and something else"));
++	/*
++	 * If one string is a prefix of another and we compare only the
++	 * first 'prefix length' characters, the result is 'equal'
++	 */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
++				 strlen("Just a string"));
++}
++
++static void test_strncmp_long_strings(struct kunit *test)
++{
++	strcmp_fill_buffers('B', 'B');
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
++	STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
++	STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
++				   strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++	/* The strings are equal up to STRCMP_CHANGE_POINT */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_CHANGE_POINT);
++	STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
++				   strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
++}
++
++static void test_strcasecmp(struct kunit *test)
++{
++	/* Same strings in different case should be equal */
++	STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
++	/* Empty strings should be equal */
++	STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
++	/* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' */
++	STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
++	STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
++	/* Special symbols and numbers should be processed correctly */
++	STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
++}
++
++static void test_strcasecmp_long_strings(struct kunit *test)
++{
++	strcmp_fill_buffers('b', 'B');
++	STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
++	STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
++	STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
++}
++
++static void test_strncasecmp(struct kunit *test)
++{
++	/* Same strings in different case should be equal */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
++	/* strncasecmp should check 'count' chars only */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
++	STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
++	STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
++	/* Result is always 'equal' when count = 0 */
++	STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
++}
++
++static void test_strncasecmp_long_strings(struct kunit *test)
++{
++	strcmp_fill_buffers('b', 'B');
++	STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
++	STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++
++	strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
++	STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
++				   strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
++
++	STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
++				 strcmp_buffer2, STRCMP_CHANGE_POINT);
++	STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
++				   strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
++}
++
+ static struct kunit_case string_test_cases[] = {
+ 	KUNIT_CASE(test_memset16),
+ 	KUNIT_CASE(test_memset32),
+@@ -186,6 +333,14 @@ static struct kunit_case string_test_cases[] = {
+ 	KUNIT_CASE(test_strchr),
+ 	KUNIT_CASE(test_strnchr),
+ 	KUNIT_CASE(test_strspn),
++	KUNIT_CASE(test_strcmp),
++	KUNIT_CASE(test_strcmp_long_strings),
++	KUNIT_CASE(test_strncmp),
++	KUNIT_CASE(test_strncmp_long_strings),
++	KUNIT_CASE(test_strcasecmp),
++	KUNIT_CASE(test_strcasecmp_long_strings),
++	KUNIT_CASE(test_strncasecmp),
++	KUNIT_CASE(test_strncasecmp_long_strings),
+ 	{}
+ };
+ 
+diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
+index a6b6344354ed5..b6d1d93a88832 100644
+--- a/lib/strscpy_kunit.c
++++ b/lib/strscpy_kunit.c
+@@ -8,22 +8,23 @@
+ #include <kunit/test.h>
+ #include <linux/string.h>
+ 
+-/*
+- * tc() - Run a specific test case.
++/**
++ * strscpy_check() - Run a specific test case.
++ * @test: KUnit test context pointer
+  * @src: Source string, argument to strscpy_pad()
+  * @count: Size of destination buffer, argument to strscpy_pad()
+  * @expected: Expected return value from call to strscpy_pad()
+- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
+  * @chars: Number of characters from the src string expected to be
+  *         written to the dst buffer.
++ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+  * @pad: Number of pad characters expected (in the tail of dst buffer).
+  *       (@pad does not include the null terminator byte.)
+  *
+  * Calls strscpy_pad() and verifies the return value and state of the
+  * destination buffer after the call returns.
+  */
+-static void tc(struct kunit *test, char *src, int count, int expected,
+-	       int chars, int terminator, int pad)
++static void strscpy_check(struct kunit *test, char *src, int count,
++			  int expected, int chars, int terminator, int pad)
+ {
+ 	int nr_bytes_poison;
+ 	int max_expected;
+@@ -79,12 +80,12 @@ static void tc(struct kunit *test, char *src, int count, int expected,
+ 	}
+ }
+ 
+-static void strscpy_test(struct kunit *test)
++static void test_strscpy(struct kunit *test)
+ {
+ 	char dest[8];
+ 
+ 	/*
+-	 * tc() uses a destination buffer of size 6 and needs at
++	 * strscpy_check() uses a destination buffer of size 6 and needs at
+ 	 * least 2 characters spare (one for null and one to check for
+ 	 * overflow).  This means we should only call tc() with
+ 	 * strings up to a maximum of 4 characters long and 'count'
+@@ -92,27 +93,27 @@ static void strscpy_test(struct kunit *test)
+ 	 * the buffer size in tc().
+ 	 */
+ 
+-	/* tc(test, src, count, expected, chars, terminator, pad) */
+-	tc(test, "a", 0, -E2BIG, 0, 0, 0);
+-	tc(test, "",  0, -E2BIG, 0, 0, 0);
++	/* strscpy_check(test, src, count, expected, chars, terminator, pad) */
++	strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
++	strscpy_check(test, "",  0, -E2BIG, 0, 0, 0);
+ 
+-	tc(test, "a", 1, -E2BIG, 0, 1, 0);
+-	tc(test, "",  1, 0,	 0, 1, 0);
++	strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
++	strscpy_check(test, "",  1, 0,	 0, 1, 0);
+ 
+-	tc(test, "ab", 2, -E2BIG, 1, 1, 0);
+-	tc(test, "a",  2, 1,	  1, 1, 0);
+-	tc(test, "",   2, 0,	  0, 1, 1);
++	strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
++	strscpy_check(test, "a",  2, 1,	  1, 1, 0);
++	strscpy_check(test, "",   2, 0,	  0, 1, 1);
+ 
+-	tc(test, "abc", 3, -E2BIG, 2, 1, 0);
+-	tc(test, "ab",  3, 2,	   2, 1, 0);
+-	tc(test, "a",   3, 1,	   1, 1, 1);
+-	tc(test, "",    3, 0,	   0, 1, 2);
++	strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
++	strscpy_check(test, "ab",  3, 2,	   2, 1, 0);
++	strscpy_check(test, "a",   3, 1,	   1, 1, 1);
++	strscpy_check(test, "",    3, 0,	   0, 1, 2);
+ 
+-	tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
+-	tc(test, "abc",  4, 3,	    3, 1, 0);
+-	tc(test, "ab",   4, 2,	    2, 1, 1);
+-	tc(test, "a",    4, 1,	    1, 1, 2);
+-	tc(test, "",     4, 0,	    0, 1, 3);
++	strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
++	strscpy_check(test, "abc",  4, 3,	    3, 1, 0);
++	strscpy_check(test, "ab",   4, 2,	    2, 1, 1);
++	strscpy_check(test, "a",    4, 1,	    1, 1, 2);
++	strscpy_check(test, "",     4, 0,	    0, 1, 3);
+ 
+ 	/* Compile-time-known source strings. */
+ 	KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+@@ -127,7 +128,7 @@ static void strscpy_test(struct kunit *test)
+ }
+ 
+ static struct kunit_case strscpy_test_cases[] = {
+-	KUNIT_CASE(strscpy_test),
++	KUNIT_CASE(test_strscpy),
+ 	{}
+ };
+ 
+diff --git a/net/atm/clip.c b/net/atm/clip.c
+index 294cb9efe3d38..015fb679be425 100644
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -345,7 +345,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
+ 		dev->stats.tx_dropped++;
+ 		return NETDEV_TX_OK;
+ 	}
+-	rt = (struct rtable *) dst;
++	rt = dst_rtable(dst);
+ 	if (rt->rt_gw_family == AF_INET)
+ 		daddr = &rt->rt_gw4;
+ 	else
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 27520a8a486f3..50cfec8ccac4f 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -133,7 +133,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
+ 						  struct in6_addr *daddr,
+ 						  struct sk_buff *skb)
+ {
+-	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
++	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 	int count = atomic_read(&dev->peer_count);
+ 	const struct in6_addr *nexthop;
+ 	struct lowpan_peer *peer;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index cce73749f2dce..1ed734a7fb313 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1,7 +1,7 @@
+ /*
+    BlueZ - Bluetooth protocol stack for Linux
+    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
+-   Copyright 2023 NXP
++   Copyright 2023-2024 NXP
+ 
+    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+ 
+@@ -6359,14 +6359,16 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	if (!(flags & HCI_PROTO_DEFER))
+ 		goto unlock;
+ 
+-	if (ev->status) {
+-		/* Add connection to indicate the failed PA sync event */
+-		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+-					     HCI_ROLE_SLAVE);
++	/* Add connection to indicate PA sync event */
++	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
++				     HCI_ROLE_SLAVE);
+ 
+-		if (!pa_sync)
+-			goto unlock;
++	if (IS_ERR(pa_sync))
++		goto unlock;
++
++	pa_sync->sync_handle = le16_to_cpu(ev->handle);
+ 
++	if (ev->status) {
+ 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
+ 
+ 		/* Notify iso layer */
+@@ -6383,6 +6385,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
+ 	struct hci_ev_le_per_adv_report *ev = data;
+ 	int mask = hdev->link_mode;
+ 	__u8 flags = 0;
++	struct hci_conn *pa_sync;
+ 
+ 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
+ 
+@@ -6390,8 +6393,28 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
+ 
+ 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+ 	if (!(mask & HCI_LM_ACCEPT))
+-		hci_le_pa_term_sync(hdev, ev->sync_handle);
++		goto unlock;
++
++	if (!(flags & HCI_PROTO_DEFER))
++		goto unlock;
++
++	pa_sync = hci_conn_hash_lookup_pa_sync_handle
++			(hdev,
++			le16_to_cpu(ev->sync_handle));
+ 
++	if (!pa_sync)
++		goto unlock;
++
++	if (ev->data_status == LE_PA_DATA_COMPLETE &&
++	    !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
++		/* Notify iso layer */
++		hci_connect_cfm(pa_sync, 0);
++
++		/* Notify MGMT layer */
++		mgmt_device_connected(hdev, pa_sync, NULL, 0);
++	}
++
++unlock:
+ 	hci_dev_unlock(hdev);
+ }
+ 
+@@ -6926,10 +6949,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ 	hci_dev_lock(hdev);
+ 
+ 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+-	if (!(mask & HCI_LM_ACCEPT)) {
+-		hci_le_pa_term_sync(hdev, ev->sync_handle);
++	if (!(mask & HCI_LM_ACCEPT))
+ 		goto unlock;
+-	}
+ 
+ 	if (!(flags & HCI_PROTO_DEFER))
+ 		goto unlock;
+@@ -6938,24 +6959,11 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+ 			(hdev,
+ 			le16_to_cpu(ev->sync_handle));
+ 
+-	if (pa_sync)
+-		goto unlock;
+-
+-	/* Add connection to indicate the PA sync event */
+-	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+-				     HCI_ROLE_SLAVE);
+-
+ 	if (IS_ERR(pa_sync))
+ 		goto unlock;
+ 
+-	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
+-	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
+-
+ 	/* Notify iso layer */
+-	hci_connect_cfm(pa_sync, 0x00);
+-
+-	/* Notify MGMT layer */
+-	mgmt_device_connected(hdev, pa_sync, NULL, 0);
++	hci_connect_cfm(pa_sync, 0);
+ 
+ unlock:
+ 	hci_dev_unlock(hdev);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 6cb41f9d174e2..00c0d8413c638 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -54,7 +54,6 @@ static void iso_sock_kill(struct sock *sk);
+ enum {
+ 	BT_SK_BIG_SYNC,
+ 	BT_SK_PA_SYNC,
+-	BT_SK_PA_SYNC_TERM,
+ };
+ 
+ struct iso_pinfo {
+@@ -81,6 +80,7 @@ static bool check_ucast_qos(struct bt_iso_qos *qos);
+ static bool check_bcast_qos(struct bt_iso_qos *qos);
+ static bool iso_match_sid(struct sock *sk, void *data);
+ static bool iso_match_sync_handle(struct sock *sk, void *data);
++static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data);
+ static void iso_sock_disconn(struct sock *sk);
+ 
+ typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
+@@ -197,21 +197,10 @@ static void iso_chan_del(struct sock *sk, int err)
+ 	sock_set_flag(sk, SOCK_ZAPPED);
+ }
+ 
+-static bool iso_match_conn_sync_handle(struct sock *sk, void *data)
+-{
+-	struct hci_conn *hcon = data;
+-
+-	if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags))
+-		return false;
+-
+-	return hcon->sync_handle == iso_pi(sk)->sync_handle;
+-}
+-
+ static void iso_conn_del(struct hci_conn *hcon, int err)
+ {
+ 	struct iso_conn *conn = hcon->iso_data;
+ 	struct sock *sk;
+-	struct sock *parent;
+ 
+ 	if (!conn)
+ 		return;
+@@ -227,26 +216,6 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+ 
+ 	if (sk) {
+ 		lock_sock(sk);
+-
+-		/* While a PA sync hcon is in the process of closing,
+-		 * mark parent socket with a flag, so that any residual
+-		 * BIGInfo adv reports that arrive before PA sync is
+-		 * terminated are not processed anymore.
+-		 */
+-		if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+-			parent = iso_get_sock(&hcon->src,
+-					      &hcon->dst,
+-					      BT_LISTEN,
+-					      iso_match_conn_sync_handle,
+-					      hcon);
+-
+-			if (parent) {
+-				set_bit(BT_SK_PA_SYNC_TERM,
+-					&iso_pi(parent)->flags);
+-				sock_put(parent);
+-			}
+-		}
+-
+ 		iso_sock_clear_timer(sk);
+ 		iso_chan_del(sk, err);
+ 		release_sock(sk);
+@@ -860,6 +829,7 @@ static struct sock *iso_sock_alloc(struct net *net, struct socket *sock,
+ 	iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
+ 
+ 	iso_pi(sk)->qos = default_qos;
++	iso_pi(sk)->sync_handle = -1;
+ 
+ 	bt_sock_link(&iso_sk_list, sk);
+ 	return sk;
+@@ -907,7 +877,6 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
+ 		return -EINVAL;
+ 
+ 	iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type;
+-	iso_pi(sk)->sync_handle = -1;
+ 
+ 	if (sa->iso_bc->bc_sid > 0x0f)
+ 		return -EINVAL;
+@@ -984,7 +953,8 @@ static int iso_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	/* Allow the user to bind a PA sync socket to a number
+ 	 * of BISes to sync to.
+ 	 */
+-	if (sk->sk_state == BT_CONNECT2 &&
++	if ((sk->sk_state == BT_CONNECT2 ||
++	     sk->sk_state == BT_CONNECTED) &&
+ 	    test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+ 		err = iso_sock_bind_pa_sk(sk, sa, addr_len);
+ 		goto done;
+@@ -1396,6 +1366,16 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			}
+ 			release_sock(sk);
+ 			return 0;
++		case BT_CONNECTED:
++			if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
++				iso_conn_big_sync(sk);
++				sk->sk_state = BT_LISTEN;
++				release_sock(sk);
++				return 0;
++			}
++
++			release_sock(sk);
++			break;
+ 		case BT_CONNECT:
+ 			release_sock(sk);
+ 			return iso_connect_cis(sk);
+@@ -1541,7 +1521,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+ 
+ 	case BT_ISO_QOS:
+ 		if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+-		    sk->sk_state != BT_CONNECT2) {
++		    sk->sk_state != BT_CONNECT2 &&
++		    (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags) ||
++		    sk->sk_state != BT_CONNECTED)) {
+ 			err = -EINVAL;
+ 			break;
+ 		}
+@@ -1762,7 +1744,7 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 	struct sock *sk = conn->sk;
+ 	struct hci_ev_le_big_sync_estabilished *ev = NULL;
+ 	struct hci_ev_le_pa_sync_established *ev2 = NULL;
+-	struct hci_evt_le_big_info_adv_report *ev3 = NULL;
++	struct hci_ev_le_per_adv_report *ev3 = NULL;
+ 	struct hci_conn *hcon;
+ 
+ 	BT_DBG("conn %p", conn);
+@@ -1799,12 +1781,12 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 						      iso_match_sid, ev2);
+ 		} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
+ 			ev3 = hci_recv_event_data(hcon->hdev,
+-						  HCI_EVT_LE_BIG_INFO_ADV_REPORT);
++						  HCI_EV_LE_PER_ADV_REPORT);
+ 			if (ev3)
+ 				parent = iso_get_sock(&hcon->src,
+ 						      &hcon->dst,
+ 						      BT_LISTEN,
+-						      iso_match_sync_handle,
++						      iso_match_sync_handle_pa_report,
+ 						      ev3);
+ 		}
+ 
+@@ -1847,7 +1829,6 @@ static void iso_conn_ready(struct iso_conn *conn)
+ 
+ 		if (ev3) {
+ 			iso_pi(sk)->qos = iso_pi(parent)->qos;
+-			iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
+ 			hcon->iso_qos = iso_pi(sk)->qos;
+ 			iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
+ 			memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
+@@ -1941,26 +1922,29 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 
+ 	ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ 	if (ev2) {
+-		/* Try to get PA sync listening socket, if it exists */
+-		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+-				  iso_match_pa_sync_flag, NULL);
+-
+-		if (!sk) {
+-			sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+-					  iso_match_sync_handle, ev2);
+-
+-			/* If PA Sync is in process of terminating,
+-			 * do not handle any more BIGInfo adv reports.
+-			 */
+-
+-			if (sk && test_bit(BT_SK_PA_SYNC_TERM,
+-					   &iso_pi(sk)->flags))
+-				return 0;
++		/* Check if BIGInfo report has already been handled */
++		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECTED,
++				  iso_match_sync_handle, ev2);
++		if (sk) {
++			sock_put(sk);
++			sk = NULL;
++			goto done;
+ 		}
+ 
++		/* Try to get PA sync socket, if it exists */
++		sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECT2,
++				  iso_match_sync_handle, ev2);
++		if (!sk)
++			sk = iso_get_sock(&hdev->bdaddr, bdaddr,
++					  BT_LISTEN,
++					  iso_match_sync_handle,
++					  ev2);
++
+ 		if (sk) {
+ 			int err;
+ 
++			iso_pi(sk)->qos.bcast.encryption = ev2->encryption;
++
+ 			if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
+ 				iso_pi(sk)->bc_num_bis = ev2->num_bis;
+ 
+@@ -1979,6 +1963,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ 				}
+ 			}
+ 		}
++
++		goto done;
+ 	}
+ 
+ 	ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
+diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
+index 0ccfd5fa5cb9b..0c0bdb058c5b1 100644
+--- a/net/core/dst_cache.c
++++ b/net/core/dst_cache.c
+@@ -83,7 +83,7 @@ struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
+ 		return NULL;
+ 
+ 	*saddr = idst->in_saddr.s_addr;
+-	return container_of(dst, struct rtable, dst);
++	return dst_rtable(dst);
+ }
+ EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
+ 
+@@ -112,7 +112,7 @@ void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
+ 
+ 	idst = this_cpu_ptr(dst_cache->cache);
+ 	dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,
+-				  rt6_get_cookie((struct rt6_info *)dst));
++				  rt6_get_cookie(dst_rt6_info(dst)));
+ 	idst->in6_saddr = *saddr;
+ }
+ EXPORT_SYMBOL_GPL(dst_cache_set_ip6);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ae5254f712c94..a5856a8b4498b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2215,7 +2215,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
+ 	rcu_read_lock();
+ 	if (!nh) {
+ 		dst = skb_dst(skb);
+-		nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
++		nexthop = rt6_nexthop(dst_rt6_info(dst),
+ 				      &ipv6_hdr(skb)->daddr);
+ 	} else {
+ 		nexthop = &nh->ipv6_nh;
+@@ -2314,8 +2314,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
+ 
+ 	rcu_read_lock();
+ 	if (!nh) {
+-		struct dst_entry *dst = skb_dst(skb);
+-		struct rtable *rt = container_of(dst, struct rtable, dst);
++		struct rtable *rt = skb_rtable(skb);
+ 
+ 		neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ 	} else if (nh->nh_family == AF_INET6) {
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 049c3adeb8504..4e3651101b866 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+ 	skb->dev = dev;
+ 	skb_reset_mac_header(skb);
+ 
+-	eth = (struct ethhdr *)skb->data;
+-	skb_pull_inline(skb, ETH_HLEN);
+-
++	eth = eth_skb_pull_mac(skb);
+ 	eth_skb_pkt_type(skb, dev);
+ 
+ 	/*
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index fafb123f798be..5622ddd3bf55b 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -758,7 +758,9 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
+ 	sock_rps_record_flow(newsk);
+ 	WARN_ON(!((1 << newsk->sk_state) &
+ 		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+-		  TCPF_CLOSE_WAIT | TCPF_CLOSE)));
++		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
++		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
++		   TCPF_CLOSE)));
+ 
+ 	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
+ 		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
+@@ -1306,8 +1308,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
+ 
+ int inet_sk_rebuild_header(struct sock *sk)
+ {
++	struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
+ 	struct inet_sock *inet = inet_sk(sk);
+-	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
+ 	__be32 daddr;
+ 	struct ip_options_rcu *inet_opt;
+ 	struct flowi4 *fl4;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 7e45c34c8340a..8382cc998bff8 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1882,10 +1882,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ 			goto done;
+ 
+ 		if (fillargs.ifindex) {
+-			err = -ENODEV;
+ 			dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+-			if (!dev)
++			if (!dev) {
++				err = -ENODEV;
+ 				goto done;
++			}
+ 			in_dev = __in_dev_get_rcu(dev);
+ 			if (!in_dev)
+ 				goto done;
+@@ -1897,7 +1898,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ 	cb->seq = inet_base_seq(tgt_net);
+ 
+-	for_each_netdev_dump(net, dev, ctx->ifindex) {
++	for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ 		in_dev = __in_dev_get_rcu(dev);
+ 		if (!in_dev)
+ 			continue;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 437e782b9663b..207482d30dc7e 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -483,6 +483,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 					struct icmp_bxm *param)
+ {
+ 	struct net_device *route_lookup_dev;
++	struct dst_entry *dst, *dst2;
+ 	struct rtable *rt, *rt2;
+ 	struct flowi4 fl4_dec;
+ 	int err;
+@@ -508,16 +509,17 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 	/* No need to clone since we're just using its address. */
+ 	rt2 = rt;
+ 
+-	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
+-					   flowi4_to_flowi(fl4), NULL, 0);
+-	if (!IS_ERR(rt)) {
++	dst = xfrm_lookup(net, &rt->dst,
++			  flowi4_to_flowi(fl4), NULL, 0);
++	rt = dst_rtable(dst);
++	if (!IS_ERR(dst)) {
+ 		if (rt != rt2)
+ 			return rt;
+-	} else if (PTR_ERR(rt) == -EPERM) {
++	} else if (PTR_ERR(dst) == -EPERM) {
+ 		rt = NULL;
+-	} else
++	} else {
+ 		return rt;
+-
++	}
+ 	err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
+ 	if (err)
+ 		goto relookup_failed;
+@@ -551,19 +553,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
+ 	if (err)
+ 		goto relookup_failed;
+ 
+-	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
+-					    flowi4_to_flowi(&fl4_dec), NULL,
+-					    XFRM_LOOKUP_ICMP);
+-	if (!IS_ERR(rt2)) {
++	dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL,
++			   XFRM_LOOKUP_ICMP);
++	rt2 = dst_rtable(dst2);
++	if (!IS_ERR(dst2)) {
+ 		dst_release(&rt->dst);
+ 		memcpy(fl4, &fl4_dec, sizeof(*fl4));
+ 		rt = rt2;
+-	} else if (PTR_ERR(rt2) == -EPERM) {
++	} else if (PTR_ERR(dst2) == -EPERM) {
+ 		if (rt)
+ 			dst_release(&rt->dst);
+ 		return rt2;
+ 	} else {
+-		err = PTR_ERR(rt2);
++		err = PTR_ERR(dst2);
+ 		goto relookup_failed;
+ 	}
+ 	return rt;
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 5e9c8156656a7..d6fbcbd2358a5 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -616,7 +616,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+ 		dst = skb_dst(skb);
+ 		if (curr_dst != dst) {
+ 			hint = ip_extract_route_hint(net, skb,
+-					       ((struct rtable *)dst)->rt_type);
++						     dst_rtable(dst)->rt_type);
+ 
+ 			/* dispatch old sublist */
+ 			if (!list_empty(&sublist))
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 39229fd0601a1..9500031a1f55b 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -198,7 +198,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
+ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+-	struct rtable *rt = (struct rtable *)dst;
++	struct rtable *rt = dst_rtable(dst);
+ 	struct net_device *dev = dst->dev;
+ 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ 	struct neighbour *neigh;
+@@ -475,7 +475,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ 		goto packet_routed;
+ 
+ 	/* Make sure we can route this packet. */
+-	rt = (struct rtable *)__sk_dst_check(sk, 0);
++	rt = dst_rtable(__sk_dst_check(sk, 0));
+ 	if (!rt) {
+ 		__be32 daddr;
+ 
+@@ -971,7 +971,7 @@ static int __ip_append_data(struct sock *sk,
+ 	bool zc = false;
+ 	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
+ 	int csummode = CHECKSUM_NONE;
+-	struct rtable *rt = (struct rtable *)cork->dst;
++	struct rtable *rt = dst_rtable(cork->dst);
+ 	bool paged, hold_tskey, extra_uref = false;
+ 	unsigned int wmem_alloc_delta = 0;
+ 	u32 tskey = 0;
+@@ -1390,7 +1390,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct net *net = sock_net(sk);
+ 	struct ip_options *opt = NULL;
+-	struct rtable *rt = (struct rtable *)cork->dst;
++	struct rtable *rt = dst_rtable(cork->dst);
+ 	struct iphdr *iph;
+ 	u8 pmtudisc, ttl;
+ 	__be16 df = 0;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 1b8d8ff9a2375..0e4bd528428e9 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -543,7 +543,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 		struct rt6_info *rt6;
+ 		__be32 daddr;
+ 
+-		rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
++		rt6 = skb_valid_dst(skb) ? dst_rt6_info(skb_dst(skb)) :
+ 					   NULL;
+ 		daddr = md ? dst : tunnel->parms.iph.daddr;
+ 
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index 69e3317996043..73e66a088e25e 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
+ 
+ 	laddr = 0;
+ 	indev = __in_dev_get_rcu(skb->dev);
++	if (!indev)
++		return daddr;
+ 
+ 	in_dev_for_each_ifa_rcu(ifa, indev) {
+ 		if (ifa->ifa_flags & IFA_F_SECONDARY)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index b814fdab19f71..3fcf084fbda5d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -132,7 +132,8 @@ struct dst_entry	*ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int		ipv4_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
++static void		ipv4_negative_advice(struct sock *sk,
++					     struct dst_entry *dst);
+ static void		 ipv4_link_failure(struct sk_buff *skb);
+ static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 					   struct sk_buff *skb, u32 mtu,
+@@ -831,28 +832,21 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
+ 	u32 mark = skb->mark;
+ 	__u8 tos = iph->tos;
+ 
+-	rt = (struct rtable *) dst;
++	rt = dst_rtable(dst);
+ 
+ 	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
+ 	__ip_do_redirect(rt, skb, &fl4, true);
+ }
+ 
+-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
++static void ipv4_negative_advice(struct sock *sk,
++				 struct dst_entry *dst)
+ {
+-	struct rtable *rt = (struct rtable *)dst;
+-	struct dst_entry *ret = dst;
++	struct rtable *rt = dst_rtable(dst);
+ 
+-	if (rt) {
+-		if (dst->obsolete > 0) {
+-			ip_rt_put(rt);
+-			ret = NULL;
+-		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+-			   rt->dst.expires) {
+-			ip_rt_put(rt);
+-			ret = NULL;
+-		}
+-	}
+-	return ret;
++	if ((dst->obsolete > 0) ||
++	    (rt->rt_flags & RTCF_REDIRECTED) ||
++	    rt->dst.expires)
++		sk_dst_reset(sk);
+ }
+ 
+ /*
+@@ -1056,7 +1050,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ 			      struct sk_buff *skb, u32 mtu,
+ 			      bool confirm_neigh)
+ {
+-	struct rtable *rt = (struct rtable *) dst;
++	struct rtable *rt = dst_rtable(dst);
+ 	struct flowi4 fl4;
+ 
+ 	ip_rt_build_flow_key(&fl4, sk, skb);
+@@ -1127,7 +1121,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 
+ 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
+ 
+-	rt = (struct rtable *)odst;
++	rt = dst_rtable(odst);
+ 	if (odst->obsolete && !odst->ops->check(odst, 0)) {
+ 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ 		if (IS_ERR(rt))
+@@ -1136,7 +1130,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+ 		new = true;
+ 	}
+ 
+-	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
++	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
+ 
+ 	if (!dst_check(&rt->dst, 0)) {
+ 		if (new)
+@@ -1193,7 +1187,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
+ INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
+ 							 u32 cookie)
+ {
+-	struct rtable *rt = (struct rtable *) dst;
++	struct rtable *rt = dst_rtable(dst);
+ 
+ 	/* All IPV4 dsts are created with ->obsolete set to the value
+ 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+@@ -1528,10 +1522,8 @@ void rt_del_uncached_list(struct rtable *rt)
+ 
+ static void ipv4_dst_destroy(struct dst_entry *dst)
+ {
+-	struct rtable *rt = (struct rtable *)dst;
+-
+ 	ip_dst_metrics_put(dst);
+-	rt_del_uncached_list(rt);
++	rt_del_uncached_list(dst_rtable(dst));
+ }
+ 
+ void rt_flush_dev(struct net_device *dev)
+@@ -2832,7 +2824,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
+ 
+ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+ {
+-	struct rtable *ort = (struct rtable *) dst_orig;
++	struct rtable *ort = dst_rtable(dst_orig);
+ 	struct rtable *rt;
+ 
+ 	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
+@@ -2877,9 +2869,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+ 
+ 	if (flp4->flowi4_proto) {
+ 		flp4->flowi4_oif = rt->dst.dev->ifindex;
+-		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+-							flowi4_to_flowi(flp4),
+-							sk, 0);
++		rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
++						  flowi4_to_flowi(flp4),
++						  sk, 0));
+ 	}
+ 
+ 	return rt;
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index e33fbe4933e42..b004280855f87 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -58,7 +58,18 @@ struct dctcp {
+ };
+ 
+ static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
+-module_param(dctcp_shift_g, uint, 0644);
++
++static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp)
++{
++	return param_set_uint_minmax(val, kp, 0, 10);
++}
++
++static const struct kernel_param_ops dctcp_shift_g_ops = {
++	.set = dctcp_shift_g_set,
++	.get = param_get_uint,
++};
++
++module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644);
+ MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
+ 
+ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b5ad0c527c521..72d3bf136810d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1218,7 +1218,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	}
+ 
+ 	if (connected)
+-		rt = (struct rtable *)sk_dst_check(sk, 0);
++		rt = dst_rtable(sk_dst_check(sk, 0));
+ 
+ 	if (!rt) {
+ 		struct net *net = sock_net(sk);
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index c33bca2c38415..1853a8415d9f1 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -69,7 +69,7 @@ static int xfrm4_get_saddr(struct net *net, int oif,
+ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ 			  const struct flowi *fl)
+ {
+-	struct rtable *rt = (struct rtable *)xdst->route;
++	struct rtable *rt = dst_rtable(xdst->route);
+ 	const struct flowi4 *fl4 = &fl->u.ip4;
+ 
+ 	xdst->u.rt.rt_iif = fl4->flowi4_iif;
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 1635da07285f2..d285c1f6f1a61 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -212,7 +212,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
+ 		res = true;
+ 	} else {
+-		struct rt6_info *rt = (struct rt6_info *)dst;
++		struct rt6_info *rt = dst_rt6_info(dst);
+ 		int tmo = net->ipv6.sysctl.icmpv6_time;
+ 		struct inet_peer *peer;
+ 
+@@ -241,7 +241,7 @@ static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
+ 
+ 	dst = ip6_route_output(net, sk, fl6);
+ 	if (!dst->error) {
+-		struct rt6_info *rt = (struct rt6_info *)dst;
++		struct rt6_info *rt = dst_rt6_info(dst);
+ 		struct in6_addr prefsrc;
+ 
+ 		rt6_get_prefsrc(rt, &prefsrc);
+@@ -616,7 +616,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
+ 			    len + sizeof(struct icmp6hdr),
+ 			    sizeof(struct icmp6hdr),
+-			    &ipc6, &fl6, (struct rt6_info *)dst,
++			    &ipc6, &fl6, dst_rt6_info(dst),
+ 			    MSG_DONTWAIT)) {
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ 		ip6_flush_pending_frames(sk);
+@@ -803,7 +803,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
+ 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
+ 			    skb->len + sizeof(struct icmp6hdr),
+ 			    sizeof(struct icmp6hdr), &ipc6, &fl6,
+-			    (struct rt6_info *)dst, MSG_DONTWAIT)) {
++			    dst_rt6_info(dst), MSG_DONTWAIT)) {
+ 		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ 		ip6_flush_pending_frames(sk);
+ 	} else {
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 8c1ce78956bae..0601bad798221 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -38,7 +38,7 @@ static inline struct ila_params *ila_params_lwtunnel(
+ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct dst_entry *orig_dst = skb_dst(skb);
+-	struct rt6_info *rt = (struct rt6_info *)orig_dst;
++	struct rt6_info *rt = dst_rt6_info(orig_dst);
+ 	struct ila_lwt *ilwt = ila_lwt_lwtunnel(orig_dst->lwtstate);
+ 	struct dst_entry *dst;
+ 	int err = -EINVAL;
+@@ -70,7 +70,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		memset(&fl6, 0, sizeof(fl6));
+ 		fl6.flowi6_oif = orig_dst->dev->ifindex;
+ 		fl6.flowi6_iif = LOOPBACK_IFINDEX;
+-		fl6.daddr = *rt6_nexthop((struct rt6_info *)orig_dst,
++		fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
+ 					 &ip6h->daddr);
+ 
+ 		dst = ip6_route_output(net, NULL, &fl6);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 97b0788b31bae..27d8725445e35 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -120,7 +120,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
+ 
+ 	rcu_read_lock();
+-	nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
++	nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
+ 	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
+ 
+ 	if (unlikely(IS_ERR_OR_NULL(neigh))) {
+@@ -599,7 +599,7 @@ int ip6_forward(struct sk_buff *skb)
+ 		 *	send a redirect.
+ 		 */
+ 
+-		rt = (struct rt6_info *) dst;
++		rt = dst_rt6_info(dst);
+ 		if (rt->rt6i_flags & RTF_GATEWAY)
+ 			target = &rt->rt6i_gateway;
+ 		else
+@@ -856,7 +856,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 		 int (*output)(struct net *, struct sock *, struct sk_buff *))
+ {
+ 	struct sk_buff *frag;
+-	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
++	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ 				inet6_sk(skb->sk) : NULL;
+ 	bool mono_delivery_time = skb->mono_delivery_time;
+@@ -1063,7 +1063,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
+ 		return NULL;
+ 	}
+ 
+-	rt = (struct rt6_info *)dst;
++	rt = dst_rt6_info(dst);
+ 	/* Yes, checking route validity in not connected
+ 	 * case is not very simple. Take into account,
+ 	 * that we do not support routing by source, TOS,
+@@ -1118,7 +1118,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 		struct rt6_info *rt;
+ 
+ 		*dst = ip6_route_output(net, sk, fl6);
+-		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
++		rt = (*dst)->error ? NULL : dst_rt6_info(*dst);
+ 
+ 		rcu_read_lock();
+ 		from = rt ? rcu_dereference(rt->from) : NULL;
+@@ -1159,7 +1159,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 	 * dst entry and replace it instead with the
+ 	 * dst entry of the nexthop router
+ 	 */
+-	rt = (struct rt6_info *) *dst;
++	rt = dst_rt6_info(*dst);
+ 	rcu_read_lock();
+ 	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
+ 				      rt6_nexthop(rt, &fl6->daddr));
+@@ -1423,7 +1423,7 @@ static int __ip6_append_data(struct sock *sk,
+ 	int offset = 0;
+ 	bool zc = false;
+ 	u32 tskey = 0;
+-	struct rt6_info *rt = (struct rt6_info *)cork->dst;
++	struct rt6_info *rt = dst_rt6_info(cork->dst);
+ 	bool paged, hold_tskey, extra_uref = false;
+ 	struct ipv6_txoptions *opt = v6_cork->opt;
+ 	int csummode = CHECKSUM_NONE;
+@@ -1877,7 +1877,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ 	struct net *net = sock_net(sk);
+ 	struct ipv6hdr *hdr;
+ 	struct ipv6_txoptions *opt = v6_cork->opt;
+-	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
++	struct rt6_info *rt = dst_rt6_info(cork->base.dst);
+ 	struct flowi6 *fl6 = &cork->fl.u.ip6;
+ 	unsigned char proto = fl6->flowi6_proto;
+ 
+@@ -1949,7 +1949,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ int ip6_send_skb(struct sk_buff *skb)
+ {
+ 	struct net *net = sock_net(skb->sk);
+-	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
++	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 	int err;
+ 
+ 	err = ip6_local_out(net, skb->sk, skb);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index cb0ee81a068a4..dd342e6ecf3f4 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2273,7 +2273,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+ 	int err;
+ 	struct mr_table *mrt;
+ 	struct mfc6_cache *cache;
+-	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
++	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
+ 
+ 	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ 	if (!mrt)
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index ae134634c323c..d914b23256ce6 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1722,7 +1722,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 	if (IS_ERR(dst))
+ 		return;
+ 
+-	rt = (struct rt6_info *) dst;
++	rt = dst_rt6_info(dst);
+ 
+ 	if (rt->rt6i_flags & RTF_GATEWAY) {
+ 		ND_PRINTK(2, warn,
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index ef2059c889554..88b3fcacd4f94 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -154,7 +154,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
+ 	if (IS_ERR(dst))
+ 		return PTR_ERR(dst);
+-	rt = (struct rt6_info *) dst;
++	rt = dst_rt6_info(dst);
+ 
+ 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ 		fl6.flowi6_oif = READ_ONCE(np->mcast_oif);
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 0d896ca7b5891..2eedf255600b9 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -598,7 +598,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+ 	struct ipv6hdr *iph;
+ 	struct sk_buff *skb;
+ 	int err;
+-	struct rt6_info *rt = (struct rt6_info *)*dstp;
++	struct rt6_info *rt = dst_rt6_info(*dstp);
+ 	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+ 	int tlen = rt->dst.dev->needed_tailroom;
+ 
+@@ -917,7 +917,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		ipc6.opt = opt;
+ 		lock_sock(sk);
+ 		err = ip6_append_data(sk, raw6_getfrag, &rfv,
+-			len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
++			len, 0, &ipc6, &fl6, dst_rt6_info(dst),
+ 			msg->msg_flags);
+ 
+ 		if (err)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1f4b935a0e57a..8d5257c3f0842 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -87,7 +87,8 @@ struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
+ static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
+ INDIRECT_CALLABLE_SCOPE
+ unsigned int		ip6_mtu(const struct dst_entry *dst);
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
++static void		ip6_negative_advice(struct sock *sk,
++					    struct dst_entry *dst);
+ static void		ip6_dst_destroy(struct dst_entry *);
+ static void		ip6_dst_ifdown(struct dst_entry *,
+ 				       struct net_device *dev);
+@@ -226,7 +227,7 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
+ 					      struct sk_buff *skb,
+ 					      const void *daddr)
+ {
+-	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
++	const struct rt6_info *rt = dst_rt6_info(dst);
+ 
+ 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
+ 				dst->dev, skb, daddr);
+@@ -234,8 +235,8 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
+ 
+ static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+ {
++	const struct rt6_info *rt = dst_rt6_info(dst);
+ 	struct net_device *dev = dst->dev;
+-	struct rt6_info *rt = (struct rt6_info *)dst;
+ 
+ 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
+ 	if (!daddr)
+@@ -354,7 +355,7 @@ EXPORT_SYMBOL(ip6_dst_alloc);
+ 
+ static void ip6_dst_destroy(struct dst_entry *dst)
+ {
+-	struct rt6_info *rt = (struct rt6_info *)dst;
++	struct rt6_info *rt = dst_rt6_info(dst);
+ 	struct fib6_info *from;
+ 	struct inet6_dev *idev;
+ 
+@@ -373,7 +374,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
+ 
+ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
+ {
+-	struct rt6_info *rt = (struct rt6_info *)dst;
++	struct rt6_info *rt = dst_rt6_info(dst);
+ 	struct inet6_dev *idev = rt->rt6i_idev;
+ 
+ 	if (idev && idev->dev != blackhole_netdev) {
+@@ -1288,7 +1289,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+ 
+ 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
+ 	if (dst->error == 0)
+-		return (struct rt6_info *) dst;
++		return dst_rt6_info(dst);
+ 
+ 	dst_release(dst);
+ 
+@@ -2647,7 +2648,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net,
+ 
+ 	rcu_read_lock();
+ 	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
+-	rt6 = (struct rt6_info *)dst;
++	rt6 = dst_rt6_info(dst);
+ 	/* For dst cached in uncached_list, refcnt is already taken. */
+ 	if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
+ 		dst = &net->ipv6.ip6_null_entry->dst;
+@@ -2661,7 +2662,7 @@ EXPORT_SYMBOL_GPL(ip6_route_output_flags);
+ 
+ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+ {
+-	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
++	struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
+ 	struct net_device *loopback_dev = net->loopback_dev;
+ 	struct dst_entry *new = NULL;
+ 
+@@ -2744,7 +2745,7 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
+ 	struct fib6_info *from;
+ 	struct rt6_info *rt;
+ 
+-	rt = container_of(dst, struct rt6_info, dst);
++	rt = dst_rt6_info(dst);
+ 
+ 	if (rt->sernum)
+ 		return rt6_is_valid(rt) ? dst : NULL;
+@@ -2770,24 +2771,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
+ }
+ EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
+ 
+-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
++static void ip6_negative_advice(struct sock *sk,
++				struct dst_entry *dst)
+ {
+-	struct rt6_info *rt = (struct rt6_info *) dst;
++	struct rt6_info *rt = dst_rt6_info(dst);
+ 
+-	if (rt) {
+-		if (rt->rt6i_flags & RTF_CACHE) {
+-			rcu_read_lock();
+-			if (rt6_check_expired(rt)) {
+-				rt6_remove_exception_rt(rt);
+-				dst = NULL;
+-			}
+-			rcu_read_unlock();
+-		} else {
+-			dst_release(dst);
+-			dst = NULL;
++	if (rt->rt6i_flags & RTF_CACHE) {
++		rcu_read_lock();
++		if (rt6_check_expired(rt)) {
++			/* counteract the dst_release() in sk_dst_reset() */
++			dst_hold(dst);
++			sk_dst_reset(sk);
++
++			rt6_remove_exception_rt(rt);
+ 		}
++		rcu_read_unlock();
++		return;
+ 	}
+-	return dst;
++	sk_dst_reset(sk);
+ }
+ 
+ static void ip6_link_failure(struct sk_buff *skb)
+@@ -2796,7 +2797,7 @@ static void ip6_link_failure(struct sk_buff *skb)
+ 
+ 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ 
+-	rt = (struct rt6_info *) skb_dst(skb);
++	rt = dst_rt6_info(skb_dst(skb));
+ 	if (rt) {
+ 		rcu_read_lock();
+ 		if (rt->rt6i_flags & RTF_CACHE) {
+@@ -2852,7 +2853,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+ 				 bool confirm_neigh)
+ {
+ 	const struct in6_addr *daddr, *saddr;
+-	struct rt6_info *rt6 = (struct rt6_info *)dst;
++	struct rt6_info *rt6 = dst_rt6_info(dst);
+ 
+ 	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
+ 	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
+@@ -4174,7 +4175,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
+ 		}
+ 	}
+ 
+-	rt = (struct rt6_info *) dst;
++	rt = dst_rt6_info(dst);
+ 	if (rt->rt6i_flags & RTF_REJECT) {
+ 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
+ 		return;
+@@ -5608,7 +5609,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 			 int iif, int type, u32 portid, u32 seq,
+ 			 unsigned int flags)
+ {
+-	struct rt6_info *rt6 = (struct rt6_info *)dst;
++	struct rt6_info *rt6 = dst_rt6_info(dst);
+ 	struct rt6key *rt6_dst, *rt6_src;
+ 	u32 *pmetrics, table, rt6_flags;
+ 	unsigned char nh_flags = 0;
+@@ -6111,7 +6112,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 	}
+ 
+ 
+-	rt = container_of(dst, struct rt6_info, dst);
++	rt = dst_rt6_info(dst);
+ 	if (rt->dst.error) {
+ 		err = rt->dst.error;
+ 		ip6_rt_put(rt);
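
The ip6_negative_advice() rework above also changes the ->negative_advice() contract: the op now receives the socket and calls sk_dst_reset() itself instead of returning a (possibly NULL) dst for the caller to store back. Since sk_dst_reset() performs a dst_release(), the expired-exception path takes a dst_hold() first — as the added comment says — so the route stays live while rt6_remove_exception_rt() runs. A rough userspace model of the hold-before-reset ordering (reference ownership in the real code is more involved; this only shows why the hold must precede the reset):

    #include <assert.h>

    struct dst { int refcnt; };

    static void dst_hold(struct dst *d)    { d->refcnt++; }
    static void dst_release(struct dst *d) { assert(--d->refcnt >= 0); }

    struct sock { struct dst *sk_dst_cache; };

    static void sk_dst_reset(struct sock *sk)
    {
            struct dst *old = sk->sk_dst_cache;

            sk->sk_dst_cache = NULL;
            if (old)
                    dst_release(old);   /* drops the socket's reference */
    }

    static void negative_advice(struct sock *sk, struct dst *dst)
    {
            dst_hold(dst);              /* counteract the release below */
            sk_dst_reset(sk);
            /* ... dst is still safe to use here (exception removal) ... */
            dst_release(dst);           /* model only: drop the temporary hold */
    }

    int main(void)
    {
            struct dst d = { .refcnt = 1 };          /* socket's reference */
            struct sock sk = { .sk_dst_cache = &d };

            negative_advice(&sk, &d);
            assert(d.refcnt == 0);
            return 0;
    }
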
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 861e0366f549d..bbf5b84a70fca 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -356,6 +356,7 @@ static int seg6_hmac_init_algo(void)
+ 	struct crypto_shash *tfm;
+ 	struct shash_desc *shash;
+ 	int i, alg_count, cpu;
++	int ret = -ENOMEM;
+ 
+ 	alg_count = ARRAY_SIZE(hmac_algos);
+ 
+@@ -366,12 +367,14 @@ static int seg6_hmac_init_algo(void)
+ 		algo = &hmac_algos[i];
+ 		algo->tfms = alloc_percpu(struct crypto_shash *);
+ 		if (!algo->tfms)
+-			return -ENOMEM;
++			goto error_out;
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			tfm = crypto_alloc_shash(algo->name, 0, 0);
+-			if (IS_ERR(tfm))
+-				return PTR_ERR(tfm);
++			if (IS_ERR(tfm)) {
++				ret = PTR_ERR(tfm);
++				goto error_out;
++			}
+ 			p_tfm = per_cpu_ptr(algo->tfms, cpu);
+ 			*p_tfm = tfm;
+ 		}
+@@ -383,18 +386,22 @@ static int seg6_hmac_init_algo(void)
+ 
+ 		algo->shashs = alloc_percpu(struct shash_desc *);
+ 		if (!algo->shashs)
+-			return -ENOMEM;
++			goto error_out;
+ 
+ 		for_each_possible_cpu(cpu) {
+ 			shash = kzalloc_node(shsize, GFP_KERNEL,
+ 					     cpu_to_node(cpu));
+ 			if (!shash)
+-				return -ENOMEM;
++				goto error_out;
+ 			*per_cpu_ptr(algo->shashs, cpu) = shash;
+ 		}
+ 	}
+ 
+ 	return 0;
++
++error_out:
++	seg6_hmac_exit();
++	return ret;
+ }
+ 
+ int __init seg6_hmac_init(void)
+@@ -412,22 +419,29 @@ int __net_init seg6_hmac_net_init(struct net *net)
+ void seg6_hmac_exit(void)
+ {
+ 	struct seg6_hmac_algo *algo = NULL;
++	struct crypto_shash *tfm;
++	struct shash_desc *shash;
+ 	int i, alg_count, cpu;
+ 
+ 	alg_count = ARRAY_SIZE(hmac_algos);
+ 	for (i = 0; i < alg_count; i++) {
+ 		algo = &hmac_algos[i];
+-		for_each_possible_cpu(cpu) {
+-			struct crypto_shash *tfm;
+-			struct shash_desc *shash;
+ 
+-			shash = *per_cpu_ptr(algo->shashs, cpu);
+-			kfree(shash);
+-			tfm = *per_cpu_ptr(algo->tfms, cpu);
+-			crypto_free_shash(tfm);
++		if (algo->shashs) {
++			for_each_possible_cpu(cpu) {
++				shash = *per_cpu_ptr(algo->shashs, cpu);
++				kfree(shash);
++			}
++			free_percpu(algo->shashs);
++		}
++
++		if (algo->tfms) {
++			for_each_possible_cpu(cpu) {
++				tfm = *per_cpu_ptr(algo->tfms, cpu);
++				crypto_free_shash(tfm);
++			}
++			free_percpu(algo->tfms);
+ 		}
+-		free_percpu(algo->tfms);
+-		free_percpu(algo->shashs);
+ 	}
+ }
+ EXPORT_SYMBOL(seg6_hmac_exit);
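
The seg6_hmac_init_algo() change above converts the mid-loop return -ENOMEM / return PTR_ERR() exits into a single error_out label that calls seg6_hmac_exit(), and the exit path gains NULL checks so it can safely tear down partially built state. The shape of the fix, modeled in plain C with malloc/free standing in for the per-CPU and crypto allocators (only one of the two per-CPU arrays shown):

    #include <stdlib.h>

    #define NALGOS 2
    #define NCPUS  4

    struct algo { void **tfms; };       /* stands in for per-CPU handles */

    static struct algo algos[NALGOS];

    static void algos_exit(void)
    {
            for (int i = 0; i < NALGOS; i++) {
                    struct algo *a = &algos[i];

                    if (!a->tfms)       /* tolerate never-allocated arrays */
                            continue;
                    for (int cpu = 0; cpu < NCPUS; cpu++)
                            free(a->tfms[cpu]);
                    free(a->tfms);
                    a->tfms = NULL;
            }
    }

    static int algos_init(void)
    {
            for (int i = 0; i < NALGOS; i++) {
                    struct algo *a = &algos[i];

                    a->tfms = calloc(NCPUS, sizeof(*a->tfms));
                    if (!a->tfms)
                            goto error_out;
                    for (int cpu = 0; cpu < NCPUS; cpu++) {
                            a->tfms[cpu] = malloc(64);
                            if (!a->tfms[cpu])
                                    goto error_out;   /* single unwind point */
                    }
            }
            return 0;

    error_out:
            algos_exit();               /* frees whatever was built so far */
            return -1;
    }

    int main(void)
    {
            if (algos_init() == 0)
                    algos_exit();
            return 0;
    }
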
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 03b877ff45588..a75df2ec8db0d 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -459,10 +459,8 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	int err;
+ 
+ 	err = seg6_do_srh(skb);
+-	if (unlikely(err)) {
+-		kfree_skb(skb);
+-		return err;
+-	}
++	if (unlikely(err))
++		goto drop;
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+@@ -486,7 +484,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
+-		return err;
++		goto drop;
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+@@ -494,6 +492,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 			       skb_dst(skb)->dev, seg6_input_finish);
+ 
+ 	return seg6_input_finish(dev_net(skb->dev), NULL, skb);
++drop:
++	kfree_skb(skb);
++	return err;
+ }
+ 
+ static int seg6_input_nf(struct sk_buff *skb)
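
In seg6_input_core() above, the skb_cow_head() failure path returned without freeing the skb while the seg6_do_srh() path freed it, so one error leaked the packet; both now funnel through a single drop: label. The invariant being restored: once a function owns the buffer, every exit either hands it on or frees it. In miniature:

    #include <stdlib.h>

    struct buf { char *data; };

    /* owns 'b': every return path must either pass it on or free it */
    static int input_core(struct buf *b, int err1, int err2)
    {
            int err;

            err = err1;
            if (err)
                    goto drop;

            err = err2;         /* a bare 'return err;' here would leak b */
            if (err)
                    goto drop;

            free(b->data);      /* "consumed": handed onward in the real code */
            free(b);
            return 0;
    drop:
            free(b->data);
            free(b);
            return err;
    }

    int main(void)
    {
            struct buf *b = calloc(1, sizeof(*b));

            b->data = malloc(16);
            input_core(b, 0, -1);       /* fails in step 2; still no leak */
            return 0;
    }
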
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 3f4cba49e9ee6..5873b3c3562ed 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -95,11 +95,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ 	struct dst_entry *dst = skb_dst(skb);
+ 
+ 	if (dst && dst_hold_safe(dst)) {
+-		const struct rt6_info *rt = (const struct rt6_info *)dst;
+-
+ 		rcu_assign_pointer(sk->sk_rx_dst, dst);
+ 		sk->sk_rx_dst_ifindex = skb->skb_iif;
+-		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
++		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
+ 	}
+ }
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e0dd5bc2b30eb..acafa0cdf74a8 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -910,11 +910,8 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
+ 
+ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+-	if (udp_sk_rx_dst_set(sk, dst)) {
+-		const struct rt6_info *rt = (const struct rt6_info *)dst;
+-
+-		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
+-	}
++	if (udp_sk_rx_dst_set(sk, dst))
++		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
+ }
+ 
+ /* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+@@ -1585,7 +1582,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
+ 				   sizeof(struct udphdr), &ipc6,
+-				   (struct rt6_info *)dst,
++				   dst_rt6_info(dst),
+ 				   msg->msg_flags, &cork);
+ 		err = PTR_ERR(skb);
+ 		if (!IS_ERR_OR_NULL(skb))
+@@ -1612,7 +1609,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
+ 	up->len += ulen;
+ 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
+-			      &ipc6, fl6, (struct rt6_info *)dst,
++			      &ipc6, fl6, dst_rt6_info(dst),
+ 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
+ 	if (err)
+ 		udp_v6_flush_pending_frames(sk);
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 42fb6996b0777..ce48173c60e56 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -70,7 +70,7 @@ static int xfrm6_get_saddr(struct net *net, int oif,
+ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ 			  const struct flowi *fl)
+ {
+-	struct rt6_info *rt = (struct rt6_info *)xdst->route;
++	struct rt6_info *rt = dst_rt6_info(xdst->route);
+ 
+ 	xdst->u.dst.dev = dev;
+ 	netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 970af3983d116..19c8cc5289d59 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -459,7 +459,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	fl4 = &inet->cork.fl.u.ip4;
+ 	if (connected)
+-		rt = (struct rtable *)__sk_dst_check(sk, 0);
++		rt = dst_rtable(__sk_dst_check(sk, 0));
+ 
+ 	rcu_read_lock();
+ 	if (!rt) {
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 7bf14cf9ffaa9..8780ec64f3769 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -630,7 +630,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ 			      ulen, transhdrlen, &ipc6,
+-			      &fl6, (struct rt6_info *)dst,
++			      &fl6, dst_rt6_info(dst),
+ 			      msg->msg_flags);
+ 	if (err)
+ 		ip6_flush_pending_frames(sk);
+diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
+index 8fc790f2a01bb..4385fd3b13be3 100644
+--- a/net/mpls/mpls_iptunnel.c
++++ b/net/mpls/mpls_iptunnel.c
+@@ -81,7 +81,7 @@ static int mpls_xmit(struct sk_buff *skb)
+ 			ttl = net->mpls.default_ttl;
+ 		else
+ 			ttl = ip_hdr(skb)->ttl;
+-		rt = (struct rtable *)dst;
++		rt = dst_rtable(dst);
+ 	} else if (dst->ops->family == AF_INET6) {
+ 		if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
+ 			ttl = tun_encap_info->default_ttl;
+@@ -90,7 +90,7 @@ static int mpls_xmit(struct sk_buff *skb)
+ 			ttl = net->mpls.default_ttl;
+ 		else
+ 			ttl = ipv6_hdr(skb)->hop_limit;
+-		rt6 = (struct rt6_info *)dst;
++		rt6 = dst_rt6_info(dst);
+ 	} else {
+ 		goto drop;
+ 	}
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 6c3f28bc59b32..54e2a1dd7f5f5 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -549,6 +549,9 @@ list_set_cancel_gc(struct ip_set *set)
+ 
+ 	if (SET_WITH_TIMEOUT(set))
+ 		timer_shutdown_sync(&map->gc);
++
++	/* Flush list to drop references to other ipsets */
++	list_set_flush(set);
+ }
+ 
+ static const struct ip_set_type_variant set_variant = {
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 65e0259178da4..e1f17392f58c1 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -180,7 +180,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
+ 			(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
+ 			(addr_type & IPV6_ADDR_LOOPBACK);
+ 		old_rt_is_local = __ip_vs_is_local_route6(
+-			(struct rt6_info *)skb_dst(skb));
++			dst_rt6_info(skb_dst(skb)));
+ 	} else
+ #endif
+ 	{
+@@ -318,7 +318,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ 	if (dest) {
+ 		dest_dst = __ip_vs_dst_check(dest);
+ 		if (likely(dest_dst))
+-			rt = (struct rtable *) dest_dst->dst_cache;
++			rt = dst_rtable(dest_dst->dst_cache);
+ 		else {
+ 			dest_dst = ip_vs_dest_dst_alloc();
+ 			spin_lock_bh(&dest->dst_lock);
+@@ -481,7 +481,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ 	if (dest) {
+ 		dest_dst = __ip_vs_dst_check(dest);
+ 		if (likely(dest_dst))
+-			rt = (struct rt6_info *) dest_dst->dst_cache;
++			rt = dst_rt6_info(dest_dst->dst_cache);
+ 		else {
+ 			u32 cookie;
+ 
+@@ -501,7 +501,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ 				ip_vs_dest_dst_free(dest_dst);
+ 				goto err_unreach;
+ 			}
+-			rt = (struct rt6_info *) dst;
++			rt = dst_rt6_info(dst);
+ 			cookie = rt6_get_cookie(rt);
+ 			__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
+ 			spin_unlock_bh(&dest->dst_lock);
+@@ -517,7 +517,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ 					      rt_mode);
+ 		if (!dst)
+ 			goto err_unreach;
+-		rt = (struct rt6_info *) dst;
++		rt = dst_rt6_info(dst);
+ 	}
+ 
+ 	local = __ip_vs_is_local_route6(rt);
+@@ -862,7 +862,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 				      IP_VS_RT_MODE_RDR);
+ 	if (local < 0)
+ 		goto tx_error;
+-	rt = (struct rt6_info *) skb_dst(skb);
++	rt = dst_rt6_info(skb_dst(skb));
+ 	/*
+ 	 * Avoid duplicate tuple in reply direction for NAT traffic
+ 	 * to local address when connection is sync-ed
+@@ -1288,7 +1288,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 	if (local)
+ 		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
+ 
+-	rt = (struct rt6_info *) skb_dst(skb);
++	rt = dst_rt6_info(skb_dst(skb));
+ 	tdev = rt->dst.dev;
+ 
+ 	/*
+@@ -1590,7 +1590,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ 				      &cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
+ 	if (local < 0)
+ 		goto tx_error;
+-	rt = (struct rt6_info *) skb_dst(skb);
++	rt = dst_rt6_info(skb_dst(skb));
+ 	/*
+ 	 * Avoid duplicate tuple in reply direction for NAT traffic
+ 	 * to local address when connection is sync-ed
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index a0571339239c4..5c1ff07eaee0b 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -77,12 +77,8 @@ EXPORT_SYMBOL_GPL(flow_offload_alloc);
+ 
+ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+ {
+-	const struct rt6_info *rt;
+-
+-	if (flow_tuple->l3proto == NFPROTO_IPV6) {
+-		rt = (const struct rt6_info *)flow_tuple->dst_cache;
+-		return rt6_get_cookie(rt);
+-	}
++	if (flow_tuple->l3proto == NFPROTO_IPV6)
++		return rt6_get_cookie(dst_rt6_info(flow_tuple->dst_cache));
+ 
+ 	return 0;
+ }
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 5383bed3d3e00..c2c005234dcd3 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -434,7 +434,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 		return NF_ACCEPT;
+ 
+ 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+-		rt = (struct rtable *)tuplehash->tuple.dst_cache;
++		rt = dst_rtable(tuplehash->tuple.dst_cache);
+ 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+ 		IPCB(skb)->iif = skb->dev->ifindex;
+ 		IPCB(skb)->flags = IPSKB_FORWARDED;
+@@ -446,7 +446,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 
+ 	switch (tuplehash->tuple.xmit_type) {
+ 	case FLOW_OFFLOAD_XMIT_NEIGH:
+-		rt = (struct rtable *)tuplehash->tuple.dst_cache;
++		rt = dst_rtable(tuplehash->tuple.dst_cache);
+ 		outdev = rt->dst.dev;
+ 		skb->dev = outdev;
+ 		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+@@ -729,7 +729,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ 		return NF_ACCEPT;
+ 
+ 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+-		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
++		rt = dst_rt6_info(tuplehash->tuple.dst_cache);
+ 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ 		IP6CB(skb)->iif = skb->dev->ifindex;
+ 		IP6CB(skb)->flags = IP6SKB_FORWARDED;
+@@ -741,7 +741,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ 
+ 	switch (tuplehash->tuple.xmit_type) {
+ 	case FLOW_OFFLOAD_XMIT_NEIGH:
+-		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
++		rt = dst_rt6_info(tuplehash->tuple.dst_cache);
+ 		outdev = rt->dst.dev;
+ 		skb->dev = outdev;
+ 		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 00f4bd21c59b4..f1c31757e4969 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
+ 	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
+ 						   rcu);
+ 
++	rcu_read_lock();
+ 	nfqnl_flush(inst, NULL, 0);
++	rcu_read_unlock();
+ 	kfree(inst);
+ 	module_put(THIS_MODULE);
+ }
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 37cfe6dd712d8..b58f62195ff3e 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 	switch (priv->result) {
+ 	case NFT_FIB_RESULT_OIF:
+ 	case NFT_FIB_RESULT_OIFNAME:
+-		hooks = (1 << NF_INET_PRE_ROUTING);
+-		if (priv->flags & NFTA_FIB_F_IIF) {
+-			hooks |= (1 << NF_INET_LOCAL_IN) |
+-				 (1 << NF_INET_FORWARD);
+-		}
++		hooks = (1 << NF_INET_PRE_ROUTING) |
++			(1 << NF_INET_LOCAL_IN) |
++			(1 << NF_INET_FORWARD);
+ 		break;
+ 	case NFT_FIB_RESULT_ADDRTYPE:
+ 		if (priv->flags & NFTA_FIB_F_IIF)
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 0a689c8e0295d..0c43d748e23ae 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+ 	int mac_off = skb_mac_header(skb) - skb->data;
+ 	u8 *vlanh, *dst_u8 = (u8 *) d;
+ 	struct vlan_ethhdr veth;
+-	u8 vlan_hlen = 0;
+-
+-	if ((skb->protocol == htons(ETH_P_8021AD) ||
+-	     skb->protocol == htons(ETH_P_8021Q)) &&
+-	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+-		vlan_hlen += VLAN_HLEN;
+ 
+ 	vlanh = (u8 *) &veth;
+-	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
++	if (offset < VLAN_ETH_HLEN) {
+ 		u8 ethlen = len;
+ 
+-		if (vlan_hlen &&
+-		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+-			return false;
+-		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
++		if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ 			return false;
+ 
+-		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+-			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
++		if (offset + len > VLAN_ETH_HLEN)
++			ethlen -= offset + len - VLAN_ETH_HLEN;
+ 
+-		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
++		memcpy(dst_u8, vlanh + offset, ethlen);
+ 
+ 		len -= ethlen;
+ 		if (len == 0)
+ 			return true;
+ 
+ 		dst_u8 += ethlen;
+-		offset = ETH_HLEN + vlan_hlen;
++		offset = ETH_HLEN;
+ 	} else {
+-		offset -= VLAN_HLEN + vlan_hlen;
++		offset -= VLAN_HLEN;
+ 	}
+ 
+ 	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ 	return pkt->inneroff;
+ }
+ 
+-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
+ {
+-	unsigned int len = priv->offset + priv->len;
++	unsigned int boundary = offset + len;
+ 
+ 	/* data past ether src/dst requested, copy needed */
+-	if (len > offsetof(struct ethhdr, h_proto))
++	if (boundary > offsetof(struct ethhdr, h_proto))
+ 		return true;
+ 
+ 	return false;
+@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ 			goto err;
+ 
+ 		if (skb_vlan_tag_present(skb) &&
+-		    nft_payload_need_vlan_copy(priv)) {
++		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ 			if (!nft_payload_copy_vlan(dest, skb,
+ 						   priv->offset, priv->len))
+ 				goto err;
+@@ -810,21 +801,79 @@ struct nft_payload_set {
+ 	u8			csum_flags;
+ };
+ 
++/* This is not struct vlan_hdr. */
++struct nft_payload_vlan_hdr {
++	__be16			h_vlan_proto;
++	__be16			h_vlan_TCI;
++};
++
++static bool
++nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
++		     int *vlan_hlen)
++{
++	struct nft_payload_vlan_hdr *vlanh;
++	__be16 vlan_proto;
++	u16 vlan_tci;
++
++	if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
++		*vlan_hlen = VLAN_HLEN;
++		return true;
++	}
++
++	switch (offset) {
++	case offsetof(struct vlan_ethhdr, h_vlan_proto):
++		if (len == 2) {
++			vlan_proto = nft_reg_load_be16(src);
++			skb->vlan_proto = vlan_proto;
++		} else if (len == 4) {
++			vlanh = (struct nft_payload_vlan_hdr *)src;
++			__vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
++					       ntohs(vlanh->h_vlan_TCI));
++		} else {
++			return false;
++		}
++		break;
++	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
++		if (len != 2)
++			return false;
++
++		vlan_tci = ntohs(nft_reg_load_be16(src));
++		skb->vlan_tci = vlan_tci;
++		break;
++	default:
++		return false;
++	}
++
++	return true;
++}
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+ 				 struct nft_regs *regs,
+ 				 const struct nft_pktinfo *pkt)
+ {
+ 	const struct nft_payload_set *priv = nft_expr_priv(expr);
+-	struct sk_buff *skb = pkt->skb;
+ 	const u32 *src = &regs->data[priv->sreg];
+-	int offset, csum_offset;
++	int offset, csum_offset, vlan_hlen = 0;
++	struct sk_buff *skb = pkt->skb;
+ 	__wsum fsum, tsum;
+ 
+ 	switch (priv->base) {
+ 	case NFT_PAYLOAD_LL_HEADER:
+ 		if (!skb_mac_header_was_set(skb))
+ 			goto err;
+-		offset = skb_mac_header(skb) - skb->data;
++
++		if (skb_vlan_tag_present(skb) &&
++		    nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
++			if (!nft_payload_set_vlan(src, skb,
++						  priv->offset, priv->len,
++						  &vlan_hlen))
++				goto err;
++
++			if (!vlan_hlen)
++				return;
++		}
++
++		offset = skb_mac_header(skb) - skb->data - vlan_hlen;
+ 		break;
+ 	case NFT_PAYLOAD_NETWORK_HEADER:
+ 		offset = skb_network_offset(skb);
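
The nft_payload_copy_vlan() rewrite above drops the extra vlan_hlen special case for frames whose skb->protocol is itself 802.1Q/802.1AD and keys purely off the out-of-band tag: when skb_vlan_tag_present() is true, the view nftables exposes contains exactly one reinserted tag, so offsets below VLAN_ETH_HLEN are served from a rebuilt header copy and everything past it shifts back by one VLAN_HLEN. The offset mapping, as a sketch (constants as in if_vlan.h):

    #include <stdio.h>

    #define ETH_HLEN      14
    #define VLAN_HLEN      4
    #define VLAN_ETH_HLEN (ETH_HLEN + VLAN_HLEN)    /* 18 */

    /* map an offset in the tag-reinserted view onto the tagless skb data */
    static int map_offset(unsigned int offset)
    {
            if (offset < VLAN_ETH_HLEN)
                    return -1;          /* served from the rebuilt header */
            return (int)(offset - VLAN_HLEN);
    }

    int main(void)
    {
            printf("%d\n", map_offset(12)); /* -1: inside the rebuilt header */
            printf("%d\n", map_offset(20)); /* 16: 2 bytes past the eth header */
            return 0;
    }
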
+diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
+index 24d9771385729..14d88394bcb7f 100644
+--- a/net/netfilter/nft_rt.c
++++ b/net/netfilter/nft_rt.c
+@@ -73,14 +73,14 @@ void nft_rt_get_eval(const struct nft_expr *expr,
+ 		if (nft_pf(pkt) != NFPROTO_IPV4)
+ 			goto err;
+ 
+-		*dest = (__force u32)rt_nexthop((const struct rtable *)dst,
++		*dest = (__force u32)rt_nexthop(dst_rtable(dst),
+ 						ip_hdr(skb)->daddr);
+ 		break;
+ 	case NFT_RT_NEXTHOP6:
+ 		if (nft_pf(pkt) != NFPROTO_IPV6)
+ 			goto err;
+ 
+-		memcpy(dest, rt6_nexthop((struct rt6_info *)dst,
++		memcpy(dest, rt6_nexthop(dst_rt6_info(dst),
+ 					 &ipv6_hdr(skb)->daddr),
+ 		       sizeof(struct in6_addr));
+ 		break;
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index b133dc55304ce..f456a5911e7d1 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1463,6 +1463,19 @@ int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+ 				 ndev->ops->n_core_ops);
+ }
+ 
++static bool nci_valid_size(struct sk_buff *skb)
++{
++	BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
++	unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
++
++	if (skb->len < hdr_size ||
++	    !nci_plen(skb->data) ||
++	    skb->len < hdr_size + nci_plen(skb->data)) {
++		return false;
++	}
++	return true;
++}
++
+ /* ---- NCI TX Data worker thread ---- */
+ 
+ static void nci_tx_work(struct work_struct *work)
+@@ -1516,10 +1529,9 @@ static void nci_rx_work(struct work_struct *work)
+ 		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
+ 				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
+ 
+-		if (!nci_plen(skb->data)) {
++		if (!nci_valid_size(skb)) {
+ 			kfree_skb(skb);
+-			kcov_remote_stop();
+-			break;
++			continue;
+ 		}
+ 
+ 		/* Process frame */
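
nci_valid_size() above rejects frames that are shorter than the (common) control/data header, declare a zero payload length, or declare more payload than the buffer actually holds — previously only the zero-length check existed — and the RX worker now skips to the next queued frame instead of aborting the loop. The validation pattern in isolation (header size and length accessor are stand-ins for the NCI ones):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define HDR_SIZE 3          /* stand-in for NCI_CTRL_HDR_SIZE */

    /* byte 2 of the header declares the payload length (like nci_plen()) */
    static uint8_t plen(const uint8_t *data) { return data[2]; }

    static bool frame_valid(const uint8_t *data, size_t len)
    {
            return len >= HDR_SIZE &&
                   plen(data) != 0 &&
                   len >= (size_t)HDR_SIZE + plen(data);
    }

    int main(void)
    {
            uint8_t short_frame[2] = { 0 };
            uint8_t lying_frame[4] = { 0, 0, 200, 0 }; /* claims 200 bytes */
            uint8_t ok_frame[4]    = { 0, 0, 1, 0x42 };

            assert(!frame_valid(short_frame, sizeof(short_frame)));
            assert(!frame_valid(lying_frame, sizeof(lying_frame)));
            assert(frame_valid(ok_frame, sizeof(ok_frame)));
            return 0;
    }
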
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 6fcd7e2ca81fe..9642255808247 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -936,6 +936,12 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ 				pskb_trim(skb, ovs_mac_header_len(key));
+ 		}
+ 
++		/* Need to set the pkt_type to involve the routing layer.  The
++		 * packet movement through the OVS datapath doesn't generally
++		 * use routing, but this is needed for tunnel cases.
++		 */
++		skb->pkt_type = PACKET_OUTGOING;
++
+ 		if (likely(!mru ||
+ 		           (skb->len <= mru + vport->dev->hard_header_len))) {
+ 			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index a0d54b422186f..5c3f8a278fc2f 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1151,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ 		list_for_each_entry(entry, &new->entries, list)
+ 			cycle = ktime_add_ns(cycle, entry->interval);
+ 
+-		if (!cycle) {
+-			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
+-			return -EINVAL;
+-		}
+-
+ 		if (cycle < 0 || cycle > INT_MAX) {
+ 			NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
+ 			return -EINVAL;
+@@ -1164,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ 		new->cycle_time = cycle;
+ 	}
+ 
++	if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
++		NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
++		return -EINVAL;
++	}
++
+ 	taprio_calculate_gate_durations(q, new);
+ 
+ 	return 0;
+@@ -1851,6 +1851,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ 	}
+ 	q->flags = taprio_flags;
+ 
++	/* Needed for length_to_duration() during netlink attribute parsing */
++	taprio_set_picos_per_byte(dev, q);
++
+ 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
+ 	if (err < 0)
+ 		return err;
+@@ -1910,7 +1913,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ 	if (err < 0)
+ 		goto free_sched;
+ 
+-	taprio_set_picos_per_byte(dev, q);
+ 	taprio_update_queue_max_sdu(q, new_admin, stab);
+ 
+ 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
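
The taprio hunks above replace the cycle_time != 0 check with a real lower bound — each gate entry must at least cover the wire time of a minimum-size Ethernet frame (ETH_ZLEN, 60 bytes) — and move taprio_set_picos_per_byte() ahead of attribute parsing so length_to_duration() has a valid rate to work with. The arithmetic, approximately (helper names mirror the patch; 1 Gb/s assumed for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ZLEN 60         /* minimum Ethernet frame length, bytes */

    /* picoseconds per byte: 8 bits * 1e12 / link speed in bit/s */
    static uint64_t picos_per_byte(uint64_t speed_bps)
    {
            return 8 * 1000000000000ULL / speed_bps;
    }

    /* wire time of len bytes in nanoseconds (like length_to_duration()) */
    static uint64_t length_to_duration(uint64_t ppb, unsigned int len)
    {
            return len * ppb / 1000;
    }

    int main(void)
    {
            uint64_t ppb = picos_per_byte(1000000000ULL); /* 8000 ps/byte */
            unsigned int num_entries = 4;
            uint64_t min_cycle = num_entries * length_to_duration(ppb, ETH_ZLEN);

            printf("minimum cycle_time: %llu ns\n",
                   (unsigned long long)min_cycle);        /* 4 * 480 = 1920 */
            return 0;
    }
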
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 24368f755ab19..f7b809c0d142c 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -415,7 +415,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ 	if (!IS_ERR_OR_NULL(dst)) {
+ 		struct rt6_info *rt;
+ 
+-		rt = (struct rt6_info *)dst;
++		rt = dst_rt6_info(dst);
+ 		t->dst_cookie = rt6_get_cookie(rt);
+ 		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+ 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index e849f368ed913..5a7436a13b741 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -552,7 +552,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
+ 			      struct flowi *fl)
+ {
+ 	union sctp_addr *saddr = &t->saddr;
+-	struct rtable *rt = (struct rtable *)t->dst;
++	struct rtable *rt = dst_rtable(t->dst);
+ 
+ 	if (rt) {
+ 		saddr->v4.sin_family = AF_INET;
+@@ -1085,7 +1085,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
+ 	skb_reset_inner_mac_header(skb);
+ 	skb_reset_inner_transport_header(skb);
+ 	skb_set_inner_ipproto(skb, IPPROTO_SCTP);
+-	udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
++	udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
+ 			    fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
+ 			    sctp_sk(sk)->udp_port, t->encap_port, false, false);
+ 	return 0;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 96ab50eda9c2e..73a90ad873fb9 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1069,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ 		goto out_denied_free;
+ 
+ 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+-	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
++	in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
+ 	if (!in_token->pages)
+ 		goto out_denied_free;
+ 	in_token->page_base = 0;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 28f3749f6dc6c..59b2fbd88e5eb 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1071,6 +1071,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
+ 		.authflavor	= old->cl_auth->au_flavor,
+ 		.cred		= old->cl_cred,
+ 		.stats		= old->cl_stats,
++		.timeout	= old->cl_timeout,
+ 	};
+ 	struct rpc_clnt *clnt;
+ 	int err;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 4f8d7efa469f0..432557a553e7e 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -244,7 +244,11 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ 		pr_info("rpcrdma: removing device %s for %pISpc\n",
+ 			ep->re_id->device->name, sap);
+-		fallthrough;
++		switch (xchg(&ep->re_connect_status, -ENODEV)) {
++		case 0: goto wake_connect_worker;
++		case 1: goto disconnected;
++		}
++		return 0;
+ 	case RDMA_CM_EVENT_ADDR_CHANGE:
+ 		ep->re_connect_status = -ENODEV;
+ 		goto disconnected;
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index f892b0903dbaf..b849a3d133a01 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -174,7 +174,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+ 	local_bh_disable();
+ 	ndst = dst_cache_get(cache);
+ 	if (dst->proto == htons(ETH_P_IP)) {
+-		struct rtable *rt = (struct rtable *)ndst;
++		struct rtable *rt = dst_rtable(ndst);
+ 
+ 		if (!rt) {
+ 			struct flowi4 fl = {
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index b4674f03d71a9..90b7f253d3632 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -816,9 +816,17 @@ struct tls_context *tls_ctx_create(struct sock *sk)
+ 		return NULL;
+ 
+ 	mutex_init(&ctx->tx_lock);
+-	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ 	ctx->sk = sk;
++	/* Release semantic of rcu_assign_pointer() ensures that
++	 * ctx->sk_proto is visible before changing sk->sk_prot in
++	 * update_sk_prot(), and prevents reading uninitialized value in
++	 * tls_{getsockopt, setsockopt}. Note that we do not need a
++	 * read barrier in tls_{getsockopt,setsockopt} as there is an
++	 * address dependency between sk->sk_proto->{getsockopt,setsockopt}
++	 * and ctx->sk_proto.
++	 */
++	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	return ctx;
+ }
+ 
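
tls_ctx_create() above now fills in ctx->sk_proto before the rcu_assign_pointer() that publishes ctx, relying on the store-release in rcu_assign_pointer() plus the reader's address dependency, exactly as the added comment spells out. The same publish/consume idiom in portable C11 atomics (a model of the ordering only, not the kernel primitives):

    #include <stdatomic.h>
    #include <stddef.h>

    struct ctx { void *sk_proto; };

    static _Atomic(struct ctx *) published;

    void publish(struct ctx *c, void *proto)
    {
            c->sk_proto = proto;        /* 1: initialize everything first */
            atomic_store_explicit(&published, c,
                                  memory_order_release);  /* 2: publish */
    }

    void *lookup(void)
    {
            /* the address dependency from p to p->sk_proto means no
             * explicit read barrier is needed (memory_order_consume) */
            struct ctx *p = atomic_load_explicit(&published,
                                                 memory_order_consume);

            return p ? p->sk_proto : NULL;
    }
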
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e94839d89b09d..439c531744a27 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -731,7 +731,7 @@ static int unix_listen(struct socket *sock, int backlog)
+ 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
+ 		goto out;	/* Only stream/seqpacket sockets accept */
+ 	err = -EINVAL;
+-	if (!u->addr)
++	if (!READ_ONCE(u->addr))
+ 		goto out;	/* No listens on an unbound socket */
+ 	unix_state_lock(sk);
+ 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
+@@ -1131,8 +1131,8 @@ static struct sock *unix_find_other(struct net *net,
+ 
+ static int unix_autobind(struct sock *sk)
+ {
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct unix_address *addr;
+ 	u32 lastnum, ordernum;
+@@ -1155,6 +1155,7 @@ static int unix_autobind(struct sock *sk)
+ 	addr->name->sun_family = AF_UNIX;
+ 	refcount_set(&addr->refcnt, 1);
+ 
++	old_hash = sk->sk_hash;
+ 	ordernum = get_random_u32();
+ 	lastnum = ordernum & 0xFFFFF;
+ retry:
+@@ -1195,8 +1196,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ {
+ 	umode_t mode = S_IFSOCK |
+ 	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct mnt_idmap *idmap;
+ 	struct unix_address *addr;
+@@ -1234,6 +1235,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ 	if (u->addr)
+ 		goto out_unlock;
+ 
++	old_hash = sk->sk_hash;
+ 	new_hash = unix_bsd_hash(d_backing_inode(dentry));
+ 	unix_table_double_lock(net, old_hash, new_hash);
+ 	u->path.mnt = mntget(parent.mnt);
+@@ -1261,8 +1263,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ 			      int addr_len)
+ {
+-	unsigned int new_hash, old_hash = sk->sk_hash;
+ 	struct unix_sock *u = unix_sk(sk);
++	unsigned int new_hash, old_hash;
+ 	struct net *net = sock_net(sk);
+ 	struct unix_address *addr;
+ 	int err;
+@@ -1280,6 +1282,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ 		goto out_mutex;
+ 	}
+ 
++	old_hash = sk->sk_hash;
+ 	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ 	unix_table_double_lock(net, old_hash, new_hash);
+ 
+@@ -1369,7 +1372,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+ 		     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+-		    !unix_sk(sk)->addr) {
++		    !READ_ONCE(unix_sk(sk)->addr)) {
+ 			err = unix_autobind(sk);
+ 			if (err)
+ 				goto out;
+@@ -1481,7 +1484,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		goto out;
+ 
+ 	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+-	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
++	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
++	    !READ_ONCE(u->addr)) {
+ 		err = unix_autobind(sk);
+ 		if (err)
+ 			goto out;
+@@ -1997,7 +2001,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	}
+ 
+ 	if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
+-	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
++	     test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
++	    !READ_ONCE(u->addr)) {
+ 		err = unix_autobind(sk);
+ 		if (err)
+ 			goto out;
+@@ -2217,13 +2222,15 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
+ 	maybe_add_creds(skb, sock, other);
+ 	skb_get(skb);
+ 
++	scm_stat_add(other, skb);
++
++	spin_lock(&other->sk_receive_queue.lock);
+ 	if (ousk->oob_skb)
+ 		consume_skb(ousk->oob_skb);
+-
+ 	WRITE_ONCE(ousk->oob_skb, skb);
++	__skb_queue_tail(&other->sk_receive_queue, skb);
++	spin_unlock(&other->sk_receive_queue.lock);
+ 
+-	scm_stat_add(other, skb);
+-	skb_queue_tail(&other->sk_receive_queue, skb);
+ 	sk_send_sigurg(other);
+ 	unix_state_unlock(other);
+ 	other->sk_data_ready(other);
+@@ -2614,8 +2621,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 
+ 	mutex_lock(&u->iolock);
+ 	unix_state_lock(sk);
++	spin_lock(&sk->sk_receive_queue.lock);
+ 
+ 	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
++		spin_unlock(&sk->sk_receive_queue.lock);
+ 		unix_state_unlock(sk);
+ 		mutex_unlock(&u->iolock);
+ 		return -EINVAL;
+@@ -2627,6 +2636,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
+ 		WRITE_ONCE(u->oob_skb, NULL);
+ 	else
+ 		skb_get(oob_skb);
++
++	spin_unlock(&sk->sk_receive_queue.lock);
+ 	unix_state_unlock(sk);
+ 
+ 	chunk = state->recv_actor(oob_skb, 0, chunk, state);
+@@ -2655,6 +2666,10 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 		consume_skb(skb);
+ 		skb = NULL;
+ 	} else {
++		struct sk_buff *unlinked_skb = NULL;
++
++		spin_lock(&sk->sk_receive_queue.lock);
++
+ 		if (skb == u->oob_skb) {
+ 			if (copied) {
+ 				skb = NULL;
+@@ -2666,13 +2681,19 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 			} else if (flags & MSG_PEEK) {
+ 				skb = NULL;
+ 			} else {
+-				skb_unlink(skb, &sk->sk_receive_queue);
++				__skb_unlink(skb, &sk->sk_receive_queue);
+ 				WRITE_ONCE(u->oob_skb, NULL);
+-				if (!WARN_ON_ONCE(skb_unref(skb)))
+-					kfree_skb(skb);
++				unlinked_skb = skb;
+ 				skb = skb_peek(&sk->sk_receive_queue);
+ 			}
+ 		}
++
++		spin_unlock(&sk->sk_receive_queue.lock);
++
++		if (unlinked_skb) {
++			WARN_ON_ONCE(skb_unref(unlinked_skb));
++			kfree_skb(unlinked_skb);
++		}
+ 	}
+ 	return skb;
+ }
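
The af_unix hunks above put every oob_skb update and the matching queue unlink/queue-tail under sk_receive_queue.lock (switching to the unlocked __skb_unlink()/__skb_queue_tail() variants while it is held), and manage_oob() now only records the unlinked skb under the lock, deferring kfree_skb() until after the unlock — destructors can do arbitrary work you do not want inside a spinlock. In miniature, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *queue_head;

    static void consume_one(void)
    {
            struct node *unlinked = NULL;

            pthread_mutex_lock(&queue_lock);
            if (queue_head) {           /* unlink only under the lock */
                    unlinked = queue_head;
                    queue_head = unlinked->next;
            }
            pthread_mutex_unlock(&queue_lock);

            free(unlinked);             /* heavy work outside the lock */
    }

    int main(void)
    {
            queue_head = calloc(1, sizeof(*queue_head));
            consume_one();
            return 0;
    }
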
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 53d8fabfa6858..d154597728d51 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2598,8 +2598,7 @@ static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
+ 			   int nfheader_len)
+ {
+ 	if (dst->ops->family == AF_INET6) {
+-		struct rt6_info *rt = (struct rt6_info *)dst;
+-		path->path_cookie = rt6_get_cookie(rt);
++		path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
+ 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
+ 	}
+ }
+@@ -3905,15 +3904,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
+ 	/* Impossible. Such dst must be popped before reaches point of failure. */
+ }
+ 
+-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
++static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
+ {
+-	if (dst) {
+-		if (dst->obsolete) {
+-			dst_release(dst);
+-			dst = NULL;
+-		}
+-	}
+-	return dst;
++	if (dst->obsolete)
++		sk_dst_reset(sk);
+ }
+ 
+ static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
+diff --git a/scripts/Makefile.vdsoinst b/scripts/Makefile.vdsoinst
+index c477d17b0aa5b..a81ca735003e4 100644
+--- a/scripts/Makefile.vdsoinst
++++ b/scripts/Makefile.vdsoinst
+@@ -21,7 +21,7 @@ $$(dest): $$(src) FORCE
+ 	$$(call cmd,install)
+ 
+ # Some architectures create .build-id symlinks
+-ifneq ($(filter arm sparc x86, $(SRCARCH)),)
++ifneq ($(filter arm s390 sparc x86, $(SRCARCH)),)
+ link := $(install-dir)/.build-id/$$(shell $(READELF) -n $$(src) | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p').debug
+ 
+ __default: $$(link)
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 81fe1884ef8ae..67a509778135b 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -14,6 +14,7 @@
+ 
+ struct symbol symbol_yes = {
+ 	.name = "y",
++	.type = S_TRISTATE,
+ 	.curr = { "y", yes },
+ 	.menus = LIST_HEAD_INIT(symbol_yes.menus),
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+@@ -21,6 +22,7 @@ struct symbol symbol_yes = {
+ 
+ struct symbol symbol_mod = {
+ 	.name = "m",
++	.type = S_TRISTATE,
+ 	.curr = { "m", mod },
+ 	.menus = LIST_HEAD_INIT(symbol_mod.menus),
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+@@ -28,6 +30,7 @@ struct symbol symbol_mod = {
+ 
+ struct symbol symbol_no = {
+ 	.name = "n",
++	.type = S_TRISTATE,
+ 	.curr = { "n", no },
+ 	.menus = LIST_HEAD_INIT(symbol_no.menus),
+ 	.flags = SYMBOL_CONST|SYMBOL_VALID,
+@@ -788,8 +791,7 @@ const char *sym_get_string_value(struct symbol *sym)
+ 		case no:
+ 			return "n";
+ 		case mod:
+-			sym_calc_value(modules_sym);
+-			return (modules_sym->curr.tri == no) ? "n" : "m";
++			return "m";
+ 		case yes:
+ 			return "y";
+ 		}
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 66d7265fea920..b8912de048662 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -539,6 +539,11 @@ void snd_card_disconnect(struct snd_card *card)
+ 		synchronize_irq(card->sync_irq);
+ 
+ 	snd_info_card_disconnect(card);
++#ifdef CONFIG_SND_DEBUG
++	debugfs_remove(card->debugfs_root);
++	card->debugfs_root = NULL;
++#endif
++
+ 	if (card->registered) {
+ 		device_del(&card->card_dev);
+ 		card->registered = false;
+@@ -590,10 +595,6 @@ static int snd_card_do_free(struct snd_card *card)
+ 		dev_warn(card->dev, "unable to free card info\n");
+ 		/* Not fatal error */
+ 	}
+-#ifdef CONFIG_SND_DEBUG
+-	debugfs_remove(card->debugfs_root);
+-	card->debugfs_root = NULL;
+-#endif
+ 	if (card->release_completion)
+ 		complete(card->release_completion);
+ 	if (!card->managed)
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index e08b2c4fbd1a5..e4bcecdf89b7e 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
+ };
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+ 
++static void snd_jack_remove_debugfs(struct snd_jack *jack);
++
+ static int snd_jack_dev_disconnect(struct snd_device *device)
+ {
+-#ifdef CONFIG_SND_JACK_INPUT_DEV
+ 	struct snd_jack *jack = device->device_data;
+ 
++	snd_jack_remove_debugfs(jack);
++
++#ifdef CONFIG_SND_JACK_INPUT_DEV
+ 	guard(mutex)(&jack->input_dev_lock);
+ 	if (!jack->input_dev)
+ 		return 0;
+@@ -381,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ 	return 0;
+ }
+ 
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+-	debugfs_remove(jack_kctl->jack_debugfs_root);
+-	jack_kctl->jack_debugfs_root = NULL;
++	struct snd_jack_kctl *jack_kctl;
++
++	list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
++		debugfs_remove(jack_kctl->jack_debugfs_root);
++		jack_kctl->jack_debugfs_root = NULL;
++	}
+ }
+ #else /* CONFIG_SND_JACK_INJECTION_DEBUG */
+ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+@@ -393,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+ 	return 0;
+ }
+ 
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+ }
+ #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
+@@ -404,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
+ 
+ 	jack_kctl = kctl->private_data;
+ 	if (jack_kctl) {
+-		snd_jack_debugfs_clear_inject_node(jack_kctl);
+ 		list_del(&jack_kctl->list);
+ 		kfree(jack_kctl);
+ 	}
+@@ -497,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
+ 		.dev_free = snd_jack_dev_free,
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+ 		.dev_register = snd_jack_dev_register,
+-		.dev_disconnect = snd_jack_dev_disconnect,
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
++		.dev_disconnect = snd_jack_dev_disconnect,
+ 	};
+ 
+ 	if (initial_kctl) {
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index ee6ac649df836..9bfba69b2a709 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
+ static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
+ 				     struct snd_seq_event *ev)
+ {
+-	ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
++	ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
+ }
+ 
+ /* Encoders for 0xf0 - 0xff */
+@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
+ 	struct snd_seq_ump_event ev_cvt;
+ 	const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
+ 	union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
++	struct snd_seq_ump_midi2_bank *cc;
+ 
+ 	ev_cvt = *event;
+ 	memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
+@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
+ 		midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
+ 		break;
+ 	case UMP_MSG_STATUS_CC:
++		cc = &dest_port->midi2_bank[midi1->note.channel];
++		switch (midi1->cc.index) {
++		case UMP_CC_BANK_SELECT:
++			cc->bank_set = 1;
++			cc->cc_bank_msb = midi1->cc.data;
++			return 0; // skip
++		case UMP_CC_BANK_SELECT_LSB:
++			cc->bank_set = 1;
++			cc->cc_bank_lsb = midi1->cc.data;
++			return 0; // skip
++		}
+ 		midi2->cc.index = midi1->cc.index;
+ 		midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
+ 		break;
+ 	case UMP_MSG_STATUS_PROGRAM:
+ 		midi2->pg.program = midi1->pg.program;
++		cc = &dest_port->midi2_bank[midi1->note.channel];
++		if (cc->bank_set) {
++			midi2->pg.bank_valid = 1;
++			midi2->pg.bank_msb = cc->cc_bank_msb;
++			midi2->pg.bank_lsb = cc->cc_bank_lsb;
++			cc->bank_set = 0;
++		}
+ 		break;
+ 	case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+ 		midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
+@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
+ 	struct snd_seq_ump_event ev_cvt;
+ 	union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
+ 	const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
++	int err;
+ 	u16 v;
+ 
+ 	ev_cvt = *event;
+@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
+ 		midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
+ 		break;
+ 	case UMP_MSG_STATUS_PROGRAM:
++		if (midi2->pg.bank_valid) {
++			midi1->cc.status = UMP_MSG_STATUS_CC;
++			midi1->cc.index = UMP_CC_BANK_SELECT;
++			midi1->cc.data = midi2->pg.bank_msb;
++			err = __snd_seq_deliver_single_event(dest, dest_port,
++							     (struct snd_seq_event *)&ev_cvt,
++							     atomic, hop);
++			if (err < 0)
++				return err;
++			midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
++			midi1->cc.data = midi2->pg.bank_lsb;
++			err = __snd_seq_deliver_single_event(dest, dest_port,
++							     (struct snd_seq_event *)&ev_cvt,
++							     atomic, hop);
++			if (err < 0)
++				return err;
++			midi1->note.status = midi2->note.status;
++		}
+ 		midi1->pg.program = midi2->pg.program;
+ 		break;
+ 	case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
+ 				  union snd_ump_midi1_msg *data,
+ 				  unsigned char status)
+ {
++	data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ 	data->system.status = status;
+ 	return 1;
+ }
+@@ -713,8 +752,8 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
+ 				     unsigned char status)
+ {
+ 	data->system.status = status;
+-	data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
+-	data->system.parm2 = event->data.control.value & 0x7f;
++	data->system.parm1 = event->data.control.value & 0x7f;
++	data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
+ 	return 1;
+ }
+ 
+@@ -854,7 +893,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 		data->pg.bank_msb = cc->cc_bank_msb;
+ 		data->pg.bank_lsb = cc->cc_bank_lsb;
+ 		cc->bank_set = 0;
+-		cc->cc_bank_msb = cc->cc_bank_lsb = 0;
+ 	}
+ 	return 1;
+ }
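
The seq_ump_convert fixes above include a byte-order correction for MIDI Song Position Pointer: the 14-bit value travels LSB-first, so parm1 carries bits 0-6 and parm2 carries bits 7-13, and both conversion directions had them swapped. The packing, spelled out:

    #include <assert.h>
    #include <stdint.h>

    /* MIDI Song Position Pointer: 14-bit value, LSB-first on the wire */
    static void songpos_pack(uint16_t value, uint8_t *parm1, uint8_t *parm2)
    {
            *parm1 = value & 0x7f;          /* bits 0..6  */
            *parm2 = (value >> 7) & 0x7f;   /* bits 7..13 */
    }

    static uint16_t songpos_unpack(uint8_t parm1, uint8_t parm2)
    {
            return (uint16_t)((parm2 << 7) | parm1);
    }

    int main(void)
    {
            uint8_t p1, p2;

            songpos_pack(0x1234, &p1, &p2);
            assert(songpos_unpack(p1, p2) == 0x1234);
            return 0;
    }
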
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 558c1f38fe971..11b0570ff56d4 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -732,8 +732,6 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
+ 	if (cs35l56->base.fw_patched)
+ 		cs_dsp_power_down(&cs35l56->cs_dsp);
+ 
+-	cs_dsp_remove(&cs35l56->cs_dsp);
+-
+ 	if (comps[cs35l56->index].dev == dev)
+ 		memset(&comps[cs35l56->index], 0, sizeof(*comps));
+ 
+@@ -1035,7 +1033,7 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+ 			       ARRAY_SIZE(cs35l56_hda_dai_config));
+ 	ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+ 	if (ret)
+-		goto err;
++		goto dsp_err;
+ 
+ 	/*
+ 	 * By default only enable one ASP1TXn, where n=amplifier index,
+@@ -1061,6 +1059,8 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+ 
+ pm_err:
+ 	pm_runtime_disable(cs35l56->base.dev);
++dsp_err:
++	cs_dsp_remove(&cs35l56->cs_dsp);
+ err:
+ 	gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
+ 
+@@ -1078,6 +1078,8 @@ void cs35l56_hda_remove(struct device *dev)
+ 
+ 	component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+ 
++	cs_dsp_remove(&cs35l56->cs_dsp);
++
+ 	kfree(cs35l56->system_name);
+ 	pm_runtime_put_noidle(cs35l56->base.dev);
+ 
+diff --git a/sound/pci/hda/hda_component.c b/sound/pci/hda/hda_component.c
+index cd299d7d84baf..d02589014a3fa 100644
+--- a/sound/pci/hda/hda_component.c
++++ b/sound/pci/hda/hda_component.c
+@@ -123,6 +123,21 @@ static int hda_comp_match_dev_name(struct device *dev, void *data)
+ 	return !strcmp(d + n, tmp);
+ }
+ 
++int hda_component_manager_bind(struct hda_codec *cdc,
++			       struct hda_component *comps, int count)
++{
++	int i;
++
++	/* Init shared data */
++	for (i = 0; i < count; ++i) {
++		memset(&comps[i], 0, sizeof(comps[i]));
++		comps[i].codec = cdc;
++	}
++
++	return component_bind_all(hda_codec_dev(cdc), comps);
++}
++EXPORT_SYMBOL_NS_GPL(hda_component_manager_bind, SND_HDA_SCODEC_COMPONENT);
++
+ int hda_component_manager_init(struct hda_codec *cdc,
+ 			       struct hda_component *comps, int count,
+ 			       const char *bus, const char *hid,
+@@ -143,7 +158,6 @@ int hda_component_manager_init(struct hda_codec *cdc,
+ 		sm->hid = hid;
+ 		sm->match_str = match_str;
+ 		sm->index = i;
+-		comps[i].codec = cdc;
+ 		component_match_add(dev, &match, hda_comp_match_dev_name, sm);
+ 	}
+ 
+diff --git a/sound/pci/hda/hda_component.h b/sound/pci/hda/hda_component.h
+index c80a66691b5d8..c70b3de68ab20 100644
+--- a/sound/pci/hda/hda_component.h
++++ b/sound/pci/hda/hda_component.h
+@@ -75,11 +75,8 @@ int hda_component_manager_init(struct hda_codec *cdc,
+ void hda_component_manager_free(struct hda_codec *cdc,
+ 				const struct component_master_ops *ops);
+ 
+-static inline int hda_component_manager_bind(struct hda_codec *cdc,
+-					     struct hda_component *comps)
+-{
+-	return component_bind_all(hda_codec_dev(cdc), comps);
+-}
++int hda_component_manager_bind(struct hda_codec *cdc,
++			       struct hda_component *comps, int count);
+ 
+ static inline void hda_component_manager_unbind(struct hda_codec *cdc,
+ 					       struct hda_component *comps)
+diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
+index 463ca06036bfe..9db45d7c17e5f 100644
+--- a/sound/pci/hda/hda_cs_dsp_ctl.c
++++ b/sound/pci/hda/hda_cs_dsp_ctl.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/module.h>
+ #include <sound/soc.h>
++#include <linux/cleanup.h>
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/firmware/cirrus/wmfw.h>
+ #include "hda_cs_dsp_ctl.h"
+@@ -97,11 +98,23 @@ static unsigned int wmfw_convert_flags(unsigned int in)
+ 	return out;
+ }
+ 
+-static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
++static void hda_cs_dsp_free_kcontrol(struct snd_kcontrol *kctl)
+ {
++	struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
+ 	struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
++
++	/* NULL priv to prevent a double-free in hda_cs_dsp_control_remove() */
++	cs_ctl->priv = NULL;
++	kfree(ctl);
++}
++
++static void hda_cs_dsp_add_kcontrol(struct cs_dsp_coeff_ctl *cs_ctl,
++				    const struct hda_cs_dsp_ctl_info *info,
++				    const char *name)
++{
+ 	struct snd_kcontrol_new kcontrol = {0};
+ 	struct snd_kcontrol *kctl;
++	struct hda_cs_dsp_coeff_ctl *ctl __free(kfree) = NULL;
+ 	int ret = 0;
+ 
+ 	if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
+@@ -110,6 +123,13 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ 		return;
+ 	}
+ 
++	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
++	if (!ctl)
++		return;
++
++	ctl->cs_ctl = cs_ctl;
++	ctl->card = info->card;
++
+ 	kcontrol.name = name;
+ 	kcontrol.info = hda_cs_dsp_coeff_info;
+ 	kcontrol.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+@@ -117,20 +137,22 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
+ 	kcontrol.get = hda_cs_dsp_coeff_get;
+ 	kcontrol.put = hda_cs_dsp_coeff_put;
+ 
+-	/* Save ctl inside private_data, ctl is owned by cs_dsp,
+-	 * and will be freed when cs_dsp removes the control */
+ 	kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
+ 	if (!kctl)
+ 		return;
+ 
+-	ret = snd_ctl_add(ctl->card, kctl);
++	kctl->private_free = hda_cs_dsp_free_kcontrol;
++	ctl->kctl = kctl;
++
++	/* snd_ctl_add() calls our private_free on error, which will kfree(ctl) */
++	cs_ctl->priv = no_free_ptr(ctl);
++	ret = snd_ctl_add(info->card, kctl);
+ 	if (ret) {
+ 		dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
+ 		return;
+ 	}
+ 
+ 	dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
+-	ctl->kctl = kctl;
+ }
+ 
+ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+@@ -138,7 +160,6 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ {
+ 	struct cs_dsp *cs_dsp = cs_ctl->dsp;
+ 	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+-	struct hda_cs_dsp_coeff_ctl *ctl;
+ 	const char *region_name;
+ 	int ret;
+ 
+@@ -163,15 +184,7 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ 			 " %.*s", cs_ctl->subname_len - skip, cs_ctl->subname + skip);
+ 	}
+ 
+-	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+-	if (!ctl)
+-		return;
+-
+-	ctl->cs_ctl = cs_ctl;
+-	ctl->card = info->card;
+-	cs_ctl->priv = ctl;
+-
+-	hda_cs_dsp_add_kcontrol(ctl, name);
++	hda_cs_dsp_add_kcontrol(cs_ctl, info, name);
+ }
+ 
+ void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info)
+@@ -203,7 +216,9 @@ void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
+ {
+ 	struct hda_cs_dsp_coeff_ctl *ctl = cs_ctl->priv;
+ 
+-	kfree(ctl);
++	/* ctl and kctl may already have been removed by ALSA private_free */
++	if (ctl && ctl->kctl)
++		snd_ctl_remove(ctl->card, ctl->kctl);
+ }
+ EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_remove, SND_HDA_CS_DSP_CONTROLS);
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3b8b4ab488a61..1a1ca7caaff07 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6793,7 +6793,7 @@ static int comp_bind(struct device *dev)
+ 	struct alc_spec *spec = cdc->spec;
+ 	int ret;
+ 
+-	ret = hda_component_manager_bind(cdc, spec->comps);
++	ret = hda_component_manager_bind(cdc, spec->comps, ARRAY_SIZE(spec->comps));
+ 	if (ret)
+ 		return ret;
+ 
+@@ -10103,7 +10103,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8a2c, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a2d, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
+-	SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
+@@ -10295,7 +10294,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+-	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
++	SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c
+index b5aff3f230be5..3be7c6d55a6f8 100644
+--- a/sound/soc/amd/acp/acp-legacy-common.c
++++ b/sound/soc/amd/acp/acp-legacy-common.c
+@@ -358,11 +358,25 @@ int smn_read(struct pci_dev *dev, u32 smn_addr)
+ }
+ EXPORT_SYMBOL_NS_GPL(smn_read, SND_SOC_ACP_COMMON);
+ 
+-int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip)
++static void check_acp3x_config(struct acp_chip_info *chip)
+ {
+-	struct acpi_device *pdm_dev;
+-	const union acpi_object *obj;
+-	u32 pdm_addr, val;
++	u32 val;
++
++	val = readl(chip->base + ACP3X_PIN_CONFIG);
++	switch (val) {
++	case ACP_CONFIG_4:
++		chip->is_i2s_config = true;
++		chip->is_pdm_config = true;
++		break;
++	default:
++		chip->is_pdm_config = true;
++		break;
++	}
++}
++
++static void check_acp6x_config(struct acp_chip_info *chip)
++{
++	u32 val;
+ 
+ 	val = readl(chip->base + ACP_PIN_CONFIG);
+ 	switch (val) {
+@@ -371,42 +385,94 @@ int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip)
+ 	case ACP_CONFIG_6:
+ 	case ACP_CONFIG_7:
+ 	case ACP_CONFIG_8:
+-	case ACP_CONFIG_10:
+ 	case ACP_CONFIG_11:
++	case ACP_CONFIG_14:
++		chip->is_pdm_config = true;
++		break;
++	case ACP_CONFIG_9:
++		chip->is_i2s_config = true;
++		break;
++	case ACP_CONFIG_10:
+ 	case ACP_CONFIG_12:
+ 	case ACP_CONFIG_13:
++		chip->is_i2s_config = true;
++		chip->is_pdm_config = true;
++		break;
++	default:
++		break;
++	}
++}
++
++static void check_acp70_config(struct acp_chip_info *chip)
++{
++	u32 val;
++
++	val = readl(chip->base + ACP_PIN_CONFIG);
++	switch (val) {
++	case ACP_CONFIG_4:
++	case ACP_CONFIG_5:
++	case ACP_CONFIG_6:
++	case ACP_CONFIG_7:
++	case ACP_CONFIG_8:
++	case ACP_CONFIG_11:
+ 	case ACP_CONFIG_14:
++	case ACP_CONFIG_17:
++	case ACP_CONFIG_18:
++		chip->is_pdm_config = true;
++		break;
++	case ACP_CONFIG_9:
++		chip->is_i2s_config = true;
++		break;
++	case ACP_CONFIG_10:
++	case ACP_CONFIG_12:
++	case ACP_CONFIG_13:
++	case ACP_CONFIG_19:
++	case ACP_CONFIG_20:
++		chip->is_i2s_config = true;
++		chip->is_pdm_config = true;
+ 		break;
+ 	default:
+-		return -EINVAL;
++		break;
+ 	}
++}
++
++void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip)
++{
++	struct acpi_device *pdm_dev;
++	const union acpi_object *obj;
++	u32 pdm_addr;
+ 
+ 	switch (chip->acp_rev) {
+ 	case ACP3X_DEV:
+ 		pdm_addr = ACP_RENOIR_PDM_ADDR;
++		check_acp3x_config(chip);
+ 		break;
+ 	case ACP6X_DEV:
+ 		pdm_addr = ACP_REMBRANDT_PDM_ADDR;
++		check_acp6x_config(chip);
+ 		break;
+ 	case ACP63_DEV:
+ 		pdm_addr = ACP63_PDM_ADDR;
++		check_acp6x_config(chip);
+ 		break;
+ 	case ACP70_DEV:
+ 		pdm_addr = ACP70_PDM_ADDR;
++		check_acp70_config(chip);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		break;
+ 	}
+ 
+-	pdm_dev = acpi_find_child_device(ACPI_COMPANION(&pci->dev), pdm_addr, 0);
+-	if (pdm_dev) {
+-		if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type",
+-					   ACPI_TYPE_INTEGER, &obj) &&
+-					   obj->integer.value == pdm_addr)
+-			return 0;
++	if (chip->is_pdm_config) {
++		pdm_dev = acpi_find_child_device(ACPI_COMPANION(&pci->dev), pdm_addr, 0);
++		if (pdm_dev) {
++			if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type",
++						   ACPI_TYPE_INTEGER, &obj) &&
++						   obj->integer.value == pdm_addr)
++				chip->is_pdm_dev = true;
++		}
+ 	}
+-	return -ENODEV;
+ }
+-EXPORT_SYMBOL_NS_GPL(check_acp_pdm, SND_SOC_ACP_COMMON);
++EXPORT_SYMBOL_NS_GPL(check_acp_config, SND_SOC_ACP_COMMON);
+ 
+ MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
+index 5f35b90eab8d3..ad320b29e87dc 100644
+--- a/sound/soc/amd/acp/acp-pci.c
++++ b/sound/soc/amd/acp/acp-pci.c
+@@ -100,7 +100,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ 		ret = -EINVAL;
+ 		goto release_regions;
+ 	}
+-
+ 	dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
+ 	if (IS_ERR(dmic_dev)) {
+ 		dev_err(dev, "failed to create DMIC device\n");
+@@ -119,6 +118,10 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ 	if (ret)
+ 		goto unregister_dmic_dev;
+ 
++	check_acp_config(pci, chip);
++	if (!chip->is_pdm_dev && !chip->is_i2s_config)
++		goto skip_pdev_creation;
++
+ 	res = devm_kcalloc(&pci->dev, num_res, sizeof(struct resource), GFP_KERNEL);
+ 	if (!res) {
+ 		ret = -ENOMEM;
+@@ -136,10 +139,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ 		}
+ 	}
+ 
+-	ret = check_acp_pdm(pci, chip);
+-	if (ret < 0)
+-		goto skip_pdev_creation;
+-
+ 	chip->flag = flag;
+ 	memset(&pdevinfo, 0, sizeof(pdevinfo));
+ 
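The shape of this probe-path change: check_acp_pdm() returned a negative errno
whenever the ACPI PDM device was missing, so an I2S-only BIOS pin configuration
also skipped child-device creation. check_acp_config() is void and instead
records what the pin configuration enables, letting the probe decide per
interface. Condensed from the hunks above (the three booleans are the new
acp_chip_info fields added in the amd.h hunk below):

    check_acp_config(pci, chip); /* sets is_pdm_config / is_i2s_config / is_pdm_dev */
    if (!chip->is_pdm_dev && !chip->is_i2s_config)
        goto skip_pdev_creation; /* nothing usable behind this ACP instance */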
+diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
+index 5017e868f39b9..d75b4eb34de8d 100644
+--- a/sound/soc/amd/acp/amd.h
++++ b/sound/soc/amd/acp/amd.h
+@@ -138,6 +138,9 @@ struct acp_chip_info {
+ 	void __iomem *base;	/* ACP memory PCI base */
+ 	struct platform_device *chip_pdev;
+ 	unsigned int flag;	/* Distinguish b/w Legacy or Only PDM */
++	bool is_pdm_dev;	/* flag set to true when ACP PDM controller exists */
++	bool is_pdm_config;	/* flag set to true when PDM configuration is selected from BIOS */
++	bool is_i2s_config;	/* flag set to true when I2S configuration is selected from BIOS */
+ };
+ 
+ struct acp_stream {
+@@ -212,6 +215,11 @@ enum acp_config {
+ 	ACP_CONFIG_13,
+ 	ACP_CONFIG_14,
+ 	ACP_CONFIG_15,
++	ACP_CONFIG_16,
++	ACP_CONFIG_17,
++	ACP_CONFIG_18,
++	ACP_CONFIG_19,
++	ACP_CONFIG_20,
+ };
+ 
+ extern const struct snd_soc_dai_ops asoc_acp_cpu_dai_ops;
+@@ -240,7 +248,7 @@ void restore_acp_pdm_params(struct snd_pcm_substream *substream,
+ int restore_acp_i2s_params(struct snd_pcm_substream *substream,
+ 			   struct acp_dev_data *adata, struct acp_stream *stream);
+ 
+-int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip);
++void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip);
+ 
+ static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int direction)
+ {
+diff --git a/sound/soc/amd/acp/chip_offset_byte.h b/sound/soc/amd/acp/chip_offset_byte.h
+index cfd6c4d075944..18da734c0e9e7 100644
+--- a/sound/soc/amd/acp/chip_offset_byte.h
++++ b/sound/soc/amd/acp/chip_offset_byte.h
+@@ -20,6 +20,7 @@
+ #define ACP_SOFT_RESET                          0x1000
+ #define ACP_CONTROL                             0x1004
+ #define ACP_PIN_CONFIG				0x1440
++#define ACP3X_PIN_CONFIG			0x1400
+ 
+ #define ACP_EXTERNAL_INTR_REG_ADDR(adata, offset, ctrl) \
+ 	(adata->acp_base + adata->rsrc->irq_reg_offset + offset + (ctrl * 0x04))
+diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
+index 94685449f0f48..92674314227c4 100644
+--- a/sound/soc/codecs/cs42l43.c
++++ b/sound/soc/codecs/cs42l43.c
+@@ -310,8 +310,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
+ 	struct snd_soc_component *component = dai->component;
+ 	struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
+ 	struct cs42l43 *cs42l43 = priv->core;
+-	int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
+-					  CS42L43_ASP_MASTER_MODE_MASK);
++	int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
++						      CS42L43_ASP_CLK_CONFIG2,
++						      CS42L43_ASP_MASTER_MODE_MASK);
+ 
+ 	if (provider)
+ 		priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
+diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
+index ee450126106f9..9a55e77af02fe 100644
+--- a/sound/soc/codecs/rt715-sdca-sdw.c
++++ b/sound/soc/codecs/rt715-sdca-sdw.c
+@@ -234,10 +234,10 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
+ 	if (!slave->unattach_request)
+ 		goto regmap_sync;
+ 
+-	time = wait_for_completion_timeout(&slave->enumeration_complete,
++	time = wait_for_completion_timeout(&slave->initialization_complete,
+ 					   msecs_to_jiffies(RT715_PROBE_TIMEOUT));
+ 	if (!time) {
+-		dev_err(&slave->dev, "%s: Enumeration not complete, timed out\n", __func__);
++		dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
+ 		sdw_show_ping_status(slave->bus, true);
+ 
+ 		return -ETIMEDOUT;
+diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
+index 8c9dc318b0e82..c65a4219ecd6c 100644
+--- a/sound/soc/codecs/tas2552.c
++++ b/sound/soc/codecs/tas2552.c
+@@ -2,7 +2,8 @@
+ /*
+  * tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier
+  *
+- * Copyright (C) 2014 Texas Instruments Incorporated -  https://www.ti.com
++ * Copyright (C) 2014 - 2024 Texas Instruments Incorporated -
++ *	https://www.ti.com
+  *
+  * Author: Dan Murphy <dmurphy@ti.com>
+  */
+@@ -119,12 +120,14 @@ static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] =
+ 			 &tas2552_input_mux_control),
+ 
+ 	SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("ASI OUT", "DAC Capture", 0, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ 	SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0),
+ 	SND_SOC_DAPM_POST("Post Event", tas2552_post_event),
+ 
+-	SND_SOC_DAPM_OUTPUT("OUT")
++	SND_SOC_DAPM_OUTPUT("OUT"),
++	SND_SOC_DAPM_INPUT("DMIC")
+ };
+ 
+ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+@@ -134,6 +137,7 @@ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+ 	{"ClassD", NULL, "Input selection"},
+ 	{"OUT", NULL, "ClassD"},
+ 	{"ClassD", NULL, "PLL"},
++	{"ASI OUT", NULL, "DMIC"}
+ };
+ 
+ #ifdef CONFIG_PM
+@@ -538,6 +542,13 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
+ 			.rates = SNDRV_PCM_RATE_8000_192000,
+ 			.formats = TAS2552_FORMATS,
+ 		},
++		.capture = {
++			.stream_name = "Capture",
++			.channels_min = 2,
++			.channels_max = 2,
++			.rates = SNDRV_PCM_RATE_8000_192000,
++			.formats = TAS2552_FORMATS,
++		},
+ 		.ops = &tas2552_speaker_dai_ops,
+ 	},
+ };
+diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
+index 45760fe195237..265a8ca25cbbe 100644
+--- a/sound/soc/codecs/tas2781-fmwlib.c
++++ b/sound/soc/codecs/tas2781-fmwlib.c
+@@ -1,8 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+ //
+-// tasdevice-fmw.c -- TASDEVICE firmware support
++// tas2781-fmwlib.c -- TASDEVICE firmware support
+ //
+-// Copyright 2023 Texas Instruments, Inc.
++// Copyright 2023 - 2024 Texas Instruments, Inc.
+ //
+ // Author: Shenghao Ding <shenghao-ding@ti.com>
+ 
+@@ -1878,7 +1878,7 @@ int tas2781_load_calibration(void *context, char *file_name,
+ {
+ 	struct tasdevice_priv *tas_priv = (struct tasdevice_priv *)context;
+ 	struct tasdevice *tasdev = &(tas_priv->tasdevice[i]);
+-	const struct firmware *fw_entry;
++	const struct firmware *fw_entry = NULL;
+ 	struct tasdevice_fw *tas_fmw;
+ 	struct firmware fmw;
+ 	int offset = 0;
+@@ -2151,6 +2151,24 @@ static int tasdevice_load_data(struct tasdevice_priv *tas_priv,
+ 	return ret;
+ }
+ 
++static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i)
++{
++	struct tasdevice_calibration *cal;
++	struct tasdevice_fw *cal_fmw;
++
++	cal_fmw = priv->tasdevice[i].cali_data_fmw;
++
++	/* No calibration data for this device; playback goes ahead without it. */
++	if (!cal_fmw)
++		return;
++
++	cal = cal_fmw->calibrations;
++	if (!cal)
++		return;
++
++	load_calib_data(priv, &cal->dev_data);
++}
++
+ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ 	int cfg_no, int rca_conf_no)
+ {
+@@ -2210,21 +2228,9 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ 		for (i = 0; i < tas_priv->ndev; i++) {
+ 			if (tas_priv->tasdevice[i].is_loaderr == true)
+ 				continue;
+-			else if (tas_priv->tasdevice[i].is_loaderr == false
+-				&& tas_priv->tasdevice[i].is_loading == true) {
+-				struct tasdevice_fw *cal_fmw =
+-					tas_priv->tasdevice[i].cali_data_fmw;
+-
+-				if (cal_fmw) {
+-					struct tasdevice_calibration
+-						*cal = cal_fmw->calibrations;
+-
+-					if (cal)
+-						load_calib_data(tas_priv,
+-							&(cal->dev_data));
+-				}
++			if (tas_priv->tasdevice[i].is_loaderr == false &&
++				tas_priv->tasdevice[i].is_loading == true)
+ 				tas_priv->tasdevice[i].cur_prog = prm_no;
+-			}
+ 		}
+ 	}
+ 
+@@ -2245,11 +2251,15 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
+ 		tasdevice_load_data(tas_priv, &(conf->dev_data));
+ 		for (i = 0; i < tas_priv->ndev; i++) {
+ 			if (tas_priv->tasdevice[i].is_loaderr == true) {
+-				status |= 1 << (i + 4);
++				status |= BIT(i + 4);
+ 				continue;
+-			} else if (tas_priv->tasdevice[i].is_loaderr == false
+-				&& tas_priv->tasdevice[i].is_loading == true)
++			}
++
++			if (tas_priv->tasdevice[i].is_loaderr == false &&
++				tas_priv->tasdevice[i].is_loading == true) {
++				tasdev_load_calibrated_data(tas_priv, i);
+ 				tas_priv->tasdevice[i].cur_conf = cfg_no;
++			}
+ 		}
+ 	} else
+ 		dev_dbg(tas_priv->dev, "%s: Unneeded loading dsp conf %d\n",
+@@ -2308,65 +2318,6 @@ int tasdevice_prmg_load(void *context, int prm_no)
+ }
+ EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_load, SND_SOC_TAS2781_FMWLIB);
+ 
+-int tasdevice_prmg_calibdata_load(void *context, int prm_no)
+-{
+-	struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
+-	struct tasdevice_fw *tas_fmw = tas_priv->fmw;
+-	struct tasdevice_prog *program;
+-	int prog_status = 0;
+-	int i;
+-
+-	if (!tas_fmw) {
+-		dev_err(tas_priv->dev, "%s: Firmware is NULL\n", __func__);
+-		goto out;
+-	}
+-
+-	if (prm_no >= tas_fmw->nr_programs) {
+-		dev_err(tas_priv->dev,
+-			"%s: prm(%d) is not in range of Programs %u\n",
+-			__func__, prm_no, tas_fmw->nr_programs);
+-		goto out;
+-	}
+-
+-	for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
+-		if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) {
+-			tas_priv->tasdevice[i].cur_conf = -1;
+-			tas_priv->tasdevice[i].is_loading = true;
+-			prog_status++;
+-		}
+-		tas_priv->tasdevice[i].is_loaderr = false;
+-	}
+-
+-	if (prog_status) {
+-		program = &(tas_fmw->programs[prm_no]);
+-		tasdevice_load_data(tas_priv, &(program->dev_data));
+-		for (i = 0; i < tas_priv->ndev; i++) {
+-			if (tas_priv->tasdevice[i].is_loaderr == true)
+-				continue;
+-			else if (tas_priv->tasdevice[i].is_loaderr == false
+-				&& tas_priv->tasdevice[i].is_loading == true) {
+-				struct tasdevice_fw *cal_fmw =
+-					tas_priv->tasdevice[i].cali_data_fmw;
+-
+-				if (cal_fmw) {
+-					struct tasdevice_calibration *cal =
+-						cal_fmw->calibrations;
+-
+-					if (cal)
+-						load_calib_data(tas_priv,
+-							&(cal->dev_data));
+-				}
+-				tas_priv->tasdevice[i].cur_prog = prm_no;
+-			}
+-		}
+-	}
+-
+-out:
+-	return prog_status;
+-}
+-EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_calibdata_load,
+-	SND_SOC_TAS2781_FMWLIB);
+-
+ void tasdevice_tuning_switch(void *context, int state)
+ {
+ 	struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
+diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
+index b5abff230e437..9350972dfefe7 100644
+--- a/sound/soc/codecs/tas2781-i2c.c
++++ b/sound/soc/codecs/tas2781-i2c.c
+@@ -2,7 +2,7 @@
+ //
+ // ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
+ //
+-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
++// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+ // https://www.ti.com
+ //
+ // The TAS2563/TAS2781 driver implements a flexible and configurable
+@@ -414,7 +414,7 @@ static void tasdevice_fw_ready(const struct firmware *fmw,
+ 				__func__, tas_priv->cal_binaryname[i]);
+ 	}
+ 
+-	tasdevice_prmg_calibdata_load(tas_priv, 0);
++	tasdevice_prmg_load(tas_priv, 0);
+ 	tas_priv->cur_prog = 0;
+ out:
+ 	if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
+diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+index 9ce06821c7d0f..49440db370af0 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+@@ -566,10 +566,10 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ 		tdm_con |= 1 << DELAY_DATA_SFT;
+ 		tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
+ 	} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_A) {
+-		tdm_con |= 0 << DELAY_DATA_SFT;
++		tdm_con |= 1 << DELAY_DATA_SFT;
+ 		tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ 	} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_B) {
+-		tdm_con |= 1 << DELAY_DATA_SFT;
++		tdm_con |= 0 << DELAY_DATA_SFT;
+ 		tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
+ 	}
+ 
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index 7275437ea8d8a..6481da31826dc 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -345,8 +345,27 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
+ 
+ 	debugfs_create_str("fw_path", 0444, fw_profile,
+ 			   (char **)&plat_data->fw_filename_prefix);
+-	debugfs_create_str("fw_lib_path", 0444, fw_profile,
+-			   (char **)&plat_data->fw_lib_prefix);
++	/* library path is not valid for IPC3 */
++	if (plat_data->ipc_type != SOF_IPC_TYPE_3) {
++		/*
++		 * fw_lib_prefix can be NULL if the vendor/platform does not
++		 * support loadable libraries
++		 */
++		if (plat_data->fw_lib_prefix) {
++			debugfs_create_str("fw_lib_path", 0444, fw_profile,
++					   (char **)&plat_data->fw_lib_prefix);
++		} else {
++			static char *fw_lib_path;
++
++			fw_lib_path = devm_kasprintf(sdev->dev, GFP_KERNEL,
++						     "Not supported");
++			if (!fw_lib_path)
++				return -ENOMEM;
++
++			debugfs_create_str("fw_lib_path", 0444, fw_profile,
++					   (char **)&fw_lib_path);
++		}
++	}
+ 	debugfs_create_str("tplg_path", 0444, fw_profile,
+ 			   (char **)&plat_data->tplg_filename_prefix);
+ 	debugfs_create_str("fw_name", 0444, fw_profile,
+diff --git a/tools/arch/x86/intel_sdsi/intel_sdsi.c b/tools/arch/x86/intel_sdsi/intel_sdsi.c
+index 2cd92761f1714..ba2a6b6645ae8 100644
+--- a/tools/arch/x86/intel_sdsi/intel_sdsi.c
++++ b/tools/arch/x86/intel_sdsi/intel_sdsi.c
+@@ -43,7 +43,6 @@
+ #define METER_CERT_MAX_SIZE	4096
+ #define STATE_MAX_NUM_LICENSES	16
+ #define STATE_MAX_NUM_IN_BUNDLE	(uint32_t)8
+-#define METER_MAX_NUM_BUNDLES	8
+ 
+ #define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+ #define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
+@@ -154,11 +153,12 @@ struct bundle_encoding {
+ };
+ 
+ struct meter_certificate {
+-	uint32_t block_signature;
+-	uint32_t counter_unit;
++	uint32_t signature;
++	uint32_t version;
+ 	uint64_t ppin;
++	uint32_t counter_unit;
+ 	uint32_t bundle_length;
+-	uint32_t reserved;
++	uint64_t reserved;
+ 	uint32_t mmrc_encoding;
+ 	uint32_t mmrc_counter;
+ };
+@@ -167,6 +167,11 @@ struct bundle_encoding_counter {
+ 	uint32_t encoding;
+ 	uint32_t counter;
+ };
++#define METER_BUNDLE_SIZE sizeof(struct bundle_encoding_counter)
++#define BUNDLE_COUNT(length) ((length) / METER_BUNDLE_SIZE)
++#define METER_MAX_NUM_BUNDLES							\
++		((METER_CERT_MAX_SIZE - sizeof(struct meter_certificate)) /	\
++		 sizeof(struct bundle_encoding_counter))
+ 
+ struct sdsi_dev {
+ 	struct sdsi_regs regs;
+@@ -334,6 +339,7 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
+ 	uint32_t count = 0;
+ 	FILE *cert_ptr;
+ 	int ret, size;
++	char name[4];
+ 
+ 	ret = sdsi_update_registers(s);
+ 	if (ret)
+@@ -375,32 +381,40 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
+ 	printf("\n");
+ 	printf("Meter certificate for device %s\n", s->dev_name);
+ 	printf("\n");
+-	printf("Block Signature:       0x%x\n", mc->block_signature);
+-	printf("Count Unit:            %dms\n", mc->counter_unit);
+-	printf("PPIN:                  0x%lx\n", mc->ppin);
+-	printf("Feature Bundle Length: %d\n", mc->bundle_length);
+-	printf("MMRC encoding:         %d\n", mc->mmrc_encoding);
+-	printf("MMRC counter:          %d\n", mc->mmrc_counter);
+-	if (mc->bundle_length % 8) {
++
++	get_feature(mc->signature, name);
++	printf("Signature:                    %.4s\n", name);
++
++	printf("Version:                      %d\n", mc->version);
++	printf("Count Unit:                   %dms\n", mc->counter_unit);
++	printf("PPIN:                         0x%lx\n", mc->ppin);
++	printf("Feature Bundle Length:        %d\n", mc->bundle_length);
++
++	get_feature(mc->mmrc_encoding, name);
++	printf("MMRC encoding:                %.4s\n", name);
++
++	printf("MMRC counter:                 %d\n", mc->mmrc_counter);
++	if (mc->bundle_length % METER_BUNDLE_SIZE) {
+ 		fprintf(stderr, "Invalid bundle length\n");
+ 		return -1;
+ 	}
+ 
+-	if (mc->bundle_length > METER_MAX_NUM_BUNDLES * 8)  {
+-		fprintf(stderr, "More than %d bundles: %d\n",
+-			METER_MAX_NUM_BUNDLES, mc->bundle_length / 8);
++	if (mc->bundle_length > METER_MAX_NUM_BUNDLES * METER_BUNDLE_SIZE)  {
++		fprintf(stderr, "More than %ld bundles: actual %ld\n",
++			METER_MAX_NUM_BUNDLES, BUNDLE_COUNT(mc->bundle_length));
+ 		return -1;
+ 	}
+ 
+-	bec = (void *)(mc) + sizeof(mc);
++	bec = (struct bundle_encoding_counter *)(mc + 1);
+ 
+-	printf("Number of Feature Counters:          %d\n", mc->bundle_length / 8);
+-	while (count++ < mc->bundle_length / 8) {
++	printf("Number of Feature Counters:   %ld\n", BUNDLE_COUNT(mc->bundle_length));
++	while (count < BUNDLE_COUNT(mc->bundle_length)) {
+ 		char feature[5];
+ 
+ 		feature[4] = '\0';
+ 		get_feature(bec[count].encoding, feature);
+ 		printf("    %s:          %d\n", feature, bec[count].counter);
++		++count;
+ 	}
+ 
+ 	return 0;
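The new limits are easy to check by hand. Assuming natural alignment on an
LP64 target (the field layout already falls on 8-byte boundaries, so no hidden
padding is expected):

    /*
     * sizeof(struct meter_certificate):
     *   signature(4) + version(4) + ppin(8) + counter_unit(4) +
     *   bundle_length(4) + reserved(8) + mmrc_encoding(4) + mmrc_counter(4) = 40
     *
     * METER_BUNDLE_SIZE     = sizeof(struct bundle_encoding_counter) = 4 + 4 = 8
     * METER_MAX_NUM_BUNDLES = (METER_CERT_MAX_SIZE - 40) / 8 = (4096 - 40) / 8 = 507
     * BUNDLE_COUNT(64)      = 64 / 8 = 8 feature counters
     */

so the derived limit of 507 bundles replaces the old hard-coded 8, which was
far below what a 4 KiB certificate can actually carry.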
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index d9520cb826b31..af393c7dee1f1 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
+ 
+ static int symbols_patch(struct object *obj)
+ {
+-	int err;
++	off_t err;
+ 
+ 	if (__symbols_patch(obj, &obj->structs)  ||
+ 	    __symbols_patch(obj, &obj->unions)   ||
+diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
+index 9fa75943f2ed1..d943d78b787ed 100644
+--- a/tools/lib/subcmd/parse-options.c
++++ b/tools/lib/subcmd/parse-options.c
+@@ -633,11 +633,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
+ 			const char *const subcommands[], const char *usagestr[], int flags)
+ {
+ 	struct parse_opt_ctx_t ctx;
++	char *buf = NULL;
+ 
+ 	/* build usage string if it's not provided */
+ 	if (subcommands && !usagestr[0]) {
+-		char *buf = NULL;
+-
+ 		astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
+ 
+ 		for (int i = 0; subcommands[i]; i++) {
+@@ -679,7 +678,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
+ 			astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
+ 		usage_with_options(usagestr, options);
+ 	}
+-
++	if (buf) {
++		usagestr[0] = NULL;
++		free(buf);
++	}
+ 	return parse_options_end(&ctx);
+ }
+ 
+diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
+index 3b12595193c9f..6bf2468f59d31 100644
+--- a/tools/perf/Documentation/perf-list.txt
++++ b/tools/perf/Documentation/perf-list.txt
+@@ -71,6 +71,7 @@ counted. The following modifiers exist:
+  D - pin the event to the PMU
+  W - group is weak and will fallback to non-group if not schedulable,
+  e - group or event are exclusive and do not share the PMU
++ b - use BPF aggregation (see perf stat --bpf-counters)
+ 
+ The 'p' modifier can be used for specifying how precise the instruction
+ address should be. The 'p' modifier can be specified multiple times:
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 04d89d2ed209b..d769aa447fb75 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -458,18 +458,19 @@ SHELL = $(SHELL_PATH)
+ 
+ arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools
+ ifneq ($(OUTPUT),)
+-  arm64_gen_sysreg_outdir := $(OUTPUT)
++  arm64_gen_sysreg_outdir := $(abspath $(OUTPUT))
+ else
+   arm64_gen_sysreg_outdir := $(CURDIR)
+ endif
+ 
+ arm64-sysreg-defs: FORCE
+-	$(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir)
++	$(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \
++		prefix= subdir=
+ 
+ arm64-sysreg-defs-clean:
+ 	$(call QUIET_CLEAN,arm64-sysreg-defs)
+ 	$(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \
+-		clean > /dev/null
++		prefix= subdir= clean > /dev/null
+ 
+ beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/
+ linux_uapi_dir := $(srctree)/tools/include/uapi/linux
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
+index 49331743c7439..a759eb2328bea 100644
+--- a/tools/perf/bench/inject-buildid.c
++++ b/tools/perf/bench/inject-buildid.c
+@@ -362,7 +362,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+ 		return -1;
+ 
+ 	for (i = 0; i < nr_mmaps; i++) {
+-		int idx = rand() % (nr_dsos - 1);
++		int idx = rand() % nr_dsos;
+ 		struct bench_dso *dso = &dsos[idx];
+ 		u64 timestamp = rand() % 1000000;
+ 
+diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
+index 5c71fdc419dd7..b722ff88fe7de 100644
+--- a/tools/perf/bench/uprobe.c
++++ b/tools/perf/bench/uprobe.c
+@@ -47,7 +47,7 @@ static const char * const bench_uprobe_usage[] = {
+ #define bench_uprobe__attach_uprobe(prog) \
+ 	skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \
+ 							   /*pid=*/-1, \
+-							   /*binary_path=*/"/lib64/libc.so.6", \
++							   /*binary_path=*/"libc.so.6", \
+ 							   /*func_offset=*/0, \
+ 							   /*opts=*/&uprobe_opts); \
+ 	if (!skel->links.prog) { \
+diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
+index 6c1cc797692d9..9cd97fd76bb5e 100644
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -809,8 +809,6 @@ int cmd_annotate(int argc, const char **argv)
+ 		    "Enable symbol demangling"),
+ 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ 		    "Enable kernel symbol demangling"),
+-	OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
+-		    "Show event group information together"),
+ 	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+ 		    "Show a column with the sum of periods"),
+ 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
+diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
+index 83954af36753a..de76bbc50bfbc 100644
+--- a/tools/perf/builtin-daemon.c
++++ b/tools/perf/builtin-daemon.c
+@@ -523,7 +523,7 @@ static int daemon_session__control(struct daemon_session *session,
+ 		  session->base, SESSION_CONTROL);
+ 
+ 	control = open(control_path, O_WRONLY|O_NONBLOCK);
+-	if (!control)
++	if (control < 0)
+ 		return -1;
+ 
+ 	if (do_ack) {
+@@ -532,7 +532,7 @@ static int daemon_session__control(struct daemon_session *session,
+ 			  session->base, SESSION_ACK);
+ 
+ 		ack = open(ack_path, O_RDONLY, O_NONBLOCK);
+-		if (!ack) {
++		if (ack < 0) {
+ 			close(control);
+ 			return -1;
+ 		}
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index ff7e1d6cfcd2e..6aeae398ec289 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1355,8 +1355,6 @@ static int record__open(struct record *rec)
+ 	struct record_opts *opts = &rec->opts;
+ 	int rc = 0;
+ 
+-	evlist__config(evlist, opts, &callchain_param);
+-
+ 	evlist__for_each_entry(evlist, pos) {
+ try_again:
+ 		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
+@@ -2483,6 +2481,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ 
+ 	evlist__uniquify_name(rec->evlist);
+ 
++	evlist__config(rec->evlist, opts, &callchain_param);
++
+ 	/* Debug message used by test scripts */
+ 	pr_debug3("perf record opening and mmapping events\n");
+ 	if (record__open(rec) != 0) {
+@@ -2881,10 +2881,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ 	}
+ #endif
+ 	zstd_fini(&session->zstd_data);
+-	perf_session__delete(session);
+-
+ 	if (!opts->no_bpf_event)
+ 		evlist__stop_sb_thread(rec->sb_evlist);
++
++	perf_session__delete(session);
+ 	return status;
+ }
+ 
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index dcd93ee5fc24e..5b684d2ab4be5 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -428,7 +428,7 @@ static int report__setup_sample_type(struct report *rep)
+ 		 * compatibility, set the bit if it's an old perf data file.
+ 		 */
+ 		evlist__for_each_entry(session->evlist, evsel) {
+-			if (strstr(evsel->name, "arm_spe") &&
++			if (strstr(evsel__name(evsel), "arm_spe") &&
+ 				!(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ 				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+ 				sample_type |= PERF_SAMPLE_DATA_SRC;
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index b248c433529a8..1bfb223473715 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -2963,8 +2963,11 @@ static int timehist_check_attr(struct perf_sched *sched,
+ 			return -1;
+ 		}
+ 
+-		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
+-			pr_info("Samples do not have callchains.\n");
++		/* only need to save callchain related to sched_switch event */
++		if (sched->show_callchain &&
++		    evsel__name_is(evsel, "sched:sched_switch") &&
++		    !evsel__has_callchain(evsel)) {
++			pr_info("Samples of sched_switch event do not have callchains.\n");
+ 			sched->show_callchain = 0;
+ 			symbol_conf.use_callchain = 0;
+ 		}
+diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+index ec2ff78e2b5f2..3ab1d3a6638c4 100644
+--- a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
++++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+@@ -2,71 +2,71 @@
+   {
+     "BriefDescription": "Transaction count",
+     "MetricName": "transaction",
+-    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
++    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL if has_event(TX_C_TEND) else 0"
+   },
+   {
+     "BriefDescription": "Cycles per Instruction",
+     "MetricName": "cpi",
+-    "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
++    "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS) else 0"
+   },
+   {
+     "BriefDescription": "Problem State Instruction Ratio",
+     "MetricName": "prbstate",
+-    "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
++    "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+   },
+   {
+     "BriefDescription": "Level One Miss per 100 Instructions",
+     "MetricName": "l1mp",
+-    "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
++    "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
+   },
+   {
+     "BriefDescription": "Percentage sourced from Level 2 cache",
+     "MetricName": "l2p",
+-    "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++    "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ) else 0"
+   },
+   {
+     "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+     "MetricName": "l3p",
+-    "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++    "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_CHIP_HIT) else 0"
+   },
+   {
+     "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+     "MetricName": "l4lp",
+-    "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++    "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_DRAWER_HIT) else 0"
+   },
+   {
+     "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+     "MetricName": "l4rp",
+-    "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++    "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_OFF_DRAWER) else 0"
+   },
+   {
+     "BriefDescription": "Percentage sourced from memory",
+     "MetricName": "memp",
+-    "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
++    "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_ON_CHIP_MEMORY) else 0"
+   },
+   {
+     "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+     "MetricName": "finite_cpi",
+-    "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
++    "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS if has_event(L1C_TLB2_MISSES) else 0"
+   },
+   {
+     "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+     "MetricName": "est_cpi",
+-    "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
++    "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(INSTRUCTIONS) else 0"
+   },
+   {
+     "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+     "MetricName": "scpl1m",
+-    "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
++    "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES) if has_event(L1C_TLB2_MISSES) else 0"
+   },
+   {
+     "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+     "MetricName": "tlb_percent",
+-    "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
++    "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100 if has_event(CPU_CYCLES) else 0"
+   },
+   {
+     "BriefDescription": "Estimated Cycles per TLB Miss",
+     "MetricName": "tlb_miss",
+-    "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
++    "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) if has_event(DTLB2_MISSES) else 0"
+   }
+ ]
+diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
+index a918e1af77a57..b22648d127517 100644
+--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
++++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
+@@ -5,4 +5,4 @@ Family-model,Version,Filename,EventType
+ ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
+ ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
+ ^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
+-^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
++^IBM.393[12].*$,3,cf_z16,core
+diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
+index d13ee7683d9d8..e05b370b1e2b1 100644
+--- a/tools/perf/tests/builtin-test.c
++++ b/tools/perf/tests/builtin-test.c
+@@ -274,11 +274,8 @@ static int finish_test(struct child_test *child_test, int width)
+ 	struct test_suite *t = child_test->test;
+ 	int i = child_test->test_num;
+ 	int subi = child_test->subtest;
+-	int out = child_test->process.out;
+ 	int err = child_test->process.err;
+-	bool out_done = out <= 0;
+ 	bool err_done = err <= 0;
+-	struct strbuf out_output = STRBUF_INIT;
+ 	struct strbuf err_output = STRBUF_INIT;
+ 	int ret;
+ 
+@@ -290,11 +287,9 @@ static int finish_test(struct child_test *child_test, int width)
+ 		pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));
+ 
+ 	/*
+-	 * Busy loop reading from the child's stdout and stderr that are set to
+-	 * be non-blocking until EOF.
++	 * Busy loop reading from the child's stdout/stderr that are set to be
++	 * non-blocking until EOF.
+ 	 */
+-	if (!out_done)
+-		fcntl(out, F_SETFL, O_NONBLOCK);
+ 	if (!err_done)
+ 		fcntl(err, F_SETFL, O_NONBLOCK);
+ 	if (verbose > 1) {
+@@ -303,11 +298,8 @@ static int finish_test(struct child_test *child_test, int width)
+ 		else
+ 			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+ 	}
+-	while (!out_done || !err_done) {
+-		struct pollfd pfds[2] = {
+-			{ .fd = out,
+-			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
+-			},
++	while (!err_done) {
++		struct pollfd pfds[1] = {
+ 			{ .fd = err,
+ 			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
+ 			},
+@@ -317,21 +309,7 @@ static int finish_test(struct child_test *child_test, int width)
+ 
+ 		/* Poll to avoid excessive spinning, timeout set for 1000ms. */
+ 		poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/1000);
+-		if (!out_done && pfds[0].revents) {
+-			errno = 0;
+-			len = read(out, buf, sizeof(buf) - 1);
+-
+-			if (len <= 0) {
+-				out_done = errno != EAGAIN;
+-			} else {
+-				buf[len] = '\0';
+-				if (verbose > 1)
+-					fprintf(stdout, "%s", buf);
+-				else
+-					strbuf_addstr(&out_output, buf);
+-			}
+-		}
+-		if (!err_done && pfds[1].revents) {
++		if (!err_done && pfds[0].revents) {
+ 			errno = 0;
+ 			len = read(err, buf, sizeof(buf) - 1);
+ 
+@@ -354,14 +332,10 @@ static int finish_test(struct child_test *child_test, int width)
+ 			pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
+ 		else
+ 			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+-		fprintf(stdout, "%s", out_output.buf);
+ 		fprintf(stderr, "%s", err_output.buf);
+ 	}
+-	strbuf_release(&out_output);
+ 	strbuf_release(&err_output);
+ 	print_test_result(t, i, subi, ret, width);
+-	if (out > 0)
+-		close(out);
+ 	if (err > 0)
+ 		close(err);
+ 	return 0;
+@@ -394,6 +368,7 @@ static int start_test(struct test_suite *test, int i, int subi, struct child_tes
+ 		(*child)->process.no_stdout = 1;
+ 		(*child)->process.no_stderr = 1;
+ 	} else {
++		(*child)->process.stdout_to_stderr = 1;
+ 		(*child)->process.out = -1;
+ 		(*child)->process.err = -1;
+ 	}
+diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
+index 7a3a7bbbec714..29d2f3ee4e10f 100644
+--- a/tools/perf/tests/code-reading.c
++++ b/tools/perf/tests/code-reading.c
+@@ -637,11 +637,11 @@ static int do_test_code_reading(bool try_kcore)
+ 
+ 		evlist__config(evlist, &opts, NULL);
+ 
+-		evsel = evlist__first(evlist);
+-
+-		evsel->core.attr.comm = 1;
+-		evsel->core.attr.disabled = 1;
+-		evsel->core.attr.enable_on_exec = 0;
++		evlist__for_each_entry(evlist, evsel) {
++			evsel->core.attr.comm = 1;
++			evsel->core.attr.disabled = 1;
++			evsel->core.attr.enable_on_exec = 0;
++		}
+ 
+ 		ret = evlist__open(evlist);
+ 		if (ret < 0) {
+diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
+index 65dd852071250..3302ea0b96723 100755
+--- a/tools/perf/tests/shell/test_arm_coresight.sh
++++ b/tools/perf/tests/shell/test_arm_coresight.sh
+@@ -188,7 +188,7 @@ arm_cs_etm_snapshot_test() {
+ 
+ arm_cs_etm_basic_test() {
+ 	echo "Recording trace with '$*'"
+-	perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
++	perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1
+ 
+ 	perf_script_branch_samples ls &&
+ 	perf_report_branch_samples ls &&
+diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c
+index ddd40bc63448a..8e08fc75a973e 100644
+--- a/tools/perf/tests/workloads/datasym.c
++++ b/tools/perf/tests/workloads/datasym.c
+@@ -16,6 +16,22 @@ static int datasym(int argc __maybe_unused, const char **argv __maybe_unused)
+ {
+ 	for (;;) {
+ 		buf1.data1++;
++		if (buf1.data1 == 123) {
++			/*
++			 * Add some 'noise' in the loop to work around errata
++			 * 1694299 on Arm N1.
++			 *
++			 * Bias exists in SPE sampling which can cause the load
++			 * and store instructions to be skipped entirely. This
++			 * comes and goes randomly depending on the offset the
++			 * linker places the datasym loop at in the Perf binary.
++			 * With an extra branch in the middle of the loop that
++			 * isn't always taken, the instruction stream is no
++			 * longer a continuous repeating pattern that interacts
++			 * badly with the bias.
++			 */
++			buf1.data1++;
++		}
+ 		buf1.data2 += buf1.data1;
+ 	}
+ 	return 0;
+diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
+index 603d11283cbdc..19503e8387385 100644
+--- a/tools/perf/ui/browser.c
++++ b/tools/perf/ui/browser.c
+@@ -203,7 +203,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser)
+ void ui_browser__handle_resize(struct ui_browser *browser)
+ {
+ 	ui__refresh_dimensions(false);
+-	ui_browser__show(browser, browser->title, ui_helpline__current);
++	ui_browser__show(browser, browser->title ?: "", ui_helpline__current);
+ 	ui_browser__refresh(browser);
+ }
+ 
+@@ -287,7 +287,8 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
+ 	mutex_lock(&ui__lock);
+ 	__ui_browser__show_title(browser, title);
+ 
+-	browser->title = title;
++	free(browser->title);
++	browser->title = strdup(title);
+ 	zfree(&browser->helpline);
+ 
+ 	va_start(ap, helpline);
+@@ -304,6 +305,7 @@ void ui_browser__hide(struct ui_browser *browser)
+ 	mutex_lock(&ui__lock);
+ 	ui_helpline__pop();
+ 	zfree(&browser->helpline);
++	zfree(&browser->title);
+ 	mutex_unlock(&ui__lock);
+ }
+ 
+diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
+index 510ce45540501..6e98d5f8f71cc 100644
+--- a/tools/perf/ui/browser.h
++++ b/tools/perf/ui/browser.h
+@@ -21,7 +21,7 @@ struct ui_browser {
+ 	u8	      extra_title_lines;
+ 	int	      current_color;
+ 	void	      *priv;
+-	const char    *title;
++	char	      *title;
+ 	char	      *helpline;
+ 	const char    *no_samples_msg;
+ 	void 	      (*refresh_dimensions)(struct ui_browser *browser);
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index 50ca92255ff62..79d082155c2f9 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -887,10 +887,17 @@ static struct annotated_source *annotated_source__new(void)
+ 
+ static __maybe_unused void annotated_source__delete(struct annotated_source *src)
+ {
++	struct hashmap_entry *cur;
++	size_t bkt;
++
+ 	if (src == NULL)
+ 		return;
+ 
+-	hashmap__free(src->samples);
++	if (src->samples) {
++		hashmap__for_each_entry(src->samples, cur, bkt)
++			zfree(&cur->pvalue);
++		hashmap__free(src->samples);
++	}
+ 	zfree(&src->histograms);
+ 	free(src);
+ }
+@@ -3025,7 +3032,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
+ 	annotation__update_column_widths(notes);
+ }
+ 
+-static void annotation__calc_lines(struct annotation *notes, struct map *map,
++static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
+ 				   struct rb_root *root)
+ {
+ 	struct annotation_line *al;
+@@ -3033,6 +3040,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ 
+ 	list_for_each_entry(al, &notes->src->source, node) {
+ 		double percent_max = 0.0;
++		u64 addr;
+ 		int i;
+ 
+ 		for (i = 0; i < al->data_nr; i++) {
+@@ -3048,8 +3056,9 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ 		if (percent_max <= 0.5)
+ 			continue;
+ 
+-		al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
+-				       false, true, notes->start + al->offset);
++		addr = map__rip_2objdump(ms->map, ms->sym->start);
++		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
++				       false, true, ms->sym->start + al->offset);
+ 		insert_source_line(&tmp_root, al);
+ 	}
+ 
+@@ -3060,7 +3069,7 @@ static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
+ {
+ 	struct annotation *notes = symbol__annotation(ms->sym);
+ 
+-	annotation__calc_lines(notes, ms->map, root);
++	annotation__calc_lines(notes, ms, root);
+ }
+ 
+ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 2791126069b4f..f93e57e2fc42e 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -1136,6 +1136,68 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
+ 	return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
+ }
+ 
++#if defined(HAVE_DWARF_GETLOCATIONS_SUPPORT) || defined(HAVE_DWARF_CFI_SUPPORT)
++static int reg_from_dwarf_op(Dwarf_Op *op)
++{
++	switch (op->atom) {
++	case DW_OP_reg0 ... DW_OP_reg31:
++		return op->atom - DW_OP_reg0;
++	case DW_OP_breg0 ... DW_OP_breg31:
++		return op->atom - DW_OP_breg0;
++	case DW_OP_regx:
++	case DW_OP_bregx:
++		return op->number;
++	default:
++		break;
++	}
++	return -1;
++}
++
++static int offset_from_dwarf_op(Dwarf_Op *op)
++{
++	switch (op->atom) {
++	case DW_OP_reg0 ... DW_OP_reg31:
++	case DW_OP_regx:
++		return 0;
++	case DW_OP_breg0 ... DW_OP_breg31:
++		return op->number;
++	case DW_OP_bregx:
++		return op->number2;
++	default:
++		break;
++	}
++	return -1;
++}
++
++static bool check_allowed_ops(Dwarf_Op *ops, size_t nops)
++{
++	/* The first op is checked separately */
++	ops++;
++	nops--;
++
++	/*
++	 * Make sure the location expression matches the given register and
++	 * offset exactly.  Thus any complex expression is rejected, and only
++	 * a few selected operators that don't change the location are
++	 * allowed.
++	 */
++	while (nops) {
++		switch (ops->atom) {
++		case DW_OP_stack_value:
++		case DW_OP_deref_size:
++		case DW_OP_deref:
++		case DW_OP_piece:
++			break;
++		default:
++			return false;
++		}
++		ops++;
++		nops--;
++	}
++	return true;
++}
++#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT || HAVE_DWARF_CFI_SUPPORT */
++
+ #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
+ /**
+  * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
+@@ -1280,7 +1342,7 @@ struct find_var_data {
+ #define DWARF_OP_DIRECT_REGS  32
+ 
+ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
+-			     u64 addr_offset, u64 addr_type)
++			     u64 addr_offset, u64 addr_type, bool is_pointer)
+ {
+ 	Dwarf_Die type_die;
+ 	Dwarf_Word size;
+@@ -1294,6 +1356,12 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
+ 	if (die_get_real_type(die_mem, &type_die) == NULL)
+ 		return false;
+ 
++	if (is_pointer && dwarf_tag(&type_die) == DW_TAG_pointer_type) {
++		/* Get the target type of the pointer */
++		if (die_get_real_type(&type_die, &type_die) == NULL)
++			return false;
++	}
++
+ 	if (dwarf_aggregate_size(&type_die, &size) < 0)
+ 		return false;
+ 
+@@ -1305,34 +1373,6 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
+ 	return true;
+ }
+ 
+-static bool check_allowed_ops(Dwarf_Op *ops, size_t nops)
+-{
+-	/* The first op is checked separately */
+-	ops++;
+-	nops--;
+-
+-	/*
+-	 * It needs to make sure if the location expression matches to the given
+-	 * register and offset exactly.  Thus it rejects any complex expressions
+-	 * and only allows a few of selected operators that doesn't change the
+-	 * location.
+-	 */
+-	while (nops) {
+-		switch (ops->atom) {
+-		case DW_OP_stack_value:
+-		case DW_OP_deref_size:
+-		case DW_OP_deref:
+-		case DW_OP_piece:
+-			break;
+-		default:
+-			return false;
+-		}
+-		ops++;
+-		nops--;
+-	}
+-	return true;
+-}
+-
+ /* Only checks direct child DIEs in the given scope. */
+ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
+ {
+@@ -1361,31 +1401,38 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
+ 		if (data->is_fbreg && ops->atom == DW_OP_fbreg &&
+ 		    data->offset >= (int)ops->number &&
+ 		    check_allowed_ops(ops, nops) &&
+-		    match_var_offset(die_mem, data, data->offset, ops->number))
++		    match_var_offset(die_mem, data, data->offset, ops->number,
++				     /*is_pointer=*/false))
+ 			return DIE_FIND_CB_END;
+ 
+ 		/* Only match with a simple case */
+ 		if (data->reg < DWARF_OP_DIRECT_REGS) {
+ 			/* pointer variables saved in a register 0 to 31 */
+ 			if (ops->atom == (DW_OP_reg0 + data->reg) &&
+-			    check_allowed_ops(ops, nops))
++			    check_allowed_ops(ops, nops) &&
++			    match_var_offset(die_mem, data, data->offset, 0,
++					     /*is_pointer=*/true))
+ 				return DIE_FIND_CB_END;
+ 
+ 			/* Local variables accessed by a register + offset */
+ 			if (ops->atom == (DW_OP_breg0 + data->reg) &&
+ 			    check_allowed_ops(ops, nops) &&
+-			    match_var_offset(die_mem, data, data->offset, ops->number))
++			    match_var_offset(die_mem, data, data->offset, ops->number,
++					     /*is_pointer=*/false))
+ 				return DIE_FIND_CB_END;
+ 		} else {
+ 			/* pointer variables saved in a register 32 or above */
+ 			if (ops->atom == DW_OP_regx && ops->number == data->reg &&
+-			    check_allowed_ops(ops, nops))
++			    check_allowed_ops(ops, nops) &&
++			    match_var_offset(die_mem, data, data->offset, 0,
++					     /*is_pointer=*/true))
+ 				return DIE_FIND_CB_END;
+ 
+ 			/* Local variables accessed by a register + offset */
+ 			if (ops->atom == DW_OP_bregx && data->reg == ops->number &&
+ 			    check_allowed_ops(ops, nops) &&
+-			    match_var_offset(die_mem, data, data->offset, ops->number2))
++			    match_var_offset(die_mem, data, data->offset, ops->number2,
++					     /*is_pointer=*/false))
+ 				return DIE_FIND_CB_END;
+ 		}
+ 	}
+@@ -1447,7 +1494,8 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
+ 			continue;
+ 
+ 		if (check_allowed_ops(ops, nops) &&
+-		    match_var_offset(die_mem, data, data->addr, ops->number))
++		    match_var_offset(die_mem, data, data->addr, ops->number,
++				     /*is_pointer=*/false))
+ 			return DIE_FIND_CB_END;
+ 	}
+ 	return DIE_FIND_CB_SIBLING;
+@@ -1479,41 +1527,69 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+ 		*offset = data.offset;
+ 	return result;
+ }
+-#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+ 
+-#ifdef HAVE_DWARF_CFI_SUPPORT
+-static int reg_from_dwarf_op(Dwarf_Op *op)
++static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
+ {
+-	switch (op->atom) {
+-	case DW_OP_reg0 ... DW_OP_reg31:
+-		return op->atom - DW_OP_reg0;
+-	case DW_OP_breg0 ... DW_OP_breg31:
+-		return op->atom - DW_OP_breg0;
+-	case DW_OP_regx:
+-	case DW_OP_bregx:
+-		return op->number;
+-	default:
+-		break;
+-	}
+-	return -1;
++	struct die_var_type **var_types = arg;
++	Dwarf_Die type_die;
++	int tag = dwarf_tag(die_mem);
++	Dwarf_Attribute attr;
++	Dwarf_Addr base, start, end;
++	Dwarf_Op *ops;
++	size_t nops;
++	struct die_var_type *vt;
++
++	if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
++		return DIE_FIND_CB_SIBLING;
++
++	if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
++		return DIE_FIND_CB_SIBLING;
++
++	/*
++	 * Collect only the first location; the remaining state can be
++	 * reconstructed by following the instructions.
++	 * start == 0 means the location covers the whole range.
++	 */
++	if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
++		return DIE_FIND_CB_SIBLING;
++
++	if (die_get_real_type(die_mem, &type_die) == NULL)
++		return DIE_FIND_CB_SIBLING;
++
++	vt = malloc(sizeof(*vt));
++	if (vt == NULL)
++		return DIE_FIND_CB_END;
++
++	vt->die_off = dwarf_dieoffset(&type_die);
++	vt->addr = start;
++	vt->reg = reg_from_dwarf_op(ops);
++	vt->offset = offset_from_dwarf_op(ops);
++	vt->next = *var_types;
++	*var_types = vt;
++
++	return DIE_FIND_CB_SIBLING;
+ }
+ 
+-static int offset_from_dwarf_op(Dwarf_Op *op)
++/**
++ * die_collect_vars - Save all variables and parameters
++ * @sc_die: a scope DIE
++ * @var_types: a pointer to save the resulting list
++ *
++ * Collect all variables and parameters in @sc_die and save them to @var_types.
++ * The @var_types is a singly-linked list containing type and location info.
++ * Actual type can be retrieved using dwarf_offdie() with 'die_off' later.
++ *
++ * Callers should free @var_types.
++ */
++void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types)
+ {
+-	switch (op->atom) {
+-	case DW_OP_reg0 ... DW_OP_reg31:
+-	case DW_OP_regx:
+-		return 0;
+-	case DW_OP_breg0 ... DW_OP_breg31:
+-		return op->number;
+-	case DW_OP_bregx:
+-		return op->number2;
+-	default:
+-		break;
+-	}
+-	return -1;
++	Dwarf_Die die_mem;
++
++	die_find_child(sc_die, __die_collect_vars_cb, (void *)var_types, &die_mem);
+ }
++#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+ 
++#ifdef HAVE_DWARF_CFI_SUPPORT
+ /**
+  * die_get_cfa - Get frame base information
+  * @dwarf: a Dwarf info
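To make the relocated helpers concrete: given a simple one-op location
expression from dwarf_getlocations(), they reduce it to a (register, offset)
pair. A sketch using the elfutils types (the values in the comments follow
directly from the switch cases above):

    #include <elfutils/libdw.h>
    #include <dwarf.h>

    Dwarf_Op op = { .atom = DW_OP_breg5, .number = 16 }; /* "DWARF reg 5 + 16" */

    int reg = reg_from_dwarf_op(&op);    /* 5: DW_OP_breg5 - DW_OP_breg0 */
    int off = offset_from_dwarf_op(&op); /* 16: the base-register displacement */

check_allowed_ops() then rejects anything after the first op except
DW_OP_stack_value, DW_OP_deref, DW_OP_deref_size and DW_OP_piece, so composite
locations are never mistaken for a plain register + offset.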
+diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
+index 85dd527ae1f70..efafd3a1f5b6f 100644
+--- a/tools/perf/util/dwarf-aux.h
++++ b/tools/perf/util/dwarf-aux.h
+@@ -135,6 +135,15 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
+ /* Get the list of including scopes */
+ int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes);
+ 
++/* Variable type information */
++struct die_var_type {
++	struct die_var_type *next;
++	u64 die_off;
++	u64 addr;
++	int reg;
++	int offset;
++};
++
+ #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
+ 
+ /* Get byte offset range of given variable DIE */
+@@ -150,6 +159,9 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+ 				     Dwarf_Addr addr, Dwarf_Die *die_mem,
+ 				     int *offset);
+ 
++/* Save all variables and parameters in this scope */
++void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types);
++
+ #else /*  HAVE_DWARF_GETLOCATIONS_SUPPORT */
+ 
+ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+@@ -178,6 +190,11 @@ static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unu
+ 	return NULL;
+ }
+ 
++static inline void die_collect_vars(Dwarf_Die *sc_die __maybe_unused,
++				    struct die_var_type **var_types __maybe_unused)
++{
++}
++
+ #endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+ 
+ #ifdef HAVE_DWARF_CFI_SUPPORT
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index b450178e3420b..e733f6b1f7ac5 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
+ 	bool ret = false;
+ 
+ 	decoder->state.type &= ~INTEL_PT_BRANCH;
++	decoder->state.insn_op = INTEL_PT_OP_OTHER;
++	decoder->state.insn_len = 0;
+ 
+ 	if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
+ 		bool ip = decoder->set_fup_cfe_ip;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index f38893e0b0369..4db9a098f5926 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -764,6 +764,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+ 
+ 	addr_location__init(&al);
+ 	intel_pt_insn->length = 0;
++	intel_pt_insn->op = INTEL_PT_OP_OTHER;
+ 
+ 	if (to_ip && *ip == to_ip)
+ 		goto out_no_cache;
+@@ -898,6 +899,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+ 
+ 			if (to_ip && *ip == to_ip) {
+ 				intel_pt_insn->length = 0;
++				intel_pt_insn->op = INTEL_PT_OP_OTHER;
+ 				goto out_no_cache;
+ 			}
+ 
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 527517db31821..07c22f765fab4 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1549,8 +1549,8 @@ static int machine__update_kernel_mmap(struct machine *machine,
+ 	updated = map__get(orig);
+ 
+ 	machine->vmlinux_map = updated;
+-	machine__set_kernel_mmap(machine, start, end);
+ 	maps__remove(machine__kernel_maps(machine), orig);
++	machine__set_kernel_mmap(machine, start, end);
+ 	err = maps__insert(machine__kernel_maps(machine), updated);
+ 	map__put(orig);
+ 
+diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
+index 8f04d3b7f3ec7..59fbbba796974 100644
+--- a/tools/perf/util/perf_event_attr_fprintf.c
++++ b/tools/perf/util/perf_event_attr_fprintf.c
+@@ -7,6 +7,8 @@
+ #include <linux/types.h>
+ #include <linux/perf_event.h>
+ #include "util/evsel_fprintf.h"
++#include "util/pmu.h"
++#include "util/pmus.h"
+ #include "trace-event.h"
+ 
+ struct bit_names {
+@@ -75,9 +77,12 @@ static void __p_read_format(char *buf, size_t size, u64 value)
+ }
+ 
+ #define ENUM_ID_TO_STR_CASE(x) case x: return (#x);
+-static const char *stringify_perf_type_id(u64 value)
++static const char *stringify_perf_type_id(struct perf_pmu *pmu, u32 type)
+ {
+-	switch (value) {
++	if (pmu)
++		return pmu->name;
++
++	switch (type) {
+ 	ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE)
+ 	ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE)
+ 	ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT)
+@@ -175,9 +180,9 @@ do {								\
+ #define print_id_unsigned(_s)	PRINT_ID(_s, "%"PRIu64)
+ #define print_id_hex(_s)	PRINT_ID(_s, "%#"PRIx64)
+ 
+-static void __p_type_id(char *buf, size_t size, u64 value)
++static void __p_type_id(struct perf_pmu *pmu, char *buf, size_t size, u64 value)
+ {
+-	print_id_unsigned(stringify_perf_type_id(value));
++	print_id_unsigned(stringify_perf_type_id(pmu, value));
+ }
+ 
+ static void __p_config_hw_id(char *buf, size_t size, u64 value)
+@@ -217,8 +222,14 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
+ }
+ #endif
+ 
+-static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
++static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value)
+ {
++	const char *name = perf_pmu__name_from_config(pmu, value);
++
++	if (name) {
++		print_id_hex(name);
++		return;
++	}
+ 	switch (type) {
+ 	case PERF_TYPE_HARDWARE:
+ 		return __p_config_hw_id(buf, size, value);
+@@ -246,8 +257,8 @@ static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
+ #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
+ #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
+ #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)
+-#define p_type_id(val)		__p_type_id(buf, BUF_SIZE, val)
+-#define p_config_id(val)	__p_config_id(buf, BUF_SIZE, attr->type, val)
++#define p_type_id(val)		__p_type_id(pmu, buf, BUF_SIZE, val)
++#define p_config_id(val)	__p_config_id(pmu, buf, BUF_SIZE, attr->type, val)
+ 
+ #define PRINT_ATTRn(_n, _f, _p, _a)			\
+ do {							\
+@@ -262,6 +273,7 @@ do {							\
+ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
+ 			     attr__fprintf_f attr__fprintf, void *priv)
+ {
++	struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ 	char buf[BUF_SIZE];
+ 	int ret = 0;
+ 
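
The perf_event_attr_fprintf.c hunks make type and config print symbolic
PMU/event names when they can be resolved. A hedged usage sketch, assuming
the tools/perf tree context; the callback type follows the attr__fprintf_f
convention, and the names in the comment are illustrative only:

/* Hypothetical callback matching tools/perf's attr__fprintf_f. */
static int dump_attr_field(FILE *fp, const char *name, const char *val,
			   void *priv __maybe_unused)
{
	return fprintf(fp, "  %-24s %s\n", name, val);
}

static void show_attr(void)
{
	/* With the PMU lookup above, "type 24" may now print as the PMU
	 * name (e.g. "uncore_imc_0") and a raw config as its alias. */
	struct perf_event_attr attr = { .type = PERF_TYPE_RAW, .config = 0x2234 };

	perf_event_attr__fprintf(stdout, &attr, dump_attr_field, /*priv=*/NULL);
}
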
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index f39cbbc1a7ec1..cc349d9cb0f9f 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -36,6 +36,18 @@ struct perf_pmu perf_pmu__fake = {
+ 
+ #define UNIT_MAX_LEN	31 /* max length for event unit name */
+ 
++enum event_source {
++	/* An event loaded from /sys/devices/<pmu>/events. */
++	EVENT_SRC_SYSFS,
++	/* An event loaded from a CPUID-matched json file. */
++	EVENT_SRC_CPU_JSON,
++	/*
++	 * An event loaded from a /sys/devices/<pmu>/identifier-matched json
++	 * file.
++	 */
++	EVENT_SRC_SYS_JSON,
++};
++
+ /**
+  * struct perf_pmu_alias - An event either read from sysfs or builtin in
+  * pmu-events.c, created by parsing the pmu-events json files.
+@@ -425,9 +437,30 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
+ {
+ 	struct perf_pmu_alias *alias;
+ 
+-	if (load && !pmu->sysfs_aliases_loaded)
+-		pmu_aliases_parse(pmu);
++	if (load && !pmu->sysfs_aliases_loaded) {
++		bool has_sysfs_event;
++		char event_file_name[FILENAME_MAX + 8];
++
++		/*
++		 * Test if alias/event 'name' exists in the PMU's sysfs/events
++		 * directory. If not, skip parsing the sysfs aliases. Sysfs event
++		 * names must be all lower or all upper case.
++		 */
++		scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name);
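++		/* start at index 7 to skip the "events/" prefix just written */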
++		for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
++			event_file_name[i] = tolower(event_file_name[i]);
++
++		has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
++		if (!has_sysfs_event) {
++			for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
++				event_file_name[i] = toupper(event_file_name[i]);
++
++			has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
++		}
++		if (has_sysfs_event)
++			pmu_aliases_parse(pmu);
+ 
++	}
+ 	list_for_each_entry(alias, &pmu->aliases, list) {
+ 		if (!strcasecmp(alias->name, name))
+ 			return alias;
+@@ -500,7 +533,7 @@ static int update_alias(const struct pmu_event *pe,
+ 
+ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ 				const char *desc, const char *val, FILE *val_fd,
+-				const struct pmu_event *pe)
++			        const struct pmu_event *pe, enum event_source src)
+ {
+ 	struct perf_pmu_alias *alias;
+ 	int ret;
+@@ -552,25 +585,30 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
+ 		}
+ 		snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
+ 	}
+-	if (!pe) {
+-		/* Update an event from sysfs with json data. */
+-		struct update_alias_data data = {
+-			.pmu = pmu,
+-			.alias = alias,
+-		};
+-
++	switch (src) {
++	default:
++	case EVENT_SRC_SYSFS:
+ 		alias->from_sysfs = true;
+ 		if (pmu->events_table) {
++			/* Update an event from sysfs with json data. */
++			struct update_alias_data data = {
++				.pmu = pmu,
++				.alias = alias,
++			};
+ 			if (pmu_events_table__find_event(pmu->events_table, pmu, name,
+ 							 update_alias, &data) == 0)
+-				pmu->loaded_json_aliases++;
++				pmu->cpu_json_aliases++;
+ 		}
+-	}
+-
+-	if (!pe)
+ 		pmu->sysfs_aliases++;
+-	else
+-		pmu->loaded_json_aliases++;
++		break;
++	case EVENT_SRC_CPU_JSON:
++		pmu->cpu_json_aliases++;
++		break;
++	case EVENT_SRC_SYS_JSON:
++		pmu->sys_json_aliases++;
++		break;
++	}
+ 	list_add_tail(&alias->list, &pmu->aliases);
+ 	return 0;
+ }
+@@ -646,7 +684,8 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
+ 		}
+ 
+ 		if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
+-					/*val=*/ NULL, file, /*pe=*/ NULL) < 0)
++					/*val=*/ NULL, file, /*pe=*/ NULL,
++					EVENT_SRC_SYSFS) < 0)
+ 			pr_debug("Cannot set up %s\n", name);
+ 		fclose(file);
+ 	}
+@@ -900,7 +939,8 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
+ {
+ 	struct perf_pmu *pmu = vdata;
+ 
+-	perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe);
++	perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL,
++			    pe, EVENT_SRC_CPU_JSON);
+ 	return 0;
+ }
+ 
+@@ -935,13 +975,14 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
+ 		return 0;
+ 
+ 	if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
+-			pmu_uncore_identifier_match(pe->compat, pmu->id)) {
++	    pmu_uncore_identifier_match(pe->compat, pmu->id)) {
+ 		perf_pmu__new_alias(pmu,
+ 				pe->name,
+ 				pe->desc,
+ 				pe->event,
+ 				/*val_fd=*/ NULL,
+-				pe);
++				pe,
++				EVENT_SRC_SYS_JSON);
+ 	}
+ 
+ 	return 0;
+@@ -1035,6 +1076,12 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
+ 	pmu->max_precise = pmu_max_precise(dirfd, pmu);
+ 	pmu->alias_name = pmu_find_alias_name(pmu, dirfd);
+ 	pmu->events_table = perf_pmu__find_events_table(pmu);
++	/*
++	 * Load the sys json events/aliases when loading the PMU as each event
++	 * may have a different compat regular expression. We therefore can't
++	 * know the number of sys json events/aliases without computing the
++	 * regular expressions for them all.
++	 */
+ 	pmu_add_sys_aliases(pmu);
+ 	list_add_tail(&pmu->list, pmus);
+ 
+@@ -1632,15 +1679,15 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu)
+ {
+ 	size_t nr;
+ 
+-	if (!pmu->sysfs_aliases_loaded)
+-		pmu_aliases_parse(pmu);
+-
+-	nr = pmu->sysfs_aliases;
++	pmu_aliases_parse(pmu);
++	nr = pmu->sysfs_aliases + pmu->sys_json_aliases;
+ 
+ 	if (pmu->cpu_aliases_added)
+-		 nr += pmu->loaded_json_aliases;
++		 nr += pmu->cpu_json_aliases;
+ 	else if (pmu->events_table)
+-		nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases;
++		nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases;
++	else
++		assert(pmu->cpu_json_aliases == 0);
+ 
+ 	return pmu->selectable ? nr + 1 : nr;
+ }
+@@ -1693,6 +1740,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
+ 	struct strbuf sb;
+ 
+ 	strbuf_init(&sb, /*hint=*/ 0);
++	pmu_aliases_parse(pmu);
+ 	pmu_add_cpu_aliases(pmu);
+ 	list_for_each_entry(event, &pmu->aliases, list) {
+ 		size_t buf_used;
+@@ -2085,3 +2133,22 @@ void perf_pmu__delete(struct perf_pmu *pmu)
+ 	zfree(&pmu->id);
+ 	free(pmu);
+ }
++
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config)
++{
++	struct perf_pmu_alias *event;
++
++	if (!pmu)
++		return NULL;
++
++	pmu_aliases_parse(pmu);
++	pmu_add_cpu_aliases(pmu);
++	list_for_each_entry(event, &pmu->aliases, list) {
++		struct perf_event_attr attr = {.config = 0,};
++		int ret = perf_pmu__config(pmu, &attr, &event->terms, NULL);
++
++		if (ret == 0 && config == attr.config)
++			return event->name;
++	}
++	return NULL;
++}
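
A short sketch of how the new helper composes with perf_pmus__find_by_type()
(hypothetical caller; a NULL pmu or an unmatched config simply yields NULL,
as the function above shows):

/* Hypothetical: resolve a raw attr->config back to its event alias. */
static void print_config_name(const struct perf_event_attr *attr)
{
	struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
	const char *name = perf_pmu__name_from_config(pmu, attr->config);

	printf("config %#llx -> %s\n",
	       (unsigned long long)attr->config, name ?: "<unknown>");
}
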
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index e35d985206db5..77c59ebc05261 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -123,8 +123,10 @@ struct perf_pmu {
+ 	const struct pmu_events_table *events_table;
+ 	/** @sysfs_aliases: Number of sysfs aliases loaded. */
+ 	uint32_t sysfs_aliases;
+-	/** @sysfs_aliases: Number of json event aliases loaded. */
+-	uint32_t loaded_json_aliases;
++	/** @cpu_json_aliases: Number of json event aliases loaded that are specific to the CPUID. */
++	uint32_t cpu_json_aliases;
++	/** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */
++	uint32_t sys_json_aliases;
+ 	/** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */
+ 	bool sysfs_aliases_loaded;
+ 	/**
+@@ -273,5 +275,6 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
+ struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
+ void perf_pmu__delete(struct perf_pmu *pmu);
+ struct perf_pmu *perf_pmus__find_core_pmu(void);
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config);
+ 
+ #endif /* __PMU_H */
+diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
+index 2a0ad9ecf0a20..5c12459e9765f 100644
+--- a/tools/perf/util/probe-event.c
++++ b/tools/perf/util/probe-event.c
+@@ -11,6 +11,7 @@
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <errno.h>
++#include <libgen.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
+index 075c0f79b1b92..0aeb97c11c030 100644
+--- a/tools/perf/util/python.c
++++ b/tools/perf/util/python.c
+@@ -103,6 +103,16 @@ int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char
+ 	return EOF;
+ }
+ 
++const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
++{
++	return NULL;
++}
++
++struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
++{
++	return NULL;
++}
++
+ int perf_pmus__num_core_pmus(void)
+ {
+ 	return 1;
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index bfc1d705f4371..91d2f7f65df74 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -1223,6 +1223,9 @@ static void print_metric_headers(struct perf_stat_config *config,
+ 
+ 	/* Print metrics headers only */
+ 	evlist__for_each_entry(evlist, counter) {
++		if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
++			continue;
++
+ 		os.evsel = counter;
+ 
+ 		perf_stat__print_shadow_stats(config, counter, 0,
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 9ebdb8e13c0b8..68dbeae8d2bf6 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1287,7 +1287,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ {
+ 	struct maps *kmaps = map__kmaps(map);
+ 	struct kcore_mapfn_data md;
+-	struct map *replacement_map = NULL;
++	struct map *map_ref, *replacement_map = NULL;
+ 	struct machine *machine;
+ 	bool is_64_bit;
+ 	int err, fd;
+@@ -1365,6 +1365,24 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ 	if (!replacement_map)
+ 		replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
+ 
++	/*
++	 * Update addresses of vmlinux map. Re-insert it to ensure maps are
++	 * correctly ordered. Do this before using maps__merge_in() for the
++	 * remaining maps so vmlinux gets split if necessary.
++	 */
++	map_ref = map__get(map);
++	maps__remove(kmaps, map_ref);
++
++	map__set_start(map_ref, map__start(replacement_map));
++	map__set_end(map_ref, map__end(replacement_map));
++	map__set_pgoff(map_ref, map__pgoff(replacement_map));
++	map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
++
++	err = maps__insert(kmaps, map_ref);
++	map__put(map_ref);
++	if (err)
++		goto out_err;
++
+ 	/* Add new maps */
+ 	while (!list_empty(&md.maps)) {
+ 		struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
+@@ -1372,22 +1390,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
+ 
+ 		list_del_init(&new_node->node);
+ 
+-		if (RC_CHK_EQUAL(new_map, replacement_map)) {
+-			struct map *map_ref;
+-
+-			map__set_start(map, map__start(new_map));
+-			map__set_end(map, map__end(new_map));
+-			map__set_pgoff(map, map__pgoff(new_map));
+-			map__set_mapping_type(map, map__mapping_type(new_map));
+-			/* Ensure maps are correctly ordered */
+-			map_ref = map__get(map);
+-			maps__remove(kmaps, map_ref);
+-			err = maps__insert(kmaps, map_ref);
+-			map__put(map_ref);
+-			map__put(new_map);
+-			if (err)
+-				goto out_err;
+-		} else {
++		/* Skip replacement_map; it was already inserted above. */
++		if (!RC_CHK_EQUAL(new_map, replacement_map)) {
+ 			/*
+ 			 * Merge kcore map into existing maps,
+ 			 * and ensure that current maps (eBPF)
+@@ -1969,6 +1973,10 @@ int dso__load(struct dso *dso, struct map *map)
+ 	return ret;
+ }
+ 
++/*
++ * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
++ * it returns an error.
++ */
+ int dso__load_vmlinux(struct dso *dso, struct map *map,
+ 		      const char *vmlinux, bool vmlinux_allocated)
+ {
+@@ -1987,8 +1995,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
+ 	else
+ 		symtab_type = DSO_BINARY_TYPE__VMLINUX;
+ 
+-	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
++	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
++		if (vmlinux_allocated)
++			free((char *) vmlinux);
+ 		return -1;
++	}
+ 
+ 	/*
+ 	 * dso__load_sym() may copy 'dso' which will result in the copies having
+@@ -2031,7 +2042,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map)
+ 		err = dso__load_vmlinux(dso, map, filename, true);
+ 		if (err > 0)
+ 			goto out;
+-		free(filename);
+ 	}
+ out:
+ 	return err;
+@@ -2183,7 +2193,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
+ 		err = dso__load_vmlinux(dso, map, filename, true);
+ 		if (err > 0)
+ 			return err;
+-		free(filename);
+ 	}
+ 
+ 	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
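
The symbol.c hunks above tighten the ownership rule: with
vmlinux_allocated == true, dso__load_vmlinux() now frees the name even on
failure, so callers must not free it again. A hedged sketch of the resulting
caller pattern (vmlinux_path[] is perf's candidate list; the wrapper
function itself is an assumption):

static int try_vmlinux_candidates(struct dso *dso, struct map *map)
{
	int err = -1;

	for (int i = 0; i < vmlinux_path__nr_entries; i++) {
		char *filename = strdup(vmlinux_path[i]);

		if (!filename)
			break;
		/* Ownership of 'filename' transfers here unconditionally:
		 * the callee frees it even when it returns an error. */
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			break;
	}
	return err;
}
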
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 1aa8962dcf52c..515726489e36a 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -39,12 +39,13 @@ int thread__init_maps(struct thread *thread, struct machine *machine)
+ 
+ struct thread *thread__new(pid_t pid, pid_t tid)
+ {
+-	char *comm_str;
+-	struct comm *comm;
+ 	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
+ 	struct thread *thread;
+ 
+ 	if (ADD_RC_CHK(thread, _thread) != NULL) {
++		struct comm *comm;
++		char comm_str[32];
++
+ 		thread__set_pid(thread, pid);
+ 		thread__set_tid(thread, tid);
+ 		thread__set_ppid(thread, -1);
+@@ -56,13 +57,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
+ 		init_rwsem(thread__namespaces_lock(thread));
+ 		init_rwsem(thread__comm_lock(thread));
+ 
+-		comm_str = malloc(32);
+-		if (!comm_str)
+-			goto err_thread;
+-
+-		snprintf(comm_str, 32, ":%d", tid);
++		snprintf(comm_str, sizeof(comm_str), ":%d", tid);
+ 		comm = comm__new(comm_str, 0, false);
+-		free(comm_str);
+ 		if (!comm)
+ 			goto err_thread;
+ 
+@@ -76,7 +72,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
+ 	return thread;
+ 
+ err_thread:
+-	free(thread);
++	thread__delete(thread);
+ 	return NULL;
+ }
+ 
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
+index 6369927e9c378..48395cfd4f958 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
+@@ -42,7 +42,7 @@ __mlxsw_only_on_spectrum()
+ 	local src=$1; shift
+ 
+ 	if ! mlxsw_on_spectrum "$rev"; then
+-		log_test_skip $src:$caller "(Spectrum-$rev only)"
++		log_test_xfail $src:$caller "(Spectrum-$rev only)"
+ 		return 1
+ 	fi
+ }
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+index a88d8a8c85f2e..899b6892603fd 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+@@ -47,7 +47,6 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
+ 		RET=0
+ 		target=$(${current_test}_get_target "$should_fail")
+ 		if ((target == 0)); then
+-			log_test_skip "'$current_test' should_fail=$should_fail test"
+ 			continue
+ 		fi
+ 
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+index f981c957f0975..482ebb744ebad 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+@@ -52,7 +52,6 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
+ 			RET=0
+ 			target=$(${current_test}_get_target "$should_fail")
+ 			if ((target == 0)); then
+-				log_test_skip "'$current_test' [$profile] should_fail=$should_fail test"
+ 				continue
+ 			fi
+ 			${current_test}_setup_prepare
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 3c8f2965c2850..b634969cbb6f1 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -1216,7 +1216,7 @@ void __run_test(struct __fixture_metadata *f,
+ 		struct __test_metadata *t)
+ {
+ 	struct __test_xfail *xfail;
+-	char *test_name;
++	char test_name[1024];
+ 	const char *diagnostic;
+ 
+ 	/* reset test struct */
+@@ -1227,12 +1227,8 @@ void __run_test(struct __fixture_metadata *f,
+ 	memset(t->env, 0, sizeof(t->env));
+ 	memset(t->results->reason, 0, sizeof(t->results->reason));
+ 
+-	if (asprintf(&test_name, "%s%s%s.%s", f->name,
+-		variant->name[0] ? "." : "", variant->name, t->name) == -1) {
+-		ksft_print_msg("ERROR ALLOCATING MEMORY\n");
+-		t->exit_code = KSFT_FAIL;
+-		_exit(t->exit_code);
+-	}
++	snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
++		 f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ 
+ 	ksft_print_msg(" RUN           %s ...\n", test_name);
+ 
+@@ -1270,7 +1266,6 @@ void __run_test(struct __fixture_metadata *f,
+ 
+ 	ksft_test_result_code(t->exit_code, test_name,
+ 			      diagnostic ? "%s" : NULL, diagnostic);
+-	free(test_name);
+ }
+ 
+ static int test_harness_run(int argc, char **argv)
+diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
+index 1e01d3ddc11c5..200bedcdc32e9 100644
+--- a/tools/testing/selftests/mm/mdwe_test.c
++++ b/tools/testing/selftests/mm/mdwe_test.c
+@@ -7,7 +7,6 @@
+ #include <linux/mman.h>
+ #include <linux/prctl.h>
+ 
+-#define _GNU_SOURCE
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <sys/auxv.h>
+diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
+index 5175a42cbe8a2..7e7ed6c558da9 100755
+--- a/tools/testing/selftests/net/amt.sh
++++ b/tools/testing/selftests/net/amt.sh
+@@ -77,6 +77,7 @@ readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
+ readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
+ readonly RELAY=$(mktemp -u relay-XXXXXXXX)
+ readonly SOURCE=$(mktemp -u source-XXXXXXXX)
++readonly SMCROUTEDIR="$(mktemp -d)"
+ ERR=4
+ err=0
+ 
+@@ -85,6 +86,11 @@ exit_cleanup()
+ 	for ns in "$@"; do
+ 		ip netns delete "${ns}" 2>/dev/null || true
+ 	done
++	if [ -f "$SMCROUTEDIR/amt.pid" ]; then
++		smcpid=$(< $SMCROUTEDIR/amt.pid)
++		kill $smcpid
++	fi
++	rm -rf $SMCROUTEDIR
+ 
+ 	exit $ERR
+ }
+@@ -167,7 +173,7 @@ setup_iptables()
+ 
+ setup_mcast_routing()
+ {
+-	ip netns exec "${RELAY}" smcrouted
++	ip netns exec "${RELAY}" smcrouted -P $SMCROUTEDIR/amt.pid
+ 	ip netns exec "${RELAY}" smcroutectl a relay_src \
+ 		172.17.0.2 239.0.0.1 amtr
+ 	ip netns exec "${RELAY}" smcroutectl a relay_src \
+diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+index a40c0e9bd023c..eef5cbf6eecca 100755
+--- a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
++++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+@@ -73,25 +73,19 @@ setup_v6() {
+ 	# namespaces. veth0 is veth-router, veth1 is veth-host.
+ 	# first, set up the interface's link to the namespace
+ 	# then, set the interface "up"
+-	ip -6 -netns ${ROUTER_NS_V6} link add name ${ROUTER_INTF} \
+-		type veth peer name ${HOST_INTF}
+-
+-	ip -6 -netns ${ROUTER_NS_V6} link set dev ${ROUTER_INTF} up
+-	ip -6 -netns ${ROUTER_NS_V6} link set dev ${HOST_INTF} netns \
+-		${HOST_NS_V6}
++	ip -n ${ROUTER_NS_V6} link add name ${ROUTER_INTF} \
++		type veth peer name ${HOST_INTF} netns ${HOST_NS_V6}
+ 
+-	ip -6 -netns ${HOST_NS_V6} link set dev ${HOST_INTF} up
+-	ip -6 -netns ${ROUTER_NS_V6} addr add \
+-		${ROUTER_ADDR_V6}/${PREFIX_WIDTH_V6} dev ${ROUTER_INTF} nodad
++	# Add a tc rule to match the host's NA (neighbour advertisement) message
++	tc -n ${ROUTER_NS_V6} qdisc add dev ${ROUTER_INTF} clsact
++	tc -n ${ROUTER_NS_V6} filter add dev ${ROUTER_INTF} \
++		ingress protocol ipv6 pref 1 handle 101 \
++		flower src_ip ${HOST_ADDR_V6} ip_proto icmpv6 type 136 skip_hw action pass
+ 
+ 	HOST_CONF=net.ipv6.conf.${HOST_INTF}
+ 	ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.ndisc_notify=1
+ 	ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.disable_ipv6=0
+-	ip -6 -netns ${HOST_NS_V6} addr add ${HOST_ADDR_V6}/${PREFIX_WIDTH_V6} \
+-		dev ${HOST_INTF}
+-
+ 	ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF}
+-
+ 	ip netns exec ${ROUTER_NS_V6} sysctl -w \
+ 		${ROUTER_CONF}.forwarding=1 >/dev/null 2>&1
+ 	ip netns exec ${ROUTER_NS_V6} sysctl -w \
+@@ -99,6 +93,13 @@ setup_v6() {
+ 	ip netns exec ${ROUTER_NS_V6} sysctl -w \
+ 		${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na} \
+ 		>/dev/null 2>&1
++
++	ip -n ${ROUTER_NS_V6} link set dev ${ROUTER_INTF} up
++	ip -n ${HOST_NS_V6} link set dev ${HOST_INTF} up
++	ip -n ${ROUTER_NS_V6} addr add ${ROUTER_ADDR_V6}/${PREFIX_WIDTH_V6} \
++		dev ${ROUTER_INTF} nodad
++	ip -n ${HOST_NS_V6} addr add ${HOST_ADDR_V6}/${PREFIX_WIDTH_V6} \
++		dev ${HOST_INTF}
+ 	set +e
+ }
+ 
+@@ -162,26 +163,6 @@ arp_test_gratuitous_combinations() {
+ 	arp_test_gratuitous 2 1
+ }
+ 
+-cleanup_tcpdump() {
+-	set -e
+-	[[ ! -z  ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout}
+-	[[ ! -z  ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr}
+-	tcpdump_stdout=
+-	tcpdump_stderr=
+-	set +e
+-}
+-
+-start_tcpdump() {
+-	set -e
+-	tcpdump_stdout=`mktemp`
+-	tcpdump_stderr=`mktemp`
+-	ip netns exec ${ROUTER_NS_V6} timeout 15s \
+-		tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \
+-		"icmp6 && icmp6[0] == 136 && src ${HOST_ADDR_V6}" \
+-		> ${tcpdump_stdout} 2> /dev/null
+-	set +e
+-}
+-
+ verify_ndisc() {
+ 	local accept_untracked_na=$1
+ 	local same_subnet=$2
+@@ -222,8 +203,9 @@ ndisc_test_untracked_advertisements() {
+ 			HOST_ADDR_V6=2001:db8:abcd:0012::3
+ 		fi
+ 	fi
+-	setup_v6 $1 $2
+-	start_tcpdump
++	setup_v6 $1
++	slowwait_for_counter 15 1 \
++		tc_rule_handle_stats_get "dev ${ROUTER_INTF} ingress" 101 ".packets" "-n ${ROUTER_NS_V6}"
+ 
+ 	if verify_ndisc $1 $2; then
+ 		printf "    TEST: %-60s  [ OK ]\n" "${test_msg[*]}"
+@@ -231,7 +213,6 @@ ndisc_test_untracked_advertisements() {
+ 		printf "    TEST: %-60s  [FAIL]\n" "${test_msg[*]}"
+ 	fi
+ 
+-	cleanup_tcpdump
+ 	cleanup_v6
+ 	set +e
+ }
+diff --git a/tools/testing/selftests/net/forwarding/ethtool_rmon.sh b/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
+index 41a34a61f7632..e78776db850f1 100755
+--- a/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
++++ b/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
+@@ -78,7 +78,7 @@ rmon_histogram()
+ 
+ 		for if in $iface $neigh; do
+ 			if ! ensure_mtu $if ${bucket[0]}; then
+-				log_test_skip "$if does not support the required MTU for $step"
++				log_test_xfail "$if does not support the required MTU for $step"
+ 				return
+ 			fi
+ 		done
+@@ -93,7 +93,7 @@ rmon_histogram()
+ 		jq -r ".[0].rmon[\"${set}-pktsNtoM\"][]|[.low, .high]|@tsv" 2>/dev/null)
+ 
+ 	if [ $nbuckets -eq 0 ]; then
+-		log_test_skip "$iface does not support $set histogram counters"
++		log_test_xfail "$iface does not support $set histogram counters"
+ 		return
+ 	fi
+ }
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index e579c2e0c462a..e78f11140edd8 100644
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -38,32 +38,6 @@ fi
+ 
+ source "$net_forwarding_dir/../lib.sh"
+ 
+-# timeout in seconds
+-slowwait()
+-{
+-	local timeout=$1; shift
+-
+-	local start_time="$(date -u +%s)"
+-	while true
+-	do
+-		local out
+-		out=$("$@")
+-		local ret=$?
+-		if ((!ret)); then
+-			echo -n "$out"
+-			return 0
+-		fi
+-
+-		local current_time="$(date -u +%s)"
+-		if ((current_time - start_time > timeout)); then
+-			echo -n "$out"
+-			return 1
+-		fi
+-
+-		sleep 0.1
+-	done
+-}
+-
+ ##############################################################################
+ # Sanity checks
+ 
+@@ -358,14 +332,24 @@ EXIT_STATUS=0
+ # Per-test return value. Clear at the beginning of each test.
+ RET=0
+ 
++ret_set_ksft_status()
++{
++	local ksft_status=$1; shift
++	local msg=$1; shift
++
++	RET=$(ksft_status_merge $RET $ksft_status)
++	if (( $? )); then
++		retmsg=$msg
++	fi
++}
++
+ check_err()
+ {
+ 	local err=$1
+ 	local msg=$2
+ 
+-	if [[ $RET -eq 0 && $err -ne 0 ]]; then
+-		RET=$err
+-		retmsg=$msg
++	if ((err)); then
++		ret_set_ksft_status $ksft_fail "$msg"
+ 	fi
+ }
+ 
+@@ -374,10 +358,7 @@ check_fail()
+ 	local err=$1
+ 	local msg=$2
+ 
+-	if [[ $RET -eq 0 && $err -eq 0 ]]; then
+-		RET=1
+-		retmsg=$msg
+-	fi
++	check_err $((!err)) "$msg"
+ }
+ 
+ check_err_fail()
+@@ -393,6 +374,62 @@ check_err_fail()
+ 	fi
+ }
+ 
++log_test_result()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++	local result=$1; shift
++	local retmsg=$1; shift
++
++	printf "TEST: %-60s  [%s]\n" "$test_name $opt_str" "$result"
++	if [[ $retmsg ]]; then
++		printf "\t%s\n" "$retmsg"
++	fi
++}
++
++pause_on_fail()
++{
++	if [[ $PAUSE_ON_FAIL == yes ]]; then
++		echo "Hit enter to continue, 'q' to quit"
++		read a
++		[[ $a == q ]] && exit 1
++	fi
++}
++
++handle_test_result_pass()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" " OK "
++}
++
++handle_test_result_fail()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
++	pause_on_fail
++}
++
++handle_test_result_xfail()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
++	pause_on_fail
++}
++
++handle_test_result_skip()
++{
++	local test_name=$1; shift
++	local opt_str=$1; shift
++
++	log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
++}
++
+ log_test()
+ {
+ 	local test_name=$1
+@@ -402,31 +439,28 @@ log_test()
+ 		opt_str="($opt_str)"
+ 	fi
+ 
+-	if [[ $RET -ne 0 ]]; then
+-		EXIT_STATUS=1
+-		printf "TEST: %-60s  [FAIL]\n" "$test_name $opt_str"
+-		if [[ ! -z "$retmsg" ]]; then
+-			printf "\t%s\n" "$retmsg"
+-		fi
+-		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+-			echo "Hit enter to continue, 'q' to quit"
+-			read a
+-			[ "$a" = "q" ] && exit 1
+-		fi
+-		return 1
++	if ((RET == ksft_pass)); then
++		handle_test_result_pass "$test_name" "$opt_str"
++	elif ((RET == ksft_xfail)); then
++		handle_test_result_xfail "$test_name" "$opt_str"
++	elif ((RET == ksft_skip)); then
++		handle_test_result_skip "$test_name" "$opt_str"
++	else
++		handle_test_result_fail "$test_name" "$opt_str"
+ 	fi
+ 
+-	printf "TEST: %-60s  [ OK ]\n" "$test_name $opt_str"
+-	return 0
++	EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
++	return $RET
+ }
+ 
+ log_test_skip()
+ {
+-	local test_name=$1
+-	local opt_str=$2
++	RET=$ksft_skip retmsg= log_test "$@"
++}
+ 
+-	printf "TEST: %-60s  [SKIP]\n" "$test_name $opt_str"
+-	return 0
++log_test_xfail()
++{
++	RET=$ksft_xfail retmsg= log_test "$@"
+ }
+ 
+ log_info()
+@@ -487,33 +521,6 @@ wait_for_trap()
+ 	"$@" | grep -q trap
+ }
+ 
+-until_counter_is()
+-{
+-	local expr=$1; shift
+-	local current=$("$@")
+-
+-	echo $((current))
+-	((current $expr))
+-}
+-
+-busywait_for_counter()
+-{
+-	local timeout=$1; shift
+-	local delta=$1; shift
+-
+-	local base=$("$@")
+-	busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+-}
+-
+-slowwait_for_counter()
+-{
+-	local timeout=$1; shift
+-	local delta=$1; shift
+-
+-	local base=$("$@")
+-	slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+-}
+-
+ setup_wait_dev()
+ {
+ 	local dev=$1; shift
+@@ -819,29 +826,6 @@ link_stats_rx_errors_get()
+ 	link_stats_get $1 rx errors
+ }
+ 
+-tc_rule_stats_get()
+-{
+-	local dev=$1; shift
+-	local pref=$1; shift
+-	local dir=$1; shift
+-	local selector=${1:-.packets}; shift
+-
+-	tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+-	    | jq ".[1].options.actions[].stats$selector"
+-}
+-
+-tc_rule_handle_stats_get()
+-{
+-	local id=$1; shift
+-	local handle=$1; shift
+-	local selector=${1:-.packets}; shift
+-	local netns=${1:-""}; shift
+-
+-	tc $netns -j -s filter show $id \
+-	    | jq ".[] | select(.options.handle == $handle) | \
+-		  .options.actions[0].stats$selector"
+-}
+-
+ ethtool_stats_get()
+ {
+ 	local dev=$1; shift
+diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
+index 7e7d62161c345..b2d2c6cecc01e 100644
+--- a/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
++++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
+@@ -69,7 +69,7 @@ nh_stats_test_dispatch_swhw()
+ 		nh_stats_do_test "HW $what" "$nh1_id" "$nh2_id" "$group_id" \
+ 				 nh_stats_get_hw "${mz[@]}"
+ 	elif [[ $kind == veth ]]; then
+-		log_test_skip "HW stats not offloaded on veth topology"
++		log_test_xfail "HW stats not offloaded on veth topology"
+ 	fi
+ }
+ 
+diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
+index 56a9454b7ba35..cf7fe4d550dde 100644
+--- a/tools/testing/selftests/net/lib.sh
++++ b/tools/testing/selftests/net/lib.sh
+@@ -14,9 +14,50 @@ NS_LIST=""
+ 
+ ##############################################################################
+ # Helpers
+-busywait()
++
++__ksft_status_merge()
+ {
+-	local timeout=$1; shift
++	local a=$1; shift
++	local b=$1; shift
++	local -A weights
++	local weight=0
++
++	local i
++	for i in "$@"; do
++		weights[$i]=$((weight++))
++	done
++
++	if [[ ${weights[$a]} -gt ${weights[$b]} ]]; then
++		echo "$a"
++		return 0
++	else
++		echo "$b"
++		return 1
++	fi
++}
++
++ksft_status_merge()
++{
++	local a=$1; shift
++	local b=$1; shift
++
++	__ksft_status_merge "$a" "$b" \
++		$ksft_pass $ksft_xfail $ksft_skip $ksft_fail
++}
++
++ksft_exit_status_merge()
++{
++	local a=$1; shift
++	local b=$1; shift
++
++	__ksft_status_merge "$a" "$b" \
++		$ksft_xfail $ksft_pass $ksft_skip $ksft_fail
++}
++
++loopy_wait()
++{
++	local sleep_cmd=$1; shift
++	local timeout_ms=$1; shift
+ 
+ 	local start_time="$(date -u +%s%3N)"
+ 	while true
+@@ -30,13 +71,57 @@ busywait()
+ 		fi
+ 
+ 		local current_time="$(date -u +%s%3N)"
+-		if ((current_time - start_time > timeout)); then
++		if ((current_time - start_time > timeout_ms)); then
+ 			echo -n "$out"
+ 			return 1
+ 		fi
++
++		$sleep_cmd
+ 	done
+ }
+ 
++busywait()
++{
++	local timeout_ms=$1; shift
++
++	loopy_wait : "$timeout_ms" "$@"
++}
++
++# timeout in seconds
++slowwait()
++{
++	local timeout_sec=$1; shift
++
++	loopy_wait "sleep 0.1" "$((timeout_sec * 1000))" "$@"
++}
++
++until_counter_is()
++{
++	local expr=$1; shift
++	local current=$("$@")
++
++	echo $((current))
++	((current $expr))
++}
++
++busywait_for_counter()
++{
++	local timeout=$1; shift
++	local delta=$1; shift
++
++	local base=$("$@")
++	busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
++}
++
++slowwait_for_counter()
++{
++	local timeout=$1; shift
++	local delta=$1; shift
++
++	local base=$("$@")
++	slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
++}
++
+ cleanup_ns()
+ {
+ 	local ns=""
+@@ -96,3 +181,26 @@ setup_ns()
+ 	done
+ 	NS_LIST="$NS_LIST $ns_list"
+ }
++
++tc_rule_stats_get()
++{
++	local dev=$1; shift
++	local pref=$1; shift
++	local dir=$1; shift
++	local selector=${1:-.packets}; shift
++
++	tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
++	    | jq ".[1].options.actions[].stats$selector"
++}
++
++tc_rule_handle_stats_get()
++{
++	local id=$1; shift
++	local handle=$1; shift
++	local selector=${1:-.packets}; shift
++	local netns=${1:-""}; shift
++
++	tc $netns -j -s filter show $id \
++	    | jq ".[] | select(.options.handle == $handle) | \
++		  .options.actions[0].stats$selector"
++}
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index e4403236f6554..1b5722e6166e5 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -125,8 +125,8 @@ init_shapers()
+ {
+ 	local i
+ 	for i in $(seq 1 4); do
+-		tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1
+-		tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1
++		tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1ms
++		tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1ms
+ 	done
+ }
+ 
+@@ -262,6 +262,8 @@ reset()
+ 
+ 	TEST_NAME="${1}"
+ 
++	MPTCP_LIB_SUBTEST_FLAKY=0 # reset if modified
++
+ 	if skip_test; then
+ 		MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
+ 		last_test_ignored=1
+@@ -449,7 +451,9 @@ reset_with_tcp_filter()
+ # $1: err msg
+ fail_test()
+ {
+-	ret=${KSFT_FAIL}
++	if ! mptcp_lib_subtest_is_flaky; then
++		ret=${KSFT_FAIL}
++	fi
+ 
+ 	if [ ${#} -gt 0 ]; then
+ 		print_fail "${@}"
+@@ -3178,6 +3182,7 @@ fullmesh_tests()
+ fastclose_tests()
+ {
+ 	if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
++		MPTCP_LIB_SUBTEST_FLAKY=1
+ 		test_linkfail=1024 fastclose=client \
+ 			run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 0 0 0
+@@ -3186,6 +3191,7 @@ fastclose_tests()
+ 	fi
+ 
+ 	if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
++		MPTCP_LIB_SUBTEST_FLAKY=1
+ 		test_linkfail=1024 fastclose=server \
+ 			run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 0 0 0 0 0 0 1
+@@ -3204,6 +3210,7 @@ fail_tests()
+ {
+ 	# single subflow
+ 	if reset_with_fail "Infinite map" 1; then
++		MPTCP_LIB_SUBTEST_FLAKY=1
+ 		test_linkfail=128 \
+ 			run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
+@@ -3212,7 +3219,8 @@ fail_tests()
+ 
+ 	# multiple subflows
+ 	if reset_with_fail "MP_FAIL MP_RST" 2; then
+-		tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
++		MPTCP_LIB_SUBTEST_FLAKY=1
++		tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
+ 		pm_nl_set_limits $ns1 0 1
+ 		pm_nl_set_limits $ns2 0 1
+ 		pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index 1b23662203881..7322e1e4e5db6 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -216,8 +216,8 @@ run_test()
+ 	shift 4
+ 	local msg=$*
+ 
+-	[ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
+-	[ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
++	[ $delay1 -gt 0 ] && delay1="delay ${delay1}ms" || delay1=""
++	[ $delay2 -gt 0 ] && delay2="delay ${delay2}ms" || delay2=""
+ 
+ 	for dev in ns1eth1 ns1eth2; do
+ 		tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
+@@ -243,7 +243,7 @@ run_test()
+ 	do_transfer $small $large $time
+ 	lret=$?
+ 	mptcp_lib_result_code "${lret}" "${msg}"
+-	if [ $lret -ne 0 ]; then
++	if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
+ 		ret=$lret
+ 		[ $bail -eq 0 ] || exit $ret
+ 	fi
+@@ -253,7 +253,7 @@ run_test()
+ 	do_transfer $large $small $time
+ 	lret=$?
+ 	mptcp_lib_result_code "${lret}" "${msg}"
+-	if [ $lret -ne 0 ]; then
++	if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
+ 		ret=$lret
+ 		[ $bail -eq 0 ] || exit $ret
+ 	fi
+@@ -286,7 +286,7 @@ run_test 10 10 0 0 "balanced bwidth"
+ run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
+ 
+ # we still need some additional infrastructure to pass the following test-cases
+-run_test 10 3 0 0 "unbalanced bwidth"
++MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
+ run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
+ run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
+ 
+diff --git a/tools/testing/selftests/powerpc/dexcr/Makefile b/tools/testing/selftests/powerpc/dexcr/Makefile
+index 76210f2bcec3c..829ad075b4a44 100644
+--- a/tools/testing/selftests/powerpc/dexcr/Makefile
++++ b/tools/testing/selftests/powerpc/dexcr/Makefile
+@@ -3,7 +3,7 @@ TEST_GEN_FILES := lsdexcr
+ 
+ include ../../lib.mk
+ 
+-$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie $(call cc-option,-mno-rop-protect)
++$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie -no-pie $(call cc-option,-mno-rop-protect)
+ 
+ $(TEST_GEN_PROGS): ../harness.c ../utils.c ./dexcr.c
+ $(TEST_GEN_FILES): ../utils.c ./dexcr.c
+diff --git a/tools/testing/selftests/riscv/hwprobe/.gitignore b/tools/testing/selftests/riscv/hwprobe/.gitignore
+index 8113dc3bdd03a..6e384e80ea1a8 100644
+--- a/tools/testing/selftests/riscv/hwprobe/.gitignore
++++ b/tools/testing/selftests/riscv/hwprobe/.gitignore
+@@ -1 +1,3 @@
+ hwprobe
++cbo
++which-cpus
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+index 12da0a939e3e5..557fb074acf0c 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+@@ -132,6 +132,50 @@
+             "echo \"1\" > /sys/bus/netdevsim/del_device"
+         ]
+     },
++    {
++        "id": "6f62",
++        "name": "Add taprio Qdisc with too short interval",
++        "category": [
++            "qdisc",
++            "taprio"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
++        ],
++        "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
++        "expExitCode": "2",
++        "verifyCmd": "$TC qdisc show dev $ETH",
++        "matchPattern": "qdisc taprio 1: root refcnt",
++        "matchCount": "0",
++        "teardown": [
++            "echo \"1\" > /sys/bus/netdevsim/del_device"
++        ]
++    },
++    {
++        "id": "831f",
++        "name": "Add taprio Qdisc with too short cycle-time",
++        "category": [
++            "qdisc",
++            "taprio"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
++        ],
++        "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
++        "expExitCode": "2",
++        "verifyCmd": "$TC qdisc show dev $ETH",
++        "matchPattern": "qdisc taprio 1: root refcnt",
++        "matchCount": "0",
++        "teardown": [
++            "echo \"1\" > /sys/bus/netdevsim/del_device"
++        ]
++    },
+     {
+         "id": "3e1e",
+         "name": "Add taprio Qdisc with an invalid cycle-time",



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-06-16 14:31 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-06-16 14:31 UTC (permalink / raw
  To: gentoo-commits

commit:     88e6914e3a0bd49ede202c2921b807ad344c647b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jun 16 14:30:40 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jun 16 14:30:40 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=88e6914e

Linux patch 6.9.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1004_linux-6.9.5.patch | 5592 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5596 insertions(+)

diff --git a/0000_README b/0000_README
index f824f87c..bb6274e1 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.9.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.4
 
+Patch:  1004_linux-6.9.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.5
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1004_linux-6.9.5.patch b/1004_linux-6.9.5.patch
new file mode 100644
index 00000000..82e93e9f
--- /dev/null
+++ b/1004_linux-6.9.5.patch
@@ -0,0 +1,5592 @@
+diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
+index 2466d3363af79..ad50ca6f495eb 100644
+--- a/Documentation/mm/arch_pgtable_helpers.rst
++++ b/Documentation/mm/arch_pgtable_helpers.rst
+@@ -140,7 +140,8 @@ PMD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pmd_swp_clear_soft_dirty  | Clears a soft dirty swapped PMD                  |
+ +---------------------------+--------------------------------------------------+
+-| pmd_mkinvalid             | Invalidates a mapped PMD [1]                     |
++| pmd_mkinvalid             | Invalidates a present PMD; do not call for       |
++|                           | non-present PMD [1]                              |
+ +---------------------------+--------------------------------------------------+
+ | pmd_set_huge              | Creates a PMD huge mapping                       |
+ +---------------------------+--------------------------------------------------+
+@@ -196,7 +197,8 @@ PUD Page Table Helpers
+ +---------------------------+--------------------------------------------------+
+ | pud_mkdevmap              | Creates a ZONE_DEVICE mapped PUD                 |
+ +---------------------------+--------------------------------------------------+
+-| pud_mkinvalid             | Invalidates a mapped PUD [1]                     |
++| pud_mkinvalid             | Invalidates a present PUD; do not call for       |
++|                           | non-present PUD [1]                              |
+ +---------------------------+--------------------------------------------------+
+ | pud_set_huge              | Creates a PUD huge mapping                       |
+ +---------------------------+--------------------------------------------------+
+diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
+index 72da7057e4cf9..dceeb0d763aa2 100644
+--- a/Documentation/networking/af_xdp.rst
++++ b/Documentation/networking/af_xdp.rst
+@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
+ sxdp_shared_umem_fd field as you registered the UMEM on that
+ socket. These two sockets will now share one and the same UMEM.
+ 
+-In this case, it is possible to use the NIC's packet steering
+-capabilities to steer the packets to the right queue. This is not
+-possible in the previous example as there is only one queue shared
+-among sockets, so the NIC cannot do this steering as it can only steer
+-between queues.
+-
+-In libxdp (or libbpf prior to version 1.0), you need to use the
+-xsk_socket__create_shared() API as it takes a reference to a FILL ring
+-and a COMPLETION ring that will be created for you and bound to the
+-shared UMEM. You can use this function for all the sockets you create,
+-or you can use it for the second and following ones and use
+-xsk_socket__create() for the first one. Both methods yield the same
+-result.
++There is no need to supply an XDP program like the one in the previous
++case where sockets were bound to the same queue id and
++device. Instead, use the NIC's packet steering capabilities to steer
++the packets to the right queue. In the previous example, there is only
++one queue shared among sockets, so the NIC cannot do this steering. It
++can only steer between queues.
++
++In libbpf, you need to use the xsk_socket__create_shared() API as it
++takes a reference to a FILL ring and a COMPLETION ring that will be
++created for you and bound to the shared UMEM. You can use this
++function for all the sockets you create, or you can use it for the
++second and following ones and use xsk_socket__create() for the first
++one. Both methods yield the same result.
+ 
+ Note that a UMEM can be shared between sockets on the same queue id
+ and device, as well as between queues on the same device and between
+-devices at the same time. It is also possible to redirect to any
+-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
++devices at the same time.
+ 
+ XDP_USE_NEED_WAKEUP bind flag
+ -----------------------------
+@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
+    switch, or other distribution mechanism, in your NIC to direct
+    traffic to the correct queue id and socket.
+ 
+-   Note that if you are using the XDP_SHARED_UMEM option, it is
+-   possible to switch traffic between any socket bound to the same
+-   umem.
+-
+ Q: My packets are sometimes corrupted. What is wrong?
+ 
+ A: Care has to be taken not to feed the same buffer in the UMEM into
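
A hedged sketch of the shared-UMEM setup the af_xdp.rst text above
describes, using libxdp's xsk_socket__create_shared(); the interface name,
queue ids and ring variables are assumptions, and error handling is elided:

#include <xdp/xsk.h>	/* libxdp; libbpf < 1.0 shipped this as <bpf/xsk.h> */

/* Two sockets on different queues of one device, sharing a single UMEM.
 * NIC flow steering directs traffic between the queues; no custom XDP
 * steering program is needed in this configuration. */
static int setup_shared(struct xsk_umem *umem, struct xsk_socket **xsk0,
			struct xsk_socket **xsk1)
{
	static struct xsk_ring_prod fill0, fill1, tx0, tx1;
	static struct xsk_ring_cons comp0, comp1, rx0, rx1;
	int err;

	/* Each call creates FILL/COMPLETION rings bound to the shared UMEM. */
	err = xsk_socket__create_shared(xsk0, "eth0", /*queue_id=*/0, umem,
					&rx0, &tx0, &fill0, &comp0, NULL);
	if (err)
		return err;
	return xsk_socket__create_shared(xsk1, "eth0", /*queue_id=*/1, umem,
					 &rx1, &tx1, &fill1, &comp1, NULL);
}
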
+diff --git a/Makefile b/Makefile
+index 91f1d4d34e809..d5062a593ef7e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+@@ -942,7 +942,6 @@ endif
+ ifdef CONFIG_LTO_CLANG
+ ifdef CONFIG_LTO_CLANG_THIN
+ CC_FLAGS_LTO	:= -flto=thin -fsplit-lto-unit
+-KBUILD_LDFLAGS	+= --thinlto-cache-dir=$(extmod_prefix).thinlto-cache
+ else
+ CC_FLAGS_LTO	:= -flto
+ endif
+@@ -1477,7 +1476,7 @@ endif # CONFIG_MODULES
+ # Directories & files removed with 'make clean'
+ CLEAN_FILES += vmlinux.symvers modules-only.symvers \
+ 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
+-	       compile_commands.json .thinlto-cache rust/test \
++	       compile_commands.json rust/test \
+ 	       rust-project.json .vmlinux.objs .vmlinux.export.c
+ 
+ # Directories & files removed with 'make mrproper'
+@@ -1783,7 +1782,7 @@ PHONY += compile_commands.json
+ 
+ clean-dirs := $(KBUILD_EXTMOD)
+ clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
+-	$(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
++	$(KBUILD_EXTMOD)/compile_commands.json
+ 
+ PHONY += prepare
+ # now expand this into a simple variable to reduce the cost of shell evaluations
+diff --git a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+index b566f878ed84f..18f4f494093ba 100644
+--- a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+@@ -88,7 +88,7 @@ eeprom@52 {
+ &keypad {
+ 	samsung,keypad-num-rows = <2>;
+ 	samsung,keypad-num-columns = <8>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+diff --git a/arch/arm/boot/dts/samsung/exynos4412-origen.dts b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+index 23b151645d668..10ab7bc90f502 100644
+--- a/arch/arm/boot/dts/samsung/exynos4412-origen.dts
++++ b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+@@ -453,7 +453,7 @@ buck9_reg: BUCK9 {
+ &keypad {
+ 	samsung,keypad-num-rows = <3>;
+ 	samsung,keypad-num-columns = <2>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+index 715dfcba14174..e16df9e75fcb0 100644
+--- a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
++++ b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+@@ -69,7 +69,7 @@ cooling_map1: map1 {
+ &keypad {
+ 	samsung,keypad-num-rows = <3>;
+ 	samsung,keypad-num-columns = <8>;
+-	linux,keypad-no-autorepeat;
++	linux,input-no-autorepeat;
+ 	wakeup-source;
+ 	pinctrl-0 = <&keypad_rows &keypad_cols>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+index ed1b5a7a60678..d01023401d7e3 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+@@ -58,7 +58,7 @@ cpu@3 {
+ 	gic: interrupt-controller@f1001000 {
+ 		compatible = "arm,gic-400";
+ 		reg = <0x0 0xf1001000 0x0 0x1000>,  /* GICD */
+-		      <0x0 0xf1002000 0x0 0x100>;   /* GICC */
++		      <0x0 0xf1002000 0x0 0x2000>;  /* GICC */
+ 		#address-cells = <0>;
+ 		#interrupt-cells = <3>;
+ 		interrupt-controller;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+index 14d58859bb55c..683ac124523b3 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+@@ -9,8 +9,8 @@ / {
+ 	compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
+ 
+ 	aliases {
+-		rtc0 = "/i2c@7000d000/as3722@40";
+-		rtc1 = "/rtc@7000e000";
++		rtc0 = &as3722;
++		rtc1 = &tegra_rtc;
+ 		serial0 = &uarta;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 7e24a212c7e44..5bcccfef3f7f8 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -572,7 +572,7 @@ spi@7000de00 {
+ 		status = "disabled";
+ 	};
+ 
+-	rtc@7000e000 {
++	tegra_rtc: rtc@7000e000 {
+ 		compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
+ 		reg = <0x0 0x7000e000 0x0 0x100>;
+ 		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+index 10655401528e4..a22b4501ce1ef 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+@@ -62,7 +62,7 @@ bluetooth {
+ 		vddrf-supply = <&vreg_l1_1p3>;
+ 		vddch0-supply = <&vdd_ch0_3p3>;
+ 
+-		local-bd-address = [ 02 00 00 00 5a ad ];
++		local-bd-address = [ 00 00 00 00 00 00 ];
+ 
+ 		max-speed = <3200000>;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index d0f82e12289e1..91871a6ee3b6b 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1799,6 +1799,7 @@ pcie4_phy: phy@1c06000 {
+ 			assigned-clock-rates = <100000000>;
+ 
+ 			power-domains = <&gcc PCIE_4_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_PCIE_4_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -1898,6 +1899,7 @@ pcie3b_phy: phy@1c0e000 {
+ 			assigned-clock-rates = <100000000>;
+ 
+ 			power-domains = <&gcc PCIE_3B_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_PCIE_3B_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -1998,6 +2000,7 @@ pcie3a_phy: phy@1c14000 {
+ 			assigned-clock-rates = <100000000>;
+ 
+ 			power-domains = <&gcc PCIE_3A_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_PCIE_3A_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -2099,6 +2102,7 @@ pcie2b_phy: phy@1c1e000 {
+ 			assigned-clock-rates = <100000000>;
+ 
+ 			power-domains = <&gcc PCIE_2B_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_PCIE_2B_PHY_BCR>;
+ 			reset-names = "phy";
+@@ -2199,6 +2203,7 @@ pcie2a_phy: phy@1c24000 {
+ 			assigned-clock-rates = <100000000>;
+ 
+ 			power-domains = <&gcc PCIE_2A_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_PCIE_2A_PHY_BCR>;
+ 			reset-names = "phy";
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+index e8d8857ad51ff..8c837467069b0 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -76,7 +76,7 @@ verdin_key_wakeup: key-wakeup {
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x80000000 0x00000000 0x40000000>; /* 1G RAM */
++		reg = <0x00000000 0x80000000 0x00000000 0x80000000>; /* 2G RAM */
+ 	};
+ 
+ 	opp-table {
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index e2f762d959bb3..11098eb7eb44a 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 		case PSR_AA32_MODE_SVC:
+ 		case PSR_AA32_MODE_ABT:
+ 		case PSR_AA32_MODE_UND:
++		case PSR_AA32_MODE_SYS:
+ 			if (!vcpu_el1_is_32bit(vcpu))
+ 				return -EINVAL;
+ 			break;
+@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+ 		int i, nr_reg;
+ 
+-		switch (*vcpu_cpsr(vcpu)) {
++		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
+ 		/*
+ 		 * Either we are dealing with user mode, and only the
+ 		 * first 15 registers (+ PC) must be narrowed to 32bit.
+diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
+index 8d9670e6615dc..449fa58cf3b63 100644
+--- a/arch/arm64/kvm/hyp/aarch32.c
++++ b/arch/arm64/kvm/hyp/aarch32.c
+@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+ 	u32 cpsr_cond;
+ 	int cond;
+ 
+-	/* Top two bits non-zero?  Unconditional. */
+-	if (kvm_vcpu_get_esr(vcpu) >> 30)
++	/*
++	 * These are the exception classes that could fire with a
++	 * conditional instruction.
++	 */
++	switch (kvm_vcpu_trap_get_class(vcpu)) {
++	case ESR_ELx_EC_CP15_32:
++	case ESR_ELx_EC_CP15_64:
++	case ESR_ELx_EC_CP14_MR:
++	case ESR_ELx_EC_CP14_LS:
++	case ESR_ELx_EC_FP_ASIMD:
++	case ESR_ELx_EC_CP10_ID:
++	case ESR_ELx_EC_CP14_64:
++	case ESR_ELx_EC_SVC32:
++		break;
++	default:
+ 		return true;
++	}
+ 
+ 	/* Is condition field valid? */
+ 	cond = kvm_vcpu_get_condition(vcpu);
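
[Editor's note] The kvm_condition_valid32() hunk above replaces the old "top two ESR bits non-zero means unconditional" shortcut with an explicit whitelist of exception classes that a conditional AArch32 instruction can actually generate. A minimal userspace sketch of that default-deny pattern follows; the enum names are invented for illustration and are not the kernel's ESR_ELx_EC_* values.

#include <stdbool.h>
#include <stdio.h>

enum trap_class { EC_CP15_32, EC_FP_ASIMD, EC_SVC32, EC_DATA_ABORT, EC_UNKNOWN };

/*
 * Default-deny: only classes known to come from conditional
 * instructions fall through to the condition-field check;
 * everything else is treated as unconditional.
 */
static bool needs_condition_check(enum trap_class ec)
{
	switch (ec) {
	case EC_CP15_32:
	case EC_FP_ASIMD:
	case EC_SVC32:
		return true;	/* go on and inspect the condition field */
	default:
		return false;	/* unconditional: no condition to validate */
	}
}

int main(void)
{
	printf("%d %d\n", needs_condition_check(EC_SVC32),
	       needs_condition_check(EC_DATA_ABORT));
	return 0;
}
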
+diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
+index 27f319b498625..b5f9de9f102e4 100644
+--- a/arch/loongarch/include/asm/numa.h
++++ b/arch/loongarch/include/asm/numa.h
+@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
+ static inline void early_numa_add_cpu(int cpuid, s16 node)	{ }
+ static inline void numa_add_cpu(unsigned int cpu)		{ }
+ static inline void numa_remove_cpu(unsigned int cpu)		{ }
++static inline void set_cpuid_to_node(int cpuid, s16 node)	{ }
+ 
+ static inline int early_cpu_to_node(int cpu)
+ {
+diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
+index 45b507a7b06fc..d9eafd3ee3d1e 100644
+--- a/arch/loongarch/include/asm/stackframe.h
++++ b/arch/loongarch/include/asm/stackframe.h
+@@ -42,7 +42,7 @@
+ 	.macro JUMP_VIRT_ADDR temp1 temp2
+ 	li.d	\temp1, CACHE_BASE
+ 	pcaddi	\temp2, 0
+-	or	\temp1, \temp1, \temp2
++	bstrins.d  \temp1, \temp2, (DMW_PABITS - 1), 0
+ 	jirl	zero, \temp1, 0xc
+ 	.endm
+ 
+diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
+index c4f7de2e28054..4677ea8fa8e98 100644
+--- a/arch/loongarch/kernel/head.S
++++ b/arch/loongarch/kernel/head.S
+@@ -22,7 +22,7 @@
+ _head:
+ 	.word	MZ_MAGIC		/* "MZ", MS-DOS header */
+ 	.org	0x8
+-	.dword	kernel_entry		/* Kernel entry point */
++	.dword	_kernel_entry		/* Kernel entry point (physical address) */
+ 	.dword	_kernel_asize		/* Kernel image effective size */
+ 	.quad	PHYS_LINK_KADDR		/* Kernel image load offset from start of RAM */
+ 	.org	0x38			/* 0x20 ~ 0x37 reserved */
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 60e0fe97f61a3..e8f7a54ff67cc 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
+ 		return;
+ 
+ 	/* Prefer to use built-in dtb, checking its legality first. */
+-	if (!fdt_check_header(__dtb_start))
++	if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
+ 		fdt_pointer = __dtb_start;
+ 	else
+ 		fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index aabee0b280fe5..fd22a32755a58 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -262,7 +262,6 @@ static void __init fdt_smp_setup(void)
+ 
+ 		if (cpuid == loongson_sysconf.boot_cpu_id) {
+ 			cpu = 0;
+-			numa_add_cpu(cpu);
+ 		} else {
+ 			cpu = cpumask_next_zero(-1, cpu_present_mask);
+ 		}
+@@ -272,6 +271,9 @@ static void __init fdt_smp_setup(void)
+ 		set_cpu_present(cpu, true);
+ 		__cpu_number_map[cpuid] = cpu;
+ 		__cpu_logical_map[cpu] = cpuid;
++
++		early_numa_add_cpu(cpu, 0);
++		set_cpuid_to_node(cpuid, 0);
+ 	}
+ 
+ 	loongson_sysconf.nr_cpus = num_processors;
+@@ -456,6 +458,7 @@ void smp_prepare_boot_cpu(void)
+ 	set_cpu_possible(0, true);
+ 	set_cpu_online(0, true);
+ 	set_my_cpu_offset(per_cpu_offset(0));
++	numa_add_cpu(0);
+ 
+ 	rr_node = first_node(node_online_map);
+ 	for_each_possible_cpu(cpu) {
+diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
+index e8e97dbf9ca40..3c7595342730e 100644
+--- a/arch/loongarch/kernel/vmlinux.lds.S
++++ b/arch/loongarch/kernel/vmlinux.lds.S
+@@ -6,6 +6,7 @@
+ 
+ #define PAGE_SIZE _PAGE_SIZE
+ #define RO_EXCEPTION_TABLE_ALIGN	4
++#define PHYSADDR_MASK			0xffffffffffff /* 48-bit */
+ 
+ /*
+  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+@@ -142,10 +143,11 @@ SECTIONS
+ 
+ #ifdef CONFIG_EFI_STUB
+ 	/* header symbols */
+-	_kernel_asize = _end - _text;
+-	_kernel_fsize = _edata - _text;
+-	_kernel_vsize = _end - __initdata_begin;
+-	_kernel_rsize = _edata - __initdata_begin;
++	_kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
++	_kernel_asize = ABSOLUTE(_end - _text);
++	_kernel_fsize = ABSOLUTE(_edata - _text);
++	_kernel_vsize = ABSOLUTE(_end - __initdata_begin);
++	_kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
+ #endif
+ 
+ 	.gptab.sdata : {
+diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
+index ad4e15d12ed1c..4bea2e95798f0 100644
+--- a/arch/parisc/include/asm/page.h
++++ b/arch/parisc/include/asm/page.h
+@@ -8,6 +8,7 @@
+ #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
+ #define PAGE_MASK	(~(PAGE_SIZE-1))
+ 
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
+index 715c96ba2ec81..e84883c6b4c7a 100644
+--- a/arch/parisc/include/asm/signal.h
++++ b/arch/parisc/include/asm/signal.h
+@@ -4,23 +4,11 @@
+ 
+ #include <uapi/asm/signal.h>
+ 
+-#define _NSIG		64
+-/* bits-per-word, where word apparently means 'long' not 'int' */
+-#define _NSIG_BPW	BITS_PER_LONG
+-#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
+-
+ # ifndef __ASSEMBLY__
+ 
+ /* Most things should be clean enough to redefine this at will, if care
+    is taken to make libc match.  */
+ 
+-typedef unsigned long old_sigset_t;		/* at least 32 bits */
+-
+-typedef struct {
+-	/* next_signal() assumes this is a long - no choice */
+-	unsigned long sig[_NSIG_WORDS];
+-} sigset_t;
+-
+ #include <asm/sigcontext.h>
+ 
+ #endif /* !__ASSEMBLY */
+diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
+index 8e4895c5ea5d3..40d7a574c5dd1 100644
+--- a/arch/parisc/include/uapi/asm/signal.h
++++ b/arch/parisc/include/uapi/asm/signal.h
+@@ -57,10 +57,20 @@
+ 
+ #include <asm-generic/signal-defs.h>
+ 
++#define _NSIG		64
++#define _NSIG_BPW	(sizeof(unsigned long) * 8)
++#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
++
+ # ifndef __ASSEMBLY__
+ 
+ #  include <linux/types.h>
+ 
++typedef unsigned long old_sigset_t;	/* at least 32 bits */
++
++typedef struct {
++	unsigned long sig[_NSIG_WORDS];
++} sigset_t;
++
+ /* Avoid too many header ordering problems.  */
+ struct siginfo;
+ 
+diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
+index 83823db3488b9..2975ea0841ba4 100644
+--- a/arch/powerpc/mm/book3s64/pgtable.c
++++ b/arch/powerpc/mm/book3s64/pgtable.c
+@@ -170,6 +170,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ 	unsigned long old_pmd;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
+ 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ 	return __pmd(old_pmd);
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index 43b97032a91c0..a0c4f1bde83e8 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -900,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 
+ 			/* Get offset into TMP_REG */
+ 			EMIT(PPC_RAW_LI(tmp_reg, off));
++			/*
++			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++			 * before and after the operation.
++			 *
++			 * This is a requirement in the Linux Kernel Memory Model.
++			 * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
++			 */
++			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++				EMIT(PPC_RAW_SYNC());
+ 			tmp_idx = ctx->idx * 4;
+ 			/* load value from memory into r0 */
+ 			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
+@@ -953,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 
+ 			/* For the BPF_FETCH variant, get old data into src_reg */
+ 			if (imm & BPF_FETCH) {
++				/* Emit 'sync' to enforce full ordering */
++				if (IS_ENABLED(CONFIG_SMP))
++					EMIT(PPC_RAW_SYNC());
+ 				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
+ 				if (!fp->aux->verifier_zext)
+ 					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
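
[Editor's note] The comment added to bpf_jit_comp32.c (and to the 64-bit JIT below) says BPF_FETCH atomics must be fully ordered under the Linux Kernel Memory Model, which is why the JIT brackets the larx/stcx sequence with 'sync'. A rough userspace analogue, assuming GCC/Clang atomic builtins: a seq_cst fetch-add provides the full ordering the JIT has to reproduce by hand, while a relaxed one provides none.

#include <stdio.h>

static long counter;

/*
 * Fully ordered fetch-and-add: surrounding accesses may not be
 * reordered across it — the property the emitted sync pair enforces.
 */
static long fetch_add_ordered(long *p, long v)
{
	return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
}

/* Relaxed variant: still atomic, but gives no ordering guarantee. */
static long fetch_add_relaxed(long *p, long v)
{
	return __atomic_fetch_add(p, v, __ATOMIC_RELAXED);
}

int main(void)
{
	printf("old=%ld\n", fetch_add_ordered(&counter, 1));
	printf("old=%ld\n", fetch_add_relaxed(&counter, 1));
	return 0;
}
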
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 79f23974a3204..58522de615ff2 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -202,7 +202,8 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+ 	EMIT(PPC_RAW_BLR());
+ }
+ 
+-static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
++static int
++bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
+ {
+ 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
+ 	long reladdr;
+@@ -211,19 +212,20 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
+ 		return -EINVAL;
+ 
+ 	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
+-		reladdr = func_addr - CTX_NIA(ctx);
++		reladdr = func_addr - local_paca->kernelbase;
+ 
+ 		if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
+-			pr_err("eBPF: address of %ps out of range of pcrel address.\n",
+-				(void *)func);
++			pr_err("eBPF: address of %ps out of range of 34-bit relative address.\n",
++			       (void *)func);
+ 			return -ERANGE;
+ 		}
+-		/* pla r12,addr */
+-		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
+-		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
+-		EMIT(PPC_RAW_MTCTR(_R12));
+-		EMIT(PPC_RAW_BCTR());
+-
++		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
++		/* Align for subsequent prefix instruction */
++		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
++			EMIT(PPC_RAW_NOP());
++		/* paddi r12,r12,addr */
++		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
++		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
+ 	} else {
+ 		reladdr = func_addr - kernel_toc_addr();
+ 		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+@@ -233,9 +235,9 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
+ 
+ 		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
+ 		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
+-		EMIT(PPC_RAW_MTCTR(_R12));
+-		EMIT(PPC_RAW_BCTRL());
+ 	}
++	EMIT(PPC_RAW_MTCTR(_R12));
++	EMIT(PPC_RAW_BCTRL());
+ 
+ 	return 0;
+ }
+@@ -285,7 +287,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
+ 	int b2p_index = bpf_to_ppc(BPF_REG_3);
+ 	int bpf_tailcall_prologue_size = 8;
+ 
+-	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
++	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ 		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
+ 
+ 	/*
+@@ -803,6 +805,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 
+ 			/* Get offset into TMP_REG_1 */
+ 			EMIT(PPC_RAW_LI(tmp1_reg, off));
++			/*
++			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
++			 * before and after the operation.
++			 *
++			 * This is a requirement in the Linux Kernel Memory Model.
++			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
++			 */
++			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
++				EMIT(PPC_RAW_SYNC());
+ 			tmp_idx = ctx->idx * 4;
+ 			/* load value from memory into TMP_REG_2 */
+ 			if (size == BPF_DW)
+@@ -865,6 +876,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 			PPC_BCC_SHORT(COND_NE, tmp_idx);
+ 
+ 			if (imm & BPF_FETCH) {
++				/* Emit 'sync' to enforce full ordering */
++				if (IS_ENABLED(CONFIG_SMP))
++					EMIT(PPC_RAW_SYNC());
+ 				EMIT(PPC_RAW_MR(ret_reg, _R0));
+ 				/*
+ 				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
+@@ -993,7 +1007,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
+ 				return ret;
+ 
+ 			if (func_addr_fixed)
+-				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
++				ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
+ 			else
+ 				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
+ 
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index be09c8836d56b..8025ca7ef7798 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -103,7 +103,7 @@ config RISCV
+ 	select HAS_IOPORT if MMU
+ 	select HAVE_ARCH_AUDITSYSCALL
+ 	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
+-	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
++	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
+ 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ 	select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
+ 	select HAVE_ARCH_KASAN if MMU && 64BIT
+diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+index 2b3e952513e44..ccd0ce55aa530 100644
+--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
++++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+@@ -238,7 +238,6 @@ &i2c5 {
+ 	axp15060: pmic@36 {
+ 		compatible = "x-powers,axp15060";
+ 		reg = <0x36>;
+-		interrupts = <0>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <1>;
+ 
+diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
+index b378e2b57ad87..c786538e397c0 100644
+--- a/arch/s390/include/asm/cpacf.h
++++ b/arch/s390/include/asm/cpacf.h
+@@ -166,28 +166,86 @@
+ 
+ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+ 
+-/**
+- * cpacf_query() - check if a specific CPACF function is available
+- * @opcode: the opcode of the crypto instruction
+- * @func: the function code to test for
+- *
+- * Executes the query function for the given crypto instruction @opcode
+- * and checks if @func is available
+- *
+- * Returns 1 if @func is available for @opcode, 0 otherwise
++/*
++ * Prototype for a non-existent function, used to produce a link
++ * error if __cpacf_query() or __cpacf_check_opcode() is called
++ * with an invalid compile-time constant opcode.
+  */
+-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
++void __cpacf_bad_opcode(void);
++
++static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
++					      cpacf_mask_t *mask)
+ {
+ 	asm volatile(
+-		"	lghi	0,0\n" /* query function */
+-		"	lgr	1,%[mask]\n"
+-		"	spm	0\n" /* pckmo doesn't change the cc */
+-		/* Parameter regs are ignored, but must be nonzero and unique */
+-		"0:	.insn	rrf,%[opc] << 16,2,4,6,0\n"
+-		"	brc	1,0b\n"	/* handle partial completion */
+-		: "=m" (*mask)
+-		: [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+-		: "cc", "0", "1");
++		"	la	%%r1,%[mask]\n"
++		"	xgr	%%r0,%%r0\n"
++		"	.insn	rre,%[opc] << 16,%[r1],%[r2]\n"
++		: [mask] "=R" (*mask)
++		: [opc] "i" (opc),
++		  [r1] "i" (r1), [r2] "i" (r2)
++		: "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query_rrf(u32 opc,
++					      u8 r1, u8 r2, u8 r3, u8 m4,
++					      cpacf_mask_t *mask)
++{
++	asm volatile(
++		"	la	%%r1,%[mask]\n"
++		"	xgr	%%r0,%%r0\n"
++		"	.insn	rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
++		: [mask] "=R" (*mask)
++		: [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
++		  [r3] "i" (r3), [m4] "i" (m4)
++		: "cc", "r0", "r1");
++}
++
++static __always_inline void __cpacf_query(unsigned int opcode,
++					  cpacf_mask_t *mask)
++{
++	switch (opcode) {
++	case CPACF_KDSA:
++		__cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
++		break;
++	case CPACF_KIMD:
++		__cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
++		break;
++	case CPACF_KLMD:
++		__cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
++		break;
++	case CPACF_KM:
++		__cpacf_query_rre(CPACF_KM, 2, 4, mask);
++		break;
++	case CPACF_KMA:
++		__cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
++		break;
++	case CPACF_KMAC:
++		__cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
++		break;
++	case CPACF_KMC:
++		__cpacf_query_rre(CPACF_KMC, 2, 4, mask);
++		break;
++	case CPACF_KMCTR:
++		__cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
++		break;
++	case CPACF_KMF:
++		__cpacf_query_rre(CPACF_KMF, 2, 4, mask);
++		break;
++	case CPACF_KMO:
++		__cpacf_query_rre(CPACF_KMO, 2, 4, mask);
++		break;
++	case CPACF_PCC:
++		__cpacf_query_rre(CPACF_PCC, 0, 0, mask);
++		break;
++	case CPACF_PCKMO:
++		__cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
++		break;
++	case CPACF_PRNO:
++		__cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
++		break;
++	default:
++		__cpacf_bad_opcode();
++	}
+ }
+ 
+ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+@@ -211,10 +269,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
+ 	case CPACF_KMA:
+ 		return test_facility(146);	/* check for MSA8 */
+ 	default:
+-		BUG();
++		__cpacf_bad_opcode();
++		return 0;
+ 	}
+ }
+ 
++/**
++ * cpacf_query() - check if a specific CPACF function is available
++ * @opcode: the opcode of the crypto instruction
++ * @func: the function code to test for
++ *
++ * Executes the query function for the given crypto instruction @opcode
++ * and checks if @func is available
++ *
++ * Returns 1 if @func is available for @opcode, 0 otherwise
++ */
+ static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+ {
+ 	if (__cpacf_check_opcode(opcode)) {
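
[Editor's note] The rewritten __cpacf_query() above relies on a function that is declared but deliberately never defined, turning an invalid constant opcode into a link-time failure instead of a runtime BUG(). A minimal sketch of the idiom, assuming the call is inlined with a compile-time constant argument and built with optimization (-O2) so the dead branch is discarded:

/* Declared, never defined: any surviving call fails at link time. */
void bad_opcode(void);

static inline int query(unsigned int opcode)
{
	switch (opcode) {
	case 1:
	case 2:
		return 1;	/* supported opcodes resolve statically */
	default:
		bad_opcode();	/* dead-code-eliminated for valid constants */
		return 0;
	}
}

int main(void)
{
	return query(1);	/* query(3) would fail at link time, not runtime */
}
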
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 259c2439c2517..1077c1daec851 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1778,8 +1778,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
+ 				   unsigned long addr, pmd_t *pmdp)
+ {
+-	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
++	pmd_t pmd;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++	pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ 	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ }
+ 
+diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
+index 505b6700805dd..0964fede0b2cc 100644
+--- a/arch/sparc/include/asm/smp_64.h
++++ b/arch/sparc/include/asm/smp_64.h
+@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+ int hard_smp_processor_id(void);
+ #define raw_smp_processor_id() (current_thread_info()->cpu)
+ 
+-void smp_fill_in_cpu_possible_map(void);
+ void smp_fill_in_sib_core_maps(void);
+ void __noreturn cpu_play_dead(void);
+ 
+@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
+ #define smp_fill_in_sib_core_maps() do { } while (0)
+ #define smp_fetch_global_regs() do { } while (0)
+ #define smp_fetch_global_pmu() do { } while (0)
+-#define smp_fill_in_cpu_possible_map() do { } while (0)
+ #define smp_init_cpu_poke() do { } while (0)
+ #define scheduler_poke() do { } while (0)
+ 
+diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h
+index 4321322701fcf..0da2b1adc0f52 100644
+--- a/arch/sparc/include/uapi/asm/termbits.h
++++ b/arch/sparc/include/uapi/asm/termbits.h
+@@ -10,16 +10,6 @@ typedef unsigned int	tcflag_t;
+ typedef unsigned long	tcflag_t;
+ #endif
+ 
+-#define NCC 8
+-struct termio {
+-	unsigned short c_iflag;		/* input mode flags */
+-	unsigned short c_oflag;		/* output mode flags */
+-	unsigned short c_cflag;		/* control mode flags */
+-	unsigned short c_lflag;		/* local mode flags */
+-	unsigned char c_line;		/* line discipline */
+-	unsigned char c_cc[NCC];	/* control characters */
+-};
+-
+ #define NCCS 17
+ struct termios {
+ 	tcflag_t c_iflag;		/* input mode flags */
+diff --git a/arch/sparc/include/uapi/asm/termios.h b/arch/sparc/include/uapi/asm/termios.h
+index ee86f4093d83e..cceb32260881e 100644
+--- a/arch/sparc/include/uapi/asm/termios.h
++++ b/arch/sparc/include/uapi/asm/termios.h
+@@ -40,5 +40,14 @@ struct winsize {
+ 	unsigned short ws_ypixel;
+ };
+ 
++#define NCC 8
++struct termio {
++	unsigned short c_iflag;		/* input mode flags */
++	unsigned short c_oflag;		/* output mode flags */
++	unsigned short c_cflag;		/* control mode flags */
++	unsigned short c_lflag;		/* local mode flags */
++	unsigned char c_line;		/* line discipline */
++	unsigned char c_cc[NCC];	/* control characters */
++};
+ 
+ #endif /* _UAPI_SPARC_TERMIOS_H */
+diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
+index 998aa693d4912..ba82884cb92aa 100644
+--- a/arch/sparc/kernel/prom_64.c
++++ b/arch/sparc/kernel/prom_64.c
+@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+ 	ncpus_probed++;
+ #ifdef CONFIG_SMP
+ 	set_cpu_present(cpuid, true);
+-	set_cpu_possible(cpuid, true);
++
++	if (num_possible_cpus() < nr_cpu_ids)
++		set_cpu_possible(cpuid, true);
+ #endif
+ 	return NULL;
+ }
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 6a4797dec34b4..6bbe8e394ad3f 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -671,7 +671,6 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	paging_init();
+ 	init_sparc64_elf_hwcap();
+-	smp_fill_in_cpu_possible_map();
+ 	/*
+ 	 * Once the OF device tree and MDESC have been setup and nr_cpus has
+ 	 * been parsed, we know the list of possible cpus.  Therefore we can
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index a0cc9bb41a921..e40c395db2026 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1216,20 +1216,6 @@ void __init smp_setup_processor_id(void)
+ 		xcall_deliver_impl = hypervisor_xcall_deliver;
+ }
+ 
+-void __init smp_fill_in_cpu_possible_map(void)
+-{
+-	int possible_cpus = num_possible_cpus();
+-	int i;
+-
+-	if (possible_cpus > nr_cpu_ids)
+-		possible_cpus = nr_cpu_ids;
+-
+-	for (i = 0; i < possible_cpus; i++)
+-		set_cpu_possible(i, true);
+-	for (; i < NR_CPUS; i++)
+-		set_cpu_possible(i, false);
+-}
+-
+ void smp_fill_in_sib_core_maps(void)
+ {
+ 	unsigned int i;
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index b44d79d778c71..ef69127d7e5e8 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -249,6 +249,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ {
+ 	pmd_t old, entry;
+ 
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+ 	old = pmdp_establish(vma, address, pmdp, entry);
+ 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
+index ce2d507c3b076..5ee6373d4d926 100644
+--- a/arch/x86/kernel/cpu/topology_amd.c
++++ b/arch/x86/kernel/cpu/topology_amd.c
+@@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
+ 
+ 	/*
+ 	 * If leaf 0xb is available, then the domain shifts are set
+-	 * already and nothing to do here.
++	 * already and nothing to do here. Only valid for family >= 0x17.
+ 	 */
+-	if (!has_0xb) {
++	if (!has_0xb && tscan->c->x86 >= 0x17) {
+ 		/*
+ 		 * Leaf 0x80000008 set the CORE domain shift already.
+ 		 * Update the SMT domain, but do not propagate it.
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9aaf83c8d57df..308416b50b036 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3843,16 +3843,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 	/*
+-	 * KVM should never request an NMI window when vNMI is enabled, as KVM
+-	 * allows at most one to-be-injected NMI and one pending NMI, i.e. if
+-	 * two NMIs arrive simultaneously, KVM will inject one and set
+-	 * V_NMI_PENDING for the other.  WARN, but continue with the standard
+-	 * single-step approach to try and salvage the pending NMI.
++	 * If NMIs are outright masked, i.e. the vCPU is already handling an
++	 * NMI, and KVM has not yet intercepted an IRET, then there is nothing
++	 * more to do at this time as KVM has already enabled IRET intercepts.
++	 * If KVM has already intercepted IRET, then single-step over the IRET,
++	 * as NMIs aren't architecturally unmasked until the IRET completes.
++	 *
++	 * If vNMI is enabled, KVM should never request an NMI window if NMIs
++	 * are masked, as KVM allows at most one to-be-injected NMI and one
++	 * pending NMI.  If two NMIs arrive simultaneously, KVM will inject one
++	 * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
++	 * unmasked.  KVM _will_ request an NMI window in some situations, e.g.
++	 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
++	 * inject the NMI.  In those situations, KVM needs to single-step over
++	 * the STI shadow or intercept STGI.
+ 	 */
+-	WARN_ON_ONCE(is_vnmi_enabled(svm));
++	if (svm_get_nmi_mask(vcpu)) {
++		WARN_ON_ONCE(is_vnmi_enabled(svm));
+ 
+-	if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
+-		return; /* IRET will cause a vm exit */
++		if (!svm->awaiting_iret_completion)
++			return; /* IRET will cause a vm exit */
++	}
+ 
+ 	/*
+ 	 * SEV-ES guests are responsible for signaling when a vCPU is ready to
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index d007591b80597..103cbccf1d7dd 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -631,6 +631,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ 			 pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
++
+ 	/*
+ 	 * No flush is necessary. Once an invalid PTE is established, the PTE's
+ 	 * access and dirty bits cannot be updated.
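
[Editor's note] Several hunks in this patch (powerpc, s390, sparc and x86 above) add VM_WARN_ON_ONCE(!pmd_present(*pmdp)) so invalidating a non-present PMD complains exactly once rather than flooding the log. A hedged userspace approximation of the warn-once idiom, using the GNU statement-expression extension:

#include <stdio.h>
#include <stdbool.h>

/* Warn at most once per call site, like the kernel's WARN_ON_ONCE(). */
#define warn_on_once(cond) ({					\
	static bool __warned;					\
	bool __c = (cond);					\
	if (__c && !__warned) {					\
		__warned = true;				\
		fprintf(stderr, "warning: %s (%s:%d)\n",	\
			#cond, __FILE__, __LINE__);		\
	}							\
	__c;							\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_on_once(i >= 0);	/* prints only on the first pass */
	return 0;
}
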
+diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
+index fbd76498aba83..3f9ec273a121f 100644
+--- a/crypto/ecdsa.c
++++ b/crypto/ecdsa.c
+@@ -373,4 +373,7 @@ module_exit(ecdsa_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
+ MODULE_DESCRIPTION("ECDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
++MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
+ MODULE_ALIAS_CRYPTO("ecdsa-generic");
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index f3c6b5e15e75b..3811f3805b5d8 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+ MODULE_DESCRIPTION("EC-RDSA generic algorithm");
++MODULE_ALIAS_CRYPTO("ecrdsa");
+ MODULE_ALIAS_CRYPTO("ecrdsa-generic");
+diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
+index 01faca3a238a3..bb9f8475ce594 100644
+--- a/drivers/acpi/apei/einj-core.c
++++ b/drivers/acpi/apei/einj-core.c
+@@ -903,7 +903,7 @@ static void __exit einj_exit(void)
+ 	if (einj_initialized)
+ 		platform_driver_unregister(&einj_driver);
+ 
+-	platform_device_del(einj_dev);
++	platform_device_unregister(einj_dev);
+ }
+ 
+ module_init(einj_init);
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 59423fe9d0f29..6cc8572759a3d 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -630,6 +630,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "X565"),
+ 		},
+ 	},
++	{
++		/* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
++		},
++	},
++	{
++		/* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 448a511cbc179..e7ac142c2423d 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+ static struct legacy_probe probe_list[NR_HOST];
+ static struct legacy_data legacy_data[NR_HOST];
+ static struct ata_host *legacy_host[NR_HOST];
+-static int nr_legacy_host;
+-
+ 
+ /**
+  *	legacy_probe_add	-	Add interface to probe list
+@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < nr_legacy_host; i++) {
++	for (i = 0; i < NR_HOST; i++) {
+ 		struct legacy_data *ld = &legacy_data[i];
+-		ata_host_detach(legacy_host[i]);
++
++		if (legacy_host[i])
++			ata_host_detach(legacy_host[i]);
+ 		platform_device_unregister(ld->platform_dev);
+ 	}
+ }
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 714070ebb6e7a..0c20fbc089dae 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
+ 		interrupt = 0;
+ 
+ 	tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+-	flush_work(&priv->free_irq_work);
++	if (priv->free_irq_work.func)
++		flush_work(&priv->free_irq_work);
+ 
+ 	tpm_tis_clkrun_enable(chip, false);
+ 
+diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
+index e4fbbf3c40fe2..3cb235df9d379 100644
+--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
++++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
+@@ -56,6 +56,8 @@ static int clk_dvp_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	data->num = NR_CLOCKS;
++
+ 	data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev,
+ 							"hdmi0-108MHz",
+ 							&clk_dvp_parent, 0,
+@@ -76,7 +78,6 @@ static int clk_dvp_probe(struct platform_device *pdev)
+ 		goto unregister_clk0;
+ 	}
+ 
+-	data->num = NR_CLOCKS;
+ 	ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ 				     data);
+ 	if (ret)
+diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
+index 829406dc44a20..4d411408e4afe 100644
+--- a/drivers/clk/bcm/clk-raspberrypi.c
++++ b/drivers/clk/bcm/clk-raspberrypi.c
+@@ -371,8 +371,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ 			if (IS_ERR(hw))
+ 				return PTR_ERR(hw);
+ 
+-			data->hws[clks->id] = hw;
+ 			data->num = clks->id + 1;
++			data->hws[clks->id] = hw;
+ 		}
+ 
+ 		clks++;
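
[Editor's note] Both clk hunks above move the data->num assignment ahead of the first store into data->hws[]. The likely reason (an assumption worth stating) is that hws is a flexible array annotated with __counted_by(num) in recent kernels, so bounds-checking instrumentation rejects any element access made while num is still zero. A small sketch, assuming a compiler new enough to honor the attribute, with a no-op fallback otherwise:

#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(counted_by)
#  define __counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(m)	/* older compilers: attribute is a no-op */
#endif

struct clk_data {
	unsigned int num;
	void *hws[] __counted_by(num);
};

int main(void)
{
	struct clk_data *d = calloc(1, sizeof(*d) + 4 * sizeof(void *));

	if (!d)
		return 1;
	d->num = 4;		/* must be set before any hws[] access */
	d->hws[0] = &d->num;	/* sanitizers check this against d->num */
	free(d);
	return 0;
}
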
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index 5e3da5558f4e0..d7ab5bd5d4b41 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -55,6 +55,29 @@ static struct clk_alpha_pll ipq_pll_huayra = {
+ 	},
+ };
+ 
++static struct clk_alpha_pll ipq_pll_stromer = {
++	.offset = 0x0,
++	/*
++	 * Reuse CLK_ALPHA_PLL_TYPE_STROMER_PLUS register offsets.
++	 * Although this is a bit confusing, the offset values
++	 * are correct nevertheless.
++	 */
++	.regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
++	.flags = SUPPORTS_DYNAMIC_UPDATE,
++	.clkr = {
++		.enable_reg = 0x0,
++		.enable_mask = BIT(0),
++		.hw.init = &(const struct clk_init_data) {
++			.name = "a53pll",
++			.parent_data = &(const struct clk_parent_data) {
++				.fw_name = "xo",
++			},
++			.num_parents = 1,
++			.ops = &clk_alpha_pll_stromer_ops,
++		},
++	},
++};
++
+ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ 	.offset = 0x0,
+ 	.regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+@@ -145,8 +168,8 @@ struct apss_pll_data {
+ };
+ 
+ static const struct apss_pll_data ipq5018_pll_data = {
+-	.pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+-	.pll = &ipq_pll_stromer_plus,
++	.pll_type = CLK_ALPHA_PLL_TYPE_STROMER,
++	.pll = &ipq_pll_stromer,
+ 	.pll_config = &ipq5018_pll_config,
+ };
+ 
+@@ -204,7 +227,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
+ 
+ 	if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA)
+ 		clk_alpha_pll_configure(data->pll, regmap, data->pll_config);
+-	else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
++	else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER ||
++		 data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
+ 		clk_stromer_pll_configure(data->pll, regmap, data->pll_config);
+ 
+ 	ret = devm_clk_register_regmap(dev, &data->pll->clkr);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 734a73f322b3a..be18ff983d35c 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -2489,6 +2489,8 @@ static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH);
+ 
+ 	regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
++
++	a <<= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ 	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+ 	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ 		     a >> ALPHA_BITWIDTH);
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 28166df81cf8d..e263db0385ab7 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -705,7 +705,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 	if (state)
+ 		policy->cpuinfo.max_freq = cpudata->max_freq;
+ 	else
+-		policy->cpuinfo.max_freq = cpudata->nominal_freq;
++		policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+ 
+ 	policy->max = policy->cpuinfo.max_freq;
+ 
+diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+index 9da2278bd5b7d..04260f61d0429 100644
+--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
+@@ -130,8 +130,7 @@ static void adf_device_reset_worker(struct work_struct *work)
+ 	if (adf_dev_restart(accel_dev)) {
+ 		/* The device hanged and we can't restart it so stop here */
+ 		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+-		if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+-		    completion_done(&reset_data->compl))
++		if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ 			kfree(reset_data);
+ 		WARN(1, "QAT: device restart failed. Device is unusable\n");
+ 		return;
+@@ -147,16 +146,8 @@ static void adf_device_reset_worker(struct work_struct *work)
+ 	adf_dev_restarted_notify(accel_dev);
+ 	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+ 
+-	/*
+-	 * The dev is back alive. Notify the caller if in sync mode
+-	 *
+-	 * If device restart will take a more time than expected,
+-	 * the schedule_reset() function can timeout and exit. This can be
+-	 * detected by calling the completion_done() function. In this case
+-	 * the reset_data structure needs to be freed here.
+-	 */
+-	if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+-	    completion_done(&reset_data->compl))
++	/* The dev is back alive. Notify the caller if in sync mode */
++	if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ 		kfree(reset_data);
+ 	else
+ 		complete(&reset_data->compl);
+@@ -191,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+ 		if (!timeout) {
+ 			dev_err(&GET_DEV(accel_dev),
+ 				"Reset device timeout expired\n");
++			cancel_work_sync(&reset_data->reset_work);
+ 			ret = -EFAULT;
+-		} else {
+-			kfree(reset_data);
+ 		}
++		kfree(reset_data);
+ 		return ret;
+ 	}
+ 	return 0;
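
[Editor's note] The adf_aer change above settles who frees reset_data: in sync mode the caller always frees it, and cancel_work_sync() on timeout guarantees the worker is gone first; in async mode the worker frees it. The old completion_done() test left a window where both sides could free. A pthread-based sketch of that ownership rule, with all names invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct reset_data {
	bool async;	/* async: the worker frees; sync: the caller frees */
};

static void *reset_worker(void *arg)
{
	struct reset_data *rd = arg;
	bool async = rd->async;

	puts("device reset finished");
	if (async)
		free(rd);	/* async mode: the worker owns the allocation */
	return NULL;
}

static int schedule_reset(bool async)
{
	struct reset_data *rd = calloc(1, sizeof(*rd));
	pthread_t t;

	if (!rd)
		return -1;
	rd->async = async;
	if (pthread_create(&t, NULL, reset_worker, rd)) {
		free(rd);
		return -1;
	}
	/*
	 * Joining stands in for cancel_work_sync(): once it returns the
	 * worker can no longer touch rd, so sync-mode freeing is safe.
	 */
	pthread_join(t, NULL);
	if (!async)
		free(rd);	/* sync mode: the caller owns the allocation */
	return 0;
}

int main(void)
{
	schedule_reset(false);
	schedule_reset(true);
	return 0;
}
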
+diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
+index cf8bda7f0855d..7ec14b5b84905 100644
+--- a/drivers/crypto/starfive/jh7110-rsa.c
++++ b/drivers/crypto/starfive/jh7110-rsa.c
+@@ -273,7 +273,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
+ 
+ err_rsa_crypt:
+ 	writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
+-	kfree(rctx->rsa_data);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 1f3520d768613..a17f3c0cdfa60 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -81,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ 		amd64_warn("%s: error reading F%dx%03x.\n",
+ 			   func, PCI_FUNC(pdev->devfn), offset);
+ 
+-	return err;
++	return pcibios_err_to_errno(err);
+ }
+ 
+ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+@@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ 		amd64_warn("%s: error writing to F%dx%03x.\n",
+ 			   func, PCI_FUNC(pdev->devfn), offset);
+ 
+-	return err;
++	return pcibios_err_to_errno(err);
+ }
+ 
+ /*
+@@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt)
+ 	}
+ 
+ 	ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
+-	if (ret)
++	if (ret) {
++		ret = pcibios_err_to_errno(ret);
+ 		goto out;
++	}
+ 
+ 	gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
+ 	gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
+diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
+index cdd8480e73687..dbe9fe5f2ca6c 100644
+--- a/drivers/edac/igen6_edac.c
++++ b/drivers/edac/igen6_edac.c
+@@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable)
+ 
+ 	rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
+ 	if (rc)
+-		return rc;
++		return pcibios_err_to_errno(rc);
+ 
+ 	if (enable)
+ 		errcmd |= ERRCMD_CE | ERRSTS_UE;
+@@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable)
+ 
+ 	rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
+ 	if (rc)
+-		return rc;
++		return pcibios_err_to_errno(rc);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
+index 684c9354637c6..d0ef93551c44f 100644
+--- a/drivers/firmware/efi/libstub/loongarch.c
++++ b/drivers/firmware/efi/libstub/loongarch.c
+@@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+ unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
+ 		efi_loaded_image_t *image)
+ {
+-	return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
++	return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
+ }
+ 
+ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 29c24578ad2bf..2ad85052b3f60 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -569,13 +569,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	desc.args[1] = mdata_phys;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ out:
+@@ -637,10 +638,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
+@@ -672,10 +675,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
+@@ -706,11 +711,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
+ 
+ 	ret = qcom_scm_bw_enable();
+ 	if (ret)
+-		return ret;
++		goto disable_clk;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+-
+ 	qcom_scm_bw_disable();
++
++disable_clk:
+ 	qcom_scm_clk_disable();
+ 
+ 	return ret ? : res.result[0];
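
[Editor's note] All four qcom_scm hunks above convert an early "return ret" into "goto disable_clk" so that a failed bandwidth vote still releases the clock taken earlier. This is the kernel's standard acquire-in-order, release-in-reverse unwind; a sketch with stand-in resource functions (the simulated bandwidth vote fails to show the unwind path):

#include <stdio.h>

static int enable_clk(void)   { puts("clk on");  return 0; }
static void disable_clk(void) { puts("clk off"); }
static int enable_bw(void)    { puts("bw vote"); return -1; /* simulated failure */ }
static void disable_bw(void)  { puts("bw drop"); }

static int do_call(void)
{
	int ret = enable_clk();

	if (ret)
		return ret;
	ret = enable_bw();
	if (ret)
		goto disable_clk;	/* not a bare return: clk is held */

	puts("firmware call");
	disable_bw();
disable_clk:				/* labels live in their own namespace */
	disable_clk();
	return ret;
}

int main(void)
{
	return do_call() ? 1 : 0;
}
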
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index e4d4e55c08ad5..0535b07987d9d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1188,7 +1188,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
+ 	int ret;
+ 
+ 	ctx->sync = &mem->sync;
+-	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
++	drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
++		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+ 	drm_exec_until_all_locked(&ctx->exec) {
+ 		ctx->n_vms = 0;
+ 		list_for_each_entry(entry, &mem->attachments, list) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index 6857c586ded71..c8c23dbb90916 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -211,6 +211,7 @@ union igp_info {
+ 	struct atom_integrated_system_info_v1_11 v11;
+ 	struct atom_integrated_system_info_v1_12 v12;
+ 	struct atom_integrated_system_info_v2_1 v21;
++	struct atom_integrated_system_info_v2_3 v23;
+ };
+ 
+ union umc_info {
+@@ -359,6 +360,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ 					if (vram_type)
+ 						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ 					break;
++				case 3:
++					mem_channel_number = igp_info->v23.umachannelnumber;
++					if (!mem_channel_number)
++						mem_channel_number = 1;
++					mem_type = igp_info->v23.memorytype;
++					if (mem_type == LpDdr5MemType)
++						mem_channel_width = 32;
++					else
++						mem_channel_width = 64;
++					if (vram_width)
++						*vram_width = mem_channel_number * mem_channel_width;
++					if (vram_type)
++						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
++					break;
+ 				default:
+ 					return -EINVAL;
+ 				}
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 43775cb67ff5f..8f231766756a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2021,6 +2021,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
+ 
+ 	DRM_DEBUG("IH: SDMA trap\n");
+ 	instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
++	if (instance < 0)
++		return instance;
++
+ 	switch (entry->ring_id) {
+ 	case 0:
+ 		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 719d6d365e150..ff01610fbce3b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+ 			f2g = &gfx_v11_kfd2kgd;
+ 			break;
+ 		case IP_VERSION(11, 0, 3):
+-			if ((adev->pdev->device == 0x7460 &&
+-			     adev->pdev->revision == 0x00) ||
+-			    (adev->pdev->device == 0x7461 &&
+-			     adev->pdev->revision == 0x00))
+-				/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
+-				gfx_target_version = 110005;
+-			else
+-				/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+-				gfx_target_version = 110001;
++			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
++			gfx_target_version = 110001;
+ 			f2g = &gfx_v11_kfd2kgd;
+ 			break;
+ 		case IP_VERSION(11, 5, 0):
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index af3eebb4c9bcb..1acb2d2c5597b 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1657,6 +1657,49 @@ struct atom_integrated_system_info_v2_2
+ 	uint32_t  reserved4[189];
+ };
+ 
++struct uma_carveout_option {
++  char       optionName[29];        //max length of string is 28 chars + '\0'. Current design is for "minimum", "Medium", "High". This makes the entire struct size 64 bits
++  uint8_t    memoryCarvedGb;        //memory carved out with setting
++  uint8_t    memoryRemainingGb;     //memory remaining on system
++  union {
++    struct _flags {
++      uint8_t Auto     : 1;
++      uint8_t Custom   : 1;
++      uint8_t Reserved : 6;
++    } flags;
++    uint8_t all8;
++  } uma_carveout_option_flags;
++};
++
++struct atom_integrated_system_info_v2_3 {
++  struct  atom_common_table_header table_header;
++  uint32_t  vbios_misc; // enum of atom_system_vbiosmisc_def
++  uint32_t  gpucapinfo; // enum of atom_system_gpucapinf_def
++  uint32_t  system_config;
++  uint32_t  cpucapinfo;
++  uint16_t  gpuclk_ss_percentage; // unit of 0.001%,   1000 means 1%
++  uint16_t  gpuclk_ss_type;
++  uint16_t  dpphy_override;  // bit vector, enum of atom_sysinfo_dpphy_override_def
++  uint8_t memorytype;       // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
++  uint8_t umachannelnumber; // number of memory channels
++  uint8_t htc_hyst_limit;
++  uint8_t htc_tmp_limit;
++  uint8_t reserved1; // dp_ss_control
++  uint8_t gpu_package_id;
++  struct  edp_info_table  edp1_info;
++  struct  edp_info_table  edp2_info;
++  uint32_t  reserved2[8];
++  struct  atom_external_display_connection_info extdispconninfo;
++  uint8_t UMACarveoutVersion;
++  uint8_t UMACarveoutIndexMax;
++  uint8_t UMACarveoutTypeDefault;
++  uint8_t UMACarveoutIndexDefault;
++  uint8_t UMACarveoutType;           //Auto or Custom
++  uint8_t UMACarveoutIndex;
++  struct  uma_carveout_option UMASizeControlOption[20];
++  uint8_t reserved3[110];
++};
++
+ // system_config
+ enum atom_system_vbiosmisc_def{
+   INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+index 4abfcd32747d3..2fb6c9cb0f143 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+ 	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 
+-	if (!en && adev->in_s4) {
+-		/* Adds a GFX reset as workaround just before sending the
+-		 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+-		 * an invalid state.
+-		 */
+-		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+-						      SMU_RESET_MODE_2, NULL);
+-		if (ret)
+-			return ret;
++	if (!en && !adev->in_s0ix) {
++		if (adev->in_s4) {
++			/* Adds a GFX reset as workaround just before sending the
++			 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
++			 * an invalid state.
++			 */
++			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
++							      SMU_RESET_MODE_2, NULL);
++			if (ret)
++				return ret;
++		}
+ 
+ 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ 	}
+diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
+index d647d89764cb9..b4659cd6285ab 100644
+--- a/drivers/gpu/drm/drm_fbdev_generic.c
++++ b/drivers/gpu/drm/drm_fbdev_generic.c
+@@ -113,7 +113,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 	/* screen */
+ 	info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ 	info->screen_buffer = screen_buffer;
+-	info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
+ 	info->fix.smem_len = screen_size;
+ 
+ 	/* deferred I/O */
+diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
+index b758fd110c204..c0662a022f59c 100644
+--- a/drivers/gpu/drm/i915/i915_hwmon.c
++++ b/drivers/gpu/drm/i915/i915_hwmon.c
+@@ -793,7 +793,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ 	if (!IS_DGFX(i915))
+ 		return;
+ 
+-	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
++	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+ 	if (!hwmon)
+ 		return;
+ 
+@@ -819,14 +819,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ 	hwm_get_preregistration_info(i915);
+ 
+ 	/*  hwmon_dev points to device hwmon<i> */
+-	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
+-							 ddat,
+-							 &hwm_chip_info,
+-							 hwm_groups);
+-	if (IS_ERR(hwmon_dev)) {
+-		i915->hwmon = NULL;
+-		return;
+-	}
++	hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
++						    ddat,
++						    &hwm_chip_info,
++						    hwm_groups);
++	if (IS_ERR(hwmon_dev))
++		goto err;
+ 
+ 	ddat->hwmon_dev = hwmon_dev;
+ 
+@@ -839,16 +837,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
+ 		if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
+ 			continue;
+ 
+-		hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
+-								 ddat_gt,
+-								 &hwm_gt_chip_info,
+-								 NULL);
++		hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
++							    ddat_gt,
++							    &hwm_gt_chip_info,
++							    NULL);
+ 		if (!IS_ERR(hwmon_dev))
+ 			ddat_gt->hwmon_dev = hwmon_dev;
+ 	}
++	return;
++err:
++	i915_hwmon_unregister(i915);
+ }
+ 
+ void i915_hwmon_unregister(struct drm_i915_private *i915)
+ {
+-	fetch_and_zero(&i915->hwmon);
++	struct i915_hwmon *hwmon = i915->hwmon;
++	struct intel_gt *gt;
++	int i;
++
++	if (!hwmon)
++		return;
++
++	for_each_gt(gt, i915, i)
++		if (hwmon->ddat_gt[i].hwmon_dev)
++			hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
++
++	if (hwmon->ddat.hwmon_dev)
++		hwmon_device_unregister(hwmon->ddat.hwmon_dev);
++
++	mutex_destroy(&hwmon->hwmon_lock);
++
++	kfree(i915->hwmon);
++	i915->hwmon = NULL;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
+index 7c124475c4289..a35e0781b7b95 100644
+--- a/drivers/gpu/drm/xe/xe_bb.c
++++ b/drivers/gpu/drm/xe/xe_bb.c
+@@ -96,7 +96,8 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
+ {
+ 	u64 addr = xe_sa_bo_gpu_addr(bb->bo);
+ 
+-	xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
++	xe_gt_assert(q->gt, !xe_sched_job_is_migration(q));
++	xe_gt_assert(q->gt, q->width == 1);
+ 	return __xe_bb_create_job(q, bb, &addr);
+ }
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+index 5b91fb106cfc3..091e37933225a 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+@@ -31,6 +31,7 @@ struct i2c_hid_of_elan {
+ 	struct regulator *vcc33;
+ 	struct regulator *vccio;
+ 	struct gpio_desc *reset_gpio;
++	bool no_reset_on_power_off;
+ 	const struct elan_i2c_hid_chip_data *chip_data;
+ };
+ 
+@@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+ 		container_of(ops, struct i2c_hid_of_elan, ops);
+ 	int ret;
+ 
++	gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++
+ 	if (ihid_elan->vcc33) {
+ 		ret = regulator_enable(ihid_elan->vcc33);
+ 		if (ret)
+-			return ret;
++			goto err_deassert_reset;
+ 	}
+ 
+ 	ret = regulator_enable(ihid_elan->vccio);
+-	if (ret) {
+-		regulator_disable(ihid_elan->vcc33);
+-		return ret;
+-	}
++	if (ret)
++		goto err_disable_vcc33;
+ 
+ 	if (ihid_elan->chip_data->post_power_delay_ms)
+ 		msleep(ihid_elan->chip_data->post_power_delay_ms);
+@@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
+ 		msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
+ 
+ 	return 0;
++
++err_disable_vcc33:
++	if (ihid_elan->vcc33)
++		regulator_disable(ihid_elan->vcc33);
++err_deassert_reset:
++	if (ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++	return ret;
+ }
+ 
+ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+@@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+ 	struct i2c_hid_of_elan *ihid_elan =
+ 		container_of(ops, struct i2c_hid_of_elan, ops);
+ 
+-	gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++	/*
++	 * Do not assert reset when the hardware allows for it to remain
++	 * deasserted regardless of the state of the (shared) power supply to
++	 * avoid wasting power when the supply is left on.
++	 */
++	if (!ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
++
+ 	if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
+ 		msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
+ 
+@@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
+ static int i2c_hid_of_elan_probe(struct i2c_client *client)
+ {
+ 	struct i2c_hid_of_elan *ihid_elan;
++	int ret;
+ 
+ 	ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
+ 	if (!ihid_elan)
+@@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
+ 	if (IS_ERR(ihid_elan->reset_gpio))
+ 		return PTR_ERR(ihid_elan->reset_gpio);
+ 
++	ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
++						"no-reset-on-power-off");
++
+ 	ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
+-	if (IS_ERR(ihid_elan->vccio))
+-		return PTR_ERR(ihid_elan->vccio);
++	if (IS_ERR(ihid_elan->vccio)) {
++		ret = PTR_ERR(ihid_elan->vccio);
++		goto err_deassert_reset;
++	}
+ 
+ 	ihid_elan->chip_data = device_get_match_data(&client->dev);
+ 
+ 	if (ihid_elan->chip_data->main_supply_name) {
+ 		ihid_elan->vcc33 = devm_regulator_get(&client->dev,
+ 						      ihid_elan->chip_data->main_supply_name);
+-		if (IS_ERR(ihid_elan->vcc33))
+-			return PTR_ERR(ihid_elan->vcc33);
++		if (IS_ERR(ihid_elan->vcc33)) {
++			ret = PTR_ERR(ihid_elan->vcc33);
++			goto err_deassert_reset;
++		}
+ 	}
+ 
+-	return i2c_hid_core_probe(client, &ihid_elan->ops,
+-				  ihid_elan->chip_data->hid_descriptor_address, 0);
++	ret = i2c_hid_core_probe(client, &ihid_elan->ops,
++				 ihid_elan->chip_data->hid_descriptor_address, 0);
++	if (ret)
++		goto err_deassert_reset;
++
++	return 0;
++
++err_deassert_reset:
++	if (ihid_elan->no_reset_on_power_off)
++		gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
++
++	return ret;
+ }
+ 
+ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 001799bc28ed6..b8548105cd67a 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+ 
+ 		ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+ 		if (!ret) {
+-			if (!val)
++			if (!val) {
++				fwnode_handle_put(child);
+ 				return dev_err_probe(&st->client->dev, -EINVAL,
+ 						     "shunt resistor value cannot be zero\n");
++			}
+ 			st->r_sense_uohm[addr] = val;
+ 		}
+ 	}
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 147d338c191e7..648893f9e4b67 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -289,6 +289,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Meteor Lake-S CPU */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Raptor Lake-S */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index d6037a3286690..14ae0cfc325ef 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -445,6 +445,11 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
+ 	return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
+ }
+ 
++static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev)
++{
++	return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev));
++}
++
+ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ 			   void *arg)
+ {
+@@ -471,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ 			break;
+ 
+ 		client = i2c_acpi_find_client_by_adev(adev);
+-		if (!client)
+-			break;
++		if (client) {
++			i2c_unregister_device(client);
++			put_device(&client->dev);
++		}
++
++		adapter = i2c_acpi_find_adapter_by_adev(adev);
++		if (adapter) {
++			acpi_unbind_one(&adapter->dev);
++			put_device(&adapter->dev);
++		}
+ 
+-		i2c_unregister_device(client);
+-		put_device(&client->dev);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index a2298ab460a37..bb299ce02cccb 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -415,6 +415,19 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 	int ret;
+ 
+ 	mutex_lock(&master->lock);
++	/*
++	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
++	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
++	 * ibitype will be 0 since it was last updated only after the 8th SCL
++	 * cycle, leading to missed client IBI handlers.
++	 *
++	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
++	 * at svc_i3c_master_priv_xfers().
++	 *
++	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
++	 */
++	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
++
+ 	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
+ 	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
+ 	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+@@ -429,9 +442,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
+ 		goto reenable_ibis;
+ 	}
+ 
+-	/* Clear the interrupt status */
+-	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+-
+ 	status = readl(master->regs + SVC_I3C_MSTATUS);
+ 	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
+ 	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
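A condensed sketch of the ordering this hunk establishes, assuming the driver's SVC_I3C_* register macros and readl_poll_timeout() from <linux/iopoll.h>; handle_auto_ibi() is a hypothetical distillation of svc_i3c_master_ibi_work():

static int handle_auto_ibi(struct svc_i3c_master *master)
{
	u32 status;

	/* 1. Drop any IBIWON left over from a lost arbitration. */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* 2. Only now arm the AUTOIBI request... */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI | SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/*
	 * 3. ...so a successful poll can only see the fresh event, and the
	 *    ibitype/ibiaddr fields read afterwards are trustworthy.
	 */
	return readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, status,
				  SVC_I3C_MSTATUS_IBIWON(status), 0, 1000);
}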
+diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
+index 9e71c44288141..4f3a12383a1e4 100644
+--- a/drivers/irqchip/irq-riscv-intc.c
++++ b/drivers/irqchip/irq-riscv-intc.c
+@@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
+ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+ 				       const unsigned long end)
+ {
+-	struct fwnode_handle *fn;
+ 	struct acpi_madt_rintc *rintc;
++	struct fwnode_handle *fn;
++	int rc;
+ 
+ 	rintc = (struct acpi_madt_rintc *)header;
+ 
+@@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
+ 		return -ENOMEM;
+ 	}
+ 
+-	return riscv_intc_init_common(fn, &riscv_intc_chip);
++	rc = riscv_intc_init_common(fn, &riscv_intc_chip);
++	if (rc)
++		irq_domain_free_fwnode(fn);
++
++	return rc;
+ }
+ 
+ IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
+diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
+index 2bba4d6aaaa28..463eb13bd0b2a 100644
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
+ int __bch_count_data(struct btree_keys *b)
+ {
+ 	unsigned int ret = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k;
+ 
+ 	if (b->ops->is_extents)
+@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+ {
+ 	va_list args;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	const char *err;
+ 
+ 	for_each_key(b, k, &iter) {
+@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+ 	struct bset *i = bset_tree_last(b)->data;
+ 	struct bkey *m, *prev = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey preceding_key_on_stack = ZERO_KEY;
+ 	struct bkey *preceding_key_p = &preceding_key_on_stack;
+ 
+@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+ 	else
+ 		preceding_key(k, &preceding_key_p);
+ 
+-	m = bch_btree_iter_init(b, &iter, preceding_key_p);
++	m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+ 
+-	if (b->ops->insert_fixup(b, k, &iter, replace_key))
++	if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ 		return status;
+ 
+ 	status = BTREE_INSERT_STATUS_INSERT;
+@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 				 btree_iter_cmp));
+ }
+ 
+-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+-					  struct btree_iter *iter,
+-					  struct bkey *search,
+-					  struct bset_tree *start)
++static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
++						struct btree_iter_stack *iter,
++						struct bkey *search,
++						struct bset_tree *start)
+ {
+ 	struct bkey *ret = NULL;
+ 
+-	iter->size = ARRAY_SIZE(iter->data);
+-	iter->used = 0;
++	iter->iter.size = ARRAY_SIZE(iter->stack_data);
++	iter->iter.used = 0;
+ 
+ #ifdef CONFIG_BCACHE_DEBUG
+-	iter->b = b;
++	iter->iter.b = b;
+ #endif
+ 
+ 	for (; start <= bset_tree_last(b); start++) {
+ 		ret = bch_bset_search(b, start, search);
+-		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
++		bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+ 	}
+ 
+ 	return ret;
+ }
+ 
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				 struct btree_iter_stack *iter,
+ 				 struct bkey *search)
+ {
+-	return __bch_btree_iter_init(b, iter, search, b->set);
++	return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ }
+ 
+ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 			    struct bset_sort_state *state)
+ {
+ 	size_t order = b->page_order, keys = 0;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	int oldsize = bch_count_data(b);
+ 
+-	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
++	__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+ 
+ 	if (start) {
+ 		unsigned int i;
+@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
+ 		order = get_order(__set_bytes(b->set->data, keys));
+ 	}
+ 
+-	__btree_sort(b, &iter, start, order, false, state);
++	__btree_sort(b, &iter.iter, start, order, false, state);
+ 
+ 	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+ }
+@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
+ 			 struct bset_sort_state *state)
+ {
+ 	uint64_t start_time = local_clock();
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+-	bch_btree_iter_init(b, &iter, NULL);
++	bch_btree_iter_stack_init(b, &iter, NULL);
+ 
+-	btree_mergesort(b, new->set->data, &iter, false, true);
++	btree_mergesort(b, new->set->data, &iter.iter, false, true);
+ 
+ 	bch_time_stats_update(&state->time, start_time);
+ 
+diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
+index d795c84246b01..011f6062c4c04 100644
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -321,7 +321,14 @@ struct btree_iter {
+ #endif
+ 	struct btree_iter_set {
+ 		struct bkey *k, *end;
+-	} data[MAX_BSETS];
++	} data[];
++};
++
++/* Fixed-size btree_iter that can be allocated on the stack */
++
++struct btree_iter_stack {
++	struct btree_iter iter;
++	struct btree_iter_set stack_data[MAX_BSETS];
+ };
+ 
+ typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+ 
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+ 			 struct bkey *end);
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-				 struct btree_iter *iter,
+-				 struct bkey *search);
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++				       struct btree_iter_stack *iter,
++				       struct bkey *search);
+ 
+ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+ 			       const struct bkey *search);
+@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
+ 	return search ? __bch_bset_search(b, t, search) : t->data->start;
+ }
+ 
+-#define for_each_key_filter(b, k, iter, filter)				\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
++#define for_each_key_filter(b, k, stack_iter, filter)                      \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL);           \
++	     ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
++					       filter));)
+ 
+-#define for_each_key(b, k, iter)					\
+-	for (bch_btree_iter_init((b), (iter), NULL);			\
+-	     ((k) = bch_btree_iter_next(iter));)
++#define for_each_key(b, k, stack_iter)                           \
++	for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++	     ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+ 
+ /* Sorting */
+ 
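The new pair of types is a general C pattern worth spelling out: once data[] became a flexible array member, struct btree_iter stopped being stack-allocatable, so a wrapper supplies fixed backing storage and callers hand &wrapper.iter to code that only needs the header. A compilable userspace sketch with hypothetical names (iter, iter_stack, MAX_SETS), relying on the same GCC/Clang extension the kernel uses to place a flexible-array struct directly ahead of its backing array:

#include <stddef.h>

#define MAX_SETS 4			/* stands in for MAX_BSETS */

struct iter_set { int *k, *end; };

struct iter {
	size_t size, used;
	struct iter_set data[];		/* flexible array member */
};

struct iter_stack {
	struct iter iter;		/* header */
	struct iter_set stack_data[MAX_SETS];	/* backs iter.data[] */
};

static void iter_stack_init(struct iter_stack *s)
{
	s->iter.size = MAX_SETS;	/* ARRAY_SIZE(stack_data) */
	s->iter.used = 0;
}

This is why every call site in the hunks above either declares a struct btree_iter_stack or dereferences .iter.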
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 196cdacce38f2..d011a7154d330 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
+ 	uint8_t stale = 0;
+ 	unsigned int keys = 0, good_keys = 0;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bset_tree *t;
+ 
+ 	gc->nodes++;
+@@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
+ static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	unsigned int ret = 0;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+@@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ 	int ret = 0;
+ 	bool should_rewrite;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct gc_merge_info r[GC_MERGE_NODES];
+ 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+ 
+-	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
++	bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+ 
+ 	for (i = r; i < r + ARRAY_SIZE(r); i++)
+ 		i->b = ERR_PTR(-EINTR);
+ 
+ 	while (1) {
+-		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
++		k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad);
+ 		if (k) {
+ 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+ 						  true, b);
+@@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ {
+ 	int ret = 0;
+ 	struct bkey *k, *p = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+ 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+ 		bch_initial_mark_key(b->c, b->level, k);
+@@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
+ 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
+ 
+ 	if (b->level) {
+-		bch_btree_iter_init(&b->keys, &iter, NULL);
++		bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+ 
+ 		do {
+-			k = bch_btree_iter_next_filter(&iter, &b->keys,
++			k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad);
+ 			if (k) {
+ 				btree_node_prefetch(b, k);
+@@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg)
+ 	struct btree_check_info *info = arg;
+ 	struct btree_check_state *check_state = info->state;
+ 	struct cache_set *c = check_state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+@@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg)
+ 	ret = 0;
+ 
+ 	/* root node keys are checked before thread created */
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c)
+ 	int ret = 0;
+ 	int i;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct btree_check_state check_state;
+ 
+ 	/* check and mark root node keys */
+@@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ 
+ 	if (b->level) {
+ 		struct bkey *k;
+-		struct btree_iter iter;
++		struct btree_iter_stack iter;
+ 
+-		bch_btree_iter_init(&b->keys, &iter, from);
++		bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
++		while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ 						       bch_ptr_bad))) {
+ 			ret = bcache_btree(map_nodes_recurse, k, b,
+ 				    op, from, fn, flags);
+@@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ {
+ 	int ret = MAP_CONTINUE;
+ 	struct bkey *k;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+-	bch_btree_iter_init(&b->keys, &iter, from);
++	bch_btree_iter_stack_init(&b->keys, &iter, from);
+ 
+-	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
++	while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++					       bch_ptr_bad))) {
+ 		ret = !b->level
+ 			? fn(op, b, k)
+ 			: bcache_btree(map_keys_recurse, k,
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 330bcd9ea4a9c..b0819be0486e6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 	INIT_LIST_HEAD(&c->btree_cache_freed);
+ 	INIT_LIST_HEAD(&c->data_buckets);
+ 
+-	iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
+-		sizeof(struct btree_iter_set);
++	iter_size = sizeof(struct btree_iter) +
++		    ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
++			    sizeof(struct btree_iter_set);
+ 
+ 	c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+ 	if (!c->devices)
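The sizing fix follows directly from that layout: a heap allocation for a flexible-array struct must add the header size explicitly, because n * sizeof(element) alone leaves no room for the header; the old spare "+ 1" element is replaced by the explicit sizeof(struct btree_iter). A sketch reusing the hypothetical types above; in kernel code struct_size(it, data, n) expresses the same sum with overflow checking:

#include <stdlib.h>

static struct iter *iter_alloc(size_t n)
{
	/* header + n trailing elements, the sum the hunk above restores */
	struct iter *it = malloc(sizeof(*it) + n * sizeof(it->data[0]));

	if (it) {
		it->size = n;
		it->used = 0;
	}
	return it;
}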
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 6956beb55326f..826b14cae4e58 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
+ 	unsigned int bytes = 0;
+ 	struct bkey *k;
+ 	struct btree *b;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 
+ 	goto lock_root;
+ 
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 8827a6f130ad7..792e070ccf38b 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
+ 	struct dirty_init_thrd_info *info = arg;
+ 	struct bch_dirty_init_state *state = info->state;
+ 	struct cache_set *c = state->c;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct bkey *k, *p;
+ 	int cur_idx, prev_idx, skip_nr;
+ 
+ 	k = p = NULL;
+ 	prev_idx = 0;
+ 
+-	bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++	bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++	k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ 	BUG_ON(!k);
+ 
+ 	p = k;
+@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
+ 		skip_nr = cur_idx - prev_idx;
+ 
+ 		while (skip_nr) {
+-			k = bch_btree_iter_next_filter(&iter,
++			k = bch_btree_iter_next_filter(&iter.iter,
+ 						       &c->root->keys,
+ 						       bch_ptr_bad);
+ 			if (k)
+@@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
+ 	int i;
+ 	struct btree *b = NULL;
+ 	struct bkey *k = NULL;
+-	struct btree_iter iter;
++	struct btree_iter_stack iter;
+ 	struct sectors_dirty_init op;
+ 	struct cache_set *c = d->c;
+ 	struct bch_dirty_init_state state;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d874abfc18364..2bd1ce9b39226 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -36,7 +36,6 @@
+  */
+ 
+ #include <linux/blkdev.h>
+-#include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/raid/pq.h>
+ #include <linux/async_tx.h>
+@@ -6734,6 +6733,9 @@ static void raid5d(struct md_thread *thread)
+ 		int batch_size, released;
+ 		unsigned int offset;
+ 
++		if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++			break;
++
+ 		released = release_stripe_list(conf, conf->temp_inactive_list);
+ 		if (released)
+ 			clear_bit(R5_DID_ALLOC, &conf->cache_state);
+@@ -6770,18 +6772,7 @@ static void raid5d(struct md_thread *thread)
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_check_recovery(mddev);
+ 			spin_lock_irq(&conf->device_lock);
+-
+-			/*
+-			 * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+-			 * seeing md_check_recovery() is needed to clear
+-			 * the flag when using mdmon.
+-			 */
+-			continue;
+ 		}
+-
+-		wait_event_lock_irq(mddev->sb_wait,
+-			!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+-			conf->device_lock);
+ 	}
+ 	pr_debug("%d stripes handled\n", handled);
+ 
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 2638875924153..231b45632ad5a 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -2176,6 +2176,11 @@ static int lgdt3306a_probe(struct i2c_client *client)
+ 	struct dvb_frontend *fe;
+ 	int ret;
+ 
++	if (!client->dev.platform_data) {
++		dev_err(&client->dev, "platform data is mandatory\n");
++		return -EINVAL;
++	}
++
+ 	config = kmemdup(client->dev.platform_data,
+ 			 sizeof(struct lgdt3306a_config), GFP_KERNEL);
+ 	if (config == NULL) {
+diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
+index 4ebbcf05cc09e..91e9c378397c8 100644
+--- a/drivers/media/dvb-frontends/mxl5xx.c
++++ b/drivers/media/dvb-frontends/mxl5xx.c
+@@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
+ 	u32 nco_count_min = 0;
+ 	u32 clk_type = 0;
+ 
+-	struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 8, 1}, {0x90700010, 9, 1},
+ 		{0x90700010, 10, 1}, {0x90700010, 11, 1},
+ 		{0x90700010, 12, 1}, {0x90700010, 13, 1},
+ 		{0x90700010, 14, 1}, {0x90700010, 15, 1} };
+-	struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 16, 1}, {0x90700010, 17, 1},
+ 		{0x90700010, 18, 1}, {0x90700010, 19, 1},
+ 		{0x90700010, 20, 1}, {0x90700010, 21, 1},
+ 		{0x90700010, 22, 1}, {0x90700010, 23, 1} };
+-	struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700014, 0, 1}, {0x90700014, 1, 1},
+ 		{0x90700014, 2, 1}, {0x90700014, 3, 1},
+ 		{0x90700014, 4, 1}, {0x90700014, 5, 1},
+ 		{0x90700014, 6, 1}, {0x90700014, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700018, 0, 3}, {0x90700018, 4, 3},
+ 		{0x90700018, 8, 3}, {0x90700018, 12, 3},
+ 		{0x90700018, 16, 3}, {0x90700018, 20, 3},
+ 		{0x90700018, 24, 3}, {0x90700018, 28, 3} };
+-	struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 16, 1}, {0x9070000C, 17, 1},
+ 		{0x9070000C, 18, 1}, {0x9070000C, 19, 1},
+ 		{0x9070000C, 20, 1}, {0x9070000C, 21, 1},
+ 		{0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
+-	struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700010, 0, 1}, {0x90700010, 1, 1},
+ 		{0x90700010, 2, 1}, {0x90700010, 3, 1},
+ 		{0x90700010, 4, 1}, {0x90700010, 5, 1},
+ 		{0x90700010, 6, 1}, {0x90700010, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 0, 1}, {0x9070000C, 1, 1},
+ 		{0x9070000C, 2, 1}, {0x9070000C, 3, 1},
+ 		{0x9070000C, 4, 1}, {0x9070000C, 5, 1},
+ 		{0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x9070000C, 24, 1}, {0x9070000C, 25, 1},
+ 		{0x9070000C, 26, 1}, {0x9070000C, 27, 1},
+ 		{0x9070000C, 28, 1}, {0x9070000C, 29, 1},
+ 		{0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
+-	struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700014, 8, 1}, {0x90700014, 9, 1},
+ 		{0x90700014, 10, 1}, {0x90700014, 11, 1},
+ 		{0x90700014, 12, 1}, {0x90700014, 13, 1},
+ 		{0x90700014, 14, 1}, {0x90700014, 15, 1} };
+-	struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x907001D4, 0, 1}, {0x907001D4, 1, 1},
+ 		{0x907001D4, 2, 1}, {0x907001D4, 3, 1},
+ 		{0x907001D4, 4, 1}, {0x907001D4, 5, 1},
+ 		{0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
+-	struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
++	static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ 		{0x90700044, 16, 80}, {0x90700044, 16, 81},
+ 		{0x90700044, 16, 82}, {0x90700044, 16, 83},
+ 		{0x90700044, 16, 84}, {0x90700044, 16, 85},
+diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
+index 552935ccb4a90..57906df7be4ec 100644
+--- a/drivers/media/i2c/ov2740.c
++++ b/drivers/media/i2c/ov2740.c
+@@ -768,14 +768,15 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
+ 	cur_mode = ov2740->cur_mode;
+ 	size = ARRAY_SIZE(link_freq_menu_items);
+ 
+-	ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
+-						   V4L2_CID_LINK_FREQ,
+-						   size - 1, 0,
+-						   link_freq_menu_items);
++	ov2740->link_freq =
++		v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
++				       V4L2_CID_LINK_FREQ, size - 1,
++				       ov2740->supported_modes->link_freq_index,
++				       link_freq_menu_items);
+ 	if (ov2740->link_freq)
+ 		ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ 
+-	pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX);
++	pixel_rate = to_pixel_rate(ov2740->supported_modes->link_freq_index);
+ 	ov2740->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
+ 					       V4L2_CID_PIXEL_RATE, 0,
+ 					       pixel_rate, 1, pixel_rate);
+diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
+index 7f67825c8757f..318e267e798e3 100644
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -245,15 +245,14 @@ int __must_check media_devnode_register(struct media_device *mdev,
+ 	kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
+ 
+ 	/* Part 3: Add the media and char device */
++	set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ 	ret = cdev_device_add(&devnode->cdev, &devnode->dev);
+ 	if (ret < 0) {
++		clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ 		pr_err("%s: cdev_device_add failed\n", __func__);
+ 		goto cdev_add_error;
+ 	}
+ 
+-	/* Part 4: Activate this minor. The char device can now be used. */
+-	set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+-
+ 	return 0;
+ 
+ cdev_add_error:
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index 0e28b9a7936ef..96dd0f6ccd0d0 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -619,6 +619,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ 	link = list_entry(entry->links, typeof(*link), list);
+ 	last_link = media_pipeline_walk_pop(walk);
+ 
++	if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
++		dev_dbg(walk->mdev->dev,
++			"media pipeline: skipping link (not data-link)\n");
++		return 0;
++	}
++
+ 	dev_dbg(walk->mdev->dev,
+ 		"media pipeline: exploring link '%s':%u -> '%s':%u\n",
+ 		link->source->entity->name, link->source->index,
+diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
+index 9bcf10a77fd38..97c833a8deb3b 100644
+--- a/drivers/media/pci/mgb4/mgb4_core.c
++++ b/drivers/media/pci/mgb4/mgb4_core.c
+@@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev)
+ 	struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
+ 	int i;
+ 
+-#ifdef CONFIG_DEBUG_FS
+-	debugfs_remove_recursive(mgbdev->debugfs);
+-#endif
+ #if IS_REACHABLE(CONFIG_HWMON)
+ 	hwmon_device_unregister(mgbdev->hwmon_dev);
+ #endif
+@@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev)
+ 		if (mgbdev->vin[i])
+ 			mgb4_vin_free(mgbdev->vin[i]);
+ 
++#ifdef CONFIG_DEBUG_FS
++	debugfs_remove_recursive(mgbdev->debugfs);
++#endif
++
+ 	device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
+ 	free_spi(mgbdev);
+ 	free_i2c(mgbdev);
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index 3ec323bd528b1..4bb073587817c 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -563,6 +563,7 @@ void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ {
+ 	INIT_LIST_HEAD(&notifier->waiting_list);
+ 	INIT_LIST_HEAD(&notifier->done_list);
++	INIT_LIST_HEAD(&notifier->notifier_entry);
+ 	notifier->v4l2_dev = v4l2_dev;
+ }
+ EXPORT_SYMBOL(v4l2_async_nf_init);
+@@ -572,6 +573,7 @@ void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ {
+ 	INIT_LIST_HEAD(&notifier->waiting_list);
+ 	INIT_LIST_HEAD(&notifier->done_list);
++	INIT_LIST_HEAD(&notifier->notifier_entry);
+ 	notifier->sd = sd;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
+@@ -618,16 +620,10 @@ static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+ 
+ int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+ {
+-	int ret;
+-
+ 	if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
+ 		return -EINVAL;
+ 
+-	ret = __v4l2_async_nf_register(notifier);
+-	if (ret)
+-		notifier->v4l2_dev = NULL;
+-
+-	return ret;
++	return __v4l2_async_nf_register(notifier);
+ }
+ EXPORT_SYMBOL(v4l2_async_nf_register);
+ 
+@@ -639,7 +635,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+ 
+ 	v4l2_async_nf_unbind_all_subdevs(notifier);
+ 
+-	list_del(&notifier->notifier_entry);
++	list_del_init(&notifier->notifier_entry);
+ }
+ 
+ void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index d13954bd31fd9..bae73b8c52ff5 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -1036,8 +1036,10 @@ int __video_register_device(struct video_device *vdev,
+ 	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ 	vdev->dev.parent = vdev->dev_parent;
+ 	dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
++	mutex_lock(&videodev_lock);
+ 	ret = device_register(&vdev->dev);
+ 	if (ret < 0) {
++		mutex_unlock(&videodev_lock);
+ 		pr_err("%s: device_register failed\n", __func__);
+ 		goto cleanup;
+ 	}
+@@ -1057,6 +1059,7 @@ int __video_register_device(struct video_device *vdev,
+ 
+ 	/* Part 6: Activate this minor. The char device can now be used. */
+ 	set_bit(V4L2_FL_REGISTERED, &vdev->flags);
++	mutex_unlock(&videodev_lock);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index 39f45c2b6de8a..8791656e9e20d 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -221,6 +221,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ }
+ EXPORT_SYMBOL(mmc_gpiod_request_cd);
+ 
++/**
++ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
++ * @host: mmc host
++ * @config: Generic pinconf config (from pinconf_to_config_packed())
++ *
++ * This can be used by mmc host drivers to fix up a card-detection GPIO's config
++ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
++ * through mmc_gpiod_request_cd().
++ *
++ * Returns:
++ * 0 on success, or a negative errno value on error.
++ */
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
++{
++	struct mmc_gpio *ctx = host->slot.handler_priv;
++
++	return gpiod_set_config(ctx->cd_gpio, config);
++}
++EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
++
+ bool mmc_can_gpio_cd(struct mmc_host *host)
+ {
+ 	struct mmc_gpio *ctx = host->slot.handler_priv;
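A hedged sketch of the intended calling sequence for the new export, using only the helpers visible in this patch (the sdhci-acpi hunk further below is the first real user, applying a 20 kohm pull-up on the Asus T100TA):

	err = mmc_gpiod_request_cd(host->mmc, NULL, 0, false, 0);
	if (!err)
		/* firmware left the CD line NoPull; re-apply the pull-up */
		err = mmc_gpiod_set_cd_config(host->mmc,
				PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));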
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index 8bd938919687b..d7427894e0bc9 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -1337,7 +1337,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
+ 	return ret;
+ }
+ 
+-static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
++static void davinci_mmcsd_remove(struct platform_device *pdev)
+ {
+ 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ 
+@@ -1392,7 +1392,7 @@ static struct platform_driver davinci_mmcsd_driver = {
+ 		.of_match_table = davinci_mmc_dt_ids,
+ 	},
+ 	.probe		= davinci_mmcsd_probe,
+-	.remove_new	= __exit_p(davinci_mmcsd_remove),
++	.remove_new	= davinci_mmcsd_remove,
+ 	.id_table	= davinci_mmc_devtype,
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index acf5fc3ad7e41..eb8f427f9770d 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -10,6 +10,7 @@
+ #include <linux/export.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/pinctrl/pinconf-generic.h>
+ #include <linux/platform_device.h>
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+@@ -80,6 +81,8 @@ struct sdhci_acpi_host {
+ enum {
+ 	DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP			= BIT(0),
+ 	DMI_QUIRK_SD_NO_WRITE_PROTECT				= BIT(1),
++	DMI_QUIRK_SD_CD_ACTIVE_HIGH				= BIT(2),
++	DMI_QUIRK_SD_CD_ENABLE_PULL_UP				= BIT(3),
+ };
+ 
+ static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+@@ -719,7 +722,28 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+ 
++/* Please keep this list sorted alphabetically */
+ static const struct dmi_system_id sdhci_acpi_quirks[] = {
++	{
++		/*
++		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
++		 * reports the card being write-protected even though microSD
++		 * cards do not have a write-protect switch at all.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++	},
++	{
++		/* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
++	},
+ 	{
+ 		/*
+ 		 * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+@@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ 	},
+ 	{
+ 		/*
+-		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+-		 * reports the card being write-protected even though microSD
+-		 * cards do not have a write-protect switch at all.
++		 * The Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version)
++		 * has broken WP reporting and an inverted CD signal.
++		 * Note this has more or less the same BIOS as the Lenovo Yoga
++		 * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
++		 * the 830 / 1050 models, which share the same mainboard, this
++		 * model has a different mainboard, and the inverted CD and
++		 * broken WP are unique to this board.
+ 		 */
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
++			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
++			/* Full match so as to NOT match the 830/1050 BIOS */
++			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
+ 		},
+-		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++		.driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
++					DMI_QUIRK_SD_CD_ACTIVE_HIGH),
+ 	},
+ 	{
+ 		/*
+@@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
+ 		},
+ 		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ 	},
++	{
++		/*
++		 * The Toshiba WT10-A's microSD slot always reports the card being
++		 * write-protected.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
++		},
++		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+@@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
+ 		bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+ 
++		if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
++			host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
++
+ 		err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
+ 		if (err) {
+ 			if (err == -EPROBE_DEFER)
+ 				goto err_free;
+ 			dev_warn(dev, "failed to setup card detect gpio\n");
+ 			c->use_runtime_pm = false;
++		} else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
++			mmc_gpiod_set_cd_config(host->mmc,
++						PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
+ 		}
+ 
+ 		if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c79f73459915d..746f4cf7ab033 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3439,12 +3439,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ 		host->data->error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
+-	} else if ((intmask & SDHCI_INT_DATA_CRC) &&
++	} else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
+ 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+ 			!= MMC_BUS_TEST_R) {
+ 		host->data->error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
++		if (intmask & SDHCI_INT_TUNING_ERROR) {
++			u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++
++			ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
++			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++		}
+ 	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ 		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+ 		       intmask);
+@@ -3979,7 +3985,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+ 	} else
+ 		*cmd_error = 0;
+ 
+-	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
++	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
+ 		*data_error = -EILSEQ;
+ 		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
+ 			sdhci_err_stats_inc(host, DAT_CRC);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index a20864fc06412..957c7a917ffb7 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -158,6 +158,7 @@
+ #define  SDHCI_INT_BUS_POWER	0x00800000
+ #define  SDHCI_INT_AUTO_CMD_ERR	0x01000000
+ #define  SDHCI_INT_ADMA_ERROR	0x02000000
++#define  SDHCI_INT_TUNING_ERROR	0x04000000
+ 
+ #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
+ #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
+@@ -169,7 +170,7 @@
+ 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
+ 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
+ 		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
+-		SDHCI_INT_BLK_GAP)
++		SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
+ #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
+ 
+ #define SDHCI_CQE_INT_ERR_MASK ( \
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 2c5ed0a7cb18c..bceda85f0dcf6 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -6477,16 +6477,16 @@ static int __init bonding_init(void)
+ 	if (res)
+ 		goto out;
+ 
++	bond_create_debugfs();
++
+ 	res = register_pernet_subsys(&bond_net_ops);
+ 	if (res)
+-		goto out;
++		goto err_net_ops;
+ 
+ 	res = bond_netlink_init();
+ 	if (res)
+ 		goto err_link;
+ 
+-	bond_create_debugfs();
+-
+ 	for (i = 0; i < max_bonds; i++) {
+ 		res = bond_create(&init_net, NULL);
+ 		if (res)
+@@ -6501,10 +6501,11 @@ static int __init bonding_init(void)
+ out:
+ 	return res;
+ err:
+-	bond_destroy_debugfs();
+ 	bond_netlink_fini();
+ err_link:
+ 	unregister_pernet_subsys(&bond_net_ops);
++err_net_ops:
++	bond_destroy_debugfs();
+ 	goto out;
+ 
+ }
+@@ -6513,11 +6514,11 @@ static void __exit bonding_exit(void)
+ {
+ 	unregister_netdevice_notifier(&bond_netdev_notifier);
+ 
+-	bond_destroy_debugfs();
+-
+ 	bond_netlink_fini();
+ 	unregister_pernet_subsys(&bond_net_ops);
+ 
++	bond_destroy_debugfs();
++
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ 	/* Make sure we don't have an imbalance on our netpoll blocking */
+ 	WARN_ON(atomic_read(&netpoll_block_tx));
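The reordering keeps the module-init invariant that error labels undo completed steps in reverse; a condensed sketch with hypothetical step names showing why moving bond_create_debugfs() up also moves its teardown to the bottom of the unwind chain:

static int __init example_init(void)
{
	int res;

	create_debugfs();		/* first, cannot fail */

	res = register_pernet();	/* second */
	if (res)
		goto err_net_ops;

	res = netlink_init();		/* third */
	if (res)
		goto err_link;

	return 0;

err_link:
	unregister_pernet();		/* undo second */
err_net_ops:
	destroy_debugfs();		/* undo first, last in the chain */
	return res;
}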
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 6b64f28a9174d..b2d054f59f30f 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
+ 	struct vxlan_fdb *f;
+ 	u32 ifindex = 0;
+ 
++	/* Ignore packets from an invalid source address */
++	if (!is_valid_ether_addr(src_mac))
++		return true;
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (src_ip->sa.sa_family == AF_INET6 &&
+ 	    (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
+@@ -1615,10 +1619,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+ 		return false;
+ 
+-	/* Ignore packets from invalid src-address */
+-	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+-		return false;
+-
+ 	/* Get address from the outer IP header */
+ 	if (vxlan_get_sk_family(vs) == AF_INET) {
+ 		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
+index e6ea884cafc19..4f385f4a8cef2 100644
+--- a/drivers/net/wireless/ath/ath10k/Kconfig
++++ b/drivers/net/wireless/ath/ath10k/Kconfig
+@@ -45,6 +45,7 @@ config ATH10K_SNOC
+ 	depends on ATH10K
+ 	depends on ARCH_QCOM || COMPILE_TEST
+ 	depends on QCOM_SMEM
++	depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
+ 	select QCOM_SCM
+ 	select QCOM_QMI_HELPERS
+ 	help
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index fd92d23c43d91..16d884a3d87df 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -122,6 +122,15 @@ enum rtl8xxxu_rx_type {
+ 	RX_TYPE_ERROR = -1
+ };
+ 
++enum rtl8xxxu_rx_desc_enc {
++	RX_DESC_ENC_NONE	= 0,
++	RX_DESC_ENC_WEP40	= 1,
++	RX_DESC_ENC_TKIP_WO_MIC	= 2,
++	RX_DESC_ENC_TKIP_MIC	= 3,
++	RX_DESC_ENC_AES		= 4,
++	RX_DESC_ENC_WEP104	= 5,
++};
++
+ struct rtl8xxxu_rxdesc16 {
+ #ifdef __LITTLE_ENDIAN
+ 	u32 pktlen:14;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 4a49f8f9d80f2..fbbc97161f5df 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1505,13 +1505,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 	u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+ 	u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+ 	u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+-	u8 val8;
++	u8 val8, base;
+ 	int group, i;
+ 
+ 	group = rtl8xxxu_gen1_channel_to_group(channel);
+ 
+-	cck[0] = priv->cck_tx_power_index_A[group] - 1;
+-	cck[1] = priv->cck_tx_power_index_B[group] - 1;
++	cck[0] = priv->cck_tx_power_index_A[group];
++	cck[1] = priv->cck_tx_power_index_B[group];
+ 
+ 	if (priv->hi_pa) {
+ 		if (cck[0] > 0x20)
+@@ -1522,10 +1522,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 
+ 	ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+ 	ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+-	if (ofdm[0])
+-		ofdm[0] -= 1;
+-	if (ofdm[1])
+-		ofdm[1] -= 1;
+ 
+ 	ofdmbase[0] = ofdm[0] +	priv->ofdm_tx_power_index_diff[group].a;
+ 	ofdmbase[1] = ofdm[1] +	priv->ofdm_tx_power_index_diff[group].b;
+@@ -1614,20 +1610,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+ 
+ 	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+ 			 mcs_a + power_base->reg_0e1c);
++	val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
+ 	for (i = 0; i < 3; i++) {
+-		if (i != 2)
+-			val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+-		else
+-			val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
++		base = i != 2 ? 8 : 6;
++		val8 = max_t(int, val8 - base, 0);
+ 		rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+ 	}
++
+ 	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+ 			 mcs_b + power_base->reg_0868);
++	val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
+ 	for (i = 0; i < 3; i++) {
+-		if (i != 2)
+-			val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+-		else
+-			val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
++		base = i != 2 ? 8 : 6;
++		val8 = max_t(int, val8 - base, 0);
+ 		rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+ 	}
+ }
+@@ -6473,7 +6468,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ 			rx_status->mactime = rx_desc->tsfl;
+ 			rx_status->flag |= RX_FLAG_MACTIME_START;
+ 
+-			if (!rx_desc->swdec)
++			if (!rx_desc->swdec &&
++			    rx_desc->security != RX_DESC_ENC_NONE)
+ 				rx_status->flag |= RX_FLAG_DECRYPTED;
+ 			if (rx_desc->crc32)
+ 				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+@@ -6578,7 +6574,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ 			rx_status->mactime = rx_desc->tsfl;
+ 			rx_status->flag |= RX_FLAG_MACTIME_START;
+ 
+-			if (!rx_desc->swdec)
++			if (!rx_desc->swdec &&
++			    rx_desc->security != RX_DESC_ENC_NONE)
+ 				rx_status->flag |= RX_FLAG_DECRYPTED;
+ 			if (rx_desc->crc32)
+ 				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+@@ -7998,6 +7995,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
+ 	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
++	ieee80211_hw_set(hw, MFP_CAPABLE);
+ 
+ 	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+index d835a27429f0f..56b5cd032a9ac 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+@@ -892,8 +892,8 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+ 	u8 place = chnl;
+ 
+ 	if (chnl > 14) {
+-		for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
+-			if (channel5g[place] == chnl) {
++		for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
++			if (channel_all[place] == chnl) {
+ 				place++;
+ 				break;
+ 			}
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+index 192982ec8152d..cbc7b4dbea9ad 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+@@ -35,7 +35,7 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
+ 
+ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 				       struct rtl_stats *pstats,
+-				       struct rx_desc_92d *pdesc,
++				       __le32 *pdesc,
+ 				       struct rx_fwinfo_92d *p_drvinfo,
+ 				       bool packet_match_bssid,
+ 				       bool packet_toself,
+@@ -50,8 +50,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 	u8 i, max_spatial_stream;
+ 	u32 rssi, total_rssi = 0;
+ 	bool is_cck_rate;
++	u8 rxmcs;
+ 
+-	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
++	rxmcs = get_rx_desc_rxmcs(pdesc);
++	is_cck_rate = rxmcs <= DESC_RATE11M;
+ 	pstats->packet_matchbssid = packet_match_bssid;
+ 	pstats->packet_toself = packet_toself;
+ 	pstats->packet_beacon = packet_beacon;
+@@ -157,8 +159,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ 		pstats->rx_pwdb_all = pwdb_all;
+ 		pstats->rxpower = rx_pwr_all;
+ 		pstats->recvsignalpower = rx_pwr_all;
+-		if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
+-		    pdesc->rxmcs <= DESC_RATEMCS15)
++		if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
++		    rxmcs <= DESC_RATEMCS15)
+ 			max_spatial_stream = 2;
+ 		else
+ 			max_spatial_stream = 1;
+@@ -364,7 +366,7 @@ static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
+ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ 					       struct sk_buff *skb,
+ 					       struct rtl_stats *pstats,
+-					       struct rx_desc_92d *pdesc,
++					       __le32 *pdesc,
+ 					       struct rx_fwinfo_92d *p_drvinfo)
+ {
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+@@ -413,7 +415,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	stats->icv = (u16)get_rx_desc_icv(pdesc);
+ 	stats->crc = (u16)get_rx_desc_crc32(pdesc);
+ 	stats->hwerror = (stats->crc | stats->icv);
+-	stats->decrypted = !get_rx_desc_swdec(pdesc);
++	stats->decrypted = !get_rx_desc_swdec(pdesc) &&
++			   get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
+ 	stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ 	stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ 	stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+@@ -426,8 +429,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	rx_status->band = hw->conf.chandef.chan->band;
+ 	if (get_rx_desc_crc32(pdesc))
+ 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+-	if (!get_rx_desc_swdec(pdesc))
+-		rx_status->flag |= RX_FLAG_DECRYPTED;
+ 	if (get_rx_desc_bw(pdesc))
+ 		rx_status->bw = RATE_INFO_BW_40;
+ 	if (get_rx_desc_rxht(pdesc))
+@@ -441,9 +442,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,	struct rtl_stats *stats,
+ 	if (phystatus) {
+ 		p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ 						     stats->rx_bufshift);
+-		_rtl92de_translate_rx_signal_stuff(hw,
+-						   skb, stats,
+-						   (struct rx_desc_92d *)pdesc,
++		_rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ 						   p_drvinfo);
+ 	}
+ 	/*rx_status->qual = stats->signal; */
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+index 2992668c156c6..2d4887490f00f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+@@ -14,6 +14,15 @@
+ #define USB_HWDESC_HEADER_LEN			32
+ #define CRCLENGTH				4
+ 
++enum rtl92d_rx_desc_enc {
++	RX_DESC_ENC_NONE	= 0,
++	RX_DESC_ENC_WEP40	= 1,
++	RX_DESC_ENC_TKIP_WO_MIC	= 2,
++	RX_DESC_ENC_TKIP_MIC	= 3,
++	RX_DESC_ENC_AES		= 4,
++	RX_DESC_ENC_WEP104	= 5,
++};
++
+ /* macros to read/write various fields in RX or TX descriptors */
+ 
+ static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+@@ -246,6 +255,11 @@ static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+ 	return le32_get_bits(*__pdesc, GENMASK(19, 16));
+ }
+ 
++static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
++{
++	return le32_get_bits(*__pdesc, GENMASK(22, 20));
++}
++
+ static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+ {
+ 	return le32_get_bits(*__pdesc, GENMASK(25, 24));
+@@ -380,10 +394,17 @@ struct rx_fwinfo_92d {
+ 	u8 csi_target[2];
+ 	u8 sigevm;
+ 	u8 max_ex_pwr;
++#ifdef __LITTLE_ENDIAN
+ 	u8 ex_intf_flag:1;
+ 	u8 sgi_en:1;
+ 	u8 rxsc:2;
+ 	u8 reserve:4;
++#else
++	u8 reserve:4;
++	u8 rxsc:2;
++	u8 sgi_en:1;
++	u8 ex_intf_flag:1;
++#endif
+ } __packed;
+ 
+ struct tx_desc_92d {
+@@ -488,64 +509,6 @@ struct tx_desc_92d {
+ 	u32 reserve_pass_pcie_mm_limit[4];
+ } __packed;
+ 
+-struct rx_desc_92d {
+-	u32 length:14;
+-	u32 crc32:1;
+-	u32 icverror:1;
+-	u32 drv_infosize:4;
+-	u32 security:3;
+-	u32 qos:1;
+-	u32 shift:2;
+-	u32 phystatus:1;
+-	u32 swdec:1;
+-	u32 lastseg:1;
+-	u32 firstseg:1;
+-	u32 eor:1;
+-	u32 own:1;
+-
+-	u32 macid:5;
+-	u32 tid:4;
+-	u32 hwrsvd:5;
+-	u32 paggr:1;
+-	u32 faggr:1;
+-	u32 a1_fit:4;
+-	u32 a2_fit:4;
+-	u32 pam:1;
+-	u32 pwr:1;
+-	u32 moredata:1;
+-	u32 morefrag:1;
+-	u32 type:2;
+-	u32 mc:1;
+-	u32 bc:1;
+-
+-	u32 seq:12;
+-	u32 frag:4;
+-	u32 nextpktlen:14;
+-	u32 nextind:1;
+-	u32 rsvd:1;
+-
+-	u32 rxmcs:6;
+-	u32 rxht:1;
+-	u32 amsdu:1;
+-	u32 splcp:1;
+-	u32 bandwidth:1;
+-	u32 htc:1;
+-	u32 tcpchk_rpt:1;
+-	u32 ipcchk_rpt:1;
+-	u32 tcpchk_valid:1;
+-	u32 hwpcerr:1;
+-	u32 hwpcind:1;
+-	u32 iv0:16;
+-
+-	u32 iv1;
+-
+-	u32 tsfl;
+-
+-	u32 bufferaddress;
+-	u32 bufferaddress64;
+-
+-} __packed;
+-
+ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
+ 			  struct ieee80211_hdr *hdr, u8 *pdesc,
+ 			  u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 31d1ffb16e83e..c4707a036b005 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
+ 	u8 sifs;
+ 
+ 	slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+-	sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
++	sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
+ 
+ 	return aifsn * slot_time + sifs;
+ }
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 19001130ad943..0afe22e568f43 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -1089,7 +1089,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
+ 
+ 	spin_lock_bh(&rtwpci->trx_lock);
+ 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+-	cnt = min(cnt, wd_ring->curr_num);
++	if (txch != RTW89_TXCH_CH12)
++		cnt = min(cnt, wd_ring->curr_num);
+ 	spin_unlock_bh(&rtwpci->trx_lock);
+ 
+ 	return cnt;
+diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
+index badc68bbae8cc..47d19f7e295a7 100644
+--- a/drivers/platform/chrome/cros_ec.c
++++ b/drivers/platform/chrome/cros_ec.c
+@@ -432,6 +432,12 @@ static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
+ void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
+ {
+ 	cros_ec_send_resume_event(ec_dev);
++
++	/*
++	 * Let the mfd devices know about events that occur during
++	 * suspend. This way the clients know what to do with them.
++	 */
++	cros_ec_report_events_during_suspend(ec_dev);
+ }
+ EXPORT_SYMBOL(cros_ec_resume_complete);
+ 
+@@ -442,12 +448,6 @@ static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
+ 
+ 	if (ec_dev->wake_enabled)
+ 		disable_irq_wake(ec_dev->irq);
+-
+-	/*
+-	 * Let the mfd devices know about events that occur during
+-	 * suspend. This way the clients know what to do with them.
+-	 */
+-	cros_ec_report_events_during_suspend(ec_dev);
+ }
+ 
+ /**
+@@ -475,8 +475,8 @@ EXPORT_SYMBOL(cros_ec_resume_early);
+  */
+ int cros_ec_resume(struct cros_ec_device *ec_dev)
+ {
+-	cros_ec_enable_irq(ec_dev);
+-	cros_ec_send_resume_event(ec_dev);
++	cros_ec_resume_early(ec_dev);
++	cros_ec_resume_complete(ec_dev);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(cros_ec_resume);
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index f13837907bd5e..ca7e626a97a4b 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -1129,7 +1129,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+  */
+ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
+ {
+-	int a, i, z;
++	unsigned long a, i, z;
+ 	char *np, sign;
+ 
+ 	/* bits needs to be a multiple of 8 */
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 3e0c0381277ac..f0464db3f9de9 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ 		if (result < SCSI_VPD_HEADER_SIZE)
+ 			return 0;
+ 
++		if (result > sizeof(vpd)) {
++			dev_warn_once(&sdev->sdev_gendev,
++				      "%s: long VPD page 0 length: %d bytes\n",
++				      __func__, result);
++			result = sizeof(vpd);
++		}
++
+ 		result -= SCSI_VPD_HEADER_SIZE;
+ 		if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ 			return 0;
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index a5fd68411bed5..86fb2cd4f484d 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -1,6 +1,10 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+ 
++#include <linux/bitfield.h>
+ #include <linux/debugfs.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -17,6 +21,8 @@
+ #define MAX_SLV_ID		8
+ #define SLAVE_ID_MASK		0x7
+ #define SLAVE_ID_SHIFT		16
++#define SLAVE_ID(addr)		FIELD_GET(GENMASK(19, 16), addr)
++#define VRM_ADDR(addr)		FIELD_GET(GENMASK(19, 4), addr)
+ 
+ /**
+  * struct entry_header: header for each entry in cmddb
+@@ -220,6 +226,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
+ }
+ EXPORT_SYMBOL_GPL(cmd_db_read_aux_data);
+ 
++/**
++ * cmd_db_match_resource_addr() - Check whether two resource addresses refer to the same resource
++ *
++ * @addr1: Resource address to compare
++ * @addr2: Resource address to compare
++ *
++ * Return: true if two addresses refer to the same resource, false otherwise
++ */
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{
++	/*
++	 * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
++	 * aligned addresses associated with it. Ignore the offset to check
++	 * for VRM requests.
++	 */
++	if (addr1 == addr2)
++		return true;
++	else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
++		return true;
++
++	return false;
++}
++EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
++
+ /**
+  * cmd_db_read_slave_id - Get the slave ID for a given resource address
+  *
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index a021dc71807be..daf64be966fe1 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ 
+ #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+@@ -557,7 +558,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ 		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ 			addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
+ 			for (k = 0; k < msg->num_cmds; k++) {
+-				if (addr == msg->cmds[k].addr)
++				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
+ 					return -EBUSY;
+ 			}
+ 		}
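A worked example of what the relaxed comparison accepts, assuming CMD_DB_HW_VRM is the slave ID 0x4 carried in bits 19:16 (as the FIELD_GET() masks above imply) and hypothetical addresses:

	/* VRM slave ID 0x4; bits 3:0 are the per-resource offset */
	cmd_db_match_resource_addr(0x40c10, 0x40c14);	/* true: VRM_ADDR is 0x40c1 for both */
	cmd_db_match_resource_addr(0x40c10, 0x40c20);	/* false: VRM_ADDR 0x40c1 vs 0x40c2 */

so an in-flight TCS command touching any of a VRM resource's contiguous addresses now blocks a new request to the same accelerator.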
+diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
+index f6edb12ec0041..5225b3621a56c 100644
+--- a/drivers/thermal/qcom/lmh.c
++++ b/drivers/thermal/qcom/lmh.c
+@@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev)
+ 	unsigned int enable_alg;
+ 	u32 node_id;
+ 
++	if (!qcom_scm_is_available())
++		return -EPROBE_DEFER;
++
+ 	lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
+ 	if (!lmh_data)
+ 		return -ENOMEM;
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index ebc9aeffdde7c..ac41f8f37589f 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -2276,7 +2276,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (info->var.xres_virtual > 0x1000)
+ 		info->var.xres_virtual = 0x1000;
+ #endif
+-	savagefb_check_var(&info->var, info);
++	err = savagefb_check_var(&info->var, info);
++	if (err)
++		goto failed;
++
+ 	savagefb_set_fix(info);
+ 
+ 	/*
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 9215793a1c814..4895a69015a8e 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -59,6 +59,8 @@
+ #define PON_REASON_EOF_NUM	0xCCCCBBBB
+ #define RESERVED_MEM_MIN_SIZE	12
+ 
++#define MAX_HW_ERROR		250
++
+ static int heartbeat = DEFAULT_HEARTBEAT;
+ 
+ /*
+@@ -97,7 +99,7 @@ static int rti_wdt_start(struct watchdog_device *wdd)
+	 * to be 50% or less than that; we obviously want to configure the open
+ 	 * window as large as possible so we select the 50% option.
+ 	 */
+-	wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
++	wdd->min_hw_heartbeat_ms = 520 * wdd->timeout + MAX_HW_ERROR;
+ 
+ 	/* Generate NMI when wdt expires */
+ 	writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+@@ -131,31 +133,33 @@ static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
+ 	 * be petted during the open window; not too early or not too late.
+ 	 * The HW configuration options only allow for the open window size
+ 	 * to be 50% or less than that.
++	 * To avoid any glitches, we accommodate a 2% + max hardware
++	 * error safety margin.
+ 	 */
+ 	switch (wsize) {
+ 	case RTIWWDSIZE_50P:
+-		/* 50% open window => 50% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 500 * heartbeat;
++		/* 50% open window => 52% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 520 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_25P:
+-		/* 25% open window => 75% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 750 * heartbeat;
++		/* 25% open window => 77% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 770 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_12P5:
+-		/* 12.5% open window => 87.5% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 875 * heartbeat;
++		/* 12.5% open window => 89.5% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 895 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_6P25:
+-		/* 6.5% open window => 93.5% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 935 * heartbeat;
++		/* 6.5% open window => 95.5% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 955 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	case RTIWWDSIZE_3P125:
+-		/* 3.125% open window => 96.9% min heartbeat */
+-		wdd->min_hw_heartbeat_ms = 969 * heartbeat;
++		/* 3.125% open window => 98.9% min heartbeat */
++		wdd->min_hw_heartbeat_ms = 989 * heartbeat + MAX_HW_ERROR;
+ 		break;
+ 
+ 	default:
+@@ -233,14 +237,6 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * If watchdog is running at 32k clock, it is not accurate.
+-	 * Adjust frequency down in this case so that we don't pet
+-	 * the watchdog too often.
+-	 */
+-	if (wdt->freq < 32768)
+-		wdt->freq = wdt->freq * 9 / 10;
+-
+ 	pm_runtime_enable(dev);
+ 	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
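To see what the new margins work out to, here is a throwaway sketch (heartbeat in seconds, result in milliseconds, mirroring the 250 ms MAX_HW_ERROR bound added above):

#include <stdio.h>

#define MAX_HW_ERROR	250	/* ms, as defined in the hunk above */

int main(void)
{
	unsigned int heartbeat = 60;	/* seconds */
	/* 52%, 77%, 89.5%, 95.5%, 98.9% expressed per-mille */
	unsigned int permille[] = { 520, 770, 895, 955, 989 };

	for (int i = 0; i < 5; i++)
		printf("min_hw_heartbeat_ms = %u\n",
		       permille[i] * heartbeat + MAX_HW_ERROR);
	return 0;
}

For a 60 s timeout with the 50% window, the floor moves from 30000 ms to 31450 ms, keeping pets out of the closed window even with a slow watchdog clock.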
+diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
+index f16f735816349..01338d4c2d9e6 100644
+--- a/fs/9p/vfs_dentry.c
++++ b/fs/9p/vfs_dentry.c
+@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
+ static void v9fs_dentry_release(struct dentry *dentry)
+ {
+ 	struct hlist_node *p, *n;
++	struct hlist_head head;
+ 
+ 	p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
+ 		 dentry, dentry);
+-	hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
++
++	spin_lock(&dentry->d_lock);
++	hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
++	spin_unlock(&dentry->d_lock);
++
++	hlist_for_each_safe(p, n, &head)
+ 		p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
+-	dentry->d_fsdata = NULL;
+ }
+ 
+ static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
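The fix is the classic detach-under-lock shape: move the whole list to a private head while holding d_lock, then put the fids with the lock dropped. A userspace analogue of the same shape, with a pthread mutex standing in for the spinlock (a sketch, not v9fs code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fid { struct fid *next; int id; };

static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fid *dentry_fids;

static void release_fids(void)
{
	struct fid *head;

	/* Detach the entire list atomically; racers now see it empty. */
	pthread_mutex_lock(&d_lock);
	head = dentry_fids;
	dentry_fids = NULL;
	pthread_mutex_unlock(&d_lock);

	/* Safe to walk and free without the lock held. */
	while (head) {
		struct fid *next = head->next;

		printf("put fid %d\n", head->id);
		free(head);
		head = next;
	}
}

int main(void)
{
	struct fid *f = calloc(1, sizeof(*f));

	f->id = 42;
	dentry_fids = f;
	release_fids();
	return 0;
}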
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 97f50e9fd9eb0..297487ee83231 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
+ 		put_page(page);
+ 		if (ret < 0)
+ 			return ret;
++
++		/* Don't cross a backup volume mountpoint from a backup volume */
++		if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
++		    ctx->type == AFSVL_BACKVOL)
++			return -ENODEV;
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3df5477d48a81..3a47eec87b263 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4544,18 +4544,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ 				       struct btrfs_fs_info *fs_info)
+ {
+ 	struct rb_node *node;
+-	struct btrfs_delayed_ref_root *delayed_refs;
++	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
+ 	struct btrfs_delayed_ref_node *ref;
+ 
+-	delayed_refs = &trans->delayed_refs;
+-
+ 	spin_lock(&delayed_refs->lock);
+-	if (atomic_read(&delayed_refs->num_entries) == 0) {
+-		spin_unlock(&delayed_refs->lock);
+-		btrfs_debug(fs_info, "delayed_refs has NO entry");
+-		return;
+-	}
+-
+ 	while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
+ 		struct btrfs_delayed_ref_head *head;
+ 		struct rb_node *n;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 87f487b116577..41173701f1bef 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3662,6 +3662,8 @@ static struct extent_buffer *grab_extent_buffer(
+ 	struct folio *folio = page_folio(page);
+ 	struct extent_buffer *exists;
+ 
++	lockdep_assert_held(&page->mapping->i_private_lock);
++
+ 	/*
+ 	 * For subpage case, we completely rely on radix tree to ensure we
+ 	 * don't try to insert two ebs for the same bytenr.  So here we always
+@@ -3729,13 +3731,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+  * The caller needs to free the existing folios and retry using the same order.
+  */
+ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
++				      struct btrfs_subpage *prealloc,
+ 				      struct extent_buffer **found_eb_ret)
+ {
+ 
+ 	struct btrfs_fs_info *fs_info = eb->fs_info;
+ 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ 	const unsigned long index = eb->start >> PAGE_SHIFT;
+-	struct folio *existing_folio;
++	struct folio *existing_folio = NULL;
+ 	int ret;
+ 
+ 	ASSERT(found_eb_ret);
+@@ -3747,12 +3750,14 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ 	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
+ 				GFP_NOFS | __GFP_NOFAIL);
+ 	if (!ret)
+-		return 0;
++		goto finish;
+ 
+ 	existing_folio = filemap_lock_folio(mapping, index + i);
+ 	/* The page cache only exists for a very short time, just retry. */
+-	if (IS_ERR(existing_folio))
++	if (IS_ERR(existing_folio)) {
++		existing_folio = NULL;
+ 		goto retry;
++	}
+ 
+ 	/* For now, we should only have single-page folios for btree inode. */
+ 	ASSERT(folio_nr_pages(existing_folio) == 1);
+@@ -3763,14 +3768,13 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ 		return -EAGAIN;
+ 	}
+ 
+-	if (fs_info->nodesize < PAGE_SIZE) {
+-		/*
+-		 * We're going to reuse the existing page, can drop our page
+-		 * and subpage structure now.
+-		 */
++finish:
++	spin_lock(&mapping->i_private_lock);
++	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
++		/* We're going to reuse the existing page, can drop our folio now. */
+ 		__free_page(folio_page(eb->folios[i], 0));
+ 		eb->folios[i] = existing_folio;
+-	} else {
++	} else if (existing_folio) {
+ 		struct extent_buffer *existing_eb;
+ 
+ 		existing_eb = grab_extent_buffer(fs_info,
+@@ -3778,6 +3782,7 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ 		if (existing_eb) {
+ 			/* The extent buffer still exists, we can use it directly. */
+ 			*found_eb_ret = existing_eb;
++			spin_unlock(&mapping->i_private_lock);
+ 			folio_unlock(existing_folio);
+ 			folio_put(existing_folio);
+ 			return 1;
+@@ -3786,6 +3791,22 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ 		__free_page(folio_page(eb->folios[i], 0));
+ 		eb->folios[i] = existing_folio;
+ 	}
++	eb->folio_size = folio_size(eb->folios[i]);
++	eb->folio_shift = folio_shift(eb->folios[i]);
++	/* Should not fail, as we have preallocated the memory. */
++	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
++	ASSERT(!ret);
++	/*
++	 * To inform that we have an extra eb under allocation, so that
++	 * detach_extent_buffer_page() won't release the folio private when the
++	 * eb hasn't been inserted into the radix tree yet.
++	 *
++	 * The ref will be decreased when the eb releases the page, in
++	 * detach_extent_buffer_page().  Thus needs no special handling in the
++	 * error path.
++	 */
++	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
++	spin_unlock(&mapping->i_private_lock);
+ 	return 0;
+ }
+ 
+@@ -3797,7 +3818,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
+ 	int attached = 0;
+ 	struct extent_buffer *eb;
+ 	struct extent_buffer *existing_eb = NULL;
+-	struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ 	struct btrfs_subpage *prealloc = NULL;
+ 	u64 lockdep_owner = owner_root;
+ 	bool page_contig = true;
+@@ -3863,7 +3883,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
+ 	for (int i = 0; i < num_folios; i++) {
+ 		struct folio *folio;
+ 
+-		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
++		ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
+ 		if (ret > 0) {
+ 			ASSERT(existing_eb);
+ 			goto out;
+@@ -3900,24 +3920,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
+ 		 * and free the allocated page.
+ 		 */
+ 		folio = eb->folios[i];
+-		eb->folio_size = folio_size(folio);
+-		eb->folio_shift = folio_shift(folio);
+-		spin_lock(&mapping->i_private_lock);
+-		/* Should not fail, as we have preallocated the memory */
+-		ret = attach_extent_buffer_folio(eb, folio, prealloc);
+-		ASSERT(!ret);
+-		/*
+-		 * To inform we have extra eb under allocation, so that
+-		 * detach_extent_buffer_page() won't release the folio private
+-		 * when the eb hasn't yet been inserted into radix tree.
+-		 *
+-		 * The ref will be decreased when the eb released the page, in
+-		 * detach_extent_buffer_page().
+-		 * Thus needs no special handling in error path.
+-		 */
+-		btrfs_folio_inc_eb_refs(fs_info, folio);
+-		spin_unlock(&mapping->i_private_lock);
+-
+ 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
+ 
+ 		/*
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 40e5f7f2fcb7f..1167899a16d05 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -468,6 +468,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
+ 		}
+ 		if (!qgroup) {
+ 			struct btrfs_qgroup *prealloc;
++			struct btrfs_root *tree_root = fs_info->tree_root;
+ 
+ 			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ 			if (!prealloc) {
+@@ -475,6 +476,25 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
+ 				goto out;
+ 			}
+ 			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
++			/*
++			 * If a qgroup exists for a subvolume ID, it is possible
++			 * that the subvolume has been deleted, in which case
++			 * re-using that ID would lead to incorrect accounting.
++			 *
++			 * Ensure that we skip any such subvol ids.
++			 *
++			 * We don't need to lock because this is only called
++			 * during mount before we start doing things like creating
++			 * subvolumes.
++			 */
++			if (is_fstree(qgroup->qgroupid) &&
++			    qgroup->qgroupid > tree_root->free_objectid)
++				/*
++				 * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
++				 * as it will get checked on the next call to
++				 * btrfs_get_free_objectid.
++				 */
++				tree_root->free_objectid = qgroup->qgroupid + 1;
+ 		}
+ 		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
+ 		if (ret < 0)
+@@ -3129,7 +3149,7 @@ static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
+ 	qgids = res->qgroups;
+ 
+ 	list_for_each_entry(qg_list, &inode_qg->groups, next_group)
+-		qgids[i] = qg_list->group->qgroupid;
++		qgids[i++] = qg_list->group->qgroupid;
+ 
+ 	*inherit = res;
+ 	return 0;
+@@ -3826,14 +3846,14 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+ 		/* we're resuming qgroup rescan at mount time */
+ 		if (!(fs_info->qgroup_flags &
+ 		      BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
+-			btrfs_warn(fs_info,
++			btrfs_debug(fs_info,
+ 			"qgroup rescan init failed, qgroup rescan is not queued");
+ 			ret = -EINVAL;
+ 		} else if (!(fs_info->qgroup_flags &
+ 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
+-			btrfs_warn(fs_info,
++			btrfs_debug(fs_info,
+ 			"qgroup rescan init failed, qgroup is not enabled");
+-			ret = -EINVAL;
++			ret = -ENOTCONN;
+ 		}
+ 
+ 		if (ret)
+@@ -3844,14 +3864,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+ 
+ 	if (init_flags) {
+ 		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+-			btrfs_warn(fs_info,
+-				   "qgroup rescan is already in progress");
+ 			ret = -EINPROGRESS;
+ 		} else if (!(fs_info->qgroup_flags &
+ 			     BTRFS_QGROUP_STATUS_FLAG_ON)) {
+-			btrfs_warn(fs_info,
++			btrfs_debug(fs_info,
+ 			"qgroup rescan init failed, qgroup is not enabled");
+-			ret = -EINVAL;
++			ret = -ENOTCONN;
+ 		} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
+ 			/* Quota disable is in progress */
+ 			ret = -EBUSY;
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 7e44ccaf348f2..fa6964de34379 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -119,6 +119,7 @@ enum {
+ 	Opt_thread_pool,
+ 	Opt_treelog,
+ 	Opt_user_subvol_rm_allowed,
++	Opt_norecovery,
+ 
+ 	/* Rescue options */
+ 	Opt_rescue,
+@@ -245,6 +246,8 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = {
+ 	__fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL),
+ 	/* Deprecated, with alias rescue=usebackuproot */
+ 	__fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
++	/* For compatibility only, alias for "rescue=nologreplay". */
++	fsparam_flag("norecovery", Opt_norecovery),
+ 
+ 	/* Debugging options. */
+ 	fsparam_flag_no("enospc_debug", Opt_enospc_debug),
+@@ -438,6 +441,11 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 		"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
+ 		btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
+ 		break;
++	case Opt_norecovery:
++		btrfs_info(NULL,
++"'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'");
++		btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
++		break;
+ 	case Opt_flushoncommit:
+ 		if (result.negated)
+ 			btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 472918a5bc73a..d4fc5fedd8ee5 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4856,18 +4856,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
+ 			path->slots[0]++;
+ 			continue;
+ 		}
+-		if (!dropped_extents) {
+-			/*
+-			 * Avoid logging extent items logged in past fsync calls
+-			 * and leading to duplicate keys in the log tree.
+-			 */
++		/*
++		 * Avoid overlapping items in the log tree. The first time we
++		 * get here, get rid of everything from a past fsync. After
++		 * that, if the current extent starts before the end of the last
++		 * extent we copied, truncate the last one. This can happen if
++		 * an ordered extent completion modifies the subvolume tree
++		 * while btrfs_next_leaf() has the tree unlocked.
++		 */
++		if (!dropped_extents || key.offset < truncate_offset) {
+ 			ret = truncate_inode_items(trans, root->log_root, inode,
+-						   truncate_offset,
++						   min(key.offset, truncate_offset),
+ 						   BTRFS_EXTENT_DATA_KEY);
+ 			if (ret)
+ 				goto out;
+ 			dropped_extents = true;
+ 		}
++		truncate_offset = btrfs_file_extent_end(path);
+ 		if (ins_nr == 0)
+ 			start_slot = slot;
+ 		ins_nr++;
+diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
+index 81e65c453ef05..3a3461561a3c9 100644
+--- a/fs/erofs/decompressor_deflate.c
++++ b/fs/erofs/decompressor_deflate.c
+@@ -46,39 +46,15 @@ int __init z_erofs_deflate_init(void)
+ 	/* by default, use # of possible CPUs instead */
+ 	if (!z_erofs_deflate_nstrms)
+ 		z_erofs_deflate_nstrms = num_possible_cpus();
+-
+-	for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+-	     ++z_erofs_deflate_avail_strms) {
+-		struct z_erofs_deflate *strm;
+-
+-		strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+-		if (!strm)
+-			goto out_failed;
+-
+-		/* XXX: in-kernel zlib cannot shrink windowbits currently */
+-		strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+-		if (!strm->z.workspace) {
+-			kfree(strm);
+-			goto out_failed;
+-		}
+-
+-		spin_lock(&z_erofs_deflate_lock);
+-		strm->next = z_erofs_deflate_head;
+-		z_erofs_deflate_head = strm;
+-		spin_unlock(&z_erofs_deflate_lock);
+-	}
+ 	return 0;
+-
+-out_failed:
+-	erofs_err(NULL, "failed to allocate zlib workspace");
+-	z_erofs_deflate_exit();
+-	return -ENOMEM;
+ }
+ 
+ int z_erofs_load_deflate_config(struct super_block *sb,
+ 			struct erofs_super_block *dsb, void *data, int size)
+ {
+ 	struct z_erofs_deflate_cfgs *dfl = data;
++	static DEFINE_MUTEX(deflate_resize_mutex);
++	static bool inited;
+ 
+ 	if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
+ 		erofs_err(sb, "invalid deflate cfgs, size=%u", size);
+@@ -89,9 +65,36 @@ int z_erofs_load_deflate_config(struct super_block *sb,
+ 		erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
+ 		return -EOPNOTSUPP;
+ 	}
++	mutex_lock(&deflate_resize_mutex);
++	if (!inited) {
++		for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
++		     ++z_erofs_deflate_avail_strms) {
++			struct z_erofs_deflate *strm;
++
++			strm = kzalloc(sizeof(*strm), GFP_KERNEL);
++			if (!strm)
++				goto failed;
++			/* XXX: in-kernel zlib cannot customize windowbits */
++			strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
++			if (!strm->z.workspace) {
++				kfree(strm);
++				goto failed;
++			}
+ 
++			spin_lock(&z_erofs_deflate_lock);
++			strm->next = z_erofs_deflate_head;
++			z_erofs_deflate_head = strm;
++			spin_unlock(&z_erofs_deflate_lock);
++		}
++		inited = true;
++	}
++	mutex_unlock(&deflate_resize_mutex);
+ 	erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
+ 	return 0;
++failed:
++	mutex_unlock(&deflate_resize_mutex);
++	z_erofs_deflate_exit();
++	return -ENOMEM;
+ }
+ 
+ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
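The allocation loop now runs at most once, triggered by the first deflate mount and guarded by a static mutex and flag. The same init-once shape in plain C (names are illustrative; pthreads stand in for the kernel mutex):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static int setup_streams(void)
{
	static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool inited;
	int ret = 0;

	pthread_mutex_lock(&init_lock);
	if (!inited) {
		puts("allocating decompression streams once");
		/* On allocation failure, set ret and leave inited false
		 * so a later mount can retry. */
		inited = true;
	}
	pthread_mutex_unlock(&init_lock);
	return ret;
}

int main(void)
{
	setup_streams();	/* allocates */
	setup_streams();	/* no-op */
	return 0;
}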
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 6de6bf57699be..30e824866274e 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2334,7 +2334,7 @@ static int mpage_journal_page_buffers(handle_t *handle,
+ 
+ 	if (folio_pos(folio) + len > size &&
+ 	    !ext4_verity_in_progress(inode))
+-		len = size - folio_pos(folio);
++		len = size & (len - 1);
+ 
+ 	return ext4_journal_folio_buffers(handle, folio, len);
+ }
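The replaced expression leans on folio sizes being powers of two and folios being naturally aligned in the page cache: for such a len, size & (len - 1) equals size % len, the number of bytes of the straddling folio that lie below EOF. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 10000;	/* i_size */
	unsigned long len = 4096;		/* folio size, power of two */

	/* The folio straddles EOF, so only the bytes below i_size need
	 * journalling: size & (len - 1) == size % len here. */
	printf("journal %llu of %lu bytes\n", size & (len - 1), len);
	return 0;
}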
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 56938532b4ce2..7bfc5fb5a1285 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -193,8 +193,8 @@ struct ext4_allocation_context {
+ 	ext4_grpblk_t	ac_orig_goal_len;
+ 
+ 	__u32 ac_flags;		/* allocation hints */
++	__u32 ac_groups_linear_remaining;
+ 	__u16 ac_groups_scanned;
+-	__u16 ac_groups_linear_remaining;
+ 	__u16 ac_found;
+ 	__u16 ac_cX_found[EXT4_MB_NUM_CRS];
+ 	__u16 ac_tail;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index b67a176bfcf9f..9fdd134220736 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -3113,8 +3113,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
+ 
+ 		bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+ 		if (IS_ERR(bh)) {
+-			if (PTR_ERR(bh) == -ENOMEM)
++			if (PTR_ERR(bh) == -ENOMEM) {
++				mb_cache_entry_put(ea_block_cache, ce);
+ 				return NULL;
++			}
+ 			bh = NULL;
+ 			EXT4_ERROR_INODE(inode, "block %lu read error",
+ 					 (unsigned long)ce->e_value);
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index c26effdce9aa7..2af50bc34fcd7 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -361,6 +361,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		return false;
+ 	}
+ 
++	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
++		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
++			  __func__, inode->i_ino, fi->i_xattr_nid);
++		return false;
++	}
++
+ 	return true;
+ }
+ 
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 4e8e41c8b3c0e..4ac6c8c403c26 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -909,11 +909,11 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+ {
+ 	loff_t length = iomap_length(iter);
+-	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ 	loff_t pos = iter->pos;
+ 	ssize_t written = 0;
+ 	long status = 0;
+ 	struct address_space *mapping = iter->inode->i_mapping;
++	size_t chunk = mapping_max_folio_size(mapping);
+ 	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
+ 
+ 	do {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 06253695fe53f..2ff0c35d80e0d 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -710,9 +710,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
+ 	if ((bsize & (bsize - 1)) || nrbitsp) {
+ 		unsigned char	nrbits;
+ 
+-		for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
++		for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
+ 			;
+-		bsize = 1 << nrbits;
++		bsize = 1UL << nrbits;
+ 		if (nrbitsp)
+ 			*nrbitsp = nrbits;
+ 	}
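The UL suffix matters once bit 31 is probed: with a 32-bit int, 1 << 31 is a signed overflow and the computed bsize can sign-extend when widened. A minimal demonstration of the fixed loop:

#include <stdio.h>

int main(void)
{
	unsigned long bsize = 0x80000000UL;	/* 2 GiB block size, bit 31 set */
	unsigned char nrbits;

	for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
		;
	bsize = 1UL << nrbits;	/* stays 0x80000000, no sign-extension */
	printf("nrbits=%u bsize=%#lx\n", nrbits, bsize);
	return 0;
}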
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ea390db94b622..c93c12063b3af 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5456,7 +5456,7 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
+ 	struct rpc_message *msg = &task->tk_msg;
+ 
+ 	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
+-	    server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
++	    task->tk_status == -ENOTSUPP) {
+ 		server->caps &= ~NFS_CAP_READ_PLUS;
+ 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+ 		rpc_restart_call_prepare(task);
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index aee40db7a036f..35e6c55a0d231 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -608,7 +608,7 @@ int nilfs_empty_dir(struct inode *inode)
+ 
+ 		kaddr = nilfs_get_folio(inode, i, &folio);
+ 		if (IS_ERR(kaddr))
+-			continue;
++			return 0;
+ 
+ 		de = (struct nilfs_dir_entry *)kaddr;
+ 		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 3c1e4e9eafa31..a1b011cb4b471 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 			if (bh->b_folio != bd_folio) {
+ 				if (bd_folio) {
+ 					folio_lock(bd_folio);
++					folio_wait_writeback(bd_folio);
+ 					folio_clear_dirty_for_io(bd_folio);
+ 					folio_start_writeback(bd_folio);
+ 					folio_unlock(bd_folio);
+@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 			if (bh == segbuf->sb_super_root) {
+ 				if (bh->b_folio != bd_folio) {
+ 					folio_lock(bd_folio);
++					folio_wait_writeback(bd_folio);
+ 					folio_clear_dirty_for_io(bd_folio);
+ 					folio_start_writeback(bd_folio);
+ 					folio_unlock(bd_folio);
+@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+ 	}
+ 	if (bd_folio) {
+ 		folio_lock(bd_folio);
++		folio_wait_writeback(bd_folio);
+ 		folio_clear_dirty_for_io(bd_folio);
+ 		folio_start_writeback(bd_folio);
+ 		folio_unlock(bd_folio);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 18550c071d71c..72a1acd03675c 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
+ 	mm = get_task_mm(task);
+ 	if (mm) {
+ 		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+-		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
++		seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
+ 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
+ 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
+ 		mmput(mm);
+diff --git a/fs/proc/fd.c b/fs/proc/fd.c
+index 6e72e5ad42bc7..f4b1c8b42a511 100644
+--- a/fs/proc/fd.c
++++ b/fs/proc/fd.c
+@@ -74,7 +74,18 @@ static int seq_show(struct seq_file *m, void *v)
+ 	return 0;
+ }
+ 
+-static int proc_fdinfo_access_allowed(struct inode *inode)
++static int seq_fdinfo_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, seq_show, inode);
++}
++
++/*
++ * Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
++ * that the current task has PTRACE_MODE_READ in addition to the normal
++ * POSIX-like checks.
++ */
++static int proc_fdinfo_permission(struct mnt_idmap *idmap, struct inode *inode,
++				  int mask)
+ {
+ 	bool allowed = false;
+ 	struct task_struct *task = get_proc_task(inode);
+@@ -88,18 +99,13 @@ static int proc_fdinfo_access_allowed(struct inode *inode)
+ 	if (!allowed)
+ 		return -EACCES;
+ 
+-	return 0;
++	return generic_permission(idmap, inode, mask);
+ }
+ 
+-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+-{
+-	int ret = proc_fdinfo_access_allowed(inode);
+-
+-	if (ret)
+-		return ret;
+-
+-	return single_open(file, seq_show, inode);
+-}
++static const struct inode_operations proc_fdinfo_file_inode_operations = {
++	.permission	= proc_fdinfo_permission,
++	.setattr	= proc_setattr,
++};
+ 
+ static const struct file_operations proc_fdinfo_file_operations = {
+ 	.open		= seq_fdinfo_open,
+@@ -388,6 +394,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
+ 	ei = PROC_I(inode);
+ 	ei->fd = data->fd;
+ 
++	inode->i_op = &proc_fdinfo_file_inode_operations;
++
+ 	inode->i_fop = &proc_fdinfo_file_operations;
+ 	tid_fd_update_inode(task, inode, 0);
+ 
+@@ -407,23 +415,13 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
+ 				  proc_fdinfo_instantiate);
+ }
+ 
+-static int proc_open_fdinfo(struct inode *inode, struct file *file)
+-{
+-	int ret = proc_fdinfo_access_allowed(inode);
+-
+-	if (ret)
+-		return ret;
+-
+-	return 0;
+-}
+-
+ const struct inode_operations proc_fdinfo_inode_operations = {
+ 	.lookup		= proc_lookupfdinfo,
++	.permission	= proc_fdinfo_permission,
+ 	.setattr	= proc_setattr,
+ };
+ 
+ const struct file_operations proc_fdinfo_operations = {
+-	.open		= proc_open_fdinfo,
+ 	.read		= generic_read_dir,
+ 	.iterate_shared	= proc_readfdinfo,
+ 	.llseek		= generic_file_llseek,
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 102f48668c352..a097c9a65234b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -965,12 +965,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+ 				break;
+ 
+ 			/* Case 1 and 2 above */
+-			if (vma->vm_start >= last_vma_end)
++			if (vma->vm_start >= last_vma_end) {
++				smap_gather_stats(vma, &mss, 0);
++				last_vma_end = vma->vm_end;
+ 				continue;
++			}
+ 
+ 			/* Case 4 above */
+-			if (vma->vm_end > last_vma_end)
++			if (vma->vm_end > last_vma_end) {
+ 				smap_gather_stats(vma, &mss, last_vma_end);
++				last_vma_end = vma->vm_end;
++			}
+ 		}
+ 	} for_each_vma(vmi, vma);
+ 
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index c46d418c1c0c3..a2072ab9e586d 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -2574,7 +2574,7 @@ typedef struct {
+ 
+ 
+ struct win_dev {
+-	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO*/
++	unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
+ 	__le64 major;
+ 	__le64 minor;
+ } __attribute__((packed));
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index 60afab5c83d41..d0e69591332d8 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -591,6 +591,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ 				mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
+ 				fattr->cf_rdev = MKDEV(mjr, mnr);
+ 			}
++		} else if (memcmp("LnxSOCK", pbuf, 8) == 0) {
++			cifs_dbg(FYI, "Socket\n");
++			fattr->cf_mode |= S_IFSOCK;
++			fattr->cf_dtype = DT_SOCK;
+ 		} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
+ 			cifs_dbg(FYI, "Symlink\n");
+ 			fattr->cf_mode |= S_IFLNK;
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 6fea0aed43461..d35c45f3a25a0 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4996,6 +4996,9 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 		pdev.major = cpu_to_le64(MAJOR(dev));
+ 		pdev.minor = cpu_to_le64(MINOR(dev));
+ 		break;
++	case S_IFSOCK:
++		strscpy(pdev.type, "LnxSOCK");
++		break;
+ 	case S_IFIFO:
+ 		strscpy(pdev.type, "LnxFIFO");
+ 		break;
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 02135a6053051..1476c445cadcf 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -216,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32  tid)
+ 	}
+ 	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ 	if (!tcon) {
+-		cifs_put_smb_ses(ses);
+ 		spin_unlock(&cifs_tcp_ses_lock);
++		cifs_put_smb_ses(ses);
+ 		return NULL;
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
+index a878cea70f4c9..55a40a730b10c 100644
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -50,8 +50,12 @@ static struct eventfs_root_inode *get_root_inode(struct eventfs_inode *ei)
+ /* Just try to make something consistent and unique */
+ static int eventfs_dir_ino(struct eventfs_inode *ei)
+ {
+-	if (!ei->ino)
++	if (!ei->ino) {
+ 		ei->ino = get_next_ino();
++		/* Must not have the file inode number */
++		if (ei->ino == EVENTFS_FILE_INODE_INO)
++			ei->ino = get_next_ino();
++	}
+ 
+ 	return ei->ino;
+ }
+@@ -345,10 +349,9 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
+ 		 * If the ei is being freed, the ownership of the children
+ 		 * doesn't matter.
+ 		 */
+-		if (ei->is_freed) {
+-			ei = NULL;
+-			break;
+-		}
++		if (ei->is_freed)
++			return NULL;
++
+ 		// Walk upwards until you find the events inode
+ 	} while (!ei->is_events);
+ 
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 417c840e64033..d8c60c2b76e23 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -439,10 +439,26 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
++static int tracefs_drop_inode(struct inode *inode)
++{
++	struct tracefs_inode *ti = get_tracefs(inode);
++
++	/*
++	 * This inode is being freed and cannot be used for
++	 * eventfs. Clear the flag so that it doesn't call into
++	 * eventfs during the remount flag updates. The eventfs_inode
++	 * gets freed after an RCU cycle, so the content will still
++	 * be safe if the iteration is going on now.
++	 */
++	ti->flags &= ~TRACEFS_EVENT_INODE;
++
++	return 1;
++}
++
+ static const struct super_operations tracefs_super_operations = {
+ 	.alloc_inode    = tracefs_alloc_inode,
+ 	.free_inode     = tracefs_free_inode,
+-	.drop_inode     = generic_delete_inode,
++	.drop_inode     = tracefs_drop_inode,
+ 	.statfs		= simple_statfs,
+ 	.remount_fs	= tracefs_remount,
+ 	.show_options	= tracefs_show_options,
+@@ -469,22 +485,7 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	return !(ei && ei->is_freed);
+ }
+ 
+-static void tracefs_d_iput(struct dentry *dentry, struct inode *inode)
+-{
+-	struct tracefs_inode *ti = get_tracefs(inode);
+-
+-	/*
+-	 * This inode is being freed and cannot be used for
+-	 * eventfs. Clear the flag so that it doesn't call into
+-	 * eventfs during the remount flag updates. The eventfs_inode
+-	 * gets freed after an RCU cycle, so the content will still
+-	 * be safe if the iteration is going on now.
+-	 */
+-	ti->flags &= ~TRACEFS_EVENT_INODE;
+-}
+-
+ static const struct dentry_operations tracefs_dentry_operations = {
+-	.d_iput = tracefs_d_iput,
+ 	.d_revalidate = tracefs_d_revalidate,
+ 	.d_release = tracefs_d_release,
+ };
+diff --git a/fs/verity/init.c b/fs/verity/init.c
+index cb2c9aac61ed0..f440f0e61e3e6 100644
+--- a/fs/verity/init.c
++++ b/fs/verity/init.c
+@@ -10,8 +10,6 @@
+ #include <linux/ratelimit.h>
+ 
+ #ifdef CONFIG_SYSCTL
+-static struct ctl_table_header *fsverity_sysctl_header;
+-
+ static struct ctl_table fsverity_sysctl_table[] = {
+ #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+ 	{
+@@ -28,10 +26,7 @@ static struct ctl_table fsverity_sysctl_table[] = {
+ 
+ static void __init fsverity_init_sysctl(void)
+ {
+-	fsverity_sysctl_header = register_sysctl("fs/verity",
+-						 fsverity_sysctl_table);
+-	if (!fsverity_sysctl_header)
+-		panic("fsverity sysctl registration failed");
++	register_sysctl_init("fs/verity", fsverity_sysctl_table);
+ }
+ #else /* CONFIG_SYSCTL */
+ static inline void fsverity_init_sysctl(void)
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index 7e2b1de3996ac..f4692ec361e1b 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
+  */
+ #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+ 
+-extern unsigned long ksm_zero_pages;
++extern atomic_long_t ksm_zero_pages;
++
++static inline void ksm_map_zero_page(struct mm_struct *mm)
++{
++	atomic_long_inc(&ksm_zero_pages);
++	atomic_long_inc(&mm->ksm_zero_pages);
++}
+ 
+ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+ {
+ 	if (is_ksm_zero_pte(pte)) {
+-		ksm_zero_pages--;
+-		mm->ksm_zero_pages--;
++		atomic_long_dec(&ksm_zero_pages);
++		atomic_long_dec(&mm->ksm_zero_pages);
+ 	}
+ }
+ 
++static inline long mm_ksm_zero_pages(struct mm_struct *mm)
++{
++	return atomic_long_read(&mm->ksm_zero_pages);
++}
++
+ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+ {
+ 	int ret;
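Moving the counters to atomic_long_t keeps concurrent increments and decrements from losing updates to read-modify-write races. The same pairing in portable C11, as a sketch (the kernel type is atomic_long_t; C11's atomic_long is the userspace stand-in):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long ksm_zero_pages;		/* global count */

struct mm { atomic_long ksm_zero_pages; };	/* per-process count */

static void map_zero_page(struct mm *mm)
{
	atomic_fetch_add(&ksm_zero_pages, 1);
	atomic_fetch_add(&mm->ksm_zero_pages, 1);
}

static void unmap_zero_page(struct mm *mm)
{
	atomic_fetch_sub(&ksm_zero_pages, 1);
	atomic_fetch_sub(&mm->ksm_zero_pages, 1);
}

int main(void)
{
	struct mm mm = { 0 };

	map_zero_page(&mm);
	printf("%ld\n", atomic_load(&mm.ksm_zero_pages));
	unmap_zero_page(&mm);
	return 0;
}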
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5240bd7bca338..962e7e05e4f2f 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -988,7 +988,7 @@ struct mm_struct {
+ 		 * Represent how many empty pages are merged with kernel zero
+ 		 * pages when enabling KSM use_zero_pages.
+ 		 */
+-		unsigned long ksm_zero_pages;
++		atomic_long_t ksm_zero_pages;
+ #endif /* CONFIG_KSM */
+ #ifdef CONFIG_LRU_GEN_WALKS_MMU
+ 		struct {
+diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
+index 5d3d15e97868a..66272fdce43d8 100644
+--- a/include/linux/mmc/slot-gpio.h
++++ b/include/linux/mmc/slot-gpio.h
+@@ -21,6 +21,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ 			 unsigned int debounce);
+ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ 			 unsigned int idx, unsigned int debounce);
++int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
+ void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ 			 irqreturn_t (*isr)(int irq, void *dev_id));
+ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 2df35e65557d2..5283b6193e7dc 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -344,6 +344,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+ 	m->gfp_mask = mask;
+ }
+ 
++/*
++ * There are some parts of the kernel which assume that PMD entries
++ * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
++ * limit the maximum allocation order to PMD size.  I'm not aware of any
++ * assumptions about maximum order if THP are disabled, but 8 seems like
++ * a good order (that's 1MB if you're using 4kB pages)
++ */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
++#else
++#define MAX_PAGECACHE_ORDER	8
++#endif
++
+ /**
+  * mapping_set_large_folios() - Indicate the file supports large folios.
+  * @mapping: The file.
+@@ -370,6 +383,14 @@ static inline bool mapping_large_folio_support(struct address_space *mapping)
+ 		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ }
+ 
++/* Return the maximum folio size for this pagecache mapping, in bytes. */
++static inline size_t mapping_max_folio_size(struct address_space *mapping)
++{
++	if (mapping_large_folio_support(mapping))
++		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
++	return PAGE_SIZE;
++}
++
+ static inline int filemap_nr_thps(struct address_space *mapping)
+ {
+ #ifdef CONFIG_READ_ONLY_THP_FOR_FS
+@@ -528,19 +549,6 @@ static inline void *detach_page_private(struct page *page)
+ 	return folio_detach_private(page_folio(page));
+ }
+ 
+-/*
+- * There are some parts of the kernel which assume that PMD entries
+- * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
+- * limit the maximum allocation order to PMD size.  I'm not aware of any
+- * assumptions about maximum order if THP are disabled, but 8 seems like
+- * a good order (that's 1MB if you're using 4kB pages)
+- */
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+-#else
+-#define MAX_PAGECACHE_ORDER	8
+-#endif
+-
+ #ifdef CONFIG_NUMA
+ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
+ #else
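The new helper lets callers such as the iomap hunk earlier size their copy chunks per mapping rather than assuming the global maximum. A sketch of the chunked-write idea, with illustrative constants:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MAX_PAGECACHE_ORDER	9	/* PMD order on x86-64 with 4K pages */

static size_t max_folio_size(int large_folio_support)
{
	if (large_folio_support)
		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
	return PAGE_SIZE;	/* small-folio mappings copy a page at a time */
}

int main(void)
{
	size_t remaining = 5u << 20, chunk = max_folio_size(1);

	while (remaining) {
		size_t bytes = remaining < chunk ? remaining : chunk;

		printf("copy %zu bytes\n", bytes);
		remaining -= bytes;
	}
	return 0;
}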
+diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
+index 471e177362b4c..5d8e9ed2c0056 100644
+--- a/include/net/tcp_ao.h
++++ b/include/net/tcp_ao.h
+@@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
+ struct tcp_ao_info {
+ 	/* List of tcp_ao_key's */
+ 	struct hlist_head	head;
+-	/* current_key and rnext_key aren't maintained on listen sockets.
++	/* current_key and rnext_key are maintained on sockets
++	 * in TCP_AO_ESTABLISHED states.
+ 	 * Their purpose is to cache keys on established connections,
+ 	 * saving needless lookups. Never dereference any of them from
+ 	 * listen sockets.
+@@ -201,9 +202,9 @@ struct tcp6_ao_context {
+ };
+ 
+ struct tcp_sigpool;
++/* Established states are fast-path and always have current_key/rnext_key */
+ #define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
+-			    TCPF_CLOSE | TCPF_CLOSE_WAIT | \
+-			    TCPF_LAST_ACK | TCPF_CLOSING)
++			    TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
+ 
+ int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ 			struct tcp_ao_key *key, struct tcphdr *th,
+diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
+index c8bb56e6852a8..47a6cab75e630 100644
+--- a/include/soc/qcom/cmd-db.h
++++ b/include/soc/qcom/cmd-db.h
+@@ -1,5 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
++/*
++ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
+ 
+ #ifndef __QCOM_COMMAND_DB_H__
+ #define __QCOM_COMMAND_DB_H__
+@@ -21,6 +24,8 @@ u32 cmd_db_read_addr(const char *resource_id);
+ 
+ const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
+ 
++bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
++
+ enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
+ 
+ int cmd_db_ready(void);
+@@ -31,6 +36,9 @@ static inline u32 cmd_db_read_addr(const char *resource_id)
+ static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
+ { return ERR_PTR(-ENODEV); }
+ 
++static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
++{ return false; }
++
+ static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
+ { return -ENODEV; }
+ 
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 03e52094630ac..bb6492b667511 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -442,7 +442,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
+ {
+ 	if (req->flags & REQ_F_CAN_POLL)
+ 		return true;
+-	if (file_can_poll(req->file)) {
++	if (req->file && file_can_poll(req->file)) {
+ 		req->flags |= REQ_F_CAN_POLL;
+ 		return true;
+ 	}
+diff --git a/io_uring/napi.c b/io_uring/napi.c
+index 883a1a6659075..8c18ede595c41 100644
+--- a/io_uring/napi.c
++++ b/io_uring/napi.c
+@@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
+ }
+ 
+ /*
+- * __io_napi_adjust_timeout() - Add napi id to the busy poll list
++ * __io_napi_adjust_timeout() - adjust busy loop timeout
+  * @ctx: pointer to io-uring context structure
+  * @iowq: pointer to io wait queue
+  * @ts: pointer to timespec or NULL
+  *
+  * Adjust the busy loop timeout according to timespec and busy poll timeout.
++ * If the specified NAPI timeout is bigger than the wait timeout, then adjust
++ * the NAPI timeout accordingly.
+  */
+ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
+ 			      struct timespec64 *ts)
+@@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
+ 	unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
+ 
+ 	if (ts) {
+-		struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+-
+-		if (timespec64_compare(ts, &poll_to_ts) > 0) {
+-			*ts = timespec64_sub(*ts, poll_to_ts);
+-		} else {
+-			u64 to = timespec64_to_ns(ts);
+-
+-			do_div(to, 1000);
+-			ts->tv_sec = 0;
+-			ts->tv_nsec = 0;
++		struct timespec64 poll_to_ts;
++
++		poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
++		if (timespec64_compare(ts, &poll_to_ts) < 0) {
++			s64 poll_to_ns = timespec64_to_ns(ts);
++			if (poll_to_ns > 0) {
++				u64 val = poll_to_ns + 999;
++				do_div(val, (s64) 1000);
++				poll_to = val;
++			}
+ 		}
+ 	}
+ 
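The rewritten branch only shrinks the busy-poll budget when the caller's wait is shorter than it, and rounds the wait up to whole microseconds so a sub-microsecond wait still polls. The arithmetic in isolation (do_div is the kernel's 64-by-32 division helper; plain division stands in here):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t wait_ns = 1500;		/* caller wants to wait 1.5 us */
	uint64_t poll_to_us = 50;	/* configured napi_busy_poll_to */

	if (wait_ns > 0 && wait_ns < (int64_t)poll_to_us * 1000) {
		/* + 999 rounds up, matching the do_div(val, 1000) above */
		poll_to_us = ((uint64_t)wait_ns + 999) / 1000;
	}
	printf("busy poll for %" PRIu64 " us\n", poll_to_us);	/* 2 us */
	return 0;
}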
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 9443bc63c5a24..2aeaf9765b248 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -184,6 +184,33 @@ char kdb_getchar(void)
+ 	unreachable();
+ }
+ 
++/**
++ * kdb_position_cursor() - Place cursor in the correct horizontal position
++ * @prompt: Nil-terminated string containing the prompt string
++ * @buffer: Nil-terminated string containing the entire command line
++ * @cp: Cursor position, pointer to the character in buffer where the cursor
++ *      should be positioned.
++ *
++ * The cursor is positioned by sending a carriage-return and then printing
++ * the content of the line until we reach the correct cursor position.
++ *
++ * There is some additional fine detail here.
++ *
++ * Firstly, even though kdb_printf() will correctly format zero-width fields
++ * we want the second call to kdb_printf() to be conditional. That keeps things
++ * a little cleaner when LOGGING=1.
++ *
++ * Secondly, we can't combine everything into one call to kdb_printf() since
++ * that renders into a fixed length buffer and the combined print could result
++ * in unwanted truncation.
++ */
++static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
++{
++	kdb_printf("\r%s", kdb_prompt_str);
++	if (cp > buffer)
++		kdb_printf("%.*s", (int)(cp - buffer), buffer);
++}
++
+ /*
+  * kdb_read
+  *
+@@ -212,7 +239,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 						 * and null byte */
+ 	char *lastchar;
+ 	char *p_tmp;
+-	char tmp;
+ 	static char tmpbuffer[CMD_BUFLEN];
+ 	int len = strlen(buffer);
+ 	int len_tmp;
+@@ -249,12 +275,8 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			}
+ 			*(--lastchar) = '\0';
+ 			--cp;
+-			kdb_printf("\b%s \r", cp);
+-			tmp = *cp;
+-			*cp = '\0';
+-			kdb_printf(kdb_prompt_str);
+-			kdb_printf("%s", buffer);
+-			*cp = tmp;
++			kdb_printf("\b%s ", cp);
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 10: /* linefeed */
+@@ -272,19 +294,14 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+ 			memcpy(cp, tmpbuffer, lastchar - cp - 1);
+ 			*(--lastchar) = '\0';
+-			kdb_printf("%s \r", cp);
+-			tmp = *cp;
+-			*cp = '\0';
+-			kdb_printf(kdb_prompt_str);
+-			kdb_printf("%s", buffer);
+-			*cp = tmp;
++			kdb_printf("%s ", cp);
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 1: /* Home */
+ 		if (cp > buffer) {
+-			kdb_printf("\r");
+-			kdb_printf(kdb_prompt_str);
+ 			cp = buffer;
++			kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		}
+ 		break;
+ 	case 5: /* End */
+@@ -300,11 +317,10 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 		}
+ 		break;
+ 	case 14: /* Down */
+-		memset(tmpbuffer, ' ',
+-		       strlen(kdb_prompt_str) + (lastchar-buffer));
+-		*(tmpbuffer+strlen(kdb_prompt_str) +
+-		  (lastchar-buffer)) = '\0';
+-		kdb_printf("\r%s\r", tmpbuffer);
++	case 16: /* Up */
++		kdb_printf("\r%*c\r",
++			   (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
++			   ' ');
+ 		*lastchar = (char)key;
+ 		*(lastchar+1) = '\0';
+ 		return lastchar;
+@@ -314,15 +330,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			++cp;
+ 		}
+ 		break;
+-	case 16: /* Up */
+-		memset(tmpbuffer, ' ',
+-		       strlen(kdb_prompt_str) + (lastchar-buffer));
+-		*(tmpbuffer+strlen(kdb_prompt_str) +
+-		  (lastchar-buffer)) = '\0';
+-		kdb_printf("\r%s\r", tmpbuffer);
+-		*lastchar = (char)key;
+-		*(lastchar+1) = '\0';
+-		return lastchar;
+ 	case 9: /* Tab */
+ 		if (tab < 2)
+ 			++tab;
+@@ -366,15 +373,25 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 			kdb_printf("\n");
+ 			kdb_printf(kdb_prompt_str);
+ 			kdb_printf("%s", buffer);
++			if (cp != lastchar)
++				kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 		} else if (tab != 2 && count > 0) {
+-			len_tmp = strlen(p_tmp);
+-			strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+-			len_tmp = strlen(p_tmp);
+-			strncpy(cp, p_tmp+len, len_tmp-len + 1);
+-			len = len_tmp - len;
+-			kdb_printf("%s", cp);
+-			cp += len;
+-			lastchar += len;
++			/* How many new characters do we want from tmpbuffer? */
++			len_tmp = strlen(p_tmp) - len;
++			if (lastchar + len_tmp >= bufend)
++				len_tmp = bufend - lastchar;
++
++			if (len_tmp) {
++				/* + 1 ensures the '\0' is memmove'd */
++				memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
++				memcpy(cp, p_tmp+len, len_tmp);
++				kdb_printf("%s", cp);
++				cp += len_tmp;
++				lastchar += len_tmp;
++				if (cp != lastchar)
++					kdb_position_cursor(kdb_prompt_str,
++							    buffer, cp);
++			}
+ 		}
+ 		kdb_nextline = 1; /* reset output line number */
+ 		break;
+@@ -385,13 +402,9 @@ static char *kdb_read(char *buffer, size_t bufsize)
+ 				memcpy(cp+1, tmpbuffer, lastchar - cp);
+ 				*++lastchar = '\0';
+ 				*cp = key;
+-				kdb_printf("%s\r", cp);
++				kdb_printf("%s", cp);
+ 				++cp;
+-				tmp = *cp;
+-				*cp = '\0';
+-				kdb_printf(kdb_prompt_str);
+-				kdb_printf("%s", buffer);
+-				*cp = tmp;
++				kdb_position_cursor(kdb_prompt_str, buffer, cp);
+ 			} else {
+ 				*++lastchar = '\0';
+ 				*cp++ = key;
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 4c6b32318ce35..7bf9f66ca63a2 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -160,7 +160,10 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
+ static unsigned int irq_find_at_or_after(unsigned int offset)
+ {
+ 	unsigned long index = offset;
+-	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
++	struct irq_desc *desc;
++
++	guard(rcu)();
++	desc = mt_find(&sparse_irqs, &index, nr_irqs);
+ 
+ 	return desc ? irq_desc_get_irq(desc) : nr_irqs;
+ }
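guard(rcu)() comes from the kernel's cleanup.h and drops the RCU read lock automatically when the scope ends, so the mt_find() walk cannot escape the critical section on any return path. The mechanism underneath is the GCC/Clang cleanup attribute; a userspace sketch:

#include <stdio.h>

/* Called automatically when the guarded variable leaves scope. */
static void scope_unlock(int *unused)
{
	(void)unused;
	puts("rcu_read_unlock()");
}

static unsigned int find_at_or_after(unsigned int offset)
{
	int rcu_guard __attribute__((cleanup(scope_unlock))) = 0;

	(void)rcu_guard;
	puts("rcu_read_lock()");
	/* ... the mt_find() walk is protected for the rest of the scope ... */
	return offset;	/* the unlock runs here, on every return path */
}

int main(void)
{
	find_at_or_after(0);
	return 0;
}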
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 9dc605f08a231..5d8f918c9825f 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3260,7 +3260,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ 	struct bpf_run_ctx *old_run_ctx;
+ 	int err = 0;
+ 
+-	if (link->task && current != link->task)
++	if (link->task && current->mm != link->task->mm)
+ 		return 0;
+ 
+ 	if (sleepable)
+@@ -3361,8 +3361,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+ 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+ 	cnt = attr->link_create.uprobe_multi.cnt;
++	pid = attr->link_create.uprobe_multi.pid;
+ 
+-	if (!upath || !uoffsets || !cnt)
++	if (!upath || !uoffsets || !cnt || pid < 0)
+ 		return -EINVAL;
+ 	if (cnt > MAX_UPROBE_MULTI_CNT)
+ 		return -E2BIG;
+@@ -3386,10 +3387,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
+ 		goto error_path_put;
+ 	}
+ 
+-	pid = attr->link_create.uprobe_multi.pid;
+ 	if (pid) {
+ 		rcu_read_lock();
+-		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
++		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
+ 		rcu_read_unlock();
+ 		if (!task) {
+ 			err = -ESRCH;
+diff --git a/mm/cma.c b/mm/cma.c
+index 01f5a8f71ddfa..3e9724716bad8 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -182,10 +182,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ 	if (!size || !memblock_is_region_reserved(base, size))
+ 		return -EINVAL;
+ 
+-	/* alignment should be aligned with order_per_bit */
+-	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
+-		return -EINVAL;
+-
+ 	/* ensure minimal alignment required by mm core */
+ 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
+ 		return -EINVAL;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 89f58c7603b25..dd1fc105f70bd 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2493,32 +2493,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		return __split_huge_zero_page_pmd(vma, haddr, pmd);
+ 	}
+ 
+-	/*
+-	 * Up to this point the pmd is present and huge and userland has the
+-	 * whole access to the hugepage during the split (which happens in
+-	 * place). If we overwrite the pmd with the not-huge version pointing
+-	 * to the pte here (which of course we could if all CPUs were bug
+-	 * free), userland could trigger a small page size TLB miss on the
+-	 * small sized TLB while the hugepage TLB entry is still established in
+-	 * the huge TLB. Some CPU doesn't like that.
+-	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
+-	 * 383 on page 105. Intel should be safe but is also warns that it's
+-	 * only safe if the permission and cache attributes of the two entries
+-	 * loaded in the two TLB is identical (which should be the case here).
+-	 * But it is generally safer to never allow small and huge TLB entries
+-	 * for the same virtual address to be loaded simultaneously. So instead
+-	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
+-	 * current pmd notpresent (atomically because here the pmd_trans_huge
+-	 * must remain set at all times on the pmd until the split is complete
+-	 * for this pmd), then we flush the SMP TLB and finally we write the
+-	 * non-huge version of the pmd entry with pmd_populate.
+-	 */
+-	old_pmd = pmdp_invalidate(vma, haddr, pmd);
+-
+-	pmd_migration = is_pmd_migration_entry(old_pmd);
++	pmd_migration = is_pmd_migration_entry(*pmd);
+ 	if (unlikely(pmd_migration)) {
+ 		swp_entry_t entry;
+ 
++		old_pmd = *pmd;
+ 		entry = pmd_to_swp_entry(old_pmd);
+ 		page = pfn_swap_entry_to_page(entry);
+ 		write = is_writable_migration_entry(entry);
+@@ -2529,6 +2508,30 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
+ 	} else {
++		/*
++		 * Up to this point the pmd is present and huge and userland has
++		 * the whole access to the hugepage during the split (which
++		 * happens in place). If we overwrite the pmd with the not-huge
++		 * version pointing to the pte here (which of course we could if
++		 * all CPUs were bug free), userland could trigger a small page
++		 * size TLB miss on the small sized TLB while the hugepage TLB
++		 * entry is still established in the huge TLB. Some CPU doesn't
++		 * like that. See
++		 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
++		 * 383 on page 105. Intel should be safe but also warns that
++		 * it's only safe if the permission and cache attributes of the
++		 * two entries loaded in the two TLB is identical (which should
++		 * be the case here). But it is generally safer to never allow
++		 * small and huge TLB entries for the same virtual address to be
++		 * loaded simultaneously. So instead of doing "pmd_populate();
++		 * flush_pmd_tlb_range();" we first mark the current pmd
++		 * notpresent (atomically because here the pmd_trans_huge must
++		 * remain set at all times on the pmd until the split is
++		 * complete for this pmd), then we flush the SMP TLB and finally
++		 * we write the non-huge version of the pmd entry with
++		 * pmd_populate.
++		 */
++		old_pmd = pmdp_invalidate(vma, haddr, pmd);
+ 		page = pmd_page(old_pmd);
+ 		folio = page_folio(page);
+ 		if (pmd_dirty(old_pmd)) {
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ce7be5c244429..c445e6fd85799 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5774,8 +5774,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 		 * do_exit() will not see it, and will keep the reservation
+ 		 * forever.
+ 		 */
+-		if (adjust_reservation && vma_needs_reservation(h, vma, address))
+-			vma_add_reservation(h, vma, address);
++		if (adjust_reservation) {
++			int rc = vma_needs_reservation(h, vma, address);
++
++			if (rc < 0)
++				/* Presumably allocate_file_region_entries failed
++				 * to allocate a file_region struct. Clear
++				 * hugetlb_restore_reserve so that global reserve
++				 * count will not be incremented by free_huge_folio.
++				 * Act as if we consumed the reservation.
++				 */
++				folio_clear_hugetlb_restore_reserve(page_folio(page));
++			else if (rc)
++				vma_add_reservation(h, vma, address);
++		}
+ 
+ 		tlb_remove_page_size(tlb, page, huge_page_size(h));
+ 		/*
+@@ -7867,9 +7879,9 @@ void __init hugetlb_cma_reserve(int order)
+ 		 * huge page demotion.
+ 		 */
+ 		res = cma_declare_contiguous_nid(0, size, 0,
+-						PAGE_SIZE << HUGETLB_PAGE_ORDER,
+-						 0, false, name,
+-						 &hugetlb_cma[nid], nid);
++					PAGE_SIZE << HUGETLB_PAGE_ORDER,
++					HUGETLB_PAGE_ORDER, false, name,
++					&hugetlb_cma[nid], nid);
+ 		if (res) {
+ 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
+ 				res, nid);
+diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
+index cf2d70e9c9a5f..95f859e38c533 100644
+--- a/mm/kmsan/core.c
++++ b/mm/kmsan/core.c
+@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 				      u32 origin, bool checked)
+ {
+ 	u64 address = (u64)addr;
+-	void *shadow_start;
+-	u32 *origin_start;
++	u32 *shadow_start, *origin_start;
+ 	size_t pad = 0;
+ 
+ 	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ 	origin_start =
+ 		(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
+ 
+-	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
+-		origin_start[i] = origin;
++	/*
++	 * If the new origin is non-zero, assume that the shadow byte is also non-zero,
++	 * and unconditionally overwrite the old origin slot.
++	 * If the new origin is zero, overwrite the old origin slot iff the
++	 * corresponding shadow slot is zero.
++	 */
++	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
++		if (origin || !shadow_start[i])
++			origin_start[i] = origin;
++	}
+ }
+ 
+ struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 8c001819cf10f..ddd482c71886f 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
+ static bool ksm_smart_scan = true;
+ 
+ /* The number of zero pages which is placed by KSM */
+-unsigned long ksm_zero_pages;
++atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
+ 
+ /* The number of pages that have been skipped due to "smart scanning" */
+ static unsigned long ksm_pages_skipped;
+@@ -1428,8 +1428,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
+ 		 * the dirty bit in zero page's PTE is set.
+ 		 */
+ 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
+-		ksm_zero_pages++;
+-		mm->ksm_zero_pages++;
++		ksm_map_zero_page(mm);
+ 		/*
+ 		 * We're replacing an anonymous page with a zero page, which is
+ 		 * not anonymous. We need to do proper accounting otherwise we
+@@ -2747,18 +2746,16 @@ static void ksm_do_scan(unsigned int scan_npages)
+ {
+ 	struct ksm_rmap_item *rmap_item;
+ 	struct page *page;
+-	unsigned int npages = scan_npages;
+ 
+-	while (npages-- && likely(!freezing(current))) {
++	while (scan_npages-- && likely(!freezing(current))) {
+ 		cond_resched();
+ 		rmap_item = scan_get_next_rmap_item(&page);
+ 		if (!rmap_item)
+ 			return;
+ 		cmp_and_merge_page(page, rmap_item);
+ 		put_page(page);
++		ksm_pages_scanned++;
+ 	}
+-
+-	ksm_pages_scanned += scan_npages - npages;
+ }
+ 
+ static int ksmd_should_run(void)
+@@ -3370,7 +3367,7 @@ static void wait_while_offlining(void)
+ #ifdef CONFIG_PROC_FS
+ long ksm_process_profit(struct mm_struct *mm)
+ {
+-	return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
++	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
+ 		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -3659,7 +3656,7 @@ KSM_ATTR_RO(pages_skipped);
+ static ssize_t ksm_zero_pages_show(struct kobject *kobj,
+ 				struct kobj_attribute *attr, char *buf)
+ {
+-	return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
++	return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
+ }
+ KSM_ATTR_RO(ksm_zero_pages);
+ 
+@@ -3668,7 +3665,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
+ {
+ 	long general_profit;
+ 
+-	general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
++	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
+ 				ksm_rmap_items * sizeof(struct ksm_rmap_item);
+ 
+ 	return sysfs_emit(buf, "%ld\n", general_profit);
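
The conversion above swaps a plain counter for an atomic one because the zero-page count is bumped from concurrent contexts; ksm_map_zero_page() is presumably defined in a header hunk not shown here. A userspace C11 analogue of the pattern, as a hedged sketch:

#include <stdatomic.h>
#include <stdio.h>

/* stand-in for the kernel's atomic_long_t ksm_zero_pages */
static atomic_long zero_pages;

static void map_zero_page(void)
{
        /* One atomic add replaces the old racy plain "counter++", so
         * concurrent faulting threads cannot lose increments. */
        atomic_fetch_add_explicit(&zero_pages, 1, memory_order_relaxed);
}

int main(void)
{
        map_zero_page();
        printf("%ld\n",
               atomic_load_explicit(&zero_pages, memory_order_relaxed));
        return 0;
}
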
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9e62a00b46dde..18da1c2b08c3d 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1218,7 +1218,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
+ 		 * subpages.
+ 		 */
+ 		folio_put(folio);
+-		if (__page_handle_poison(p) >= 0) {
++		if (__page_handle_poison(p) > 0) {
+ 			page_ref_inc(p);
+ 			res = MF_RECOVERED;
+ 		} else {
+@@ -2097,7 +2097,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
+ 	 */
+ 	if (res == 0) {
+ 		folio_unlock(folio);
+-		if (__page_handle_poison(p) >= 0) {
++		if (__page_handle_poison(p) > 0) {
+ 			page_ref_inc(p);
+ 			res = MF_RECOVERED;
+ 		} else {
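
A hedged reading of the comparison change in both hunks: __page_handle_poison() appears to be tri-state, and only a strictly positive result means the huge page was both dissolved and taken off the buddy list, so only that case may be reported as recovered. As a sketch (return-value meanings assumed):

/* <0: dissolving failed; 0: dissolved but page still on the buddy
 * list; >0: dissolved and isolated. Only the last is a recovery. */
static int poison_recovered(int ret)
{
        return ret > 0;
}
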
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index 4fcd959dcc4d0..a78a4adf711ac 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -198,6 +198,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ 		     pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
+ 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ 	return old;
+@@ -208,6 +209,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
+ 			 pmd_t *pmdp)
+ {
++	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ 	return pmdp_invalidate(vma, address, pmdp);
+ }
+ #endif
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 125427cbdb87b..109272b8ee2e9 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3492,7 +3492,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ {
+ 	unsigned int nr_allocated = 0;
+ 	gfp_t alloc_gfp = gfp;
+-	bool nofail = false;
++	bool nofail = gfp & __GFP_NOFAIL;
+ 	struct page *page;
+ 	int i;
+ 
+@@ -3549,12 +3549,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
+ 		 * and compaction etc.
+ 		 */
+ 		alloc_gfp &= ~__GFP_NOFAIL;
+-		nofail = true;
+ 	}
+ 
+ 	/* High-order pages or fallback path if "bulk" fails. */
+ 	while (nr_allocated < nr_pages) {
+-		if (fatal_signal_pending(current))
++		if (!nofail && fatal_signal_pending(current))
+ 			break;
+ 
+ 		if (nid == NUMA_NO_NODE)
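
The two hunks above derive the __GFP_NOFAIL decision once at the top of the function and stop treating a pending fatal signal as a reason to abort a must-succeed allocation. A userspace analogue of the corrected control flow (names are stand-ins):

#include <stdbool.h>
#include <stdlib.h>

static void *alloc_retry(size_t size, bool nofail,
                         bool (*abort_pending)(void))
{
        void *p = NULL;

        while (!p) {
                if (!nofail && abort_pending())
                        break;            /* interruptible callers may give up */
                p = malloc(size);         /* nofail callers loop until success */
        }
        return p;
}
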
+diff --git a/net/9p/client.c b/net/9p/client.c
+index f7e90b4769bba..b05f73c291b4b 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -235,6 +235,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
+ 	if (!fc->sdata)
+ 		return -ENOMEM;
+ 	fc->capacity = alloc_msize;
++	fc->id = 0;
++	fc->tag = P9_NOTAG;
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index 781b67a525719..37c42b63ff993 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 	struct tcp_ao_key *key;
+ 	__be32 sisn, disn;
+ 	u8 *traffic_key;
++	int state;
+ 	u32 sne = 0;
+ 
+ 	info = rcu_dereference(tcp_sk(sk)->ao_info);
+@@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 		disn = 0;
+ 	}
+ 
++	state = READ_ONCE(sk->sk_state);
+ 	/* Fast-path */
+-	if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
++	if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
+ 		enum skb_drop_reason err;
+ 		struct tcp_ao_key *current_key;
+ 
+@@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 		return SKB_NOT_DROPPED_YET;
+ 	}
+ 
++	if (unlikely(state == TCP_CLOSE))
++		return SKB_DROP_REASON_TCP_CLOSE;
++
+ 	/* Lookup key based on peer address and keyid.
+ 	 * current_key and rnext_key must not be used on tcp listen
+ 	 * sockets as otherwise:
+@@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 	if (th->syn && !th->ack)
+ 		goto verify_hash;
+ 
+-	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
++	if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+ 		/* Make the initial syn the likely case here */
+ 		if (unlikely(req)) {
+ 			sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
+@@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ 			/* no way to figure out initial sisn/disn - drop */
+ 			return SKB_DROP_REASON_TCP_FLAGS;
+ 		}
+-	} else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
++	} else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ 		disn = info->lisn;
+ 		if (th->syn || th->rst)
+ 			sisn = th->seq;
+ 		else
+ 			sisn = info->risn;
+ 	} else {
+-		WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
++		WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
+ 		return SKB_DROP_REASON_TCP_AOFAILURE;
+ 	}
+ verify_hash:
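
The recurring edit in this function is a snapshot pattern: sk->sk_state can change under a lockless reader, so it is read once and every subsequent test uses the local copy, keeping all branches mutually consistent. A minimal C sketch with illustrative state values (the field name only mirrors the TCP code):

#include <stdatomic.h>

struct sock_stub { _Atomic int sk_state; };

enum { ST_ESTABLISHED = 1, ST_CLOSE = 7 };

static int classify(struct sock_stub *sk)
{
        int state = atomic_load_explicit(&sk->sk_state,
                                         memory_order_relaxed);

        if (state == ST_ESTABLISHED)
                return 0;       /* fast path */
        if (state == ST_CLOSE)
                return -1;      /* drop early, as the new hunk does */
        return state;           /* later branches see the same snapshot */
}
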
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 8d5257c3f0842..f090e7bcb784f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4446,7 +4446,7 @@ static void rtmsg_to_fib6_config(struct net *net,
+ 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+ 			 : RT6_TABLE_MAIN,
+ 		.fc_ifindex = rtmsg->rtmsg_ifindex,
+-		.fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
++		.fc_metric = rtmsg->rtmsg_metric,
+ 		.fc_expires = rtmsg->rtmsg_info,
+ 		.fc_dst_len = rtmsg->rtmsg_dst_len,
+ 		.fc_src_len = rtmsg->rtmsg_src_len,
+@@ -4476,6 +4476,9 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
+ 	rtnl_lock();
+ 	switch (cmd) {
+ 	case SIOCADDRT:
++		/* Only do the default setting of fc_metric in route adding */
++		if (cfg.fc_metric == 0)
++			cfg.fc_metric = IP6_RT_PRIO_USER;
+ 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
+ 		break;
+ 	case SIOCDELRT:
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 727aa20be4bde..7d1c0986f9bb3 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
+ 
+ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ {
+-	struct net_device *dev = xdp->rxq->dev;
+-	u32 qid = xdp->rxq->queue_index;
+-
+ 	if (!xsk_is_bound(xs))
+ 		return -ENXIO;
+ 
+-	if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
++	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ 		return -EINVAL;
+ 
+ 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index 9bfba69b2a709..171fb75267afa 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -740,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
+ 				     union snd_ump_midi1_msg *data,
+ 				     unsigned char status)
+ {
++	data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ 	data->system.status = status;
+ 	data->system.parm1 = event->data.control.value & 0x7f;
+ 	return 1;
+@@ -751,6 +752,7 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
+ 				     union snd_ump_midi1_msg *data,
+ 				     unsigned char status)
+ {
++	data->system.type = UMP_MSG_TYPE_SYSTEM; // override
+ 	data->system.status = status;
+ 	data->system.parm1 = event->data.control.value & 0x7f;
+ 	data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index fd6a68a542788..117c7ecc48563 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
+  */
+ int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
+ {
++	unsigned int type;
++
+ 	protocol &= ump->info.protocol_caps;
+ 	if (protocol == ump->info.protocol)
+ 		return 0;
+ 
++	type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
++	if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
++	    type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++		return 0;
++
+ 	ump->info.protocol = protocol;
+ 	ump_dbg(ump, "New protocol = %x (caps = %x)\n",
+ 		protocol, ump->info.protocol_caps);
+diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
+index de04799fdb69a..f67c44c83fde4 100644
+--- a/sound/core/ump_convert.c
++++ b/sound/core/ump_convert.c
+@@ -404,7 +404,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
+ 			midi2->pg.bank_msb = cc->cc_bank_msb;
+ 			midi2->pg.bank_lsb = cc->cc_bank_lsb;
+ 			cc->bank_set = 0;
+-			cc->cc_bank_msb = cc->cc_bank_lsb = 0;
+ 		}
+ 		break;
+ 	case UMP_MSG_STATUS_CHANNEL_PRESSURE:
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 5cca058421260..35941027b78c1 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -217,6 +217,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
+ 	}
+ 
+ 	process = swidget->private;
++
++	/*
++	 * For process modules without base config extension, base module config
++	 * format is used for all input pins
++	 */
++	if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
++		return &process->base_config.audio_fmt;
++
+ 	base_cfg_ext = process->base_config_ext;
+ 
+ 	/*
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 6aeae398ec289..45c755fb50eef 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1956,8 +1956,7 @@ static void record__read_lost_samples(struct record *rec)
+ 
+ 				if (count.lost) {
+ 					if (!lost) {
+-						lost = zalloc(sizeof(*lost) +
+-							      session->machines.host.id_hdr_size);
++						lost = zalloc(PERF_SAMPLE_MAX_SIZE);
+ 						if (!lost) {
+ 							pr_debug("Memory allocation failed\n");
+ 							return;
+@@ -1973,8 +1972,7 @@ static void record__read_lost_samples(struct record *rec)
+ 		lost_count = perf_bpf_filter__lost_count(evsel);
+ 		if (lost_count) {
+ 			if (!lost) {
+-				lost = zalloc(sizeof(*lost) +
+-					      session->machines.host.id_hdr_size);
++				lost = zalloc(PERF_SAMPLE_MAX_SIZE);
+ 				if (!lost) {
+ 					pr_debug("Memory allocation failed\n");
+ 					return;
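
Both hunks replace a size computed from one machine's id_hdr_size with PERF_SAMPLE_MAX_SIZE, presumably so the buffer can never be undersized for the event later written into it. A sketch, with the bound assumed to be 64 KiB as in perf's headers:

#include <stdlib.h>

#define PERF_SAMPLE_MAX_SIZE (1 << 16)  /* assumed upper bound */

/* perf's zalloc() is calloc-like; sizing to the maximum possible
 * sample sidesteps any mismatch with the computed header size. */
static void *zalloc(size_t size)
{
        return calloc(1, size);
}

int main(void)
{
        void *lost = zalloc(PERF_SAMPLE_MAX_SIZE);

        free(lost);
        return 0;
}
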
+diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+index 8269cdee33ae9..38fda42fd70f9 100644
+--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
++++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+@@ -397,7 +397,7 @@ static void test_attach_api_fails(void)
+ 	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ 	if (!ASSERT_ERR(link_fd, "link_fd"))
+ 		goto cleanup;
+-	ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
++	ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
+ 
+ cleanup:
+ 	if (link_fd >= 0)
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 533999b6c2844..326a893647bab 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -82,12 +82,13 @@ int prereq(void)
+ 	return -1;
+ }
+ 
+-int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
++int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ {
++	unsigned long nr_hugepages_ul;
+ 	int fd, ret = -1;
+ 	int compaction_index = 0;
+-	char initial_nr_hugepages[10] = {0};
+-	char nr_hugepages[10] = {0};
++	char initial_nr_hugepages[20] = {0};
++	char nr_hugepages[20] = {0};
+ 
+ 	/* We want to test with 80% of available memory. Else, OOM killer comes
+ 	   in to play */
+@@ -107,6 +108,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ 		goto close_fd;
+ 	}
+ 
++	lseek(fd, 0, SEEK_SET);
++
+ 	/* Start with the initial condition of 0 huge pages*/
+ 	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+ 		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
+@@ -134,7 +137,12 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ 
+ 	/* We should have been able to request at least 1/3 rd of the memory in
+ 	   huge pages */
+-	compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
++	nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
++	if (!nr_hugepages_ul) {
++		ksft_print_msg("ERROR: No memory is available as huge pages\n");
++		goto close_fd;
++	}
++	compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
+ 
+ 	lseek(fd, 0, SEEK_SET);
+ 
+@@ -145,11 +153,11 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ 		goto close_fd;
+ 	}
+ 
+-	ksft_print_msg("Number of huge pages allocated = %d\n",
+-		       atoi(nr_hugepages));
++	ksft_print_msg("Number of huge pages allocated = %lu\n",
++		       nr_hugepages_ul);
+ 
+ 	if (compaction_index > 3) {
+-		ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
++		ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
+ 			       "as huge pages\n", compaction_index);
+ 		goto close_fd;
+ 	}
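
Beyond the wider buffers and the extra lseek(), the key change above is atoi() -> strtoul(): atoi() cannot report failure and mangles values that overflow int, while strtoul() copes with large counters and lets the caller detect "no huge pages at all". A compilable sketch (the sample value is chosen to overflow a 32-bit int):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char nr_hugepages[20] = "4294967296";   /* > UINT_MAX */
        unsigned long n = strtoul(nr_hugepages, NULL, 10);

        if (!n) {
                fprintf(stderr, "ERROR: No memory is available as huge pages\n");
                return 1;
        }
        printf("Number of huge pages allocated = %lu\n", n);
        return 0;
}
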
+diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
+index 18a49c70d4c63..7821cf45c323b 100644
+--- a/tools/testing/selftests/mm/gup_test.c
++++ b/tools/testing/selftests/mm/gup_test.c
+@@ -1,3 +1,4 @@
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <fcntl.h>
+ #include <errno.h>
+ #include <stdio.h>
+diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
+index cc5629c3d2aa1..a70ae10b5f620 100644
+--- a/tools/testing/selftests/mm/uffd-common.h
++++ b/tools/testing/selftests/mm/uffd-common.h
+@@ -8,6 +8,7 @@
+ #define __UFFD_COMMON_H__
+ 
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <stdio.h>
+ #include <errno.h>
+ #include <unistd.h>
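
This define and the matching one in gup_test.c above select the ll64 flavour of the exported kernel types. A minimal illustration of the -Wformat mismatch they avoid, assuming an ABI where __u64 would otherwise be typed as a plain unsigned long:

#define __SANE_USERSPACE_TYPES__        /* must precede any include */
#include <linux/types.h>
#include <stdio.h>

int main(void)
{
        __u64 val = 1ULL << 40;

        /* With the define, __u64 is unsigned long long everywhere,
         * so "%llu" matches on every architecture. */
        printf("%llu\n", val);
        return 0;
}
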
+diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
+index cf7fe4d550dde..16372ca167ba8 100644
+--- a/tools/testing/selftests/net/lib.sh
++++ b/tools/testing/selftests/net/lib.sh
+@@ -10,7 +10,7 @@ BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+ # namespace list created by setup_ns
+-NS_LIST=""
++NS_LIST=()
+ 
+ ##############################################################################
+ # Helpers
+@@ -63,9 +63,7 @@ loopy_wait()
+ 	while true
+ 	do
+ 		local out
+-		out=$("$@")
+-		local ret=$?
+-		if ((!ret)); then
++		if out=$("$@"); then
+ 			echo -n "$out"
+ 			return 0
+ 		fi
+@@ -135,6 +133,7 @@ cleanup_ns()
+ 	fi
+ 
+ 	for ns in "$@"; do
++		[ -z "${ns}" ] && continue
+ 		ip netns delete "${ns}" &> /dev/null
+ 		if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
+ 			echo "Warn: Failed to remove namespace $ns"
+@@ -148,7 +147,7 @@ cleanup_ns()
+ 
+ cleanup_all_ns()
+ {
+-	cleanup_ns $NS_LIST
++	cleanup_ns "${NS_LIST[@]}"
+ }
+ 
+ # setup netns with given names as prefix. e.g
+@@ -157,7 +156,7 @@ setup_ns()
+ {
+ 	local ns=""
+ 	local ns_name=""
+-	local ns_list=""
++	local ns_list=()
+ 	local ns_exist=
+ 	for ns_name in "$@"; do
+ 		# Some test may setup/remove same netns multi times
+@@ -173,13 +172,13 @@ setup_ns()
+ 
+ 		if ! ip netns add "$ns"; then
+ 			echo "Failed to create namespace $ns_name"
+-			cleanup_ns "$ns_list"
++			cleanup_ns "${ns_list[@]}"
+ 			return $ksft_skip
+ 		fi
+ 		ip -n "$ns" link set lo up
+-		! $ns_exist && ns_list="$ns_list $ns"
++		! $ns_exist && ns_list+=("$ns")
+ 	done
+-	NS_LIST="$NS_LIST $ns_list"
++	NS_LIST+=("${ns_list[@]}")
+ }
+ 
+ tc_rule_stats_get()
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 8bd51aab6513a..5b869caed10d0 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -324,17 +324,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ 			continue;
+ 
+-		if (!params->no_irq)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].min_irq);
++		if (!params->no_irq) {
++			if (data->hist[cpu].irq_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						data->hist[cpu].min_irq);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 
+-		if (!params->no_thread)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].min_thread);
++		if (!params->no_thread) {
++			if (data->hist[cpu].thread_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						data->hist[cpu].min_thread);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 
+-		if (params->user_hist)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].min_user);
++		if (params->user_hist) {
++			if (data->hist[cpu].user_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						data->hist[cpu].min_user);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+ 
+@@ -384,17 +396,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
+ 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ 			continue;
+ 
+-		if (!params->no_irq)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].max_irq);
++		if (!params->no_irq) {
++			if (data->hist[cpu].irq_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						 data->hist[cpu].max_irq);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 
+-		if (!params->no_thread)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].max_thread);
++		if (!params->no_thread) {
++			if (data->hist[cpu].thread_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						data->hist[cpu].max_thread);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 
+-		if (params->user_hist)
+-			trace_seq_printf(trace->seq, "%9llu ",
+-					data->hist[cpu].max_user);
++		if (params->user_hist) {
++			if (data->hist[cpu].user_count)
++				trace_seq_printf(trace->seq, "%9llu ",
++						data->hist[cpu].max_user);
++			else
++				trace_seq_printf(trace->seq, "        - ");
++		}
+ 	}
+ 	trace_seq_printf(trace->seq, "\n");
+ 	trace_seq_do_printf(trace->seq);



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-06-21 14:06 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-06-21 14:06 UTC (permalink / raw
  To: gentoo-commits

commit:     87459de2c639cc4240eb8e0dee78964a850ff94a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 21 14:06:03 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 21 14:06:03 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=87459de2

Linux patch 6.9.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1005_linux-6.9.6.patch | 12175 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 12179 insertions(+)

diff --git a/0000_README b/0000_README
index bb6274e1..9c96fb59 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.9.5.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.5
 
+Patch:  1005_linux-6.9.6.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.6
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1005_linux-6.9.6.patch b/1005_linux-6.9.6.patch
new file mode 100644
index 00000000..629b9871
--- /dev/null
+++ b/1005_linux-6.9.6.patch
@@ -0,0 +1,12175 @@
+diff --git a/.editorconfig b/.editorconfig
+index 854773350cc5a..29a30ccfc07bf 100644
+--- a/.editorconfig
++++ b/.editorconfig
+@@ -5,7 +5,6 @@ root = true
+ [{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
+ charset = utf-8
+ end_of_line = lf
+-trim_trailing_whitespace = true
+ insert_final_newline = true
+ indent_style = tab
+ indent_size = 8
+@@ -13,7 +12,6 @@ indent_size = 8
+ [*.{json,py,rs}]
+ charset = utf-8
+ end_of_line = lf
+-trim_trailing_whitespace = true
+ insert_final_newline = true
+ indent_style = space
+ indent_size = 4
+@@ -26,7 +24,6 @@ indent_size = 8
+ [*.yaml]
+ charset = utf-8
+ end_of_line = lf
+-trim_trailing_whitespace = unset
+ insert_final_newline = true
+ indent_style = space
+ indent_size = 2
+diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
+index 0874fc21f66fb..6577a61cc0753 100644
+--- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
++++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
+@@ -65,6 +65,7 @@ patternProperties:
+     description: The hard wired USB devices
+     type: object
+     $ref: /schemas/usb/usb-device.yaml
++    additionalProperties: true
+ 
+ required:
+   - peer-hub
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 28e20975c26f5..3121709d99e3b 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1066,7 +1066,6 @@ L:	linux-pm@vger.kernel.org
+ S:	Supported
+ F:	Documentation/admin-guide/pm/amd-pstate.rst
+ F:	drivers/cpufreq/amd-pstate*
+-F:	include/linux/amd-pstate.h
+ F:	tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
+ 
+ AMD PTDMA DRIVER
+diff --git a/Makefile b/Makefile
+index d5062a593ef7e..8da63744745be 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index ba4c05bc24d69..8394718870e1a 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
+ void flush_cache_all(void);
+ void flush_cache_mm(struct mm_struct *mm);
+ 
+-void flush_kernel_dcache_page_addr(const void *addr);
+-
+ #define flush_kernel_dcache_range(start,size) \
+ 	flush_kernel_dcache_range_asm((start), (start)+(size));
+ 
++/* The only way to flush a vmap range is to flush whole cache */
+ #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
+ void flush_kernel_vmap_range(void *vaddr, int size);
+ void invalidate_kernel_vmap_range(void *vaddr, int size);
+ 
+-#define flush_cache_vmap(start, end)		flush_cache_all()
++void flush_cache_vmap(unsigned long start, unsigned long end);
+ #define flush_cache_vmap_early(start, end)	do { } while (0)
+-#define flush_cache_vunmap(start, end)		flush_cache_all()
++void flush_cache_vunmap(unsigned long start, unsigned long end);
+ 
+ void flush_dcache_folio(struct folio *folio);
+ #define flush_dcache_folio flush_dcache_folio
+@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ void flush_cache_range(struct vm_area_struct *vma,
+ 		unsigned long start, unsigned long end);
+ 
+-/* defined in pacache.S exported in cache.c used by flush_anon_page */
+-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+-
+ #define ARCH_HAS_FLUSH_ANON_PAGE
+ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
+ 
+ #define ARCH_HAS_FLUSH_ON_KUNMAP
+-static inline void kunmap_flush_on_unmap(const void *addr)
+-{
+-	flush_kernel_dcache_page_addr(addr);
+-}
++void kunmap_flush_on_unmap(const void *addr);
+ 
+ #endif /* _PARISC_CACHEFLUSH_H */
+ 
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 974accac05cd3..babf65751e818 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+ 	return pte;
+ }
+ 
++static inline pte_t ptep_get(pte_t *ptep)
++{
++	return READ_ONCE(*ptep);
++}
++#define ptep_get ptep_get
++
+ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+ {
+ 	pte_t pte;
+ 
+-	if (!pte_young(*ptep))
+-		return 0;
+-
+-	pte = *ptep;
++	pte = ptep_get(ptep);
+ 	if (!pte_young(pte)) {
+ 		return 0;
+ 	}
+@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
+ 	return 1;
+ }
+ 
+-struct mm_struct;
+-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+-{
+-	pte_t old_pte;
+-
+-	old_pte = *ptep;
+-	set_pte(ptep, __pte(0));
+-
+-	return old_pte;
+-}
++int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
++pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+ 
++struct mm_struct;
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ 	set_pte(ptep, pte_wrprotect(*ptep));
+@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+ 
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ #define __HAVE_ARCH_PTE_SAME
+ 
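
The new ptep_get() wraps the PTE load in READ_ONCE(), and the callers are rewritten to test that snapshot instead of dereferencing *ptep repeatedly. A freestanding sketch of the difference (the bit position is illustrative):

#include <stdint.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

typedef uint64_t pte_t;
#define _PAGE_ACCESSED (1u << 5)        /* illustrative bit */

static int test_and_clear_young(pte_t *ptep)
{
        /* One volatile snapshot: the compiler cannot legally re-read
         * *ptep between the young test and later uses while another
         * CPU updates the entry. */
        pte_t pte = READ_ONCE(*ptep);

        if (!(pte & _PAGE_ACCESSED))
                return 0;
        /* ...clear the bit on the snapshot and write it back... */
        return 1;
}
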
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 422f3e1e6d9ca..483bfafd930cd 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -20,6 +20,7 @@
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
+ #include <linux/syscalls.h>
++#include <linux/vmalloc.h>
+ #include <asm/pdc.h>
+ #include <asm/cache.h>
+ #include <asm/cacheflush.h>
+@@ -31,20 +32,31 @@
+ #include <asm/mmu_context.h>
+ #include <asm/cachectl.h>
+ 
++#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
++
++/*
++ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
++ * of page flushes done flush_cache_page_if_present. There are some
++ * pros and cons in using this option. It may increase the risk of
++ * random segmentation faults.
++ */
++#define CONFIG_FLUSH_PAGE_ACCESSED	0
++
+ int split_tlb __ro_after_init;
+ int dcache_stride __ro_after_init;
+ int icache_stride __ro_after_init;
+ EXPORT_SYMBOL(dcache_stride);
+ 
++/* Internal implementation in arch/parisc/kernel/pacache.S */
+ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+ EXPORT_SYMBOL(flush_dcache_page_asm);
+ void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+ void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+-
+-/* Internal implementation in arch/parisc/kernel/pacache.S */
+ void flush_data_cache_local(void *);  /* flushes local data-cache only */
+ void flush_instruction_cache_local(void); /* flushes local code-cache only */
+ 
++static void flush_kernel_dcache_page_addr(const void *addr);
++
+ /* On some machines (i.e., ones with the Merced bus), there can be
+  * only a single PxTLB broadcast at a time; this must be guaranteed
+  * by software. We need a spinlock around all TLB flushes to ensure
+@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ {
+ 	if (!static_branch_likely(&parisc_has_cache))
+ 		return;
++
++	/*
++	 * The TLB is the engine of coherence on parisc.  The CPU is
++	 * entitled to speculate any page with a TLB mapping, so here
++	 * we kill the mapping then flush the page along a special flush
++	 * only alias mapping. This guarantees that the page is no-longer
++	 * in the cache for any process and nor may it be speculatively
++	 * read in (until the user or kernel specifically accesses it,
++	 * of course).
++	 */
++	flush_tlb_page(vma, vmaddr);
++
+ 	preempt_disable();
+ 	flush_dcache_page_asm(physaddr, vmaddr);
+ 	if (vma->vm_flags & VM_EXEC)
+@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ 	preempt_enable();
+ }
+ 
+-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
++static void flush_kernel_dcache_page_addr(const void *addr)
+ {
+-	unsigned long flags, space, pgd, prot;
+-#ifdef CONFIG_TLB_PTLOCK
+-	unsigned long pgd_lock;
+-#endif
++	unsigned long vaddr = (unsigned long)addr;
++	unsigned long flags;
+ 
+-	vmaddr &= PAGE_MASK;
++	/* Purge TLB entry to remove translation on all CPUs */
++	purge_tlb_start(flags);
++	pdtlb(SR_KERNEL, addr);
++	purge_tlb_end(flags);
+ 
++	/* Use tmpalias flush to prevent data cache move-in */
+ 	preempt_disable();
++	flush_dcache_page_asm(__pa(vaddr), vaddr);
++	preempt_enable();
++}
+ 
+-	/* Set context for flush */
+-	local_irq_save(flags);
+-	prot = mfctl(8);
+-	space = mfsp(SR_USER);
+-	pgd = mfctl(25);
+-#ifdef CONFIG_TLB_PTLOCK
+-	pgd_lock = mfctl(28);
+-#endif
+-	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
+-	local_irq_restore(flags);
+-
+-	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+-	if (vma->vm_flags & VM_EXEC)
+-		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+-	flush_tlb_page(vma, vmaddr);
++static void flush_kernel_icache_page_addr(const void *addr)
++{
++	unsigned long vaddr = (unsigned long)addr;
++	unsigned long flags;
+ 
+-	/* Restore previous context */
+-	local_irq_save(flags);
+-#ifdef CONFIG_TLB_PTLOCK
+-	mtctl(pgd_lock, 28);
+-#endif
+-	mtctl(pgd, 25);
+-	mtsp(space, SR_USER);
+-	mtctl(prot, 8);
+-	local_irq_restore(flags);
++	/* Purge TLB entry to remove translation on all CPUs */
++	purge_tlb_start(flags);
++	pdtlb(SR_KERNEL, addr);
++	purge_tlb_end(flags);
+ 
++	/* Use tmpalias flush to prevent instruction cache move-in */
++	preempt_disable();
++	flush_icache_page_asm(__pa(vaddr), vaddr);
+ 	preempt_enable();
+ }
+ 
++void kunmap_flush_on_unmap(const void *addr)
++{
++	flush_kernel_dcache_page_addr(addr);
++}
++EXPORT_SYMBOL(kunmap_flush_on_unmap);
++
+ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ 		unsigned int nr)
+ {
+@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ 
+ 	for (;;) {
+ 		flush_kernel_dcache_page_addr(kaddr);
+-		flush_kernel_icache_page(kaddr);
++		flush_kernel_icache_page_addr(kaddr);
+ 		if (--nr == 0)
+ 			break;
+ 		kaddr += PAGE_SIZE;
+ 	}
+ }
+ 
++/*
++ * Walk page directory for MM to find PTEP pointer for address ADDR.
++ */
+ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+ {
+ 	pte_t *ptep = NULL;
+@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
+ 		== (_PAGE_PRESENT | _PAGE_ACCESSED);
+ }
+ 
++/*
++ * Return user physical address. Returns 0 if page is not present.
++ */
++static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
++{
++	unsigned long flags, space, pgd, prot, pa;
++#ifdef CONFIG_TLB_PTLOCK
++	unsigned long pgd_lock;
++#endif
++
++	/* Save context */
++	local_irq_save(flags);
++	prot = mfctl(8);
++	space = mfsp(SR_USER);
++	pgd = mfctl(25);
++#ifdef CONFIG_TLB_PTLOCK
++	pgd_lock = mfctl(28);
++#endif
++
++	/* Set context for lpa_user */
++	switch_mm_irqs_off(NULL, mm, NULL);
++	pa = lpa_user(addr);
++
++	/* Restore previous context */
++#ifdef CONFIG_TLB_PTLOCK
++	mtctl(pgd_lock, 28);
++#endif
++	mtctl(pgd, 25);
++	mtsp(space, SR_USER);
++	mtctl(prot, 8);
++	local_irq_restore(flags);
++
++	return pa;
++}
++
+ void flush_dcache_folio(struct folio *folio)
+ {
+ 	struct address_space *mapping = folio_flush_mapping(folio);
+@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
+ 		if (addr + nr * PAGE_SIZE > vma->vm_end)
+ 			nr = (vma->vm_end - addr) / PAGE_SIZE;
+ 
+-		if (parisc_requires_coherency()) {
+-			for (i = 0; i < nr; i++) {
+-				pte_t *ptep = get_ptep(vma->vm_mm,
+-							addr + i * PAGE_SIZE);
+-				if (!ptep)
+-					continue;
+-				if (pte_needs_flush(*ptep))
+-					flush_user_cache_page(vma,
+-							addr + i * PAGE_SIZE);
+-				/* Optimise accesses to the same table? */
+-				pte_unmap(ptep);
+-			}
+-		} else {
++		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
++					!= (addr & (SHM_COLOUR - 1))) {
++			for (i = 0; i < nr; i++)
++				__flush_cache_page(vma,
++					addr + i * PAGE_SIZE,
++					(pfn + i) * PAGE_SIZE);
+ 			/*
+-			 * The TLB is the engine of coherence on parisc:
+-			 * The CPU is entitled to speculate any page
+-			 * with a TLB mapping, so here we kill the
+-			 * mapping then flush the page along a special
+-			 * flush only alias mapping. This guarantees that
+-			 * the page is no-longer in the cache for any
+-			 * process and nor may it be speculatively read
+-			 * in (until the user or kernel specifically
+-			 * accesses it, of course)
++			 * Software is allowed to have any number
++			 * of private mappings to a page.
+ 			 */
+-			for (i = 0; i < nr; i++)
+-				flush_tlb_page(vma, addr + i * PAGE_SIZE);
+-			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+-					!= (addr & (SHM_COLOUR - 1))) {
+-				for (i = 0; i < nr; i++)
+-					__flush_cache_page(vma,
+-						addr + i * PAGE_SIZE,
+-						(pfn + i) * PAGE_SIZE);
+-				/*
+-				 * Software is allowed to have any number
+-				 * of private mappings to a page.
+-				 */
+-				if (!(vma->vm_flags & VM_SHARED))
+-					continue;
+-				if (old_addr)
+-					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+-						old_addr, addr, vma->vm_file);
+-				if (nr == folio_nr_pages(folio))
+-					old_addr = addr;
+-			}
++			if (!(vma->vm_flags & VM_SHARED))
++				continue;
++			if (old_addr)
++				pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
++					old_addr, addr, vma->vm_file);
++			if (nr == folio_nr_pages(folio))
++				old_addr = addr;
+ 		}
+ 		WARN_ON(++count == 4096);
+ 	}
+@@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
+ extern void clear_user_page_asm(void *, unsigned long);
+ extern void copy_user_page_asm(void *, void *, unsigned long);
+ 
+-void flush_kernel_dcache_page_addr(const void *addr)
+-{
+-	unsigned long flags;
+-
+-	flush_kernel_dcache_page_asm(addr);
+-	purge_tlb_start(flags);
+-	pdtlb(SR_KERNEL, addr);
+-	purge_tlb_end(flags);
+-}
+-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+-
+ static void flush_cache_page_if_present(struct vm_area_struct *vma,
+-	unsigned long vmaddr, unsigned long pfn)
++	unsigned long vmaddr)
+ {
++#if CONFIG_FLUSH_PAGE_ACCESSED
+ 	bool needs_flush = false;
+-	pte_t *ptep;
++	pte_t *ptep, pte;
+ 
+-	/*
+-	 * The pte check is racy and sometimes the flush will trigger
+-	 * a non-access TLB miss. Hopefully, the page has already been
+-	 * flushed.
+-	 */
+ 	ptep = get_ptep(vma->vm_mm, vmaddr);
+ 	if (ptep) {
+-		needs_flush = pte_needs_flush(*ptep);
++		pte = ptep_get(ptep);
++		needs_flush = pte_needs_flush(pte);
+ 		pte_unmap(ptep);
+ 	}
+ 	if (needs_flush)
+-		flush_cache_page(vma, vmaddr, pfn);
++		__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
++#else
++	struct mm_struct *mm = vma->vm_mm;
++	unsigned long physaddr = get_upa(mm, vmaddr);
++
++	if (physaddr)
++		__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
++#endif
+ }
+ 
+ void copy_user_highpage(struct page *to, struct page *from,
+@@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
+ 
+ 	kfrom = kmap_local_page(from);
+ 	kto = kmap_local_page(to);
+-	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
++	__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
+ 	copy_page_asm(kto, kfrom);
+ 	kunmap_local(kto);
+ 	kunmap_local(kfrom);
+@@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
+ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ 		unsigned long user_vaddr, void *dst, void *src, int len)
+ {
+-	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
++	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
+ 	memcpy(dst, src, len);
+-	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
++	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
+ }
+ 
+ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ 		unsigned long user_vaddr, void *dst, void *src, int len)
+ {
+-	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
++	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
+ 	memcpy(dst, src, len);
++	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
+ }
+ 
+ /* __flush_tlb_range()
+@@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
+ 
+ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
+-	unsigned long addr, pfn;
+-	pte_t *ptep;
+-
+-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+-		bool needs_flush = false;
+-		/*
+-		 * The vma can contain pages that aren't present. Although
+-		 * the pte search is expensive, we need the pte to find the
+-		 * page pfn and to check whether the page should be flushed.
+-		 */
+-		ptep = get_ptep(vma->vm_mm, addr);
+-		if (ptep) {
+-			needs_flush = pte_needs_flush(*ptep);
+-			pfn = pte_pfn(*ptep);
+-			pte_unmap(ptep);
+-		}
+-		if (needs_flush) {
+-			if (parisc_requires_coherency()) {
+-				flush_user_cache_page(vma, addr);
+-			} else {
+-				if (WARN_ON(!pfn_valid(pfn)))
+-					return;
+-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+-			}
+-		}
+-	}
++	unsigned long addr;
++
++	for (addr = start; addr < end; addr += PAGE_SIZE)
++		flush_cache_page_if_present(vma, addr);
+ }
+ 
+ static inline unsigned long mm_total_size(struct mm_struct *mm)
+@@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
+ 		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+ 			return;
+ 		flush_tlb_range(vma, start, end);
+-		flush_cache_all();
++		if (vma->vm_flags & VM_EXEC)
++			flush_cache_all();
++		else
++			flush_data_cache();
+ 		return;
+ 	}
+ 
+-	flush_cache_pages(vma, start, end);
++	flush_cache_pages(vma, start & PAGE_MASK, end);
+ }
+ 
+ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+ {
+-	if (WARN_ON(!pfn_valid(pfn)))
+-		return;
+-	if (parisc_requires_coherency())
+-		flush_user_cache_page(vma, vmaddr);
+-	else
+-		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
++	__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ 
+ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+@@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
+ 	if (!PageAnon(page))
+ 		return;
+ 
+-	if (parisc_requires_coherency()) {
+-		if (vma->vm_flags & VM_SHARED)
+-			flush_data_cache();
+-		else
+-			flush_user_cache_page(vma, vmaddr);
++	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
++}
++
++int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
++			   pte_t *ptep)
++{
++	pte_t pte = ptep_get(ptep);
++
++	if (!pte_young(pte))
++		return 0;
++	set_pte(ptep, pte_mkold(pte));
++#if CONFIG_FLUSH_PAGE_ACCESSED
++	__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
++#endif
++	return 1;
++}
++
++/*
++ * After a PTE is cleared, we have no way to flush the cache for
++ * the physical page. On PA8800 and PA8900 processors, these lines
++ * can cause random cache corruption. Thus, we must flush the cache
++ * as well as the TLB when clearing a PTE that's valid.
++ */
++pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
++		       pte_t *ptep)
++{
++	struct mm_struct *mm = (vma)->vm_mm;
++	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
++	unsigned long pfn = pte_pfn(pte);
++
++	if (pfn_valid(pfn))
++		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
++	else if (pte_accessible(mm, pte))
++		flush_tlb_page(vma, addr);
++
++	return pte;
++}
++
++/*
++ * The physical address for pages in the ioremap case can be obtained
++ * from the vm_struct struct. I wasn't able to successfully handle the
++ * vmalloc and vmap cases. We have an array of struct page pointers in
++ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
++ */
++void flush_cache_vmap(unsigned long start, unsigned long end)
++{
++	unsigned long addr, physaddr;
++	struct vm_struct *vm;
++
++	/* Prevent cache move-in */
++	flush_tlb_kernel_range(start, end);
++
++	if (end - start >= parisc_cache_flush_threshold) {
++		flush_cache_all();
+ 		return;
+ 	}
+ 
+-	flush_tlb_page(vma, vmaddr);
+-	preempt_disable();
+-	flush_dcache_page_asm(page_to_phys(page), vmaddr);
+-	preempt_enable();
++	if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
++		flush_cache_all();
++		return;
++	}
++
++	vm = find_vm_area((void *)start);
++	if (WARN_ON_ONCE(!vm)) {
++		flush_cache_all();
++		return;
++	}
++
++	/* The physical addresses of IOREMAP regions are contiguous */
++	if (vm->flags & VM_IOREMAP) {
++		physaddr = vm->phys_addr;
++		for (addr = start; addr < end; addr += PAGE_SIZE) {
++			preempt_disable();
++			flush_dcache_page_asm(physaddr, start);
++			flush_icache_page_asm(physaddr, start);
++			preempt_enable();
++			physaddr += PAGE_SIZE;
++		}
++		return;
++	}
++
++	flush_cache_all();
+ }
++EXPORT_SYMBOL(flush_cache_vmap);
+ 
++/*
++ * The vm_struct has been retired and the page table is set up. The
++ * last page in the range is a guard page. Its physical address can't
++ * be determined using lpa, so there is no way to flush the range
++ * using flush_dcache_page_asm.
++ */
++void flush_cache_vunmap(unsigned long start, unsigned long end)
++{
++	/* Prevent cache move-in */
++	flush_tlb_kernel_range(start, end);
++	flush_data_cache();
++}
++EXPORT_SYMBOL(flush_cache_vunmap);
++
++/*
++ * On systems with PA8800/PA8900 processors, there is no way to flush
++ * a vmap range other than using the architected loop to flush the
++ * entire cache. The page directory is not set up, so we can't use
++ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
++ * L2 is physically indexed but FDCE/FICE instructions in virtual
++ * mode output their virtual address on the core bus, not their
++ * real address. As a result, the L2 cache index formed from the
++ * virtual address will most likely not be the same as the L2 index
++ * formed from the real address.
++ */
+ void flush_kernel_vmap_range(void *vaddr, int size)
+ {
+ 	unsigned long start = (unsigned long)vaddr;
+ 	unsigned long end = start + size;
+ 
+-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+-	    (unsigned long)size >= parisc_cache_flush_threshold) {
+-		flush_tlb_kernel_range(start, end);
+-		flush_data_cache();
++	flush_tlb_kernel_range(start, end);
++
++	if (!static_branch_likely(&parisc_has_dcache))
++		return;
++
++	/* If interrupts are disabled, we can only do local flush */
++	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
++		flush_data_cache_local(NULL);
+ 		return;
+ 	}
+ 
+-	flush_kernel_dcache_range_asm(start, end);
+-	flush_tlb_kernel_range(start, end);
++	flush_data_cache();
+ }
+ EXPORT_SYMBOL(flush_kernel_vmap_range);
+ 
+@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
+ 	/* Ensure DMA is complete */
+ 	asm_syncdma();
+ 
+-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+-	    (unsigned long)size >= parisc_cache_flush_threshold) {
+-		flush_tlb_kernel_range(start, end);
+-		flush_data_cache();
++	flush_tlb_kernel_range(start, end);
++
++	if (!static_branch_likely(&parisc_has_dcache))
++		return;
++
++	/* If interrupts are disabled, we can only do local flush */
++	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
++		flush_data_cache_local(NULL);
+ 		return;
+ 	}
+ 
+-	purge_kernel_dcache_range_asm(start, end);
+-	flush_tlb_kernel_range(start, end);
++	flush_data_cache();
+ }
+ EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+ 
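
Across this rework the same two-step ordering appears everywhere: purge the TLB translation first, then flush through the tmpalias mapping. A stub-based sketch of that ordering (the helpers below are stand-ins, not the real pacache.S entry points):

#include <stdio.h>

static void purge_tlb_entry(const void *addr)
{
        printf("pdtlb %p\n", addr);     /* 1: no translation, no move-in */
}

static void flush_dcache_via_tmpalias(const void *addr)
{
        printf("fdc   %p\n", addr);     /* 2: flush the stale lines */
}

static void flush_kernel_page(const void *addr)
{
        /* Reversing these two steps reintroduces the speculative
         * move-in race the comments above describe. */
        purge_tlb_entry(addr);
        flush_dcache_via_tmpalias(addr);
}

int main(void)
{
        char page[4096];

        flush_kernel_page(page);
        return 0;
}
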
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index de10437fd2065..4cba724c88991 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -92,9 +92,25 @@ __pu_failed:							\
+ 		: label)
+ #endif
+ 
++#ifdef CONFIG_CC_IS_CLANG
++#define DS_FORM_CONSTRAINT "Z<>"
++#else
++#define DS_FORM_CONSTRAINT "YZ<>"
++#endif
++
+ #ifdef __powerpc64__
++#ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __put_user_asm2_goto(x, ptr, label)			\
+ 	__put_user_asm_goto(x, ptr, label, "std")
++#else
++#define __put_user_asm2_goto(x, addr, label)			\
++	asm goto ("1: std%U1%X1 %0,%1	# put_user\n"		\
++		EX_TABLE(1b, %l2)				\
++		:						\
++		: "r" (x), DS_FORM_CONSTRAINT (*addr)		\
++		:						\
++		: label)
++#endif // CONFIG_PPC_KERNEL_PREFIXED
+ #else /* __powerpc64__ */
+ #define __put_user_asm2_goto(x, addr, label)			\
+ 	asm goto(					\
+diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
+index 40aa582068883..e52b848b64b79 100644
+--- a/arch/powerpc/platforms/85xx/smp.c
++++ b/arch/powerpc/platforms/85xx/smp.c
+@@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+ 	hard_irq_disable();
+ 	mpic_teardown_this_cpu(secondary);
+ 
++#ifdef CONFIG_CRASH_DUMP
+ 	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
+ 		/*
+ 		 * We enter the crash kernel on whatever cpu crashed,
+@@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+ 		 */
+ 		disable_threadbit = 1;
+ 		disable_cpu = cpu_first_thread_sibling(cpu);
+-	} else if (sibling != crashing_cpu &&
+-		   cpu_thread_in_core(cpu) == 0 &&
+-		   cpu_thread_in_core(sibling) != 0) {
++	} else if (sibling == crashing_cpu) {
++		return;
++	}
++#endif
++	if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
+ 		disable_threadbit = 2;
+ 		disable_cpu = sibling;
+ 	}
+diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
+index 0eb689351b7d0..5cd407c6a8e4f 100644
+--- a/arch/riscv/kvm/aia_device.c
++++ b/arch/riscv/kvm/aia_device.c
+@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
+ 
+ static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
+ {
+-	u32 hart, group = 0;
++	u32 hart = 0, group = 0;
+ 
+-	hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+-		GENMASK_ULL(aia->nr_hart_bits - 1, 0);
++	if (aia->nr_hart_bits)
++		hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
++		       GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ 	if (aia->nr_group_bits)
+ 		group = (addr >> aia->nr_group_shift) &
+ 			GENMASK_ULL(aia->nr_group_bits - 1, 0);
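
The guard added above matters because GENMASK_ULL(nr_hart_bits - 1, 0) with nr_hart_bits == 0 degenerates into an out-of-range shift, which is undefined behaviour in C. A compilable demonstration of the hazard and the guard (the macro approximates the kernel's):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

int main(void)
{
        unsigned int nr_hart_bits = 0;  /* single-hart configuration */
        uint64_t addr = 0x12345000, hart = 0;

        /* Unguarded, (nr_hart_bits - 1) wraps to UINT_MAX and the
         * shift count exceeds 63; the guard skips the extraction. */
        if (nr_hart_bits)
                hart = addr & GENMASK_ULL(nr_hart_bits - 1, 0);

        printf("hart index %llu\n", (unsigned long long)hart);
        return 0;
}
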
+diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
+index 994adc26db4b1..e5706f5f2c71a 100644
+--- a/arch/riscv/kvm/vcpu_onereg.c
++++ b/arch/riscv/kvm/vcpu_onereg.c
+@@ -718,9 +718,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+ 	switch (reg_subtype) {
+ 	case KVM_REG_RISCV_ISA_SINGLE:
+ 		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
+-	case KVM_REG_RISCV_SBI_MULTI_EN:
++	case KVM_REG_RISCV_ISA_MULTI_EN:
+ 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
+-	case KVM_REG_RISCV_SBI_MULTI_DIS:
++	case KVM_REG_RISCV_ISA_MULTI_DIS:
+ 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
+ 	default:
+ 		return -ENOENT;
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 9687618432031..46b4ad418f6bc 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -235,18 +235,19 @@ static void __init setup_bootmem(void)
+ 		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
+ 
+ 	/*
+-	 * memblock allocator is not aware of the fact that last 4K bytes of
+-	 * the addressable memory can not be mapped because of IS_ERR_VALUE
+-	 * macro. Make sure that last 4k bytes are not usable by memblock
+-	 * if end of dram is equal to maximum addressable memory.  For 64-bit
+-	 * kernel, this problem can't happen here as the end of the virtual
+-	 * address space is occupied by the kernel mapping then this check must
+-	 * be done as soon as the kernel mapping base address is determined.
++	 * Reserve physical address space that would be mapped to virtual
++	 * addresses greater than (void *)(-PAGE_SIZE) because:
++	 *  - This memory would overlap with ERR_PTR
++	 *  - This memory belongs to high memory, which is not supported
++	 *
++	 * This is not applicable to 64-bit kernel, because virtual addresses
++	 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
++	 * occupied by kernel mapping. Also it is unrealistic for high memory
++	 * to exist on 64-bit platforms.
+ 	 */
+ 	if (!IS_ENABLED(CONFIG_64BIT)) {
+-		max_mapped_addr = __pa(~(ulong)0);
+-		if (max_mapped_addr == (phys_ram_end - 1))
+-			memblock_set_current_limit(max_mapped_addr - 4096);
++		max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
++		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
+ 	}
+ 
+ 	min_low_pfn = PFN_UP(phys_ram_base);
+@@ -668,6 +669,9 @@ void __init create_pgd_mapping(pgd_t *pgdp,
+ static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+ 				      phys_addr_t size)
+ {
++	if (debug_pagealloc_enabled())
++		return PAGE_SIZE;
++
+ 	if (pgtable_l5_enabled &&
+ 	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+ 		return P4D_SIZE;
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 410056a50aa9f..271d01a5ba4da 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page)
+ }
+ 
+ #ifdef CONFIG_DEBUG_PAGEALLOC
++static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
++{
++	int enable = *(int *)data;
++
++	unsigned long val = pte_val(ptep_get(pte));
++
++	if (enable)
++		val |= _PAGE_PRESENT;
++	else
++		val &= ~_PAGE_PRESENT;
++
++	set_pte(pte, __pte(val));
++
++	return 0;
++}
++
+ void __kernel_map_pages(struct page *page, int numpages, int enable)
+ {
+ 	if (!debug_pagealloc_enabled())
+ 		return;
+ 
+-	if (enable)
+-		__set_memory((unsigned long)page_address(page), numpages,
+-			     __pgprot(_PAGE_PRESENT), __pgprot(0));
+-	else
+-		__set_memory((unsigned long)page_address(page), numpages,
+-			     __pgprot(0), __pgprot(_PAGE_PRESENT));
++	unsigned long start = (unsigned long)page_address(page);
++	unsigned long size = PAGE_SIZE * numpages;
++
++	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
++
++	flush_tlb_kernel_range(start, start + size);
+ }
+ #endif
+ 
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index e9522c6893bee..8da3466775de8 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -116,9 +116,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
+ 
+ vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
+ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
+-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
++vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+ 
+-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
++$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
+ 	$(call if_changed,ld)
+ 
+ OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
+diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
+index c4ea5258ab558..9049f390d8347 100644
+--- a/arch/x86/boot/main.c
++++ b/arch/x86/boot/main.c
+@@ -119,8 +119,8 @@ static void init_heap(void)
+ 	char *stack_end;
+ 
+ 	if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
+-		asm("leal %P1(%%esp),%0"
+-		    : "=r" (stack_end) : "i" (-STACK_SIZE));
++		asm("leal %n1(%%esp),%0"
++		    : "=r" (stack_end) : "i" (STACK_SIZE));
+ 
+ 		heap_end = (char *)
+ 			((size_t)boot_params.hdr.heap_end_ptr + 0x200);
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 67b68d0d17d1e..0cb2396de066d 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -294,10 +294,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
+  * Otherwise, if CPU has feature1, newinstr1 is used.
+  * Otherwise, oldinstr is used.
+  */
+-#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2,	     \
+-			   ft_flags2, input...)				     \
+-	asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1,     \
+-		newinstr2, ft_flags2)					     \
++#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2,	\
++			   ft_flags2, input...)				\
++	asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
++		newinstr2, ft_flags2)					\
+ 		: : "i" (0), ## input)
+ 
+ /* Like alternative_input, but with a single output argument */
+@@ -307,7 +307,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ 
+ /* Like alternative_io, but for replacing a direct call with another one. */
+ #define alternative_call(oldfunc, newfunc, ft_flags, output, input...)	\
+-	asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
++	asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
+ 		: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+ 
+ /*
+@@ -316,12 +316,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
+  * Otherwise, if CPU has feature1, function1 is used.
+  * Otherwise, old function is used.
+  */
+-#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2,   \
+-			   output, input...)				      \
+-	asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
+-		"call %P[new2]", ft_flags2)				      \
+-		: output, ASM_CALL_CONSTRAINT				      \
+-		: [old] "i" (oldfunc), [new1] "i" (newfunc1),		      \
++#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
++			   output, input...)				\
++	asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
++		"call %c[new2]", ft_flags2)				\
++		: output, ASM_CALL_CONSTRAINT				\
++		: [old] "i" (oldfunc), [new1] "i" (newfunc1),		\
+ 		  [new2] "i" (newfunc2), ## input)
+ 
+ /*
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 3486d91b8595f..d510405e4e1de 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -24,7 +24,7 @@ typedef struct {
+ 
+ #ifdef CONFIG_X86_CMPXCHG64
+ #define __alternative_atomic64(f, g, out, in...) \
+-	asm volatile("call %P[func]" \
++	asm volatile("call %c[func]" \
+ 		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
+ 
+ #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 686e92d2663ee..3508f3fc928d4 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -173,7 +173,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
+ static __always_inline bool _static_cpu_has(u16 bit)
+ {
+ 	asm goto(
+-		ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
++		ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
+ 		".pushsection .altinstr_aux,\"ax\"\n"
+ 		"6:\n"
+ 		" testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
+diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
+index 798183867d789..b71ad173f8776 100644
+--- a/arch/x86/include/asm/irq_stack.h
++++ b/arch/x86/include/asm/irq_stack.h
+@@ -100,7 +100,7 @@
+ }
+ 
+ #define ASM_CALL_ARG0							\
+-	"call %P[__func]				\n"		\
++	"call %c[__func]				\n"		\
+ 	ASM_REACHABLE
+ 
+ #define ASM_CALL_ARG1							\
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 237dc8cdd12b9..3a7755c1a4410 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
+ 	int __ret_gu;							\
+ 	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
+ 	__chk_user_ptr(ptr);						\
+-	asm volatile("call __" #fn "_%P4"				\
++	asm volatile("call __" #fn "_%c[size]"				\
+ 		     : "=a" (__ret_gu), "=r" (__val_gu),		\
+ 			ASM_CALL_CONSTRAINT				\
+-		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
++		     : "0" (ptr), [size] "i" (sizeof(*(ptr))));		\
+ 	instrument_get_user(__val_gu);					\
+ 	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
+ 	__builtin_expect(__ret_gu, 0);					\
+@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
+ 	__chk_user_ptr(__ptr);						\
+ 	__ptr_pu = __ptr;						\
+ 	__val_pu = __x;							\
+-	asm volatile("call __" #fn "_%P[size]"				\
++	asm volatile("call __" #fn "_%c[size]"				\
+ 		     : "=c" (__ret_pu),					\
+ 			ASM_CALL_CONSTRAINT				\
+ 		     : "0" (__ptr_pu),					\
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 3cf156f708591..027a8c7a2c9e6 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -215,7 +215,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
+ 
+ int amd_smn_read(u16 node, u32 address, u32 *value)
+ {
+-	return __amd_smn_rw(node, address, value, false);
++	int err = __amd_smn_rw(node, address, value, false);
++
++	if (PCI_POSSIBLE_ERROR(*value)) {
++		err = -ENODEV;
++		*value = 0;
++	}
++
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(amd_smn_read);
+ 
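
The added check follows the PCI convention that a read returning all-ones signals a bus error. A runnable userspace sketch of the same hardening, with PCI_POSSIBLE_ERROR() reduced to the all-ones comparison it performs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* a PCI bus error is returned to the reader as all-ones */
#define PCI_POSSIBLE_ERROR(val)	((val) == ((__typeof__(val))~0))

static int smn_read(uint32_t *value)
{
	*value = 0xffffffff;	/* simulate a failed register read */

	if (PCI_POSSIBLE_ERROR(*value)) {
		*value = 0;	/* don't hand the error pattern back as data */
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	uint32_t v;

	printf("err=%d value=0x%x\n", smn_read(&v), v);
	return 0;
}
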
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ae987a26f26e4..1982007828276 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1053,18 +1053,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+ {
+ 	u32 eax, ebx, ecx, edx;
+-	bool vp_bits_from_cpuid = true;
+ 
+ 	if (!cpu_has(c, X86_FEATURE_CPUID) ||
+-	    (c->extended_cpuid_level < 0x80000008))
+-		vp_bits_from_cpuid = false;
+-
+-	if (vp_bits_from_cpuid) {
+-		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+-
+-		c->x86_virt_bits = (eax >> 8) & 0xff;
+-		c->x86_phys_bits = eax & 0xff;
+-	} else {
++	    (c->extended_cpuid_level < 0x80000008)) {
+ 		if (IS_ENABLED(CONFIG_X86_64)) {
+ 			c->x86_clflush_size = 64;
+ 			c->x86_phys_bits = 36;
+@@ -1078,7 +1069,17 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+ 			    cpu_has(c, X86_FEATURE_PSE36))
+ 				c->x86_phys_bits = 36;
+ 		}
++	} else {
++		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
++
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++
++		/* Provide a sane default if not enumerated: */
++		if (!c->x86_clflush_size)
++			c->x86_clflush_size = 32;
+ 	}
++
+ 	c->x86_cache_bits = c->x86_phys_bits;
+ 	c->x86_cache_alignment = c->x86_clflush_size;
+ }
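
The reshuffled function decodes CPUID leaf 0x80000008, which reports the physical address width in EAX[7:0] and the virtual width in EAX[15:8]. The same leaf can be probed from userspace (GCC on x86):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 when the leaf is unsupported, the
	 * same condition the kernel's fallback branch handles */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		puts("leaf 0x80000008 not available");
		return 1;
	}
	printf("phys bits: %u, virt bits: %u\n",
	       eax & 0xff, (eax >> 8) & 0xff);
	return 0;
}
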
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index b180d8e497c31..cc0f7f70b17ba 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image)
+ void machine_kexec(struct kimage *image)
+ {
+ 	unsigned long page_list[PAGES_NR];
+-	void *control_page;
++	unsigned int host_mem_enc_active;
+ 	int save_ftrace_enabled;
++	void *control_page;
++
++	/*
++	 * This must be done before load_segments() since if call depth tracking
++	 * is used then GS must be valid to make any function calls.
++	 */
++	host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
+ 
+ #ifdef CONFIG_KEXEC_JUMP
+ 	if (image->preserve_context)
+@@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image)
+ 				       (unsigned long)page_list,
+ 				       image->start,
+ 				       image->preserve_context,
+-				       cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
++				       host_mem_enc_active);
+ 
+ #ifdef CONFIG_KEXEC_JUMP
+ 	if (image->preserve_context)
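
The reordering is a snapshot-before-teardown pattern: cc_platform_has() makes function calls, so it must run while GS is still valid, and only the cached result may be used after load_segments(). A runnable sketch with stubs standing in for both (names illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool segments_valid = true;

static bool query_platform(void)	/* stand-in for cc_platform_has() */
{
	if (!segments_valid)
		fprintf(stderr, "BUG: function call with invalid GS\n");
	return false;
}

static void load_segments(void)	/* invalidates segment state */
{
	segments_valid = false;
}

int main(void)
{
	bool enc = query_platform();	/* snapshot first ... */

	load_segments();		/* ... then tear down ... */
	printf("host_mem_enc_active=%d\n", enc);	/* ... use the cache */
	return 0;
}
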
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 759581bb2128d..4471b4e08d23d 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -666,6 +666,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ 	  return ret;
+ 
+ 	vcpu->arch.guest_state_protected = true;
++
++	/*
++	 * An SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable
++	 * it only after setting guest_state_protected because KVM_SET_MSRS allows
++	 * dynamic toggling of LBRV (for performance reasons) on write access to
++	 * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
++	 */
++	svm_enable_lbrv(vcpu);
+ 	return 0;
+ }
+ 
+@@ -2269,6 +2277,12 @@ void __init sev_hardware_setup(void)
+ 	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
+ 		goto out;
+ 
++	if (!lbrv) {
++		WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
++			  "LBRV must be present for SEV-ES support");
++		goto out;
++	}
++
+ 	/* Has the system been allocated ASIDs for SEV-ES? */
+ 	if (min_sev_asid == 1)
+ 		goto out;
+@@ -3034,7 +3048,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	struct kvm_vcpu *vcpu = &svm->vcpu;
+ 
+ 	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
+-	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+ 
+ 	/*
+ 	 * An SEV-ES guest requires a VMSA area that is a separate from the
+@@ -3086,10 +3099,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
+ 	/* Clear intercepts on selected MSRs */
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
+-	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+-	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+-	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+-	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ }
+ 
+ void sev_init_vmcb(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 308416b50b036..4650153afa465 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
+ 	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
+ 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
+ 	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
++	{ .index = MSR_IA32_DEBUGCTLMSR,		.always = false },
+ 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
+ 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
+ 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
+@@ -215,7 +216,7 @@ int vgif = true;
+ module_param(vgif, int, 0444);
+ 
+ /* enable/disable LBR virtualization */
+-static int lbrv = true;
++int lbrv = true;
+ module_param(lbrv, int, 0444);
+ 
+ static int tsc_scaling = true;
+@@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
+ 	vmcb_mark_dirty(to_vmcb, VMCB_LBR);
+ }
+ 
+-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
++void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+@@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ 
++	if (sev_es_guest(vcpu->kvm))
++		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
++
+ 	/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
+ 	if (is_guest_mode(vcpu))
+ 		svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
+@@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
++	KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
++
+ 	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+ 	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+@@ -5260,6 +5266,12 @@ static __init int svm_hardware_setup(void)
+ 
+ 	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+ 
++	if (lbrv) {
++		if (!boot_cpu_has(X86_FEATURE_LBRV))
++			lbrv = false;
++		else
++			pr_info("LBR virtualization supported\n");
++	}
+ 	/*
+ 	 * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
+ 	 * may be modified by svm_adjust_mmio_mask()), as well as nrips.
+@@ -5313,14 +5325,6 @@ static __init int svm_hardware_setup(void)
+ 		svm_x86_ops.set_vnmi_pending = NULL;
+ 	}
+ 
+-
+-	if (lbrv) {
+-		if (!boot_cpu_has(X86_FEATURE_LBRV))
+-			lbrv = false;
+-		else
+-			pr_info("LBR virtualization supported\n");
+-	}
+-
+ 	if (!enable_pmu)
+ 		pr_info("PMU virtualization is disabled\n");
+ 
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 33878efdebc82..2ed3015e03f13 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -30,7 +30,7 @@
+ #define	IOPM_SIZE PAGE_SIZE * 3
+ #define	MSRPM_SIZE PAGE_SIZE * 2
+ 
+-#define MAX_DIRECT_ACCESS_MSRS	47
++#define MAX_DIRECT_ACCESS_MSRS	48
+ #define MSRPM_OFFSETS	32
+ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+ extern bool npt_enabled;
+@@ -39,6 +39,7 @@ extern int vgif;
+ extern bool intercept_smi;
+ extern bool x2avic_enabled;
+ extern bool vnmi;
++extern int lbrv;
+ 
+ /*
+  * Clean bits in VMCB.
+@@ -543,6 +544,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
+ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
+ void svm_vcpu_free_msrpm(u32 *msrpm);
+ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
++void svm_enable_lbrv(struct kvm_vcpu *vcpu);
+ void svm_update_lbrv(struct kvm_vcpu *vcpu);
+ 
+ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index 10d5ed8b5990f..a1cb3a4e6742d 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -44,7 +44,11 @@
+ 	or %rdx, %rax
+ .else
+ 	cmp $TASK_SIZE_MAX-\size+1, %eax
++.if \size != 8
+ 	jae .Lbad_get_user
++.else
++	jae .Lbad_get_user_8
++.endif
+ 	sbb %edx, %edx		/* array_index_mask_nospec() */
+ 	and %edx, %eax
+ .endif
+@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
+ #ifdef CONFIG_X86_32
+ SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
+ 	ASM_CLAC
+-bad_get_user_8:
++.Lbad_get_user_8:
+ 	xor %edx,%edx
+ 	xor %ecx,%ecx
+ 	mov $(-EFAULT),%_ASM_AX
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index ce84ba86e69e9..6ce10e3c62285 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
+ 	for_each_reserved_mem_region(mb_region) {
+ 		int nid = memblock_get_region_node(mb_region);
+ 
+-		if (nid != MAX_NUMNODES)
++		if (nid != NUMA_NO_NODE)
+ 			node_set(nid, reserved_nodemask);
+ 	}
+ 
+@@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void))
+ 	nodes_clear(node_online_map);
+ 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
+-				  MAX_NUMNODES));
++				  NUMA_NO_NODE));
+ 	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+-				  MAX_NUMNODES));
++				  NUMA_NO_NODE));
+ 	/* In case that parsing SRAT failed. */
+ 	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
+ 	numa_reset_distance();
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index b0f314f4bc149..9944414bf7eed 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -183,7 +183,7 @@ static void blk_flush_complete_seq(struct request *rq,
+ 		/* queue for flush */
+ 		if (list_empty(pending))
+ 			fq->flush_pending_since = jiffies;
+-		list_move_tail(&rq->queuelist, pending);
++		list_add_tail(&rq->queuelist, pending);
+ 		break;
+ 
+ 	case REQ_FSEQ_DATA:
+@@ -261,6 +261,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+ 		unsigned int seq = blk_flush_cur_seq(rq);
+ 
+ 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
++		list_del_init(&rq->queuelist);
+ 		blk_flush_complete_seq(rq, fq, seq, error);
+ 	}
+ 
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index 14fe0fef811cf..598fd3e7fcc8e 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
+ 			      &key_type_user, key_name, true);
+ 
+ 	if (IS_ERR(kref))
+-		ret = PTR_ERR(kref);
++		return PTR_ERR(kref);
+ 
+ 	key = key_ref_to_ptr(kref);
+ 	down_read(&key->sem);
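
The one-line change fixes a classic error-pointer bug: recording PTR_ERR() without returning lets execution fall through to key_ref_to_ptr() on an error pointer. A runnable userspace analogue of the encoding and the corrected early return (lookup() is a stand-in name):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace analogue of the kernel's error-pointer encoding: small
 * negative errnos live at the very top of the address space */
#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))
#define ERR_PTR(e)	((void *)(long)(e))

static void *lookup(int ok)	/* stand-in for lookup_user_key() */
{
	return ok ? malloc(16) : ERR_PTR(-ENOENT);
}

static long use_resource(int ok)
{
	void *p = lookup(ok);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* must return here; merely saving
					 * the errno and continuing would
					 * dereference an error pointer */
	free(p);
	return 0;
}

int main(void)
{
	printf("%ld %ld\n", use_resource(1), use_resource(0));
	return 0;
}
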
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index d67881b50bca2..a0cfc857fb553 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
+ 
+ static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
+ {
++	int temp;
++
+ 	if (temp_deci_k == THERMAL_TEMP_INVALID)
+ 		return THERMAL_TEMP_INVALID;
+ 
+-	return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
++	temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ 						       tz->kelvin_offset);
++	if (temp <= 0)
++		return THERMAL_TEMP_INVALID;
++
++	return temp;
+ }
+ 
+ static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 90c3d2eab9e99..7507a7706898c 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -197,16 +197,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+ }
+ 
+ /*
+- * AMD systems from Renoir and Lucienne *require* that the NVME controller
++ * AMD systems from Renoir onwards *require* that the NVME controller
+  * is put into D3 over a Modern Standby / suspend-to-idle cycle.
+  *
+  * This is "typically" accomplished using the `StorageD3Enable`
+  * property in the _DSD that is checked via the `acpi_storage_d3` function
+- * but this property was introduced after many of these systems launched
+- * and most OEM systems don't have it in their BIOS.
++ * but some OEM systems still don't have it in their BIOS.
+  *
+  * The Microsoft documentation for StorageD3Enable mentioned that Windows has
+- * a hardcoded allowlist for D3 support, which was used for these platforms.
++ * a hardcoded allowlist for D3 support as well as a registry key to override
++ * the BIOS, which has been used for these cases.
+  *
+  * This allows quirking on Linux in a similar fashion.
+  *
+@@ -219,19 +219,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+  *    https://bugzilla.kernel.org/show_bug.cgi?id=216773
+  *    https://bugzilla.kernel.org/show_bug.cgi?id=217003
+  * 2) On at least one HP system StorageD3Enable is missing on the second NVME
+-      disk in the system.
++ *    disk in the system.
++ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
++ *    NVME device.
+  */
+-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL),  /* Picasso */
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+-	X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),	/* Cezanne */
+-	{}
+-};
+-
+ bool force_storage_d3(void)
+ {
+-	return x86_match_cpu(storage_d3_cpu_ids);
++	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
++		return false;
++	return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
+ }
+ 
+ /*
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 6548f10e61d9c..07d66d2c5f0dd 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
+ 	/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
+ 	{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
+-	{ PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c449d60d9bb96..28caed151e05f 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4180,8 +4180,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_HORKAGE_NOLPM },
+ 	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },
+ 
+-	/* Crucial BX100 SSD 500GB has broken LPM support */
++	/* Crucial devices with broken LPM support */
+ 	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
++	{ "CT240BX500SSD1",		NULL,	ATA_HORKAGE_NOLPM },
+ 
+ 	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+ 	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+@@ -4199,6 +4200,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM |
+ 						ATA_HORKAGE_NOLPM },
+ 
++	/* Apacer models with LPM issues */
++	{ "Apacer AS340*",		NULL,	ATA_HORKAGE_NOLPM },
++
++	/* AMD Radeon devices with broken LPM support */
++	{ "R3SL240G",			NULL,	ATA_HORKAGE_NOLPM },
++
+ 	/* These specific Samsung models/firmware-revs do not handle LPM well */
+ 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
+ 	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e954976891a9f..9c3daa7d195c4 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1828,11 +1828,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+ 		2
+ 	};
+ 
+-	/* set scsi removable (RMB) bit per ata bit, or if the
+-	 * AHCI port says it's external (Hotplug-capable, eSATA).
++	/*
++	 * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
++	 * device bit (obsolete since ATA-8 ACS) is set.
+ 	 */
+-	if (ata_id_removable(args->id) ||
+-	    (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
++	if (ata_id_removable(args->id))
+ 		hdr[1] |= (1 << 7);
+ 
+ 	if (args->dev->class == ATA_DEV_ZAC) {
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 5f4e03336e68e..8e3bd230b165e 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2738,8 +2738,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+ 	if (!env)
+ 		return -ENOMEM;
+ 
++	/* Synchronize with really_probe() */
++	device_lock(dev);
+ 	/* let the kset specific function add its keys */
+ 	retval = kset->uevent_ops->uevent(&dev->kobj, env);
++	device_unlock(dev);
+ 	if (retval)
+ 		goto out;
+ 
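
The new device_lock()/device_unlock() pair makes the sysfs read serialize against really_probe(), so the uevent callback never sees a device whose driver is half bound. A runnable pthread analogue of reading under the writer's lock (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *driver_name;	/* written by the "probe" thread */

static void show_uevent(char *buf, size_t len)
{
	pthread_mutex_lock(&dev_lock);	/* synchronize with probe */
	snprintf(buf, len, "DRIVER=%s", driver_name ? driver_name : "");
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	char buf[64];

	pthread_mutex_lock(&dev_lock);
	driver_name = "demo";		/* "probe" publishes under the lock */
	pthread_mutex_unlock(&dev_lock);

	show_uevent(buf, sizeof(buf));
	puts(buf);
	return 0;
}
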
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index 1689e25841048..27928deccc643 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -113,7 +113,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
+ 	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+ 		dev->zone_max_open = dev->zone_max_active;
+ 		pr_info("changed the maximum number of open zones to %u\n",
+-			dev->nr_zones);
++			dev->zone_max_open);
+ 	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+ 		dev->zone_max_open = 0;
+ 		pr_info("zone_max_open limit disabled, limit >= zone count\n");
+diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
+index 25b8e1a80ddce..b32a59fe55e74 100644
+--- a/drivers/clk/sifive/sifive-prci.c
++++ b/drivers/clk/sifive/sifive-prci.c
+@@ -4,7 +4,6 @@
+  * Copyright (C) 2020 Zong Li
+  */
+ 
+-#include <linux/clkdev.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+@@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
+ 			return r;
+ 		}
+ 
+-		r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+-		if (r) {
+-			dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+-				 init.name, r);
+-			return r;
+-		}
+-
+ 		pd->hw_clks.hws[i] = &pic->hw;
+ 	}
+ 
+diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
+index f04ae67dda372..fc275d41d51e9 100644
+--- a/drivers/cpufreq/amd-pstate-ut.c
++++ b/drivers/cpufreq/amd-pstate-ut.c
+@@ -26,10 +26,11 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/fs.h>
+-#include <linux/amd-pstate.h>
+ 
+ #include <acpi/cppc_acpi.h>
+ 
++#include "amd-pstate.h"
++
+ /*
+  * Abbreviations:
+  * amd_pstate_ut: used as a shortform for AMD P-State unit test.
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index e263db0385ab7..6c989d859b396 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -36,7 +36,6 @@
+ #include <linux/delay.h>
+ #include <linux/uaccess.h>
+ #include <linux/static_call.h>
+-#include <linux/amd-pstate.h>
+ #include <linux/topology.h>
+ 
+ #include <acpi/processor.h>
+@@ -46,6 +45,8 @@
+ #include <asm/processor.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_device_id.h>
++
++#include "amd-pstate.h"
+ #include "amd-pstate-trace.h"
+ 
+ #define AMD_PSTATE_TRANSITION_LATENCY	20000
+@@ -53,6 +54,37 @@
+ #define CPPC_HIGHEST_PERF_PERFORMANCE	196
+ #define CPPC_HIGHEST_PERF_DEFAULT	166
+ 
++#define AMD_CPPC_EPP_PERFORMANCE		0x00
++#define AMD_CPPC_EPP_BALANCE_PERFORMANCE	0x80
++#define AMD_CPPC_EPP_BALANCE_POWERSAVE		0xBF
++#define AMD_CPPC_EPP_POWERSAVE			0xFF
++
++/*
++ * enum amd_pstate_mode - driver working mode of amd pstate
++ */
++enum amd_pstate_mode {
++	AMD_PSTATE_UNDEFINED = 0,
++	AMD_PSTATE_DISABLE,
++	AMD_PSTATE_PASSIVE,
++	AMD_PSTATE_ACTIVE,
++	AMD_PSTATE_GUIDED,
++	AMD_PSTATE_MAX,
++};
++
++static const char * const amd_pstate_mode_string[] = {
++	[AMD_PSTATE_UNDEFINED]   = "undefined",
++	[AMD_PSTATE_DISABLE]     = "disable",
++	[AMD_PSTATE_PASSIVE]     = "passive",
++	[AMD_PSTATE_ACTIVE]      = "active",
++	[AMD_PSTATE_GUIDED]      = "guided",
++	NULL,
++};
++
++struct quirk_entry {
++	u32 nominal_freq;
++	u32 lowest_freq;
++};
++
+ /*
+  * TODO: We need more time to fine tune processors with shared memory solution
+  * with community together.
+@@ -68,6 +100,7 @@ static struct cpufreq_driver amd_pstate_epp_driver;
+ static int cppc_state = AMD_PSTATE_UNDEFINED;
+ static bool cppc_enabled;
+ static bool amd_pstate_prefcore = true;
++static struct quirk_entry *quirks;
+ 
+ /*
+  * AMD Energy Preference Performance (EPP)
+@@ -112,6 +145,41 @@ static unsigned int epp_values[] = {
+ 
+ typedef int (*cppc_mode_transition_fn)(int);
+ 
++static struct quirk_entry quirk_amd_7k62 = {
++	.nominal_freq = 2600,
++	.lowest_freq = 550,
++};
++
++static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
++{
++	/*
++	 * Match the broken BIOS for Family 17h processors supporting CPPC V2:
++	 * these BIOSes lack the nominal_freq and lowest_freq capability
++	 * definitions in their ACPI tables.
++	 */
++	if (boot_cpu_has(X86_FEATURE_ZEN2)) {
++		quirks = dmi->driver_data;
++		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
++		return 1;
++	}
++
++	return 0;
++}
++
++static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
++	{
++		.callback = dmi_matched_7k62_bios_bug,
++		.ident = "AMD EPYC 7K62",
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
++			DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
++		},
++		.driver_data = &quirk_amd_7k62,
++	},
++	{}
++};
++MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
++
+ static inline int get_mode_idx_from_str(const char *str, size_t size)
+ {
+ 	int i;
+@@ -622,74 +690,22 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ 
+ static int amd_get_min_freq(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_caps cppc_perf;
+-
+-	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+-	if (ret)
+-		return ret;
+-
+-	/* Switch to khz */
+-	return cppc_perf.lowest_freq * 1000;
++	return READ_ONCE(cpudata->min_freq);
+ }
+ 
+ static int amd_get_max_freq(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_caps cppc_perf;
+-	u32 max_perf, max_freq, nominal_freq, nominal_perf;
+-	u64 boost_ratio;
+-
+-	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+-	if (ret)
+-		return ret;
+-
+-	nominal_freq = cppc_perf.nominal_freq;
+-	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+-	max_perf = READ_ONCE(cpudata->highest_perf);
+-
+-	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
+-			      nominal_perf);
+-
+-	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
+-
+-	/* Switch to khz */
+-	return max_freq * 1000;
++	return READ_ONCE(cpudata->max_freq);
+ }
+ 
+ static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_caps cppc_perf;
+-
+-	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+-	if (ret)
+-		return ret;
+-
+-	/* Switch to khz */
+-	return cppc_perf.nominal_freq * 1000;
++	return READ_ONCE(cpudata->nominal_freq);
+ }
+ 
+ static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_caps cppc_perf;
+-	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
+-	    nominal_freq, nominal_perf;
+-	u64 lowest_nonlinear_ratio;
+-
+-	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+-	if (ret)
+-		return ret;
+-
+-	nominal_freq = cppc_perf.nominal_freq;
+-	nominal_perf = READ_ONCE(cpudata->nominal_perf);
+-
+-	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+-
+-	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+-					 nominal_perf);
+-
+-	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
+-
+-	/* Switch to khz */
+-	return lowest_nonlinear_freq * 1000;
++	return READ_ONCE(cpudata->lowest_nonlinear_freq);
+ }
+ 
+ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+@@ -844,6 +860,61 @@ static void amd_pstate_update_limits(unsigned int cpu)
+ 	mutex_unlock(&amd_pstate_driver_lock);
+ }
+ 
++/**
++ * amd_pstate_init_freq: Initialize the max_freq, min_freq,
++ *                       nominal_freq and lowest_nonlinear_freq for
++ *                       the @cpudata object.
++ *
++ *  Requires: highest_perf, lowest_perf, nominal_perf and
++ *            lowest_nonlinear_perf members of @cpudata to be
++ *            initialized.
++ *
++ *  Returns 0 on success, non-zero value on failure.
++ */
++static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
++{
++	int ret;
++	u32 min_freq;
++	u32 highest_perf, max_freq;
++	u32 nominal_perf, nominal_freq;
++	u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
++	u32 boost_ratio, lowest_nonlinear_ratio;
++	struct cppc_perf_caps cppc_perf;
++
++	ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
++	if (ret)
++		return ret;
++
++	if (quirks && quirks->lowest_freq)
++		min_freq = quirks->lowest_freq * 1000;
++	else
++		min_freq = cppc_perf.lowest_freq * 1000;
++
++	if (quirks && quirks->nominal_freq)
++		nominal_freq = quirks->nominal_freq;
++	else
++		nominal_freq = cppc_perf.nominal_freq;
++
++	nominal_perf = READ_ONCE(cpudata->nominal_perf);
++
++	highest_perf = READ_ONCE(cpudata->highest_perf);
++	boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
++	max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
++
++	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
++	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
++					 nominal_perf);
++	lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
++
++	WRITE_ONCE(cpudata->min_freq, min_freq);
++	WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
++	WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
++	WRITE_ONCE(cpudata->max_freq, max_freq);
++
++	return 0;
++}
++
+ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ {
+ 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+@@ -871,6 +942,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ 	if (ret)
+ 		goto free_cpudata1;
+ 
++	ret = amd_pstate_init_freq(cpudata);
++	if (ret)
++		goto free_cpudata1;
++
+ 	min_freq = amd_get_min_freq(cpudata);
+ 	max_freq = amd_get_max_freq(cpudata);
+ 	nominal_freq = amd_get_nominal_freq(cpudata);
+@@ -912,13 +987,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ 		goto free_cpudata2;
+ 	}
+ 
+-	/* Initial processor data capability frequencies */
+-	cpudata->max_freq = max_freq;
+-	cpudata->min_freq = min_freq;
+ 	cpudata->max_limit_freq = max_freq;
+ 	cpudata->min_limit_freq = min_freq;
+-	cpudata->nominal_freq = nominal_freq;
+-	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+ 
+ 	policy->driver_data = cpudata;
+ 
+@@ -1333,6 +1403,10 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ 	if (ret)
+ 		goto free_cpudata1;
+ 
++	ret = amd_pstate_init_freq(cpudata);
++	if (ret)
++		goto free_cpudata1;
++
+ 	min_freq = amd_get_min_freq(cpudata);
+ 	max_freq = amd_get_max_freq(cpudata);
+ 	nominal_freq = amd_get_nominal_freq(cpudata);
+@@ -1349,12 +1423,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ 	/* It will be updated by governor */
+ 	policy->cur = policy->cpuinfo.min_freq;
+ 
+-	/* Initial processor data capability frequencies */
+-	cpudata->max_freq = max_freq;
+-	cpudata->min_freq = min_freq;
+-	cpudata->nominal_freq = nominal_freq;
+-	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+-
+ 	policy->driver_data = cpudata;
+ 
+ 	cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+@@ -1672,6 +1740,11 @@ static int __init amd_pstate_init(void)
+ 	if (cpufreq_get_current_driver())
+ 		return -EEXIST;
+ 
++	quirks = NULL;
++
++	/* check if this machine needs CPPC quirks */
++	dmi_check_system(amd_pstate_quirks_table);
++
+ 	switch (cppc_state) {
+ 	case AMD_PSTATE_UNDEFINED:
+ 		/* Disable on the following configs by default:
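
amd_pstate_init_freq() above derives the boost and lowest-nonlinear frequencies from perf ratios scaled by 2^SCHED_CAPACITY_SHIFT, keeping everything in integer arithmetic. A runnable recomputation; the 2600 MHz nominal matches the 7K62 quirk, the perf values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* fixed-point scale, 2^10 */

int main(void)
{
	uint32_t nominal_freq = 2600;	/* MHz */
	uint32_t nominal_perf = 166, highest_perf = 196;
	uint32_t lowest_nonlinear_perf = 85;

	uint64_t boost_ratio = ((uint64_t)highest_perf << SCHED_CAPACITY_SHIFT)
				/ nominal_perf;
	uint32_t max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT)
				* 1000;	/* kHz */

	uint64_t lnl_ratio = ((uint64_t)lowest_nonlinear_perf
				<< SCHED_CAPACITY_SHIFT) / nominal_perf;
	uint32_t lnl_freq = (nominal_freq * lnl_ratio >> SCHED_CAPACITY_SHIFT)
				* 1000;

	printf("max %u kHz, lowest nonlinear %u kHz\n", max_freq, lnl_freq);
	return 0;
}
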
+diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
+new file mode 100644
+index 0000000000000..bc341f35908d7
+--- /dev/null
++++ b/drivers/cpufreq/amd-pstate.h
+@@ -0,0 +1,100 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2022 Advanced Micro Devices, Inc.
++ *
++ * Author: Meng Li <li.meng@amd.com>
++ */
++
++#ifndef _LINUX_AMD_PSTATE_H
++#define _LINUX_AMD_PSTATE_H
++
++#include <linux/pm_qos.h>
++
++/*********************************************************************
++ *                        AMD P-state INTERFACE                       *
++ *********************************************************************/
++/**
++ * struct amd_aperf_mperf
++ * @aperf: actual performance frequency clock count
++ * @mperf: maximum performance frequency clock count
++ * @tsc:   time stamp counter
++ */
++struct amd_aperf_mperf {
++	u64 aperf;
++	u64 mperf;
++	u64 tsc;
++};
++
++/**
++ * struct amd_cpudata - private CPU data for AMD P-State
++ * @cpu: CPU number
++ * @req: constraint request to apply
++ * @cppc_req_cached: cached performance request hints
++ * @highest_perf: the maximum performance an individual processor may reach,
++ *		  assuming ideal conditions.
++ *		  On platforms that do not support the preferred core feature,
++ *		  highest_perf is configured with a fixed value of 166 or 255 to
++ *		  avoid miscalculating the maximum frequency.
++ * @nominal_perf: the maximum sustained performance level of the processor,
++ *		  assuming ideal operating conditions
++ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
++ *			   savings are achieved
++ * @lowest_perf: the absolute lowest performance level of the processor
++ * @prefcore_ranking: the preferred core ranking; a higher value indicates a
++ * 		  higher priority.
++ * @max_freq: the frequency that mapped to highest_perf
++ * @min_freq: the frequency that mapped to lowest_perf
++ * @nominal_freq: the frequency that mapped to nominal_perf
++ * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
++ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
++ * @prev: Last Aperf/Mperf/tsc count value read from register
++ * @freq: current cpu frequency value
++ * @boost_supported: whether the processor or SBIOS supports boost mode
++ * @hw_prefcore: whether the HW supports the preferred core feature.
++ * 		  The AMD P-State driver supports preferred cores only when
++ * 		  both hw_prefcore and the early prefcore param are true.
++ * @epp_policy: Last saved policy used to set energy-performance preference
++ * @epp_cached: Cached CPPC energy-performance preference value
++ * @policy: Cpufreq policy value
++ * @cppc_cap1_cached: Cached MSR_AMD_CPPC_CAP1 register value
++ *
++ * The amd_cpudata structure holds the key private data for each CPU thread
++ * in AMD P-State and represents all the attributes and goals that AMD
++ * P-State requests at runtime.
++ */
++struct amd_cpudata {
++	int	cpu;
++
++	struct	freq_qos_request req[2];
++	u64	cppc_req_cached;
++
++	u32	highest_perf;
++	u32	nominal_perf;
++	u32	lowest_nonlinear_perf;
++	u32	lowest_perf;
++	u32     prefcore_ranking;
++	u32     min_limit_perf;
++	u32     max_limit_perf;
++	u32     min_limit_freq;
++	u32     max_limit_freq;
++
++	u32	max_freq;
++	u32	min_freq;
++	u32	nominal_freq;
++	u32	lowest_nonlinear_freq;
++
++	struct amd_aperf_mperf cur;
++	struct amd_aperf_mperf prev;
++
++	u64	freq;
++	bool	boost_supported;
++	bool	hw_prefcore;
++
++	/* EPP feature related attributes */
++	s16	epp_policy;
++	s16	epp_cached;
++	u32	policy;
++	u64	cppc_cap1_cached;
++	bool	suspended;
++};
++
++#endif /* _LINUX_AMD_PSTATE_H */
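
The frequency fields of struct amd_cpudata are published with WRITE_ONCE() and read with READ_ONCE() instead of a lock, which is sound for a single aligned word. A userspace sketch of the pattern, with the macros simplified from the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static struct { uint32_t max_freq; } cpudata;

int main(void)
{
	/* one writer publishes the word ... */
	WRITE_ONCE(cpudata.max_freq, 3069000);
	/* ... and readers may load it without a lock, since a single
	 * aligned 32-bit access cannot be observed half-written */
	printf("max_freq=%u kHz\n", READ_ONCE(cpudata.max_freq));
	return 0;
}
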
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 812b2948b6c65..18b95149640b6 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
+ 	struct device *dev;
+ 	int rc;
+ 
+-	switch (mode) {
+-	case CXL_DECODER_RAM:
+-	case CXL_DECODER_PMEM:
+-		break;
+-	default:
+-		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+ 	cxlr = cxl_region_alloc(cxlrd, id);
+ 	if (IS_ERR(cxlr))
+ 		return cxlr;
+@@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
+ {
+ 	int rc;
+ 
++	switch (mode) {
++	case CXL_DECODER_RAM:
++	case CXL_DECODER_PMEM:
++		break;
++	default:
++		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	rc = memregion_alloc(GFP_KERNEL);
+ 	if (rc < 0)
+ 		return ERR_PTR(rc);
+diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
+index b7c6f7ea9e0c8..6a1bfcd0cc210 100644
+--- a/drivers/dma-buf/st-dma-fence.c
++++ b/drivers/dma-buf/st-dma-fence.c
+@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
+ 			t[i].before = pass;
+ 			t[i].task = kthread_run(thread_signal_callback, &t[i],
+ 						"dma-fence:%d", i);
++			if (IS_ERR(t[i].task)) {
++				ret = PTR_ERR(t[i].task);
++				while (--i >= 0)
++					kthread_stop_put(t[i].task);
++				return ret;
++			}
+ 			get_task_struct(t[i].task);
+ 		}
+ 
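
The added error path unwinds every thread started so far before propagating the failure, the standard backwards-walking cleanup for partial initialization. A runnable sketch with malloc() standing in for kthread_run():

#include <stdio.h>
#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
	for (int i = 0; i < n; i++) {
		slots[i] = malloc(64);
		if (!slots[i]) {
			while (--i >= 0)	/* release slots 0..i-1 */
				free(slots[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *slots[8];

	if (alloc_all(slots, 8))
		return 1;
	for (int i = 0; i < 8; i++)
		free(slots[i]);
	puts("ok");
	return 0;
}
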
+diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
+index 4e339c04fc1ea..d5a33e4a91b19 100644
+--- a/drivers/dma/dma-axi-dmac.c
++++ b/drivers/dma/dma-axi-dmac.c
+@@ -1134,8 +1134,8 @@ static void axi_dmac_remove(struct platform_device *pdev)
+ {
+ 	struct axi_dmac *dmac = platform_get_drvdata(pdev);
+ 
+-	of_dma_controller_free(pdev->dev.of_node);
+ 	free_irq(dmac->irq, dmac);
++	of_dma_controller_free(pdev->dev.of_node);
+ 	tasklet_kill(&dmac->chan.vchan.task);
+ 	dma_async_device_unregister(&dmac->dma_dev);
+ 	clk_disable_unprepare(dmac->clk);
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index b50d0b4708497..cbfcfefdb5da1 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1549,7 +1549,7 @@ config GPIO_TPS68470
+ 	  are "output only" GPIOs.
+ 
+ config GPIO_TQMX86
+-	tristate "TQ-Systems QTMX86 GPIO"
++	tristate "TQ-Systems TQMx86 GPIO"
+ 	depends on MFD_TQMX86 || COMPILE_TEST
+ 	depends on HAS_IOPORT_MAP
+ 	select GPIOLIB_IRQCHIP
+diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
+index 3a28c1f273c39..f2e7e8754d95d 100644
+--- a/drivers/gpio/gpio-tqmx86.c
++++ b/drivers/gpio/gpio-tqmx86.c
+@@ -6,6 +6,7 @@
+  *   Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
+  */
+ 
++#include <linux/bitmap.h>
+ #include <linux/bitops.h>
+ #include <linux/errno.h>
+ #include <linux/gpio/driver.h>
+@@ -28,16 +29,25 @@
+ #define TQMX86_GPIIC	3	/* GPI Interrupt Configuration Register */
+ #define TQMX86_GPIIS	4	/* GPI Interrupt Status Register */
+ 
++#define TQMX86_GPII_NONE	0
+ #define TQMX86_GPII_FALLING	BIT(0)
+ #define TQMX86_GPII_RISING	BIT(1)
++/* Stored in irq_type as a trigger type, but not actually valid as a register
++ * value, so the name doesn't use "GPII"
++ */
++#define TQMX86_INT_BOTH		(BIT(0) | BIT(1))
+ #define TQMX86_GPII_MASK	(BIT(0) | BIT(1))
+ #define TQMX86_GPII_BITS	2
++/* Stored in irq_type with GPII bits */
++#define TQMX86_INT_UNMASKED	BIT(2)
+ 
+ struct tqmx86_gpio_data {
+ 	struct gpio_chip	chip;
+ 	void __iomem		*io_base;
+ 	int			irq;
++	/* Lock must be held for accessing output and irq_type fields */
+ 	raw_spinlock_t		spinlock;
++	DECLARE_BITMAP(output, TQMX86_NGPIO);
+ 	u8			irq_type[TQMX86_NGPI];
+ };
+ 
+@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ {
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ 	unsigned long flags;
+-	u8 val;
+ 
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
+-	if (value)
+-		val |= BIT(offset);
+-	else
+-		val &= ~BIT(offset);
+-	tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
++	__assign_bit(offset, gpio->output, value);
++	tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+ 
+@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
+ 	return GPIO_LINE_DIRECTION_OUT;
+ }
+ 
++static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
++	__must_hold(&gpio->spinlock)
++{
++	u8 type = TQMX86_GPII_NONE, gpiic;
++
++	if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
++		type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
++
++		if (type == TQMX86_INT_BOTH)
++			type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
++				? TQMX86_GPII_FALLING
++				: TQMX86_GPII_RISING;
++	}
++
++	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
++	gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
++	gpiic |= type << (offset * TQMX86_GPII_BITS);
++	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++}
++
+ static void tqmx86_gpio_irq_mask(struct irq_data *data)
+ {
+ 	unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ 		irq_data_get_irq_chip_data(data));
+ 	unsigned long flags;
+-	u8 gpiic, mask;
+-
+-	mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+ 
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~mask;
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
+ 	gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
+ }
+ 
+@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(
+ 		irq_data_get_irq_chip_data(data));
+ 	unsigned long flags;
+-	u8 gpiic, mask;
+-
+-	mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
+ 
+ 	gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
++
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~mask;
+-	gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ }
+ 
+@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ 	unsigned int offset = (data->hwirq - TQMX86_NGPO);
+ 	unsigned int edge_type = type & IRQF_TRIGGER_MASK;
+ 	unsigned long flags;
+-	u8 new_type, gpiic;
++	u8 new_type;
+ 
+ 	switch (edge_type) {
+ 	case IRQ_TYPE_EDGE_RISING:
+@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ 		new_type = TQMX86_GPII_FALLING;
+ 		break;
+ 	case IRQ_TYPE_EDGE_BOTH:
+-		new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
++		new_type = TQMX86_INT_BOTH;
+ 		break;
+ 	default:
+ 		return -EINVAL; /* not supported */
+ 	}
+ 
+-	gpio->irq_type[offset] = new_type;
+-
+ 	raw_spin_lock_irqsave(&gpio->spinlock, flags);
+-	gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+-	gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
+-	gpiic |= new_type << (offset * TQMX86_GPII_BITS);
+-	tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
++	gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
++	gpio->irq_type[offset] |= new_type;
++	tqmx86_gpio_irq_config(gpio, offset);
+ 	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ 
+ 	return 0;
+@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ 	struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ 	struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+-	unsigned long irq_bits;
+-	int i = 0;
++	unsigned long irq_bits, flags;
++	int i;
+ 	u8 irq_status;
+ 
+ 	chained_irq_enter(irq_chip, desc);
+@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
+ 	tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
+ 
+ 	irq_bits = irq_status;
++
++	raw_spin_lock_irqsave(&gpio->spinlock, flags);
++	for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
++		/*
++		 * Edge-both triggers are implemented by flipping the edge
++		 * trigger after each interrupt, as the controller only supports
++		 * either rising or falling edge triggers, but not both.
++		 *
++		 * Internally, the TQMx86 GPIO controller has separate status
++		 * registers for rising and falling edge interrupts. GPIIC
++		 * configures which bits from which register are visible in the
++		 * interrupt status register GPIIS and defines what triggers the
++		 * parent IRQ line. Writing to GPIIS always clears both rising
++		 * and falling interrupt flags internally, regardless of the
++		 * currently configured trigger.
++		 *
++		 * In consequence, we can cleanly implement the edge-both
++		 * trigger in software by first clearing the interrupt and then
++		 * setting the new trigger based on the current GPIO input in
++		 * tqmx86_gpio_irq_config() - even if an edge arrives between
++		 * reading the input and setting the trigger, we will have a new
++		 * interrupt pending.
++		 */
++		if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
++			tqmx86_gpio_irq_config(gpio, i);
++	}
++	raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
++
+ 	for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
+ 		generic_handle_domain_irq(gpio->chip.irq.domain,
+ 					  i + TQMX86_NGPO);
+@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ 
+ 	tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+ 
++	/*
++	 * Reading the previous output state is not possible with TQMx86 hardware.
++	 * Initialize all outputs to 0 to have a defined state that matches the
++	 * shadow register.
++	 */
++	tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
++
+ 	chip = &gpio->chip;
+ 	chip->label = "gpio-tqmx86";
+ 	chip->owner = THIS_MODULE;
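
Because the controller latches only one polarity at a time, IRQ_TYPE_EDGE_BOTH is emulated by re-arming the opposite edge after each interrupt, chosen from the current line level, exactly as the long comment in the handler describes. A standalone sketch of that state machine (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

enum edge { EDGE_RISING, EDGE_FALLING };

struct fake_gpio {
	bool level;		/* current input level */
	enum edge trigger;	/* currently armed polarity */
};

static void rearm_for_next_edge(struct fake_gpio *g)
{
	/* if the line is high, the next event can only be a falling
	 * edge, and vice versa */
	g->trigger = g->level ? EDGE_FALLING : EDGE_RISING;
}

static void irq_handler(struct fake_gpio *g, bool new_level)
{
	g->level = new_level;	/* ack, then flip the polarity; an edge
				 * racing with the flip leaves a fresh
				 * interrupt pending in hardware */
	rearm_for_next_edge(g);
}

int main(void)
{
	struct fake_gpio g = { .level = false, .trigger = EDGE_RISING };

	irq_handler(&g, true);
	printf("armed for %s edge\n",
	       g.trigger == EDGE_FALLING ? "falling" : "rising");
	return 0;
}
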
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index c27063305a134..2c36f3d00ca25 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -363,7 +363,7 @@ void dm_helpers_dp_mst_send_payload_allocation(
+ 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+ 	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+ 
+-	ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
++	ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
+ 
+ 	if (ret) {
+ 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index f3e744172673c..f4e76b46ca327 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
+ 	u32 avail_scalers;
+ 
+ 	pipe_st = komeda_pipeline_get_state(c->pipeline, state);
+-	if (!pipe_st)
++	if (IS_ERR_OR_NULL(pipe_st))
+ 		return NULL;
+ 
+ 	avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
+diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
+index 7f41525f7a6e6..3d6e8f096a5d4 100644
+--- a/drivers/gpu/drm/bridge/panel.c
++++ b/drivers/gpu/drm/bridge/panel.c
+@@ -358,9 +358,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
+ 
+ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
+ {
+-	struct drm_bridge **bridge = res;
++	struct drm_bridge *bridge = *(struct drm_bridge **)res;
+ 
+-	drm_panel_bridge_remove(*bridge);
++	if (!bridge)
++		return;
++
++	drm_bridge_remove(bridge);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 03d5282094262..95fd18f24e944 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3421,7 +3421,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
+ /**
+  * drm_dp_add_payload_part2() - Execute payload update part 2
+  * @mgr: Manager to use.
+- * @state: The global atomic state
+  * @payload: The payload to update
+  *
+  * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
+@@ -3430,14 +3429,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
+  * Returns: 0 on success, negative error code on failure.
+  */
+ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+-			     struct drm_atomic_state *state,
+ 			     struct drm_dp_mst_atomic_payload *payload)
+ {
+ 	int ret = 0;
+ 
+ 	/* Skip failed payloads */
+ 	if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
+-		drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
++		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ 			    payload->port->connector->name);
+ 		return -EIO;
+ 	}
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index e435f986cd135..1ff0678be7c75 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
+ 		return ret;
+ 	}
+ 
++	if (is_cow_mapping(vma->vm_flags))
++		return -EINVAL;
++
+ 	dma_resv_lock(shmem->base.resv, NULL);
+ 	ret = drm_gem_shmem_get_pages(shmem);
+ 	dma_resv_unlock(shmem->base.resv);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index f5bbba9ad2252..b1e9a702172f5 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
+ 	struct vidi_context *ctx = ctx_from_connector(connector);
+ 	struct edid *edid;
+ 	int edid_len;
++	int count;
+ 
+ 	/*
+ 	 * the edid data comes from user side and it would be set
+@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
+ 
+ 	drm_connector_update_edid_property(connector, edid);
+ 
+-	return drm_add_edid_modes(connector, edid);
++	count = drm_add_edid_modes(connector, edid);
++
++	kfree(edid);
++
++	return count;
+ }
+ 
+ static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
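
The vidi change is a plain leak fix: the locally allocated EDID copy was never freed on the success path. A runnable sketch of the allocate-use-free pattern it restores (stand-in names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int consume_copy(const void *src, size_t len)
{
	void *copy = malloc(len);	/* stand-in for the EDID duplication */
	int count;

	if (!copy)
		return -1;
	memcpy(copy, src, len);

	count = 1;	/* stand-in for drm_add_edid_modes(copy) */

	free(copy);	/* previously leaked on this path */
	return count;
}

int main(void)
{
	char edid[128] = { 0 };

	printf("modes: %d\n", consume_copy(edid, sizeof(edid)));
	return 0;
}
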
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index b1d02dec3774d..6385202cd28bb 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ 	int ret;
+ 
+ 	if (!hdata->ddc_adpt)
+-		return 0;
++		goto no_edid;
+ 
+ 	edid = drm_get_edid(connector, hdata->ddc_adpt);
+ 	if (!edid)
+-		return 0;
++		goto no_edid;
+ 
+ 	hdata->dvi_mode = !connector->display_info.is_hdmi;
+ 	DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
+ 	kfree(edid);
+ 
+ 	return ret;
++
++no_edid:
++	return drm_add_modes_noedid(connector, 640, 480);
+ }
+ 
+ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
+index ed81e1466c4b5..40e7d862675ee 100644
+--- a/drivers/gpu/drm/i915/display/intel_audio.c
++++ b/drivers/gpu/drm/i915/display/intel_audio.c
+@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
+ static void i915_audio_component_init(struct drm_i915_private *i915)
+ {
+ 	u32 aud_freq, aud_freq_init;
+-	int ret;
+-
+-	ret = component_add_typed(i915->drm.dev,
+-				  &i915_audio_component_bind_ops,
+-				  I915_COMPONENT_AUDIO);
+-	if (ret < 0) {
+-		drm_err(&i915->drm,
+-			"failed to add audio component (%d)\n", ret);
+-		/* continue with reduced functionality */
+-		return;
+-	}
+ 
+ 	if (DISPLAY_VER(i915) >= 9) {
+ 		aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
+@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
+ 
+ 	/* init with current cdclk */
+ 	intel_audio_cdclk_change_post(i915);
++}
++
++static void i915_audio_component_register(struct drm_i915_private *i915)
++{
++	int ret;
++
++	ret = component_add_typed(i915->drm.dev,
++				  &i915_audio_component_bind_ops,
++				  I915_COMPONENT_AUDIO);
++	if (ret < 0) {
++		drm_err(&i915->drm,
++			"failed to add audio component (%d)\n", ret);
++		/* continue with reduced functionality */
++		return;
++	}
+ 
+ 	i915->display.audio.component_registered = true;
+ }
+@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
+ 		i915_audio_component_init(i915);
+ }
+ 
++void intel_audio_register(struct drm_i915_private *i915)
++{
++	if (!i915->display.audio.lpe.platdev)
++		i915_audio_component_register(i915);
++}
++
+ /**
+  * intel_audio_deinit() - deinitialize the audio driver
+  * @i915: the i915 drm device private data
+diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
+index 9327954b801e5..576c061d72a45 100644
+--- a/drivers/gpu/drm/i915/display/intel_audio.h
++++ b/drivers/gpu/drm/i915/display/intel_audio.h
+@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
+ void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+ void intel_audio_init(struct drm_i915_private *dev_priv);
++void intel_audio_register(struct drm_i915_private *i915);
+ void intel_audio_deinit(struct drm_i915_private *dev_priv);
+ void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
+index 87dd07e0d138d..6da5e85abe636 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
++++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
+@@ -542,6 +542,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
+ 
+ 	intel_display_driver_enable_user_access(i915);
+ 
++	intel_audio_register(i915);
++
+ 	intel_display_debugfs_register(i915);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index b651c990af85f..8264ff7fb6c27 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -1160,7 +1160,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ 	if (first_mst_stream)
+ 		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
+ 
+-	drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
++	drm_dp_add_payload_part2(&intel_dp->mst_mgr,
+ 				 drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ 
+ 	if (DISPLAY_VER(dev_priv) >= 12)
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index 3560a062d2872..5d7446a48ae79 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
+ static inline bool
+ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+ {
+-	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
++	/* TODO: make DPT shrinkable when it has no bound vmas */
++	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
++		!obj->is_dpt;
+ }
+ 
+ static inline bool
+diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+index d650beb8ed22f..20b9b04ec1e0b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
+ 		i915_request_put(rq);
+ 	}
+ 
++	/* Lazy irq enabling after HW submission */
+ 	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ 		intel_breadcrumbs_arm_irq(b);
++
++	/* And confirm that we still want irqs enabled before we yield */
++	if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
++		intel_breadcrumbs_disarm_irq(b);
+ }
+ 
+ struct intel_breadcrumbs *
+@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
+ 		return;
+ 
+ 	/* Kick the work once more to drain the signalers, and disarm the irq */
+-	irq_work_sync(&b->irq_work);
+-	while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
+-		local_irq_disable();
+-		signal_irq_work(&b->irq_work);
+-		local_irq_enable();
+-		cond_resched();
+-	}
++	irq_work_queue(&b->irq_work);
+ }
+ 
+ void intel_breadcrumbs_free(struct kref *kref)
+@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
+ 	 * the request as it may have completed and raised the interrupt as
+ 	 * we were attaching it into the lists.
+ 	 */
+-	if (!b->irq_armed || __i915_request_is_complete(rq))
++	if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
+ 		irq_work_queue(&b->irq_work);
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
+index 13705c5f14973..4b7497a8755cd 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
+@@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
+ 	if (nv_two_heads(dev))
+ 		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+ 
+-	if (!runtime)
++	if (!runtime && !drm->headless)
+ 		cancel_work_sync(&drm->hpd_work);
+ 
+ 	if (!suspend)
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 0c3d88ad0b0ea..674dc567e1798 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -915,7 +915,7 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
+ 		msto->disabled = false;
+ 		drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
+ 	} else if (msto->enabled) {
+-		drm_dp_add_payload_part2(mgr, state, new_payload);
++		drm_dp_add_payload_part2(mgr, new_payload);
+ 		msto->enabled = false;
+ 	}
+ }
+@@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
+ 			nv50_mstm_fini(nouveau_encoder(encoder));
+ 	}
+ 
+-	if (!runtime)
++	if (!runtime && !drm->headless)
+ 		cancel_work_sync(&drm->hpd_work);
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index f28f9a8574586..60c32244211d0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
+ {
+ 	struct nouveau_drm *drm = nouveau_drm(dev);
+ 
++	if (drm->headless)
++		return;
++
+ 	spin_lock_irq(&drm->hpd_lock);
+ 	drm->hpd_pending = ~0;
+ 	spin_unlock_irq(&drm->hpd_lock);
+@@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
+ 	}
+ 	drm_connector_list_iter_end(&conn_iter);
+ 
+-	if (!runtime)
++	if (!runtime && !drm->headless)
+ 		cancel_work_sync(&drm->hpd_work);
+ 
+ 	drm_kms_helper_poll_disable(dev);
+@@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
+ 		/* no display hw */
+ 		if (ret == -ENODEV) {
+ 			ret = 0;
++			drm->headless = true;
+ 			goto disp_create_err;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index e239c6bf4afa4..25fca98a20bcd 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -276,6 +276,7 @@ struct nouveau_drm {
+ 	/* modesetting */
+ 	struct nvbios vbios;
+ 	struct nouveau_display *display;
++	bool headless;
+ 	struct work_struct hpd_work;
+ 	spinlock_t hpd_lock;
+ 	u32 hpd_pending;
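
The nouveau changes all hang off the new drm->headless flag: when display probing returns -ENODEV, hpd_work is never armed, so the teardown paths must not cancel_work_sync() it. A small sketch of the guard pattern, with hypothetical names (probe_display_hw() stands in for the real probe):

struct demo_drm {
        bool headless;                          /* no display hw probed */
        struct work_struct hpd_work;
};

static int demo_display_create(struct demo_drm *drm)
{
        int ret = probe_display_hw(drm);        /* hypothetical */

        if (ret == -ENODEV) {
                drm->headless = true;           /* hpd_work never initialized */
                return 0;
        }
        return ret;
}

static void demo_display_fini(struct demo_drm *drm, bool runtime)
{
        if (!runtime && !drm->headless)
                cancel_work_sync(&drm->hpd_work);
}
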
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+index e8f385b9c6182..28bfc48a91272 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ 
+-	of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
++	ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
++	if (ret)
++		return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
+ 
+ 	drm_panel_add(&ctx->panel);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 58fb40c93100a..bea576434e475 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -956,13 +956,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
+ 				vmw_read(dev_priv,
+ 					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ 
+-		/*
+-		 * Workaround for low memory 2D VMs to compensate for the
+-		 * allocation taken by fbdev
+-		 */
+-		if (!(dev_priv->capabilities & SVGA_CAP_3D))
+-			mem_size *= 3;
+-
+ 		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ 		dev_priv->max_primary_mem =
+ 			vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index b019a1a1787af..c1430e55474cb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1066,9 +1066,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ 		       unsigned width, unsigned height, unsigned pitch,
+ 		       unsigned bpp, unsigned depth);
+-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+-				uint32_t pitch,
+-				uint32_t height);
+ int vmw_kms_present(struct vmw_private *dev_priv,
+ 		    struct drm_file *file_priv,
+ 		    struct vmw_framebuffer *vfb,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 84ae4e10a2ebe..11755d143e657 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -216,7 +216,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
+ 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
+ 
+ 	changed = false;
+-	if (old_image && new_image)
++	if (old_image && new_image && old_image != new_image)
+ 		changed = memcmp(old_image, new_image, size) != 0;
+ 
+ 	return changed;
+@@ -2157,13 +2157,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+ 	return 0;
+ }
+ 
++static
+ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+-				uint32_t pitch,
+-				uint32_t height)
++				u64 pitch,
++				u64 height)
+ {
+-	return ((u64) pitch * (u64) height) < (u64)
+-		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+-		 dev_priv->max_primary_mem : dev_priv->vram_size);
++	return (pitch * height) < (u64)dev_priv->vram_size;
+ }
+ 
+ /**
+@@ -2859,25 +2858,18 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+ enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
+ 					      struct drm_display_mode *mode)
+ {
++	enum drm_mode_status ret;
+ 	struct drm_device *dev = connector->dev;
+ 	struct vmw_private *dev_priv = vmw_priv(dev);
+-	u32 max_width = dev_priv->texture_max_width;
+-	u32 max_height = dev_priv->texture_max_height;
+ 	u32 assumed_cpp = 4;
+ 
+ 	if (dev_priv->assume_16bpp)
+ 		assumed_cpp = 2;
+ 
+-	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+-		max_width  = min(dev_priv->stdu_max_width,  max_width);
+-		max_height = min(dev_priv->stdu_max_height, max_height);
+-	}
+-
+-	if (max_width < mode->hdisplay)
+-		return MODE_BAD_HVALUE;
+-
+-	if (max_height < mode->vdisplay)
+-		return MODE_BAD_VVALUE;
++	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++				     dev_priv->texture_max_height);
++	if (ret != MODE_OK)
++		return ret;
+ 
+ 	if (!vmw_kms_validate_mode_vram(dev_priv,
+ 					mode->hdisplay * assumed_cpp,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 3c8414a13dbad..dbc44ecbd1f4a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -41,7 +41,14 @@
+ #define vmw_connector_to_stdu(x) \
+ 	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+ 
+-
++/*
++ * Some renderers such as llvmpipe will align the width and height of their
++ * buffers to match their tile size. We need to keep this in mind when exposing
++ * modes to userspace so that this possible over-allocation will not exceed
++ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
++ * tile size of current renderers.
++ */
++#define GPU_TILE_SIZE 64
+ 
+ enum stdu_content_type {
+ 	SAME_AS_DISPLAY = 0,
+@@ -830,7 +837,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+ 	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+ }
+ 
++static enum drm_mode_status
++vmw_stdu_connector_mode_valid(struct drm_connector *connector,
++			      struct drm_display_mode *mode)
++{
++	enum drm_mode_status ret;
++	struct drm_device *dev = connector->dev;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
++	/* Align width and height to account for GPU tile over-alignment */
++	u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
++			   ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
++			   assumed_cpp;
++	required_mem = ALIGN(required_mem, PAGE_SIZE);
++
++	ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
++				     dev_priv->stdu_max_height);
++	if (ret != MODE_OK)
++		return ret;
+ 
++	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
++				     dev_priv->texture_max_height);
++	if (ret != MODE_OK)
++		return ret;
++
++	if (required_mem > dev_priv->max_primary_mem)
++		return MODE_MEM;
++
++	if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
++		return MODE_MEM;
++
++	if (required_mem > dev_priv->max_mob_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
+ 
+ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ 	.dpms = vmw_du_connector_dpms,
+@@ -846,7 +887,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
+ static const struct
+ drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
+ 	.get_modes = vmw_connector_get_modes,
+-	.mode_valid = vmw_connector_mode_valid
++	.mode_valid = vmw_stdu_connector_mode_valid
+ };
+ 
+ 
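
The GPU_TILE_SIZE arithmetic above is easier to follow with concrete numbers. A sketch, assuming the same ALIGN() round-up semantics the hunk relies on:

#include <linux/align.h>

#define TILE    64      /* GPU_TILE_SIZE from the hunk above */

static u64 worst_case_fb_bytes(u32 w, u32 h, u32 cpp)
{
        /*
         * 1920x1080 @ 4 cpp: 1920 is already tile-aligned, 1080 rounds
         * up to 1088, so 8355840 bytes instead of the naive 8294400.
         */
        return ALIGN((u64)ALIGN(w, TILE) * ALIGN(h, TILE) * cpp, PAGE_SIZE);
}
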
+diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
+index 9fcae65b64699..e7a39ad7adba0 100644
+--- a/drivers/gpu/drm/xe/xe_gt_idle.c
++++ b/drivers/gpu/drm/xe/xe_gt_idle.c
+@@ -126,6 +126,13 @@ static const struct attribute *gt_idle_attrs[] = {
+ static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
+ {
+ 	struct kobject *kobj = arg;
++	struct xe_gt *gt = kobj_to_gt(kobj->parent);
++
++	if (gt_to_xe(gt)->info.skip_guc_pc) {
++		XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
++		xe_gt_idle_disable_c6(gt);
++		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
++	}
+ 
+ 	sysfs_remove_files(kobj, gt_idle_attrs);
+ 	kobject_put(kobj);
+@@ -184,7 +191,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
+ void xe_gt_idle_disable_c6(struct xe_gt *gt)
+ {
+ 	xe_device_assert_mem_access(gt_to_xe(gt));
+-	xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
++	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+ 
+ 	xe_mmio_write32(gt, PG_ENABLE, 0);
+ 	xe_mmio_write32(gt, RC_CONTROL, 0);
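
Worth noting on the xe_gt_idle hunks: disabling C6 now only requires the GT forcewake domain, so the assert was relaxed from XE_FORCEWAKE_ALL to XE_FW_GT. A hedged sketch of the caller-side contract, using only names that appear in the hunks (error handling elided):

        if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
                return;                         /* GT registers not awake */

        xe_gt_idle_disable_c6(gt);              /* asserts XE_FW_GT, not ALL */

        xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
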
+diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
+index 2839d685631bc..dd9d65f923da7 100644
+--- a/drivers/gpu/drm/xe/xe_guc_pc.c
++++ b/drivers/gpu/drm/xe/xe_guc_pc.c
+@@ -381,8 +381,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
+ 	struct xe_device *xe = gt_to_xe(gt);
+ 	u32 freq;
+ 
+-	xe_device_mem_access_get(gt_to_xe(gt));
+-
+ 	/* When in RC6, actual frequency reported will be 0. */
+ 	if (GRAPHICS_VERx100(xe) >= 1270) {
+ 		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+@@ -394,8 +392,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
+ 
+ 	freq = decode_freq(freq);
+ 
+-	xe_device_mem_access_put(gt_to_xe(gt));
+-
+ 	return freq;
+ }
+ 
+@@ -412,14 +408,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
+ 	struct xe_gt *gt = pc_to_gt(pc);
+ 	int ret;
+ 
+-	xe_device_mem_access_get(gt_to_xe(gt));
+ 	/*
+ 	 * GuC SLPC plays with cur freq request when GuCRC is enabled
+ 	 * Block RC6 for a more reliable read.
+ 	 */
+ 	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	*freq = xe_mmio_read32(gt, RPNSWREQ);
+ 
+@@ -427,9 +422,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
+ 	*freq = decode_freq(*freq);
+ 
+ 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+-out:
+-	xe_device_mem_access_put(gt_to_xe(gt));
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+@@ -451,12 +444,7 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
+  */
+ u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
+ {
+-	struct xe_gt *gt = pc_to_gt(pc);
+-	struct xe_device *xe = gt_to_xe(gt);
+-
+-	xe_device_mem_access_get(xe);
+ 	pc_update_rp_values(pc);
+-	xe_device_mem_access_put(xe);
+ 
+ 	return pc->rpe_freq;
+ }
+@@ -485,7 +473,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
+ 	struct xe_gt *gt = pc_to_gt(pc);
+ 	int ret;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+ 	mutex_lock(&pc->freq_lock);
+ 	if (!pc->freq_ready) {
+ 		/* Might be in the middle of a gt reset */
+@@ -511,7 +498,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
+ 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ out:
+ 	mutex_unlock(&pc->freq_lock);
+-	xe_device_mem_access_put(pc_to_xe(pc));
+ 	return ret;
+ }
+ 
+@@ -528,7 +514,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
+ {
+ 	int ret;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+ 	mutex_lock(&pc->freq_lock);
+ 	if (!pc->freq_ready) {
+ 		/* Might be in the middle of a gt reset */
+@@ -544,8 +529,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
+ 
+ out:
+ 	mutex_unlock(&pc->freq_lock);
+-	xe_device_mem_access_put(pc_to_xe(pc));
+-
+ 	return ret;
+ }
+ 
+@@ -561,7 +544,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
+ {
+ 	int ret;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+ 	mutex_lock(&pc->freq_lock);
+ 	if (!pc->freq_ready) {
+ 		/* Might be in the middle of a gt reset */
+@@ -577,7 +559,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
+ 
+ out:
+ 	mutex_unlock(&pc->freq_lock);
+-	xe_device_mem_access_put(pc_to_xe(pc));
+ 	return ret;
+ }
+ 
+@@ -594,7 +575,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
+ {
+ 	int ret;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+ 	mutex_lock(&pc->freq_lock);
+ 	if (!pc->freq_ready) {
+ 		/* Might be in the middle of a gt reset */
+@@ -610,7 +590,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
+ 
+ out:
+ 	mutex_unlock(&pc->freq_lock);
+-	xe_device_mem_access_put(pc_to_xe(pc));
+ 	return ret;
+ }
+ 
+@@ -623,8 +602,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
+ 	struct xe_gt *gt = pc_to_gt(pc);
+ 	u32 reg, gt_c_state;
+ 
+-	xe_device_mem_access_get(gt_to_xe(gt));
+-
+ 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ 		reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ 		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
+@@ -633,8 +610,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
+ 		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
+ 	}
+ 
+-	xe_device_mem_access_put(gt_to_xe(gt));
+-
+ 	switch (gt_c_state) {
+ 	case GT_C6:
+ 		return GT_IDLE_C6;
+@@ -654,9 +629,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
+ 	struct xe_gt *gt = pc_to_gt(pc);
+ 	u32 reg;
+ 
+-	xe_device_mem_access_get(gt_to_xe(gt));
+ 	reg = xe_mmio_read32(gt, GT_GFX_RC6);
+-	xe_device_mem_access_put(gt_to_xe(gt));
+ 
+ 	return reg;
+ }
+@@ -670,9 +643,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
+ 	struct xe_gt *gt = pc_to_gt(pc);
+ 	u64 reg;
+ 
+-	xe_device_mem_access_get(gt_to_xe(gt));
+ 	reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
+-	xe_device_mem_access_put(gt_to_xe(gt));
+ 
+ 	return reg;
+ }
+@@ -801,23 +772,19 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
+ 	if (xe->info.skip_guc_pc)
+ 		return 0;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+-
+ 	ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	xe_gt_idle_disable_c6(gt);
+ 
+ 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ 
+-out:
+-	xe_device_mem_access_put(pc_to_xe(pc));
+-	return ret;
++	return 0;
+ }
+ 
+ static void pc_init_pcode_freq(struct xe_guc_pc *pc)
+@@ -870,11 +837,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
+ 
+ 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+-
+ 	ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ 	if (ret)
+-		goto out_fail_force_wake;
++		return ret;
+ 
+ 	if (xe->info.skip_guc_pc) {
+ 		if (xe->info.platform != XE_PVC)
+@@ -914,8 +879,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
+ 
+ out:
+ 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+-out_fail_force_wake:
+-	xe_device_mem_access_put(pc_to_xe(pc));
+ 	return ret;
+ }
+ 
+@@ -928,12 +891,9 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
+ 	struct xe_device *xe = pc_to_xe(pc);
+ 	int ret;
+ 
+-	xe_device_mem_access_get(pc_to_xe(pc));
+-
+ 	if (xe->info.skip_guc_pc) {
+ 		xe_gt_idle_disable_c6(pc_to_gt(pc));
+-		ret = 0;
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	mutex_lock(&pc->freq_lock);
+@@ -942,16 +902,14 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
+ 
+ 	ret = pc_action_shutdown(pc);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
+ 		drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
+-		ret = -EIO;
++		return -EIO;
+ 	}
+ 
+-out:
+-	xe_device_mem_access_put(pc_to_xe(pc));
+-	return ret;
++	return 0;
+ }
+ 
+ /**
+@@ -962,14 +920,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
+ static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
+ {
+ 	struct xe_guc_pc *pc = arg;
+-	struct xe_device *xe = pc_to_xe(pc);
+-
+-	if (xe->info.skip_guc_pc) {
+-		xe_device_mem_access_get(xe);
+-		xe_gt_idle_disable_c6(pc_to_gt(pc));
+-		xe_device_mem_access_put(xe);
+-		return;
+-	}
+ 
+ 	xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
+ 	XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
+diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
+index e2a4c3b5e9ff8..3757e0dc5c81a 100644
+--- a/drivers/gpu/drm/xe/xe_guc_submit.c
++++ b/drivers/gpu/drm/xe/xe_guc_submit.c
+@@ -1257,6 +1257,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
+ 	return 0;
+ 
+ err_entity:
++	mutex_unlock(&guc->submission_state.lock);
+ 	xe_sched_entity_fini(&ge->entity);
+ err_sched:
+ 	xe_sched_fini(&ge->sched);
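
The one-line xe_guc_submit fix is a classic unlock-on-error-path bug: the lock was taken earlier in the function but the err_entity unwind returned without releasing it. Its general shape, all names hypothetical:

static int demo_init(struct demo *d)
{
        int err;

        mutex_lock(&d->lock);
        err = demo_reserve_id(d);       /* hypothetical */
        if (err)
                goto err_unlock;
        mutex_unlock(&d->lock);

        return 0;

err_unlock:
        mutex_unlock(&d->lock);         /* the missing step the fix adds */
        demo_teardown(d);               /* hypothetical */
        return err;
}
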
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index 5b2b37b598130..820c8d73c464f 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -79,6 +79,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
+ 	return i;
+ }
+ 
++static int emit_flush_dw(u32 *dw, int i)
++{
++	dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
++	dw[i++] = 0;
++	dw[i++] = 0;
++	dw[i++] = 0;
++
++	return i;
++}
++
+ static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
+ 			       u32 *dw, int i)
+ {
+@@ -233,10 +243,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
+ 
+ 	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+ 
+-	if (job->user_fence.used)
++	if (job->user_fence.used) {
++		i = emit_flush_dw(dw, i);
+ 		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+ 						job->user_fence.value,
+ 						dw, i);
++	}
+ 
+ 	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ 
+@@ -292,10 +304,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
+ 
+ 	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+ 
+-	if (job->user_fence.used)
++	if (job->user_fence.used) {
++		i = emit_flush_dw(dw, i);
+ 		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+ 						job->user_fence.value,
+ 						dw, i);
++	}
+ 
+ 	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ 
+diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
+index fd58a86b0888d..d022bfb5e95d7 100644
+--- a/drivers/greybus/interface.c
++++ b/drivers/greybus/interface.c
+@@ -693,6 +693,7 @@ static void gb_interface_release(struct device *dev)
+ 
+ 	trace_gb_interface_release(intf);
+ 
++	cancel_work_sync(&intf->mode_switch_work);
+ 	kfree(intf);
+ }
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index de7a477d66656..751a73ae7de75 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
+ 			hid_warn(hid,
+ 				 "%s() called with too large value %d (n: %d)! (%s)\n",
+ 				 __func__, value, n, current->comm);
+-			WARN_ON(1);
+ 			value &= m;
+ 		}
+ 	}
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 3c3c497b6b911..37958edec55f5 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ 		 */
+ 		msleep(50);
+ 
+-		if (retval)
++		if (retval) {
++			kfree(dj_report);
+ 			return retval;
++		}
+ 	}
+ 
+ 	/*
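
The logitech-dj fix plugs a leak: dj_report was not freed on the early error return inside the retry loop. The general pattern, names hypothetical:

static int send_with_retries(struct demo_dev *d)
{
        u8 *report = kzalloc(REPORT_SIZE, GFP_KERNEL);  /* size hypothetical */
        int retval;

        if (!report)
                return -ENOMEM;

        retval = send_report(d, report);        /* hypothetical */
        if (retval) {
                kfree(report);          /* free on *every* exit path */
                return retval;
        }

        kfree(report);
        return 0;
}
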
+diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
+index 58b15750dbb0a..ff9078ad19611 100644
+--- a/drivers/hid/hid-nvidia-shield.c
++++ b/drivers/hid/hid-nvidia-shield.c
+@@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
+ 		return haptics;
+ 
+ 	input_set_capability(haptics, EV_FF, FF_RUMBLE);
+-	input_ff_create_memless(haptics, NULL, play_effect);
++	ret = input_ff_create_memless(haptics, NULL, play_effect);
++	if (ret)
++		goto err;
+ 
+ 	ret = input_register_device(haptics);
+ 	if (ret)
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 648893f9e4b67..8dad239aba2ce 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -294,6 +294,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Meteor Lake-S */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Raptor Lake-S */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
+@@ -304,6 +309,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Granite Rapids */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Granite Rapids SOC */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Sapphire Rapids SOC */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
++	{
++		/* Lunar Lake */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Alder Lake CPU */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
+index d6eeea5166c04..131a67d9d4a68 100644
+--- a/drivers/i2c/busses/i2c-at91-slave.c
++++ b/drivers/i2c/busses/i2c-at91-slave.c
+@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
+ 
+ static u32 at91_twi_func(struct i2c_adapter *adapter)
+ {
+-	return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
+-		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
++	return I2C_FUNC_SLAVE;
+ }
+ 
+ static const struct i2c_algorithm at91_twi_algorithm_slave = {
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 2e079cf20bb5b..78e2c47e3d7da 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
+ 
+ void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
+ {
+-	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
++	dev->functionality = I2C_FUNC_SLAVE;
+ 
+ 	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
+ 			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
+diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
+index 7475ec2a56c72..13e1bba45beff 100644
+--- a/drivers/iio/adc/ad9467.c
++++ b/drivers/iio/adc/ad9467.c
+@@ -225,11 +225,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
+ }
+ 
+ static const struct iio_chan_spec ad9434_channels[] = {
+-	AD9467_CHAN(0, 0, 12, 'S'),
++	AD9467_CHAN(0, 0, 12, 's'),
+ };
+ 
+ static const struct iio_chan_spec ad9467_channels[] = {
+-	AD9467_CHAN(0, 0, 16, 'S'),
++	AD9467_CHAN(0, 0, 16, 's'),
+ };
+ 
+ static const struct ad9467_chip_info ad9467_chip_tbl = {
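
On the ad9467 hunk: the IIO core expects lowercase 's'/'u' sign characters in scan_type — that character is emitted verbatim into the sysfs scan-element type string (e.g. "le:s12/16>>0") that userspace parses, so an uppercase 'S' breaks consumers. A sketch of a correctly declared channel:

#include <linux/iio/iio.h>

static const struct iio_chan_spec demo_channel = {     /* hypothetical */
        .type = IIO_VOLTAGE,
        .indexed = 1,
        .channel = 0,
        .scan_index = 0,
        .scan_type = {
                .sign = 's',            /* lowercase: signed, per IIO ABI */
                .realbits = 12,
                .storagebits = 16,
        },
};
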
+diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
+index a543b91124b07..e3b2158829416 100644
+--- a/drivers/iio/adc/adi-axi-adc.c
++++ b/drivers/iio/adc/adi-axi-adc.c
+@@ -175,6 +175,7 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
+ 	struct adi_axi_adc_state *st;
+ 	void __iomem *base;
+ 	unsigned int ver;
++	struct clk *clk;
+ 	int ret;
+ 
+ 	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+@@ -195,6 +196,10 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
+ 	if (!expected_ver)
+ 		return -ENODEV;
+ 
++	clk = devm_clk_get_enabled(&pdev->dev, NULL);
++	if (IS_ERR(clk))
++		return PTR_ERR(clk);
++
+ 	/*
+ 	 * Force disable the core. Up to the frontend to enable us. And we can
+ 	 * still read/write registers...
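
The adi-axi-adc hunk uses devm_clk_get_enabled(), which bundles get + prepare_enable + automatic unwind into one call. Roughly the open-coded equivalent it saves — a sketch, demo_* names hypothetical:

#include <linux/clk.h>
#include <linux/device.h>

static void demo_clk_disable(void *data)
{
        clk_disable_unprepare(data);
}

static int demo_probe(struct device *dev)
{
        struct clk *clk;
        int ret;

        /* Open-coded equivalent of devm_clk_get_enabled(dev, NULL): */
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* undone automatically on detach or probe failure */
        return devm_add_action_or_reset(dev, demo_clk_disable, clk);
}
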
+diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+index 3b0f9598a7c77..adceb1b7a07ff 100644
+--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
++++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
+ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ 				     uint32_t period, bool fifo)
+ {
++	uint32_t mult;
++
+ 	/* when FIFO is on, prevent odr change if one is already pending */
+ 	if (fifo && ts->new_mult != 0)
+ 		return -EAGAIN;
+ 
+-	ts->new_mult = period / ts->chip.clock_period;
++	mult = period / ts->chip.clock_period;
++	if (mult != ts->mult)
++		ts->new_mult = mult;
+ 
+ 	return 0;
+ }
+@@ -101,6 +105,9 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
+ 
+ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
+ {
++	const int64_t period_min = ts->min_period * ts->mult;
++	const int64_t period_max = ts->max_period * ts->mult;
++	int64_t add_max, sub_max;
+ 	int64_t delta, jitter;
+ 	int64_t adjust;
+ 
+@@ -108,11 +115,13 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
+ 	delta = ts->it.lo - ts->timestamp;
+ 
+ 	/* adjust timestamp while respecting jitter */
++	add_max = period_max - (int64_t)ts->period;
++	sub_max = period_min - (int64_t)ts->period;
+ 	jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter);
+ 	if (delta > jitter)
+-		adjust = jitter;
++		adjust = add_max;
+ 	else if (delta < -jitter)
+-		adjust = -jitter;
++		adjust = sub_max;
+ 	else
+ 		adjust = 0;
+ 
+diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
+index 076bc9ecfb499..4763402dbcd66 100644
+--- a/drivers/iio/dac/ad5592r-base.c
++++ b/drivers/iio/dac/ad5592r-base.c
+@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ 			s64 tmp = *val * (3767897513LL / 25LL);
+ 			*val = div_s64_rem(tmp, 1000000000LL, val2);
+ 
+-			return IIO_VAL_INT_PLUS_MICRO;
++			return IIO_VAL_INT_PLUS_NANO;
+ 		}
+ 
+ 		mutex_lock(&st->lock);
+diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
+index 5d42ab9b176a3..67d74a1a1b26d 100644
+--- a/drivers/iio/imu/bmi323/bmi323_core.c
++++ b/drivers/iio/imu/bmi323/bmi323_core.c
+@@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
+ 				       &data->buffer.channels,
+ 				       ARRAY_SIZE(data->buffer.channels));
+ 		if (ret)
+-			return IRQ_NONE;
++			goto out;
+ 	} else {
+ 		for_each_set_bit(bit, indio_dev->active_scan_mask,
+ 				 BMI323_CHAN_MAX) {
+@@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
+ 					      &data->buffer.channels[index++],
+ 					      BMI323_BYTES_PER_SAMPLE);
+ 			if (ret)
+-				return IRQ_NONE;
++				goto out;
+ 		}
+ 	}
+ 
+ 	iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ 					   iio_get_time_ns(indio_dev));
+ 
++out:
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+index f67bd5a39beb3..6d9cb010f628b 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -129,10 +129,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ 	/* update data FIFO write */
+ 	inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+-	if (ret)
+-		goto out_unlock;
+-
+-	ret = inv_icm42600_buffer_update_watermark(st);
+ 
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+index 3df0a715e8856..fc80b4a97fda8 100644
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -129,10 +129,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ 	/* update data FIFO write */
+ 	inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+ 	ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+-	if (ret)
+-		goto out_unlock;
+-
+-	ret = inv_icm42600_buffer_update_watermark(st);
+ 
+ out_unlock:
+ 	mutex_unlock(&st->lock);
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 62e9e93d915dc..0199d055e0ee1 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
+ 
+ 	/*
+ 	 * Temperature is returned in Celsius degrees in fractional
+-	 * form down 2^16. We rescale by x1000 to return milli Celsius
+-	 * to respect IIO ABI.
++	 * form down 2^16. We rescale by x1000 to return millidegrees
++	 * Celsius to respect IIO ABI.
+ 	 */
+-	*val = raw_temp * 1000;
+-	*val2 = 16;
+-	return IIO_VAL_FRACTIONAL_LOG2;
++	raw_temp = sign_extend32(raw_temp, 23);
++	*val = ((s64)raw_temp * 1000) / (1 << 16);
++	return IIO_VAL_INT;
+ }
+ 
+ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
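
For the bmp580 hunk: the raw register value is a 24-bit two's-complement temperature in units of 1/2^16 degC, so it must be sign-extended before scaling to the millidegree integer the IIO ABI wants. A worked example, assuming an arbitrary raw reading:

#include <linux/bitops.h>       /* sign_extend32() */
#include <linux/math64.h>       /* div_s64() */

/* Hypothetical raw 24-bit reading 0x19d99a: */
s32 raw = sign_extend32(0x19d99a, 23);                  /* 1694106 */
int milli_c = div_s64((s64)raw * 1000, 1 << 16);        /* 25850 -> 25.850 degC */
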
+diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
+index 46845804292bf..7a3eef5d5e752 100644
+--- a/drivers/iio/temperature/mcp9600.c
++++ b/drivers/iio/temperature/mcp9600.c
+@@ -52,7 +52,8 @@ static int mcp9600_read(struct mcp9600_data *data,
+ 
+ 	if (ret < 0)
+ 		return ret;
+-	*val = ret;
++
++	*val = sign_extend32(ret, 15);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
+index 1f5c962c1818f..f7f88498ba0e8 100644
+--- a/drivers/iio/temperature/mlx90635.c
++++ b/drivers/iio/temperature/mlx90635.c
+@@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
+ 				     "failed to allocate regmap\n");
+ 
+ 	regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
+-	if (IS_ERR(regmap))
+-		return dev_err_probe(&client->dev, PTR_ERR(regmap),
+-				     "failed to allocate regmap\n");
++	if (IS_ERR(regmap_ee))
++		return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
++				     "failed to allocate EEPROM regmap\n");
+ 
+ 	mlx90635 = iio_priv(indio_dev);
+ 	i2c_set_clientdata(client, indio_dev);
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index f440ca440d924..e740dc54c4685 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1678,8 +1678,17 @@ static void __init free_pci_segments(void)
+ 	}
+ }
+ 
++static void __init free_sysfs(struct amd_iommu *iommu)
++{
++	if (iommu->iommu.dev) {
++		iommu_device_unregister(&iommu->iommu);
++		iommu_device_sysfs_remove(&iommu->iommu);
++	}
++}
++
+ static void __init free_iommu_one(struct amd_iommu *iommu)
+ {
++	free_sysfs(iommu);
+ 	free_cwwb_sem(iommu);
+ 	free_command_buffer(iommu);
+ 	free_event_buffer(iommu);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 5f7d3db3afd82..6dbac6ec778ee 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	u32 event = its_get_event_id(d);
+-	int ret = 0;
+ 
+ 	if (!info->map)
+ 		return -EINVAL;
+ 
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+ 	if (!its_dev->event_map.vm) {
+ 		struct its_vlpi_map *maps;
+ 
+ 		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+ 			       GFP_ATOMIC);
+-		if (!maps) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
++		if (!maps)
++			return -ENOMEM;
+ 
+ 		its_dev->event_map.vm = info->map->vm;
+ 		its_dev->event_map.vlpi_maps = maps;
+ 	} else if (its_dev->event_map.vm != info->map->vm) {
+-		ret = -EINVAL;
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Get our private copy of the mapping information */
+@@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+ 		its_dev->event_map.nr_vlpis++;
+ 	}
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	struct its_vlpi_map *map;
+-	int ret = 0;
+-
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+ 
+ 	map = get_vlpi_map(d);
+ 
+-	if (!its_dev->event_map.vm || !map) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!its_dev->event_map.vm || !map)
++		return -EINVAL;
+ 
+ 	/* Copy our mapping information to the incoming request */
+ 	*info->map = *map;
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_unmap(struct irq_data *d)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	u32 event = its_get_event_id(d);
+-	int ret = 0;
+-
+-	raw_spin_lock(&its_dev->event_map.vlpi_lock);
+ 
+-	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
++		return -EINVAL;
+ 
+ 	/* Drop the virtual mapping */
+ 	its_send_discard(its_dev, event);
+@@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d)
+ 		kfree(its_dev->event_map.vlpi_maps);
+ 	}
+ 
+-out:
+-	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+@@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+ 	if (!is_v4(its_dev->its))
+ 		return -EINVAL;
+ 
++	guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
++
+ 	/* Unmap request? */
+ 	if (!info)
+ 		return its_vlpi_unmap(d);
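
The irq-gic-v3-its rework replaces per-function lock/unlock pairs with one scope-based guard at the common entry point; guard() from <linux/cleanup.h> drops the lock on every return automatically, which is what makes the bare `return -EINVAL` paths above safe. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);          /* hypothetical */

static int demo_set_vcpu_affinity(void *info)
{
        guard(raw_spinlock_irq)(&demo_lock);    /* irqs off + lock held */

        if (!info)
                return -EINVAL;         /* unlock + irq-enable happen here too */

        /* ... work under the lock ... */
        return 0;
}
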
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index f3d4cb9e34f7d..d246b7230bd9c 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -85,7 +85,7 @@ struct plic_handler {
+ 	struct plic_priv	*priv;
+ };
+ static int plic_parent_irq __ro_after_init;
+-static bool plic_cpuhp_setup_done __ro_after_init;
++static bool plic_global_setup_done __ro_after_init;
+ static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+ 
+ static int plic_irq_set_type(struct irq_data *d, unsigned int type);
+@@ -490,10 +490,8 @@ static int plic_probe(struct platform_device *pdev)
+ 	unsigned long plic_quirks = 0;
+ 	struct plic_handler *handler;
+ 	u32 nr_irqs, parent_hwirq;
+-	struct irq_domain *domain;
+ 	struct plic_priv *priv;
+ 	irq_hw_number_t hwirq;
+-	bool cpuhp_setup;
+ 
+ 	if (is_of_node(dev->fwnode)) {
+ 		const struct of_device_id *id;
+@@ -552,14 +550,6 @@ static int plic_probe(struct platform_device *pdev)
+ 			continue;
+ 		}
+ 
+-		/* Find parent domain and register chained handler */
+-		domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+-		if (!plic_parent_irq && domain) {
+-			plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+-			if (plic_parent_irq)
+-				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+-		}
+-
+ 		/*
+ 		 * When running in M-mode we need to ignore the S-mode handler.
+ 		 * Here we assume it always comes later, but that might be a
+@@ -600,25 +590,35 @@ static int plic_probe(struct platform_device *pdev)
+ 		goto fail_cleanup_contexts;
+ 
+ 	/*
+-	 * We can have multiple PLIC instances so setup cpuhp state
++	 * We can have multiple PLIC instances so setup global state
+ 	 * and register syscore operations only once after context
+ 	 * handlers of all online CPUs are initialized.
+ 	 */
+-	if (!plic_cpuhp_setup_done) {
+-		cpuhp_setup = true;
++	if (!plic_global_setup_done) {
++		struct irq_domain *domain;
++		bool global_setup = true;
++
+ 		for_each_online_cpu(cpu) {
+ 			handler = per_cpu_ptr(&plic_handlers, cpu);
+ 			if (!handler->present) {
+-				cpuhp_setup = false;
++				global_setup = false;
+ 				break;
+ 			}
+ 		}
+-		if (cpuhp_setup) {
++
++		if (global_setup) {
++			/* Find parent domain and register chained handler */
++			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
++			if (domain)
++				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
++			if (plic_parent_irq)
++				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
++
+ 			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ 					  "irqchip/sifive/plic:starting",
+ 					  plic_starting_cpu, plic_dying_cpu);
+ 			register_syscore_ops(&plic_irq_syscore_ops);
+-			plic_cpuhp_setup_done = true;
++			plic_global_setup_done = true;
+ 		}
+ 	}
+ 
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 24fcff682b24a..ba1be15cfd8ea 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
+ 	led_init_core(led_cdev);
+ 
+ #ifdef CONFIG_LEDS_TRIGGERS
+-	/*
+-	 * If no default trigger was given and hw_control_trigger is set,
+-	 * make it the default trigger.
+-	 */
+-	if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
+-		led_cdev->default_trigger = led_cdev->hw_control_trigger;
+ 	led_trigger_set_default(led_cdev);
+ #endif
+ 
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 7f3dc8ee6ab8d..417fddebe367a 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
+ 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
+ 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
+ 		limits->dma_alignment = limits->logical_block_size - 1;
++		limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
+ 	}
+ 	limits->max_integrity_segments = USHRT_MAX;
+ }
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+index 32af2b14ff344..34c9be437432a 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ 	aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
+ 						 GFP_KERNEL);
+-	if (!aux_bus->aux_device_wrapper[1])
+-		return -ENOMEM;
++	if (!aux_bus->aux_device_wrapper[1]) {
++		retval = -ENOMEM;
++		goto err_aux_dev_add_0;
++	}
+ 
+ 	retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ 	if (retval < 0)
+@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ err_aux_dev_add_1:
+ 	auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
++	goto err_aux_dev_add_0;
+ 
+ err_aux_dev_init_1:
+ 	ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
+@@ -120,6 +123,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ 
+ err_aux_dev_add_0:
+ 	auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
++	goto err_ret;
+ 
+ err_aux_dev_init_0:
+ 	ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
+@@ -127,6 +131,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
+ err_ida_alloc_0:
+ 	kfree(aux_bus->aux_device_wrapper[0]);
+ 
++err_ret:
+ 	return retval;
+ }
+ 
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 7f59dd38c32f5..6589635f8ba32 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
+ 	}
+ 
+ 	err = mei_restart(dev);
+-	if (err)
++	if (err) {
++		free_irq(pdev->irq, dev);
+ 		return err;
++	}
+ 
+ 	/* Start timer if stopped in suspend */
+ 	schedule_delayed_work(&dev->timer_work, HZ);
+diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
+index b543e6b9f3cfd..1ec65d87488a3 100644
+--- a/drivers/misc/mei/platform-vsc.c
++++ b/drivers/misc/mei/platform-vsc.c
+@@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
+ 
+ static int mei_vsc_suspend(struct device *dev)
+ {
+-	struct mei_device *mei_dev = dev_get_drvdata(dev);
+-	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
++	struct mei_device *mei_dev;
++	int ret = 0;
+ 
+-	mei_stop(mei_dev);
++	mei_dev = dev_get_drvdata(dev);
++	if (!mei_dev)
++		return -ENODEV;
+ 
+-	mei_disable_interrupts(mei_dev);
++	mutex_lock(&mei_dev->device_lock);
+ 
+-	vsc_tp_free_irq(hw->tp);
++	if (!mei_write_is_idle(mei_dev))
++		ret = -EAGAIN;
+ 
+-	return 0;
++	mutex_unlock(&mei_dev->device_lock);
++
++	return ret;
+ }
+ 
+ static int mei_vsc_resume(struct device *dev)
+ {
+-	struct mei_device *mei_dev = dev_get_drvdata(dev);
+-	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+-	int ret;
+-
+-	ret = vsc_tp_request_irq(hw->tp);
+-	if (ret)
+-		return ret;
+-
+-	ret = mei_restart(mei_dev);
+-	if (ret)
+-		goto err_free;
++	struct mei_device *mei_dev;
+ 
+-	/* start timer if stopped in suspend */
+-	schedule_delayed_work(&mei_dev->timer_work, HZ);
++	mei_dev = dev_get_drvdata(dev);
++	if (!mei_dev)
++		return -ENODEV;
+ 
+ 	return 0;
+-
+-err_free:
+-	vsc_tp_free_irq(hw->tp);
+-
+-	return ret;
+ }
+ 
+ static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
+index ffa4ccd96a104..596a9d695dfc1 100644
+--- a/drivers/misc/mei/vsc-fw-loader.c
++++ b/drivers/misc/mei/vsc-fw-loader.c
+@@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
+ {
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+ 	union acpi_object obj = {
+-		.type = ACPI_TYPE_INTEGER,
++		.integer.type = ACPI_TYPE_INTEGER,
+ 		.integer.value = 1,
+ 	};
+ 	struct acpi_object_list arg_list = {
+diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
+index 5d7ac07623c27..9a41ab65378de 100644
+--- a/drivers/misc/vmw_vmci/vmci_event.c
++++ b/drivers/misc/vmw_vmci/vmci_event.c
+@@ -9,6 +9,7 @@
+ #include <linux/vmw_vmci_api.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+@@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg)
+ {
+ 	struct vmci_subscription *cur;
+ 	struct list_head *subscriber_list;
++	u32 sanitized_event, max_vmci_event;
+ 
+ 	rcu_read_lock();
+-	subscriber_list = &subscriber_array[event_msg->event_data.event];
++	max_vmci_event = ARRAY_SIZE(subscriber_array);
++	sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
++	subscriber_list = &subscriber_array[sanitized_event];
+ 	list_for_each_entry_rcu(cur, subscriber_list, node) {
+ 		cur->callback(cur->id, &event_msg->event_data,
+ 			      cur->callback_data);
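
The vmci_event fix is Spectre-v1 hardening: array_index_nospec() clamps an attacker-influenced index so the CPU cannot dereference out of bounds under speculation. Canonical usage in sketch form (the bounds validation is assumed to happen as shown; in the driver it occurs earlier in the call chain):

#include <linux/kernel.h>
#include <linux/nospec.h>

static const int demo_table[8] = { 0 };         /* hypothetical */

static int demo_read_entry(unsigned int idx)
{
        if (idx >= ARRAY_SIZE(demo_table))
                return -EINVAL;
        /* Clamp idx under speculation; afterwards it is provably < 8. */
        idx = array_index_nospec(idx, ARRAY_SIZE(demo_table));
        return demo_table[idx];
}
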
+diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
+index 811ebeeff4ed7..43ac68052baf9 100644
+--- a/drivers/net/dsa/qca/qca8k-leds.c
++++ b/drivers/net/dsa/qca/qca8k-leds.c
+@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
+ 		init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
+ 						 priv->internal_mdio_bus->id,
+ 						 port_num);
+-		if (!init_data.devicename)
++		if (!init_data.devicename) {
++			fwnode_handle_put(led);
++			fwnode_handle_put(leds);
+ 			return -ENOMEM;
++		}
+ 
+ 		ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
+ 		if (ret)
+@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
+ 		kfree(init_data.devicename);
+ 	}
+ 
++	fwnode_handle_put(leds);
+ 	return 0;
+ }
+ 
+@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+ 		 * the correct port for LED setup.
+ 		 */
+ 		ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
+-		if (ret)
++		if (ret) {
++			fwnode_handle_put(port);
++			fwnode_handle_put(ports);
+ 			return ret;
++		}
+ 	}
+ 
++	fwnode_handle_put(ports);
+ 	return 0;
+ }
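
The qca8k-leds fixes are all fwnode refcount drops: the child-node iterators take a reference per node, so every early return must put both the current child and the parent. A sketch, handle_led() hypothetical:

#include <linux/property.h>

static int demo_walk_leds(struct device *dev)
{
        struct fwnode_handle *leds, *led;
        int ret;

        leds = device_get_named_child_node(dev, "leds");
        if (!leds)
                return -ENODEV;

        fwnode_for_each_child_node(leds, led) {
                ret = handle_led(led);          /* hypothetical */
                if (ret) {
                        fwnode_handle_put(led); /* ref held by iterator */
                        fwnode_handle_put(leds);
                        return ret;
                }
        }

        fwnode_handle_put(leds);
        return 0;
}
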
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index dd849e715c9ba..c46abcccfca10 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1419,6 +1419,57 @@ struct bnxt_l2_filter {
+ 	atomic_t		refcnt;
+ };
+ 
++/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes.  The
++ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
++ * The last valid byte in the compat version is different.
++ */
++struct hwrm_port_phy_qcfg_output_compat {
++	__le16	error_code;
++	__le16	req_type;
++	__le16	seq_id;
++	__le16	resp_len;
++	u8	link;
++	u8	active_fec_signal_mode;
++	__le16	link_speed;
++	u8	duplex_cfg;
++	u8	pause;
++	__le16	support_speeds;
++	__le16	force_link_speed;
++	u8	auto_mode;
++	u8	auto_pause;
++	__le16	auto_link_speed;
++	__le16	auto_link_speed_mask;
++	u8	wirespeed;
++	u8	lpbk;
++	u8	force_pause;
++	u8	module_status;
++	__le32	preemphasis;
++	u8	phy_maj;
++	u8	phy_min;
++	u8	phy_bld;
++	u8	phy_type;
++	u8	media_type;
++	u8	xcvr_pkg_type;
++	u8	eee_config_phy_addr;
++	u8	parallel_detect;
++	__le16	link_partner_adv_speeds;
++	u8	link_partner_adv_auto_mode;
++	u8	link_partner_adv_pause;
++	__le16	adv_eee_link_speed_mask;
++	__le16	link_partner_adv_eee_link_speed_mask;
++	__le32	xcvr_identifier_type_tx_lpi_timer;
++	__le16	fec_cfg;
++	u8	duplex_state;
++	u8	option_flags;
++	char	phy_vendor_name[16];
++	char	phy_vendor_partnumber[16];
++	__le16	support_pam4_speeds;
++	__le16	force_pam4_link_speed;
++	__le16	auto_pam4_link_speed_mask;
++	u8	link_partner_pam4_adv_speeds;
++	u8	valid;
++};
++
+ struct bnxt_link_info {
+ 	u8			phy_type;
+ 	u8			media_type;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+index 1df3d56cc4b51..d2fd2d04ed474 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
+ 			    req_type);
+ 	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ 		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+-			 req_type, token->seq_id, rc);
++			 req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ 	rc = __hwrm_to_stderr(rc);
+ exit:
+ 	if (token)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+index 175192ebaa773..22898d3d088b0 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ 	struct hwrm_fwd_resp_input *req;
+ 	int rc;
+ 
+-	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
++	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
++		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
++				 msg_size);
+ 		return -EINVAL;
++	}
+ 
+ 	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
+ 	if (!rc) {
+@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+ 		rc = bnxt_hwrm_exec_fwd_resp(
+ 			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+ 	} else {
+-		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
++		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
+ 		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+ 
+ 		phy_qcfg_req =
+@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+ 		mutex_unlock(&bp->link_lock);
+ 		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
+ 		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
++		/* New SPEEDS2 fields are beyond the legacy structure, so
++		 * clear the SPEEDS2_SUPPORTED flag.
++		 */
++		phy_qcfg_resp.option_flags &=
++			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
+ 		phy_qcfg_resp.valid = 1;
+ 
+ 		if (vf->flags & BNXT_VF_LINK_UP) {
+diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+index aa6c0dfb6f1ca..e26b4ed33dc83 100644
+--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
++++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
+ 				pg_info->page_offset;
+ 			memcpy(skb->data, va, MIN_SKB_SIZE);
+ 			skb_put(skb, MIN_SKB_SIZE);
++			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++					pg_info->page,
++					pg_info->page_offset + MIN_SKB_SIZE,
++					len - MIN_SKB_SIZE,
++					LIO_RXBUFFER_SZ);
+ 		}
+-
+-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+-				pg_info->page,
+-				pg_info->page_offset + MIN_SKB_SIZE,
+-				len - MIN_SKB_SIZE,
+-				LIO_RXBUFFER_SZ);
+ 	} else {
+ 		struct octeon_skb_page_info *pg_info =
+ 			((struct octeon_skb_page_info *)(skb->cb));
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index 8e8071308aebe..d165a999d32e9 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -581,11 +581,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
+ 	skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+ }
+ 
+-static void gve_rx_free_skb(struct gve_rx_ring *rx)
++static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
+ {
+ 	if (!rx->ctx.skb_head)
+ 		return;
+ 
++	if (rx->ctx.skb_head == napi->skb)
++		napi->skb = NULL;
+ 	dev_kfree_skb_any(rx->ctx.skb_head);
+ 	rx->ctx.skb_head = NULL;
+ 	rx->ctx.skb_tail = NULL;
+@@ -884,7 +886,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+ 
+ 		err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
+ 		if (err < 0) {
+-			gve_rx_free_skb(rx);
++			gve_rx_free_skb(napi, rx);
+ 			u64_stats_update_begin(&rx->statss);
+ 			if (err == -ENOMEM)
+ 				rx->rx_skb_alloc_fail++;
+@@ -927,7 +929,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+ 
+ 		/* gve_rx_complete_skb() will consume skb if successful */
+ 		if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+-			gve_rx_free_skb(rx);
++			gve_rx_free_skb(napi, rx);
+ 			u64_stats_update_begin(&rx->statss);
+ 			rx->rx_desc_err_dropped_pkt++;
+ 			u64_stats_update_end(&rx->statss);
+diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+index bc34b6cd3a3e5..917a79a47e19c 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
+ 	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+ 		return -1;
+ 
++	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++		return -EINVAL;
++
+ 	/* Needed because we will modify header. */
+ 	err = skb_cow_head(skb, 0);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	tcp = tcp_hdr(skb);
+-
+-	/* Remove payload length from checksum. */
+ 	paylen = skb->len - skb_transport_offset(skb);
+-
+-	switch (skb_shinfo(skb)->gso_type) {
+-	case SKB_GSO_TCPV4:
+-	case SKB_GSO_TCPV6:
+-		csum_replace_by_diff(&tcp->check,
+-				     (__force __wsum)htonl(paylen));
+-
+-		/* Compute length of segmentation header. */
+-		header_len = skb_tcp_all_headers(skb);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
++	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
++	header_len = skb_tcp_all_headers(skb);
+ 
+ 	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 19668a8d22f76..c9258b1b2b429 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3539,6 +3539,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+ 		ret = hns3_alloc_and_attach_buffer(ring, i);
+ 		if (ret)
+ 			goto out_buffer_fail;
++
++		if (!(i % HNS3_RESCHED_BD_NUM))
++			cond_resched();
+ 	}
+ 
+ 	return 0;
+@@ -5111,6 +5114,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
+ 		}
+ 
+ 		u64_stats_init(&priv->ring[i].syncp);
++		cond_resched();
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index acd756b0c7c9a..d36c4ed16d8dd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -214,6 +214,8 @@ enum hns3_nic_state {
+ #define HNS3_CQ_MODE_EQE			1U
+ #define HNS3_CQ_MODE_CQE			0U
+ 
++#define HNS3_RESCHED_BD_NUM			1024
++
+ enum hns3_pkt_l2t_type {
+ 	HNS3_L2_TYPE_UNICAST,
+ 	HNS3_L2_TYPE_MULTICAST,
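
The hns3 change bounds scheduling latency: allocating a very large ring can otherwise hog the CPU on voluntary-preemption kernels, so it yields every HNS3_RESCHED_BD_NUM (1024) buffer descriptors. The shape of it, names hypothetical:

#include <linux/sched.h>

static void demo_alloc_all_bds(struct demo_ring *r, int nr_bds)
{
        int i;

        for (i = 0; i < nr_bds; i++) {
                demo_alloc_one_bd(r, i);        /* hypothetical */

                /* Yield periodically; fires every 1024 iterations. */
                if (!(i % 1024))
                        cond_resched();
        }
}
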
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index ce60332d83c39..990d7bd397aa8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3042,9 +3042,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
+ 
+ static void hclge_update_link_status(struct hclge_dev *hdev)
+ {
+-	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ 	struct hnae3_handle *handle = &hdev->vport[0].nic;
+-	struct hnae3_client *rclient = hdev->roce_client;
+ 	struct hnae3_client *client = hdev->nic_client;
+ 	int state;
+ 	int ret;
+@@ -3068,8 +3066,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+ 
+ 		client->ops->link_status_change(handle, state);
+ 		hclge_config_mac_tnl_int(hdev, state);
+-		if (rclient && rclient->ops->link_status_change)
+-			rclient->ops->link_status_change(rhandle, state);
++
++		if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
++			struct hnae3_handle *rhandle = &hdev->vport[0].roce;
++			struct hnae3_client *rclient = hdev->roce_client;
++
++			if (rclient && rclient->ops->link_status_change)
++				rclient->ops->link_status_change(rhandle,
++								 state);
++		}
+ 
+ 		hclge_push_link_status(hdev);
+ 	}
+@@ -11245,6 +11250,12 @@ static int hclge_init_client_instance(struct hnae3_client *client,
+ 	return ret;
+ }
+ 
++static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
++{
++	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
++	       test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
++}
++
+ static void hclge_uninit_client_instance(struct hnae3_client *client,
+ 					 struct hnae3_ae_dev *ae_dev)
+ {
+@@ -11253,7 +11264,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
+ 
+ 	if (hdev->roce_client) {
+ 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+-		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
++		while (hclge_uninit_need_wait(hdev))
+ 			msleep(HCLGE_WAIT_RESET_DONE);
+ 
+ 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 365c03d1c4622..8e40f26aa5060 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -412,7 +412,6 @@ struct ice_vsi {
+ 	struct ice_tc_cfg tc_cfg;
+ 	struct bpf_prog *xdp_prog;
+ 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
+-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
+ 	u16 num_xdp_txq;		 /* Used XDP queues */
+ 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ 
+@@ -748,6 +747,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
+ 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
+ }
+ 
++/**
++ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
++ * @vsi: pointer to VSI
++ * @qid: index of a queue to look at XSK buff pool presence
++ *
++ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
++ * attached and configured as zero-copy, NULL otherwise.
++ */
++static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
++							u16 qid)
++{
++	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++
++	if (!ice_is_xdp_ena_vsi(vsi))
++		return NULL;
++
++	return (pool && pool->dev) ? pool : NULL;
++}
++
+ /**
+  * ice_xsk_pool - get XSK buffer pool bound to a ring
+  * @ring: Rx ring to use
+@@ -760,10 +778,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+ 	struct ice_vsi *vsi = ring->vsi;
+ 	u16 qid = ring->q_index;
+ 
+-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+-		return NULL;
+-
+-	return xsk_get_pool_from_qid(vsi->netdev, qid);
++	return ice_get_xp_from_qid(vsi, qid);
+ }
+ 
+ /**
+@@ -788,12 +803,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
+ 	if (!ring)
+ 		return;
+ 
+-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+-		ring->xsk_pool = NULL;
+-		return;
+-	}
+-
+-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
++	ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ }
+ 
+ /**
+@@ -922,9 +932,17 @@ int ice_down(struct ice_vsi *vsi);
+ int ice_down_up(struct ice_vsi *vsi);
+ int ice_vsi_cfg_lan(struct ice_vsi *vsi);
+ struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
++
++enum ice_xdp_cfg {
++	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
++	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
++};
++
+ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++			  enum ice_xdp_cfg cfg_type);
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
++void ice_map_xdp_rings(struct ice_vsi *vsi);
+ int
+ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ 	     u32 flags);
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index a545a7917e4fc..9d23a436d2a6a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -860,6 +860,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+ 		}
+ 		rx_rings_rem -= rx_rings_per_v;
+ 	}
++
++	if (ice_is_xdp_ena_vsi(vsi))
++		ice_map_xdp_rings(vsi);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 558422120312b..acf732ce04ed6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+ 	if (!vsi->q_vectors)
+ 		goto err_vectors;
+ 
+-	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+-	if (!vsi->af_xdp_zc_qps)
+-		goto err_zc_qps;
+-
+ 	return 0;
+ 
+-err_zc_qps:
+-	devm_kfree(dev, vsi->q_vectors);
+ err_vectors:
+ 	devm_kfree(dev, vsi->rxq_map);
+ err_rxq_map:
+@@ -328,8 +322,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
+ 
+ 	dev = ice_pf_to_dev(pf);
+ 
+-	bitmap_free(vsi->af_xdp_zc_qps);
+-	vsi->af_xdp_zc_qps = NULL;
+ 	/* free the ring and vector containers */
+ 	devm_kfree(dev, vsi->q_vectors);
+ 	vsi->q_vectors = NULL;
+@@ -2331,22 +2323,23 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+ 		if (ret)
+ 			goto unroll_vector_base;
+ 
+-		ice_vsi_map_rings_to_vectors(vsi);
+-
+-		/* Associate q_vector rings to napi */
+-		ice_vsi_set_napi_queues(vsi);
+-
+-		vsi->stat_offsets_loaded = false;
+-
+ 		if (ice_is_xdp_ena_vsi(vsi)) {
+ 			ret = ice_vsi_determine_xdp_res(vsi);
+ 			if (ret)
+ 				goto unroll_vector_base;
+-			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
++			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
++						    ICE_XDP_CFG_PART);
+ 			if (ret)
+ 				goto unroll_vector_base;
+ 		}
+ 
++		ice_vsi_map_rings_to_vectors(vsi);
++
++		/* Associate q_vector rings to napi */
++		ice_vsi_set_napi_queues(vsi);
++
++		vsi->stat_offsets_loaded = false;
++
+ 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ 		if (vsi->type != ICE_VSI_CTRL)
+ 			/* Do not exit if configuring RSS had an issue, at
+@@ -2493,7 +2486,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+ 		/* return value check can be skipped here, it always returns
+ 		 * 0 if reset is in progress
+ 		 */
+-		ice_destroy_xdp_rings(vsi);
++		ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
+ 
+ 	ice_vsi_clear_rings(vsi);
+ 	ice_vsi_free_q_vectors(vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 33a164fa325ac..10fef2e726b39 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2670,17 +2670,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+ 		bpf_prog_put(old_prog);
+ }
+ 
++static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
++{
++	struct ice_q_vector *q_vector;
++	struct ice_tx_ring *ring;
++
++	if (static_key_enabled(&ice_xdp_locking_key))
++		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
++
++	q_vector = vsi->rx_rings[qid]->q_vector;
++	ice_for_each_tx_ring(ring, q_vector->tx)
++		if (ice_ring_is_xdp(ring))
++			return ring;
++
++	return NULL;
++}
++
++/**
++ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
++ * @vsi: the VSI with XDP rings being configured
++ *
++ * Map XDP rings to interrupt vectors and perform the configuration steps
++ * dependent on the mapping.
++ */
++void ice_map_xdp_rings(struct ice_vsi *vsi)
++{
++	int xdp_rings_rem = vsi->num_xdp_txq;
++	int v_idx, q_idx;
++
++	/* follow the logic from ice_vsi_map_rings_to_vectors */
++	ice_for_each_q_vector(vsi, v_idx) {
++		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
++		int xdp_rings_per_v, q_id, q_base;
++
++		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
++					       vsi->num_q_vectors - v_idx);
++		q_base = vsi->num_xdp_txq - xdp_rings_rem;
++
++		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
++			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
++
++			xdp_ring->q_vector = q_vector;
++			xdp_ring->next = q_vector->tx.tx_ring;
++			q_vector->tx.tx_ring = xdp_ring;
++		}
++		xdp_rings_rem -= xdp_rings_per_v;
++	}
++
++	ice_for_each_rxq(vsi, q_idx) {
++		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
++								       q_idx);
++		ice_tx_xsk_pool(vsi, q_idx);
++	}
++}
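
The DIV_ROUND_UP distribution above spreads rings as evenly as possible across the remaining vectors. A standalone, hypothetical demo of the same arithmetic (plain C, not driver code): 5 XDP rings over 3 vectors come out as 2, 2, 1:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 5, num_q_vectors = 3;	/* hypothetical sizes */
	int rem = num_xdp_txq;

	for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		int per_v = DIV_ROUND_UP(rem, num_q_vectors - v_idx);

		printf("vector %d: %d ring(s), base %d\n",
		       v_idx, per_v, num_xdp_txq - rem);
		rem -= per_v;
	}
	return 0;
}
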
++
+ /**
+  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+  * @vsi: VSI to bring up Tx rings used by XDP
+  * @prog: bpf program that will be assigned to VSI
++ * @cfg_type: create from scratch or restore the existing configuration
+  *
+  * Return 0 on success and negative value on error
+  */
+-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
++int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
++			  enum ice_xdp_cfg cfg_type)
+ {
+ 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+-	int xdp_rings_rem = vsi->num_xdp_txq;
+ 	struct ice_pf *pf = vsi->back;
+ 	struct ice_qs_cfg xdp_qs_cfg = {
+ 		.qs_mutex = &pf->avail_q_mutex,
+@@ -2693,8 +2748,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ 		.mapping_mode = ICE_VSI_MAP_CONTIG
+ 	};
+ 	struct device *dev;
+-	int i, v_idx;
+-	int status;
++	int status, i;
+ 
+ 	dev = ice_pf_to_dev(pf);
+ 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
+@@ -2713,49 +2767,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ 	if (ice_xdp_alloc_setup_rings(vsi))
+ 		goto clear_xdp_rings;
+ 
+-	/* follow the logic from ice_vsi_map_rings_to_vectors */
+-	ice_for_each_q_vector(vsi, v_idx) {
+-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+-		int xdp_rings_per_v, q_id, q_base;
+-
+-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+-					       vsi->num_q_vectors - v_idx);
+-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+-
+-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+-
+-			xdp_ring->q_vector = q_vector;
+-			xdp_ring->next = q_vector->tx.tx_ring;
+-			q_vector->tx.tx_ring = xdp_ring;
+-		}
+-		xdp_rings_rem -= xdp_rings_per_v;
+-	}
+-
+-	ice_for_each_rxq(vsi, i) {
+-		if (static_key_enabled(&ice_xdp_locking_key)) {
+-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+-		} else {
+-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+-			struct ice_tx_ring *ring;
+-
+-			ice_for_each_tx_ring(ring, q_vector->tx) {
+-				if (ice_ring_is_xdp(ring)) {
+-					vsi->rx_rings[i]->xdp_ring = ring;
+-					break;
+-				}
+-			}
+-		}
+-		ice_tx_xsk_pool(vsi, i);
+-	}
+-
+ 	/* omit the scheduler update if in reset path; XDP queues will be
+ 	 * taken into account at the end of ice_vsi_rebuild, where
+ 	 * ice_cfg_vsi_lan is being called
+ 	 */
+-	if (ice_is_reset_in_progress(pf->state))
++	if (cfg_type == ICE_XDP_CFG_PART)
+ 		return 0;
+ 
++	ice_map_xdp_rings(vsi);
++
+ 	/* tell the Tx scheduler that right now we have
+ 	 * additional queues
+ 	 */
+@@ -2805,22 +2825,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+ /**
+  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+  * @vsi: VSI to remove XDP rings
++ * @cfg_type: disable XDP permanently or allow it to be restored later
+  *
+  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+  * resources
+  */
+-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
++int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
+ {
+ 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ 	struct ice_pf *pf = vsi->back;
+ 	int i, v_idx;
+ 
+ 	/* q_vectors are freed in reset path so there's no point in detaching
+-	 * rings; in case of rebuild being triggered not from reset bits
+-	 * in pf->state won't be set, so additionally check first q_vector
+-	 * against NULL
++	 * rings
+ 	 */
+-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++	if (cfg_type == ICE_XDP_CFG_PART)
+ 		goto free_qmap;
+ 
+ 	ice_for_each_q_vector(vsi, v_idx) {
+@@ -2861,7 +2880,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+ 	if (static_key_enabled(&ice_xdp_locking_key))
+ 		static_branch_dec(&ice_xdp_locking_key);
+ 
+-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
++	if (cfg_type == ICE_XDP_CFG_PART)
+ 		return 0;
+ 
+ 	ice_vsi_assign_bpf_prog(vsi, NULL);
+@@ -2972,7 +2991,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 		if (xdp_ring_err) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ 		} else {
+-			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
++			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
++							     ICE_XDP_CFG_FULL);
+ 			if (xdp_ring_err)
+ 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ 		}
+@@ -2983,7 +3003,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
+ 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ 		xdp_features_clear_redirect_target(vsi->netdev);
+-		xdp_ring_err = ice_destroy_xdp_rings(vsi);
++		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
+ 		if (xdp_ring_err)
+ 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ 		/* reallocate Rx queues that were used for zero-copy */
+diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
+index d4e05d2cb30c4..8510a02afedcc 100644
+--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
++++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
+@@ -375,11 +375,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
+  *
+  * Read the specified word from the copy of the Shadow RAM found in the
+  * specified NVM module.
++ *
++ * Note that the Shadow RAM copy is always located after the CSS header, and
++ * is aligned to 64-byte (32-word) offsets.
+  */
+ static int
+ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+ {
+-	return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
++	u32 sr_copy;
++
++	switch (bank) {
++	case ICE_ACTIVE_FLASH_BANK:
++		sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
++		break;
++	case ICE_INACTIVE_FLASH_BANK:
++		sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
++		break;
++	}
++
++	return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
+ }
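
With the fixed ICE_NVM_SR_COPY_WORD_OFFSET gone, the Shadow RAM copy offset is now the per-bank CSS header length rounded up to a 32-word (64-byte) boundary. A hypothetical worked example using a simplified roundup() that matches the kernel macro's result for these values: the old fixed 330-word header rounds up to word offset 352:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))	/* simplified */

int main(void)
{
	unsigned int css_hdr_len = 330;	/* words, incl. auth header */

	printf("SR copy starts at word %u\n", roundup(css_hdr_len, 32));
	return 0;	/* prints 352 */
}
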
+ 
+ /**
+@@ -441,8 +455,7 @@ int
+ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 		       u16 module_type)
+ {
+-	u16 pfa_len, pfa_ptr;
+-	u16 next_tlv;
++	u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
+ 	int status;
+ 
+ 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+@@ -455,11 +468,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ 		return status;
+ 	}
++
++	/* The Preserved Fields Area contains a sequence of Type-Length-Value
++	 * structures which define its contents. The PFA length includes all
++	 * of the TLVs, plus the initial length word itself, *and* one final
++	 * word at the end after all of the TLVs.
++	 */
++	if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
++		dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
++			 pfa_ptr, pfa_len);
++		return -EINVAL;
++	}
++
+ 	/* Starting with first TLV after PFA length, iterate through the list
+ 	 * of TLVs to find the requested one.
+ 	 */
+ 	next_tlv = pfa_ptr + 1;
+-	while (next_tlv < pfa_ptr + pfa_len) {
++	while (next_tlv < max_tlv) {
+ 		u16 tlv_sub_module_type;
+ 		u16 tlv_len;
+ 
+@@ -483,10 +508,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ 			}
+ 			return -EINVAL;
+ 		}
+-		/* Check next TLV, i.e. current TLV pointer + length + 2 words
+-		 * (for current TLV's type and length)
+-		 */
+-		next_tlv = next_tlv + tlv_len + 2;
++
++		if (check_add_overflow(next_tlv, 2, &next_tlv) ||
++		    check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
++			dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
++				 tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
++			return -EINVAL;
++		}
+ 	}
+ 	/* Module does not exist */
+ 	return -ENOENT;
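
The TLV walk now guards every 16-bit addition with check_add_overflow(). A hypothetical userspace demo of the same guard, using the compiler builtin that the kernel helper wraps on modern GCC/Clang: advancing a TLV cursor past 0xffff is reported instead of silently wrapping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t next_tlv = 0xfff0, tlv_len = 0x20, out;	/* hypothetical values */

	if (__builtin_add_overflow(next_tlv, 2, &out) ||
	    __builtin_add_overflow(out, tlv_len, &out))
		printf("overflow: TLV walk would wrap past 0xffff\n");
	else
		printf("next TLV at 0x%04x\n", out);
	return 0;
}
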
+@@ -1010,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
+ 	return 0;
+ }
+ 
++/**
++ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
++ * @hw: pointer to the HW struct
++ * @bank: whether to read from the active or inactive flash bank
++ * @hdr_len: storage for header length in words
++ *
++ * Read the CSS header length from the NVM CSS header, convert it to words,
++ * and then add the Authentication header size (also in words).
++ *
++ * Return: zero on success, or a negative error code on failure.
++ */
++static int
++ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
++			u32 *hdr_len)
++{
++	u16 hdr_len_l, hdr_len_h;
++	u32 hdr_len_dword;
++	int status;
++
++	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
++				     &hdr_len_l);
++	if (status)
++		return status;
++
++	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
++				     &hdr_len_h);
++	if (status)
++		return status;
++
++	/* CSS header length is in DWORD, so convert to words and add
++	 * authentication header size
++	 */
++	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
++	*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
++
++	return 0;
++}
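
Worked numbers for the conversion above, using the constants this patch defines: a hypothetical CSS header body of 161 dwords yields 161 * 2 + ICE_NVM_AUTH_HEADER_LEN (8) = 330 words, exactly the value the removed ICE_CSS_HEADER_LENGTH constant used to hard-code. A minimal sketch of the same math:

static inline unsigned int css_hdr_words(unsigned int hdr_len_dword)
{
	return hdr_len_dword * 2 + 8;	/* 8 == ICE_NVM_AUTH_HEADER_LEN */
}
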
++
++/**
++ * ice_determine_css_hdr_len - Discover CSS header length for the device
++ * @hw: pointer to the HW struct
++ *
++ * Determine the size of the CSS header at the start of the NVM module. This
++ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
++ * always located just after the CSS header.
++ *
++ * Return: zero on success, or a negative error code on failure.
++ */
++static int ice_determine_css_hdr_len(struct ice_hw *hw)
++{
++	struct ice_bank_info *banks = &hw->flash.banks;
++	int status;
++
++	status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
++					 &banks->active_css_hdr_len);
++	if (status)
++		return status;
++
++	status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
++					 &banks->inactive_css_hdr_len);
++	if (status)
++		return status;
++
++	return 0;
++}
++
+ /**
+  * ice_init_nvm - initializes NVM setting
+  * @hw: pointer to the HW struct
+@@ -1056,6 +1150,12 @@ int ice_init_nvm(struct ice_hw *hw)
+ 		return status;
+ 	}
+ 
++	status = ice_determine_css_hdr_len(hw);
++	if (status) {
++		ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
++		return status;
++	}
++
+ 	status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
+ 	if (status) {
+ 		ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
+diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
+index 9ff92dba58236..0aacd0d050b8e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_type.h
++++ b/drivers/net/ethernet/intel/ice/ice_type.h
+@@ -481,6 +481,8 @@ struct ice_bank_info {
+ 	u32 orom_size;				/* Size of OROM bank */
+ 	u32 netlist_ptr;			/* Pointer to 1st Netlist bank */
+ 	u32 netlist_size;			/* Size of Netlist bank */
++	u32 active_css_hdr_len;			/* Active CSS header length */
++	u32 inactive_css_hdr_len;		/* Inactive CSS header length */
+ 	enum ice_flash_bank nvm_bank;		/* Active NVM bank */
+ 	enum ice_flash_bank orom_bank;		/* Active OROM bank */
+ 	enum ice_flash_bank netlist_bank;	/* Active Netlist bank */
+@@ -1084,17 +1086,13 @@ struct ice_aq_get_set_rss_lut_params {
+ #define ICE_SR_SECTOR_SIZE_IN_WORDS	0x800
+ 
+ /* CSS Header words */
++#define ICE_NVM_CSS_HDR_LEN_L			0x02
++#define ICE_NVM_CSS_HDR_LEN_H			0x03
+ #define ICE_NVM_CSS_SREV_L			0x14
+ #define ICE_NVM_CSS_SREV_H			0x15
+ 
+-/* Length of CSS header section in words */
+-#define ICE_CSS_HEADER_LENGTH			330
+-
+-/* Offset of Shadow RAM copy in the NVM bank area. */
+-#define ICE_NVM_SR_COPY_WORD_OFFSET		roundup(ICE_CSS_HEADER_LENGTH, 32)
+-
+-/* Size in bytes of Option ROM trailer */
+-#define ICE_NVM_OROM_TRAILER_LENGTH		(2 * ICE_CSS_HEADER_LENGTH)
++/* Length of Authentication header section in words */
++#define ICE_NVM_AUTH_HEADER_LEN			0x08
+ 
+ /* The Link Topology Netlist section is stored as a series of words. It is
+  * stored in the NVM as a TLV, with the first two words containing the type
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 1857220d27fee..86a865788e345 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
+ 	if (!pool)
+ 		return -EINVAL;
+ 
+-	clear_bit(qid, vsi->af_xdp_zc_qps);
+ 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
+ 
+ 	return 0;
+@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
+ 	if (err)
+ 		return err;
+ 
+-	set_bit(qid, vsi->af_xdp_zc_qps);
+-
+ 	return 0;
+ }
+ 
+@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+ {
+ 	struct ice_rx_ring *rx_ring;
+-	unsigned long q;
++	uint i;
++
++	ice_for_each_rxq(vsi, i) {
++		rx_ring = vsi->rx_rings[i];
++		if (!rx_ring->xsk_pool)
++			continue;
+ 
+-	for_each_set_bit(q, vsi->af_xdp_zc_qps,
+-			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+-		rx_ring = vsi->rx_rings[q];
+ 		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ 			return -ENOMEM;
+ 	}
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index 1a64f1ca6ca86..e699412d22f68 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
+ 	struct igc_hw *hw = &adapter->hw;
+ 	u32 eeer;
+ 
++	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++			 edata->supported);
++	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
++			 edata->supported);
++	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
++			 edata->supported);
++
+ 	if (hw->dev_spec._base.eee_enable)
+ 		mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ 					    adapter->eee_advert);
+ 
+-	*edata = adapter->eee;
+-
+ 	eeer = rd32(IGC_EEER);
+ 
+ 	/* EEE status on negotiated link */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 4d975d620a8e4..58bc96021bb4c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -12,6 +12,7 @@
+ #include <linux/bpf_trace.h>
+ #include <net/xdp_sock_drv.h>
+ #include <linux/pci.h>
++#include <linux/mdio.h>
+ 
+ #include <net/ipv6.h>
+ 
+@@ -4876,6 +4877,9 @@ void igc_up(struct igc_adapter *adapter)
+ 	/* start the watchdog. */
+ 	hw->mac.get_link_status = true;
+ 	schedule_work(&adapter->watchdog_task);
++
++	adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
++			      MDIO_EEE_2_5GT;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index e8b73b9d75e31..97722ce8c4cb3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 	 * - when available free entries are less.
+	 * Lower priority ones out of available free entries are always
+ 	 * chosen when 'high vs low' question arises.
++	 *
++	 * For a VF, the base MCAM match rule is set by its PF, and all
++	 * further MCAM rules installed by the VF on its own are
++	 * concatenated with that base rule. Hence PF entries should be
++	 * at lower priority compared to VF entries; otherwise the base
++	 * rule is always hit and the rules installed by the VF are of
++	 * no use. Hence, if the request is from a PF, allocate low
++	 * priority entries.
+ 	 */
++	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
++		goto lprio_alloc;
+ 
+ 	/* Get the search range for priority allocation request */
+ 	if (req->priority) {
+@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 		goto alloc;
+ 	}
+ 
+-	/* For a VF base MCAM match rule is set by its PF. And all the
+-	 * further MCAM rules installed by VF on its own are
+-	 * concatenated with the base rule set by its PF. Hence PF entries
+-	 * should be at lower priority compared to VF entries. Otherwise
+-	 * base rule is hit always and rules installed by VF will be of
+-	 * no use. Hence if the request is from PF and NOT a priority
+-	 * allocation request then allocate low priority entries.
+-	 */
+-	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+-		goto lprio_alloc;
+-
+ 	/* Find out the search range for non-priority allocation request
+ 	 *
+ 	 * Get MCAM free entry count in middle zone.
+@@ -2568,6 +2567,18 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ 		reverse = true;
+ 		start = 0;
+ 		end = mcam->bmap_entries;
++		/* Ensure PF requests always land at the bottom. If a PF
++		 * requests a higher/lower priority entry wrt a reference
++		 * entry, honour that criteria but still search for entries
++		 * from the bottom and not in the mid zone.
++		 */
++		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++		    req->priority == NPC_MCAM_HIGHER_PRIO)
++			end = req->ref_entry;
++
++		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
++		    req->priority == NPC_MCAM_LOWER_PRIO)
++			start = req->ref_entry;
+ 	}
+ 
+ alloc:
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index d7d73295f0dc4..41d9b0684be74 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
+ {
+ 	const struct mtk_soc_data *soc = eth->soc;
+ 	dma_addr_t phy_ring_tail;
+-	int cnt = MTK_QDMA_RING_SIZE;
++	int cnt = soc->tx.fq_dma_size;
+ 	dma_addr_t dma_addr;
+-	int i;
++	int i, j, len;
+ 
+ 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+ 		eth->scratch_ring = eth->sram_base;
+@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
+ 						       cnt * soc->tx.desc_size,
+ 						       &eth->phy_scratch_ring,
+ 						       GFP_KERNEL);
++
+ 	if (unlikely(!eth->scratch_ring))
+ 		return -ENOMEM;
+ 
+-	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+-	if (unlikely(!eth->scratch_head))
+-		return -ENOMEM;
++	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+ 
+-	dma_addr = dma_map_single(eth->dma_dev,
+-				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+-				  DMA_FROM_DEVICE);
+-	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+-		return -ENOMEM;
++	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
++		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
++		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+ 
+-	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
++		if (unlikely(!eth->scratch_head[j]))
++			return -ENOMEM;
+ 
+-	for (i = 0; i < cnt; i++) {
+-		dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+-		struct mtk_tx_dma_v2 *txd;
++		dma_addr = dma_map_single(eth->dma_dev,
++					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
++					  DMA_FROM_DEVICE);
+ 
+-		txd = eth->scratch_ring + i * soc->tx.desc_size;
+-		txd->txd1 = addr;
+-		if (i < cnt - 1)
+-			txd->txd2 = eth->phy_scratch_ring +
+-				    (i + 1) * soc->tx.desc_size;
++		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
++			return -ENOMEM;
+ 
+-		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+-		if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+-			txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
+-		txd->txd4 = 0;
+-		if (mtk_is_netsys_v2_or_greater(eth)) {
+-			txd->txd5 = 0;
+-			txd->txd6 = 0;
+-			txd->txd7 = 0;
+-			txd->txd8 = 0;
++		for (i = 0; i < len; i++) {
++			struct mtk_tx_dma_v2 *txd;
++
++			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
++			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
++			if (j * MTK_FQ_DMA_LENGTH + i < cnt - 1)
++				txd->txd2 = eth->phy_scratch_ring +
++					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
++
++			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
++			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
++				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
++
++			txd->txd4 = 0;
++			if (mtk_is_netsys_v2_or_greater(eth)) {
++				txd->txd5 = 0;
++				txd->txd6 = 0;
++				txd->txd7 = 0;
++				txd->txd8 = 0;
++			}
+ 		}
+ 	}
+ 
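
The free-queue scratch memory is now allocated in MTK_FQ_DMA_LENGTH-descriptor chunks rather than one large kcalloc(). A hypothetical standalone demo of the chunk math: an fq_dma_size of 4096 (as mt7988 sets below) needs DIV_ROUND_UP(4096, 2048) = 2 scratch_head chunks:

#include <stdio.h>

#define MTK_FQ_DMA_LENGTH 2048
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int cnt = 4096;	/* hypothetical fq_dma_size */

	for (int j = 0; j < DIV_ROUND_UP(cnt, MTK_FQ_DMA_LENGTH); j++) {
		int len = cnt - j * MTK_FQ_DMA_LENGTH;

		if (len > MTK_FQ_DMA_LENGTH)
			len = MTK_FQ_DMA_LENGTH;
		printf("chunk %d holds %d descriptors\n", j, len);
	}
	return 0;
}
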
+@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
+ 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ 		ring_size = MTK_QDMA_RING_SIZE;
+ 	else
+-		ring_size = MTK_DMA_SIZE;
++		ring_size = soc->tx.dma_size;
+ 
+ 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
+ 			       GFP_KERNEL);
+@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
+ 		goto no_tx_mem;
+ 
+ 	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+-		ring->dma = eth->sram_base + ring_size * sz;
+-		ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
++		ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
++		ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
+ 	} else {
+ 		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ 					       &ring->phys, GFP_KERNEL);
+@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
+ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ {
+ 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
++	const struct mtk_soc_data *soc = eth->soc;
+ 	struct mtk_rx_ring *ring;
+ 	int rx_data_len, rx_dma_size, tx_ring_size;
+ 	int i;
+@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ 		tx_ring_size = MTK_QDMA_RING_SIZE;
+ 	else
+-		tx_ring_size = MTK_DMA_SIZE;
++		tx_ring_size = soc->tx.dma_size;
+ 
+ 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ 		if (ring_no)
+@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ 	} else {
+ 		rx_data_len = ETH_DATA_LEN;
+-		rx_dma_size = MTK_DMA_SIZE;
++		rx_dma_size = soc->rx.dma_size;
+ 	}
+ 
+ 	ring->frag_size = mtk_max_frag_size(rx_data_len);
+@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
+ 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
+ 	}
+ 
+-	kfree(eth->scratch_head);
++	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
++		kfree(eth->scratch_head[i]);
++		eth->scratch_head[i] = NULL;
++	}
+ }
+ 
+ static bool mtk_hw_reset_check(struct mtk_eth *eth)
+@@ -5043,11 +5053,14 @@ static const struct mtk_soc_data mt2701_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+ 		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5067,11 +5080,14 @@ static const struct mtk_soc_data mt7621_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+ 		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5093,11 +5109,14 @@ static const struct mtk_soc_data mt7622_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+ 		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5118,11 +5137,14 @@ static const struct mtk_soc_data mt7623_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+ 		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5141,11 +5163,14 @@ static const struct mtk_soc_data mt7629_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+ 		.irq_done_mask = MTK_RX_DONE_INT,
+ 		.dma_l4_valid = RX_DMA_L4_VALID,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
+ 	},
+@@ -5167,6 +5192,8 @@ static const struct mtk_soc_data mt7981_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma_v2),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+@@ -5174,6 +5201,7 @@ static const struct mtk_soc_data mt7981_data = {
+ 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ };
+ 
+@@ -5193,6 +5221,8 @@ static const struct mtk_soc_data mt7986_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma_v2),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+@@ -5200,6 +5230,7 @@ static const struct mtk_soc_data mt7986_data = {
+ 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ };
+ 
+@@ -5219,6 +5250,8 @@ static const struct mtk_soc_data mt7988_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma_v2),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
++		.dma_size = MTK_DMA_SIZE(2K),
++		.fq_dma_size = MTK_DMA_SIZE(4K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma_v2),
+@@ -5226,6 +5259,7 @@ static const struct mtk_soc_data mt7988_data = {
+ 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ 		.dma_len_offset = 8,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ };
+ 
+@@ -5240,6 +5274,7 @@ static const struct mtk_soc_data rt5350_data = {
+ 		.desc_size = sizeof(struct mtk_tx_dma),
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ 	.rx = {
+ 		.desc_size = sizeof(struct mtk_rx_dma),
+@@ -5247,6 +5282,7 @@ static const struct mtk_soc_data rt5350_data = {
+ 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+ 		.dma_len_offset = 16,
++		.dma_size = MTK_DMA_SIZE(2K),
+ 	},
+ };
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 39b50de1decbf..a25c33b9a4f34 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -32,7 +32,9 @@
+ #define MTK_TX_DMA_BUF_LEN	0x3fff
+ #define MTK_TX_DMA_BUF_LEN_V2	0xffff
+ #define MTK_QDMA_RING_SIZE	2048
+-#define MTK_DMA_SIZE		512
++#define MTK_DMA_SIZE(x)		(SZ_##x)
++#define MTK_FQ_DMA_HEAD		32
++#define MTK_FQ_DMA_LENGTH	2048
+ #define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
+ #define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+ #define MTK_DMA_DUMMY_DESC	0xffffffff
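
MTK_DMA_SIZE(x) token-pastes its argument onto the SZ_ prefix, so MTK_DMA_SIZE(2K) resolves to the kernel's SZ_2K constant. A hypothetical standalone sketch of the expansion, with the SZ_ values inlined from <linux/sizes.h>:

#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define MTK_DMA_SIZE(x) (SZ_##x)	/* token pasting: 2K -> SZ_2K */

_Static_assert(MTK_DMA_SIZE(2K) == 2048, "SZ_2K");
_Static_assert(MTK_DMA_SIZE(4K) == 4096, "SZ_4K");
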
+@@ -1176,6 +1178,8 @@ struct mtk_soc_data {
+ 		u32	desc_size;
+ 		u32	dma_max_len;
+ 		u32	dma_len_offset;
++		u32	dma_size;
++		u32	fq_dma_size;
+ 	} tx;
+ 	struct {
+ 		u32	desc_size;
+@@ -1183,6 +1187,7 @@ struct mtk_soc_data {
+ 		u32	dma_l4_valid;
+ 		u32	dma_max_len;
+ 		u32	dma_len_offset;
++		u32	dma_size;
+ 	} rx;
+ };
+ 
+@@ -1264,7 +1269,7 @@ struct mtk_eth {
+ 	struct napi_struct		rx_napi;
+ 	void				*scratch_ring;
+ 	dma_addr_t			phy_scratch_ring;
+-	void				*scratch_head;
++	void				*scratch_head[MTK_FQ_DMA_HEAD];
+ 	struct clk			*clks[MTK_CLK_MAX];
+ 
+ 	struct mii_bus			*mii_bus;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 47be07af214ff..981a3e058840d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4738,7 +4738,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+ 
+ 		/* Verify if UDP port is being offloaded by HW */
+ 		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
+-			return features;
++			return vxlan_features_check(skb, features);
+ 
+ #if IS_ENABLED(CONFIG_GENEVE)
+ 		/* Support Geneve offload for default UDP port */
+@@ -4764,7 +4764,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 
+ 	features = vlan_features_check(skb, features);
+-	features = vxlan_features_check(skb, features);
+ 
+ 	/* Validate if the tunneled packet is being offloaded by HW */
+ 	if (skb->encapsulation &&
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+index e7faf7e73ca48..6c7f2471fe629 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+ 	do {
+ 		if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
+ 			break;
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++			return -EACCES;
++		}
+ 
+ 		cond_resched();
+ 	} while (!time_after(jiffies, end));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index ad38e31822df1..a6329ca2d9bff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -248,6 +248,10 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
+ 	do {
+ 		if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
+ 			break;
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
++			goto unlock;
++		}
+ 
+ 		msleep(20);
+ 	} while (!time_after(jiffies, end));
+@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
+ 			mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
+ 			return -ENODEV;
+ 		}
++		if (pci_channel_offline(dev->pdev)) {
++			mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
++			return -EACCES;
++		}
+ 		msleep(100);
+ 	}
+ 	return 0;
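
These mlx5 hunks all add the same guard to bounded wait loops: bail out with -EACCES as soon as the PCI channel reports offline (e.g. during AER recovery) instead of spinning until the timeout. A hedged kernel-context sketch of the pattern; nic_is_ready() is a hypothetical stand-in for the per-loop ready condition:

static int wait_nic_ready(struct mlx5_core_dev *dev, unsigned long end)
{
	do {
		if (nic_is_ready(dev))		/* hypothetical predicate */
			return 0;
		if (pci_channel_offline(dev->pdev))
			return -EACCES;		/* PCI is gone, stop waiting */
		msleep(100);
	} while (!time_after(jiffies, end));

	return -ETIMEDOUT;
}
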
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+index 101b3bb908638..e12bc4cd80661 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ 								      &dest, 1);
+ 			if (IS_ERR(lag_definer->rules[idx])) {
+ 				err = PTR_ERR(lag_definer->rules[idx]);
+-				while (i--)
+-					while (j--)
++				do {
++					while (j--) {
++						idx = i * ldev->buckets + j;
+ 						mlx5_del_flow_rules(lag_definer->rules[idx]);
++					}
++					j = ldev->buckets;
++				} while (i--);
+ 				goto destroy_fg;
+ 			}
+ 		}
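
The old nested while (i--) / while (j--) unwind never re-armed j, so only part of the first row was freed on error. A hypothetical standalone sketch of the corrected two-level unwind: free columns [0, j) of the failing row, then every column of each earlier row:

static void free_rule(void *rule)	/* stand-in for mlx5_del_flow_rules() */
{
	(void)rule;
}

static void unwind(void **rules, int buckets, int i, int j)
{
	do {
		while (j--)
			free_rule(rules[i * buckets + j]);
		j = buckets;	/* re-arm for the next (earlier) row */
	} while (i--);
}
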
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+index 6b774e0c27665..d0b595ba61101 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
+ 			ret = -EBUSY;
+ 			goto pci_unlock;
+ 		}
++		if (pci_channel_offline(dev->pdev)) {
++			ret = -EACCES;
++			goto pci_unlock;
++		}
+ 
+ 		/* Check if semaphore is already locked */
+ 		ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 6574c145dc1e2..459a836a5d9c1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+ 
+ 	if (!err)
+ 		mlx5_function_disable(dev, boot);
++	else
++		mlx5_stop_health_poll(dev, boot);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 7f0c6cdc375e3..0cd819bc4ae35 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (qcq->napi.poll)
+-		napi_enable(&qcq->napi);
+-
+ 	if (qcq->flags & IONIC_QCQ_F_INTR) {
++		napi_enable(&qcq->napi);
+ 		irq_set_affinity_hint(qcq->intr.vector,
+ 				      &qcq->intr.affinity_mask);
+ 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index 5dba6d2d633cb..2427610f4306d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ 			goto out_xdp_abort;
+ 		}
++		buf_info->page = NULL;
+ 		stats->xdp_tx++;
+ 
+ 		/* the Tx completion will free the buffers */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index e254b21fdb598..65d7370b47d57 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
+ 	bool has_emac_ge_3;
+ 	const char *link_clk_name;
+ 	bool has_integrated_pcs;
++	u32 dma_addr_width;
+ 	struct dwmac4_addrs dwmac4_addrs;
+ };
+ 
+@@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
+ 	.has_emac_ge_3 = true,
+ 	.link_clk_name = "phyaux",
+ 	.has_integrated_pcs = true,
++	.dma_addr_width = 36,
+ 	.dwmac4_addrs = {
+ 		.dma_chan = 0x00008100,
+ 		.dma_chan_offset = 0x1000,
+@@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+ 		plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
+ 	if (data->has_integrated_pcs)
+ 		plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
++	if (data->dma_addr_width)
++		plat_dat->host_dma_width = data->dma_addr_width;
+ 
+ 	if (ethqos->serdes_phy) {
+ 		plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 620c16e9be3a6..b1896379dbab5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 			struct tc_cbs_qopt_offload *qopt)
+ {
+ 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
++	s64 port_transmit_rate_kbps;
+ 	u32 queue = qopt->queue;
+-	u32 ptr, speed_div;
+ 	u32 mode_to_use;
+ 	u64 value;
++	u32 ptr;
+ 	int ret;
+ 
+ 	/* Queue 0 is not AVB capable */
+@@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 	if (!priv->dma_cap.av)
+ 		return -EOPNOTSUPP;
+ 
++	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
++
+ 	/* Port Transmit Rate and Speed Divider */
+-	switch (priv->speed) {
++	switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ 	case SPEED_10000:
+-		ptr = 32;
+-		speed_div = 10000000;
+-		break;
+ 	case SPEED_5000:
+ 		ptr = 32;
+-		speed_div = 5000000;
+ 		break;
+ 	case SPEED_2500:
+-		ptr = 8;
+-		speed_div = 2500000;
+-		break;
+ 	case SPEED_1000:
+ 		ptr = 8;
+-		speed_div = 1000000;
+ 		break;
+ 	case SPEED_100:
+ 		ptr = 4;
+-		speed_div = 100000;
+ 		break;
+ 	default:
+-		return -EOPNOTSUPP;
++		netdev_err(priv->dev,
++			   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
++			   port_transmit_rate_kbps);
++		return -EINVAL;
+ 	}
+ 
+ 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+@@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 	}
+ 
+ 	/* Final adjustments for HW */
+-	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
++	value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
+ 	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+ 
+-	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
++	value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
+ 	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+ 
+ 	value = qopt->hicredit * 1024ll * 8;
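
Worked numbers for the new CBS scaling, under hypothetical slopes: idleSlope = 196608 kbps and sendSlope = -803392 kbps give portTransmitRate = 1,000,000 kbps, i.e. SPEED_1000 and ptr = 8, and an idle_slope credit value of 196608 * 1024 * 8 / 1000000 = 1610:

#include <stdio.h>

int main(void)
{
	long long idleslope = 196608, sendslope = -803392;	/* hypothetical, kbps */
	long long rate_kbps = idleslope - sendslope;		/* 1000000 */
	int ptr = (rate_kbps / 1000 == 1000) ? 8 : 0;		/* SPEED_1000 */
	long long value = idleslope * 1024 * ptr / rate_kbps;

	printf("rate %lld kbps, ptr %d, idle_slope %lld\n",
	       rate_kbps, ptr, value);	/* 1000000, 8, 1610 */
	return 0;
}
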
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 6c2835086b57e..76053d2c15958 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -811,6 +811,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 			   struct geneve_dev *geneve,
+ 			   const struct ip_tunnel_info *info)
+ {
++	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ 	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ 	const struct ip_tunnel_key *key = &info->key;
+@@ -822,7 +823,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!skb_vlan_inet_prepare(skb))
++	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ 		return -EINVAL;
+ 
+ 	if (!gs4)
+@@ -903,7 +904,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 
+ 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+-			       geneve->cfg.inner_proto_inherit);
++			       inner_proto_inherit);
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -919,6 +920,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 			    struct geneve_dev *geneve,
+ 			    const struct ip_tunnel_info *info)
+ {
++	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
+ 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+ 	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ 	const struct ip_tunnel_key *key = &info->key;
+@@ -929,7 +931,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!skb_vlan_inet_prepare(skb))
++	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
+ 		return -EINVAL;
+ 
+ 	if (!gs6)
+@@ -991,7 +993,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 		ttl = ttl ? : ip6_dst_hoplimit(dst);
+ 	}
+ 	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+-			       geneve->cfg.inner_proto_inherit);
++			       inner_proto_inherit);
+ 	if (unlikely(err))
+ 		return err;
+ 
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 8330bc0bcb7e5..d405809030aab 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -292,7 +292,8 @@ static int nsim_get_iflink(const struct net_device *dev)
+ 
+ 	rcu_read_lock();
+ 	peer = rcu_dereference(nsim->peer);
+-	iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
++	iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
++			READ_ONCE(dev->ifindex);
+ 	rcu_read_unlock();
+ 
+ 	return iflink;
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 13370439a7cae..4b22bb6393e26 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -785,6 +785,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
+ {
+ 	int ret;
+ 
++	/* Chip can be powered down by the bootstrap code. */
++	ret = phy_read(phydev, MII_BMCR);
++	if (ret < 0)
++		return ret;
++	if (ret & BMCR_PDOWN) {
++		ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
++		if (ret < 0)
++			return ret;
++		usleep_range(1000, 2000);
++	}
++
+ 	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ 	if (ret)
+ 		return ret;
+@@ -1858,7 +1869,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
+ 	{0x1c, 0x20, 0xeeee},
+ };
+ 
+-static int ksz9477_config_init(struct phy_device *phydev)
++static int ksz9477_phy_errata(struct phy_device *phydev)
+ {
+ 	int err;
+ 	int i;
+@@ -1886,16 +1897,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
+ 			return err;
+ 	}
+ 
++	err = genphy_restart_aneg(phydev);
++	if (err)
++		return err;
++
++	return err;
++}
++
++static int ksz9477_config_init(struct phy_device *phydev)
++{
++	int err;
++
++	/* Only KSZ9897 family of switches needs this fix. */
++	if ((phydev->phy_id & 0xf) == 1) {
++		err = ksz9477_phy_errata(phydev);
++		if (err)
++			return err;
++	}
++
+ 	/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+ 	 * in this switch shall be regarded as broken.
+ 	 */
+ 	if (phydev->dev_flags & MICREL_NO_EEE)
+ 		phydev->eee_broken_modes = -1;
+ 
+-	err = genphy_restart_aneg(phydev);
+-	if (err)
+-		return err;
+-
+ 	return kszphy_config_init(phydev);
+ }
+ 
+@@ -2004,6 +2029,71 @@ static int kszphy_resume(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int ksz9477_resume(struct phy_device *phydev)
++{
++	int ret;
++
++	/* No need to initialize registers if not powered down. */
++	ret = phy_read(phydev, MII_BMCR);
++	if (ret < 0)
++		return ret;
++	if (!(ret & BMCR_PDOWN))
++		return 0;
++
++	genphy_resume(phydev);
++
++	/* After switching from power-down to normal mode, an internal global
++	 * reset is automatically generated. Wait a minimum of 1 ms before
++	 * read/write access to the PHY registers.
++	 */
++	usleep_range(1000, 2000);
++
++	/* Only KSZ9897 family of switches needs this fix. */
++	if ((phydev->phy_id & 0xf) == 1) {
++		ret = ksz9477_phy_errata(phydev);
++		if (ret)
++			return ret;
++	}
++
++	/* Enable PHY Interrupts */
++	if (phy_interrupt_is_valid(phydev)) {
++		phydev->interrupts = PHY_INTERRUPT_ENABLED;
++		if (phydev->drv->config_intr)
++			phydev->drv->config_intr(phydev);
++	}
++
++	return 0;
++}
++
++static int ksz8061_resume(struct phy_device *phydev)
++{
++	int ret;
++
++	/* This function can be called twice when the Ethernet device is on. */
++	ret = phy_read(phydev, MII_BMCR);
++	if (ret < 0)
++		return ret;
++	if (!(ret & BMCR_PDOWN))
++		return 0;
++
++	genphy_resume(phydev);
++	usleep_range(1000, 2000);
++
++	/* Re-program the value after chip is reset. */
++	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++	if (ret)
++		return ret;
++
++	/* Enable PHY Interrupts */
++	if (phy_interrupt_is_valid(phydev)) {
++		phydev->interrupts = PHY_INTERRUPT_ENABLED;
++		if (phydev->drv->config_intr)
++			phydev->drv->config_intr(phydev);
++	}
++
++	return 0;
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+ 	const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -4826,7 +4916,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+ 	.suspend	= kszphy_suspend,
+-	.resume		= kszphy_resume,
++	.resume		= ksz8061_resume,
+ }, {
+ 	.phy_id		= PHY_ID_KSZ9021,
+ 	.phy_id_mask	= 0x000ffffe,
+@@ -4980,7 +5070,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+ 	.suspend	= genphy_suspend,
+-	.resume		= genphy_resume,
++	.resume		= ksz9477_resume,
+ 	.get_features	= ksz9477_get_features,
+ } };
+ 
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index f75c9eb3958ef..d999d9baadb26 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -2418,8 +2418,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
+ 
+ 	/* Handle remove event globally, it resets this state machine */
+ 	if (event == SFP_E_REMOVE) {
+-		if (sfp->sm_mod_state > SFP_MOD_PROBE)
+-			sfp_sm_mod_remove(sfp);
++		sfp_sm_mod_remove(sfp);
+ 		sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
+ 		return;
+ 	}
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 115c3c5414f2a..574b052a517d7 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3589,10 +3589,10 @@ static void virtnet_rx_dim_work(struct work_struct *work)
+ 			if (err)
+ 				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ 					 dev->name, qnum);
+-			dim->state = DIM_START_MEASURE;
+ 		}
+ 	}
+ 
++	dim->state = DIM_START_MEASURE;
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0578864792b60..beebe09eb88ff 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+ 					  rq->data_ring.base,
+ 					  rq->data_ring.basePA);
+ 			rq->data_ring.base = NULL;
+-			rq->data_ring.desc_size = 0;
+ 		}
++		rq->data_ring.desc_size = 0;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
+index c78bce19bd754..5d07585e59c17 100644
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -595,7 +595,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
+ 		.coldboot_cal_ftm = true,
+ 		.cbcal_restart_fw = false,
+ 		.fw_mem_mode = 0,
+-		.num_vdevs = 16 + 1,
++		.num_vdevs = 3,
+ 		.num_peers = 512,
+ 		.supports_suspend = false,
+ 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 2fca415322aec..790277f547bb4 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -7889,8 +7889,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ 	struct ath11k_base *ab = ar->ab;
+ 	struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ 	int ret;
+-	struct cur_regulatory_info *reg_info;
+-	enum ieee80211_ap_reg_power power_type;
+ 
+ 	mutex_lock(&ar->conf_mutex);
+ 
+@@ -7901,17 +7899,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ 	if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ 	    ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ 	    arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+-		reg_info = &ab->reg_info_store[ar->pdev_idx];
+-		power_type = vif->bss_conf.power_type;
+-
+-		ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
+-
+-		if (power_type == IEEE80211_REG_UNSET_AP) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		ath11k_reg_handle_chan_list(ab, reg_info, power_type);
+ 		arvif->chanctx = *ctx;
+ 		ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ 	}
+@@ -9525,6 +9512,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ 	struct ath11k *ar = hw->priv;
+ 	struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ 	struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
++	enum ieee80211_ap_reg_power power_type;
++	struct cur_regulatory_info *reg_info;
+ 	struct ath11k_peer *peer;
+ 	int ret = 0;
+ 
+@@ -9604,6 +9593,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ 				ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ 					    sta->addr, arvif->vdev_id, ret);
+ 		}
++
++		if (!ret &&
++		    ath11k_wmi_supports_6ghz_cc_ext(ar) &&
++		    arvif->vdev_type == WMI_VDEV_TYPE_STA &&
++		    arvif->chanctx.def.chan &&
++		    arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
++			reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
++			power_type = vif->bss_conf.power_type;
++
++			if (power_type == IEEE80211_REG_UNSET_AP) {
++				ath11k_warn(ar->ab, "invalid power type %d\n",
++					    power_type);
++				ret = -EINVAL;
++			} else {
++				ret = ath11k_reg_handle_chan_list(ar->ab,
++								  reg_info,
++								  power_type);
++				if (ret)
++					ath11k_warn(ar->ab,
++						    "failed to handle chan list with power type %d\n",
++						    power_type);
++			}
++		}
+ 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ 		   new_state == IEEE80211_STA_ASSOC) {
+ 		spin_lock_bh(&ar->ab->base_lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+index 0bf38243f88ae..ce18ef9d31281 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+@@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 {
+ 	__le32 timer_period;
+ 	__le32 flags;
+ } __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
++
++/**
++ * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8
++ * @per_chain: per chain restrictions
++ * @enable_ack_reduction: enable or disable close range ack TX power
++ *	reduction.
++ * @per_chain_restriction_changed: whether per_chain_restriction has changed
++ *	from the last command. Used if set_mode is
++ *	IWL_TX_POWER_MODE_SET_SAR_TIMER.
++ *	Note: if not changed, the command is used for keep-alive only.
++ * @reserved: reserved (padding)
++ * @timer_period: timer in milliseconds. If it expires, FW will revert to
++ *	default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER.
++ * @flags: reduce power flags.
++ * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA.
++ *	Not in use.
++ */
++struct iwl_dev_tx_power_cmd_v8 {
++	__le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
++	u8 enable_ack_reduction;
++	u8 per_chain_restriction_changed;
++	u8 reserved[2];
++	__le32 timer_period;
++	__le32 flags;
++	__le32 tpc_vlp_backoff_level;
++} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
++
+ /**
+  * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+  * @common: common part of the command
+@@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 {
+  * @v4: version 4 part of the command
+  * @v5: version 5 part of the command
+  * @v6: version 6 part of the command
++ * @v7: version 7 part of the command
++ * @v8: version 8 part of the command
+  */
+ struct iwl_dev_tx_power_cmd {
+ 	struct iwl_dev_tx_power_common common;
+@@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd {
+ 		struct iwl_dev_tx_power_cmd_v5 v5;
+ 		struct iwl_dev_tx_power_cmd_v6 v6;
+ 		struct iwl_dev_tx_power_cmd_v7 v7;
++		struct iwl_dev_tx_power_cmd_v8 v8;
+ 	};
+ };
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index 1b7254569a37a..6c27ef2f7c7e5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1821,8 +1821,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ err_fw:
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ 	debugfs_remove_recursive(drv->dbgfs_drv);
+-	iwl_dbg_tlv_free(drv->trans);
+ #endif
++	iwl_dbg_tlv_free(drv->trans);
+ 	kfree(drv);
+ err:
+ 	return ERR_PTR(ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index e1c2b7fc92ab9..855267ea6e316 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ {
+ 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ 	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+-	__le32 *dump_data = mfu_dump_notif->data;
+-	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+-	int i;
+ 
+ 	if (mfu_dump_notif->index_num == 0)
+ 		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ 			 le32_to_cpu(mfu_dump_notif->assert_id));
+-
+-	for (i = 0; i < n_words; i++)
+-		IWL_DEBUG_INFO(mvm,
+-			       "MFUART assert dump, dword %u: 0x%08x\n",
+-			       le16_to_cpu(mfu_dump_notif->index_num) *
+-			       n_words + i,
+-			       le32_to_cpu(dump_data[i]));
+ }
+ 
+ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+@@ -894,13 +884,15 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ 	int ret;
+ 	u16 len = 0;
+ 	u32 n_subbands;
+-	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+-					   IWL_FW_CMD_VER_UNKNOWN);
+-	if (cmd_ver == 7) {
++	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
++
++	if (cmd_ver >= 7) {
+ 		len = sizeof(cmd.v7);
+ 		n_subbands = IWL_NUM_SUB_BANDS_V2;
+ 		per_chain = cmd.v7.per_chain[0][0];
+ 		cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
++		if (cmd_ver == 8)
++			len = sizeof(cmd.v8);
+ 	} else if (cmd_ver == 6) {
+ 		len = sizeof(cmd.v6);
+ 		n_subbands = IWL_NUM_SUB_BANDS_V2;
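
The cmd_ver >= 7 branch above fills the v7 view of the command and only bumps len for version 8; this works because struct iwl_dev_tx_power_cmd_v8 is a prefix-compatible extension of v7 (the same members in the same order, plus a trailing tpc_vlp_backoff_level). A compile-time guard in this style would capture that assumption (illustrative sketch, not part of the patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* v8 must lay out the members it shares with v7 at identical offsets
 * for the common fill path to stay valid.
 */
static_assert(offsetof(struct iwl_dev_tx_power_cmd_v8, per_chain) ==
	      offsetof(struct iwl_dev_tx_power_cmd_v7, per_chain));
static_assert(offsetof(struct iwl_dev_tx_power_cmd_v8, flags) ==
	      offsetof(struct iwl_dev_tx_power_cmd_v7, flags));
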
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 7ed7444c98715..2403ac2fcdc3b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1399,7 +1399,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
+ 		cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ 
+-	if (cmd_ver == 7)
++	if (cmd_ver == 8)
++		len = sizeof(cmd.v8);
++	else if (cmd_ver == 7)
+ 		len = sizeof(cmd.v7);
+ 	else if (cmd_ver == 6)
+ 		len = sizeof(cmd.v6);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+index df183a79db4c8..43f3002ede464 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+@@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ 		goto out_free_bf;
+ 
+ 	iwl_mvm_tcm_add_vif(mvm, vif);
+-	INIT_DELAYED_WORK(&mvmvif->csa_work,
+-			  iwl_mvm_channel_switch_disconnect_wk);
+ 
+ 	if (vif->type == NL80211_IFTYPE_MONITOR) {
+ 		mvm->monitor_on = true;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+index 376b23b409dca..6cd4ec4d8f344 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+@@ -122,13 +122,8 @@ enum {
+ 
+ #define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
+-/*
+- * FIXME - various places in firmware API still use u8,
+- * e.g. LQ command and SCD config command.
+- * This should be 256 instead.
+- */
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(255)
+-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(255)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(64)
++#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(64)
+ #define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+ 
+ #define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index ce8d83c771a70..8ac5c045fcfcb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -2456,8 +2456,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 	 *
+ 	 * We mark it as mac header, for upper layers to know where
+ 	 * all radio tap header ends.
++	 *
++	 * Since putting data on the skb does not move the existing data, and
++	 * that is the only way we add to it, data + len is where the next header goes
+ 	 */
+-	skb_reset_mac_header(skb);
++	skb_set_mac_header(skb, skb->len);
+ 
+ 	/*
+ 	 * Override the nss from the rx_vec since the rate_n_flags has
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 22bc032cffc8b..525d8efcc1475 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1303,7 +1303,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+ 		if (IWL_MVM_ADWELL_MAX_BUDGET)
+ 			cmd->v7.adwell_max_budget =
+ 				cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-		else if (params->ssids && params->ssids[0].ssid_len)
++		else if (params->n_ssids && params->ssids[0].ssid_len)
+ 			cmd->v7.adwell_max_budget =
+ 				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ 		else
+@@ -1405,7 +1405,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+ 	if (IWL_MVM_ADWELL_MAX_BUDGET)
+ 		general_params->adwell_max_budget =
+ 			cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+-	else if (params->ssids && params->ssids[0].ssid_len)
++	else if (params->n_ssids && params->ssids[0].ssid_len)
+ 		general_params->adwell_max_budget =
+ 			cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ 	else
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 0971c164b57e9..c27acaf0eb1cf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
+ #endif /* CONFIG_PM */
+ 
+ const struct ieee80211_ops mt7615_ops = {
++	.add_chanctx = ieee80211_emulate_add_chanctx,
++	.remove_chanctx = ieee80211_emulate_remove_chanctx,
++	.change_chanctx = ieee80211_emulate_change_chanctx,
++	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ 	.tx = mt7615_tx,
+ 	.start = mt7615_start,
+ 	.stop = mt7615_stop,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
+index 2e60a6991ca16..42b7db12b1bd4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/core.c
++++ b/drivers/net/wireless/realtek/rtlwifi/core.c
+@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+ 		}
+ 	}
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+-		rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+-			"IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+-			hw->conf.long_frame_max_tx_count);
+-		/* brought up everything changes (changed == ~0) indicates first
+-		 * open, so use our default value instead of that of wiphy.
+-		 */
+-		if (changed != ~0) {
+-			mac->retry_long = hw->conf.long_frame_max_tx_count;
+-			mac->retry_short = hw->conf.long_frame_max_tx_count;
+-			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+-				(u8 *)(&hw->conf.long_frame_max_tx_count));
+-		}
+-	}
+-
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
+ 	    !rtlpriv->proximity.proxim_on) {
+ 		struct ieee80211_channel *channel = hw->conf.chandef.chan;
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+index 2fe724d623c06..33c5a46f1b922 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+@@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+ 			rc = PTR_ERR(devlink->cd_regions[i]);
+ 			dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ 			/* Delete previously created regions */
+-			for ( ; i >= 0; i--)
++			for (i--; i >= 0; i--)
+ 				devlink_region_destroy(devlink->cd_regions[i]);
+ 			goto region_create_fail;
+ 		}
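
The loop change above fixes a classic unwind off-by-one: when creating region i fails, slot i holds an ERR_PTR and was never successfully created, so teardown must start at i - 1; the old loop also passed the ERR_PTR in slot i to devlink_region_destroy(). The general shape of the pattern, with hypothetical obj_create()/obj_destroy() helpers:

/* Create n objects; on failure unwind only those already created. */
static int create_all(struct obj **objs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		objs[i] = obj_create(i);
		if (IS_ERR(objs[i])) {
			int rc = PTR_ERR(objs[i]);

			while (i--)	/* skips the failed slot i */
				obj_destroy(objs[i]);
			return rc;
		}
	}

	return 0;
}
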
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index e05571b2a1b0c..8fa1ffcdaed48 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
+ 	if (nvme_is_path_error(nvme_sc))
+ 		return PR_STS_PATH_FAILED;
+ 
+-	switch (nvme_sc) {
++	switch (nvme_sc & 0x7ff) {
+ 	case NVME_SC_SUCCESS:
+ 		return PR_STS_SUCCESS;
+ 	case NVME_SC_RESERVATION_CONFLICT:
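
For context on the masking: an NVMe completion status packs the status code (bits 7:0) and status code type (bits 10:8) alongside control bits such as CRD, MORE and DNR in the upper bits, so an unmasked switch would miss e.g. NVME_SC_RESERVATION_CONFLICT whenever the controller also set DNR. A sketch of the idea as a helper (hypothetical name):

static inline int nvme_sc_code(int nvme_sc)
{
	return nvme_sc & 0x7ff;	/* keep SCT + SC, drop CRD/MORE/DNR */
}
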
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index bb4a69d538fd1..f003782d4ecff 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+ 	    req->cmd->common.opcode == nvme_admin_identify) {
+ 		switch (req->cmd->identify.cns) {
+ 		case NVME_ID_CNS_CTRL:
+-			nvmet_passthru_override_id_ctrl(req);
++			status = nvmet_passthru_override_id_ctrl(req);
+ 			break;
+ 		case NVME_ID_CNS_NS:
+-			nvmet_passthru_override_id_ns(req);
++			status = nvmet_passthru_override_id_ns(req);
+ 			break;
+ 		case NVME_ID_CNS_NS_DESC_LIST:
+-			nvmet_passthru_override_id_descs(req);
++			status = nvmet_passthru_override_id_descs(req);
+ 			break;
+ 		}
+ 	} else if (status < 0)
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index c9046e97a1d27..79c3e7e9a2701 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
+ 
+ 	/* All functions share the same vendor ID with function 0 */
+ 	if (fn == 0) {
+-		u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+-			       (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+-
+-		rockchip_pcie_write(rockchip, vid_regs,
++		rockchip_pcie_write(rockchip,
++				    hdr->vendorid | hdr->subsys_vendor_id << 16,
+ 				    PCIE_CORE_CONFIG_VENDOR);
+ 	}
+ 
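
The deleted computation masked the 16-bit subsys_vendor_id with GENMASK(31, 16) - always zero for a 16-bit value - and then shifted by 16 on top of that, so the register's upper half never carried the subsystem vendor ID. The intended packing of the two 16-bit IDs into one 32-bit register is simply (equivalent to the added line):

u32 vid_regs = (u32)hdr->vendorid | ((u32)hdr->subsys_vendor_id << 16);
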
+diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
+index e61bfaf8b5c48..86b95206cb1bd 100644
+--- a/drivers/platform/x86/dell/dell-smbios-base.c
++++ b/drivers/platform/x86/dell/dell-smbios-base.c
+@@ -11,6 +11,7 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/container_of.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/capability.h>
+@@ -25,11 +26,16 @@ static u32 da_supported_commands;
+ static int da_num_tokens;
+ static struct platform_device *platform_device;
+ static struct calling_interface_token *da_tokens;
+-static struct device_attribute *token_location_attrs;
+-static struct device_attribute *token_value_attrs;
++static struct token_sysfs_data *token_entries;
+ static struct attribute **token_attrs;
+ static DEFINE_MUTEX(smbios_mutex);
+ 
++struct token_sysfs_data {
++	struct device_attribute location_attr;
++	struct device_attribute value_attr;
++	struct calling_interface_token *token;
++};
++
+ struct smbios_device {
+ 	struct list_head list;
+ 	struct device *device;
+@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+ 	}
+ }
+ 
+-static int match_attribute(struct device *dev,
+-			   struct device_attribute *attr)
+-{
+-	int i;
+-
+-	for (i = 0; i < da_num_tokens * 2; i++) {
+-		if (!token_attrs[i])
+-			continue;
+-		if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+-			return i/2;
+-	}
+-	dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+-	return -EINVAL;
+-}
+-
+ static ssize_t location_show(struct device *dev,
+ 			     struct device_attribute *attr, char *buf)
+ {
+-	int i;
++	struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	i = match_attribute(dev, attr);
+-	if (i > 0)
+-		return sysfs_emit(buf, "%08x", da_tokens[i].location);
+-	return 0;
++	return sysfs_emit(buf, "%08x", data->token->location);
+ }
+ 
+ static ssize_t value_show(struct device *dev,
+ 			  struct device_attribute *attr, char *buf)
+ {
+-	int i;
++	struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	i = match_attribute(dev, attr);
+-	if (i > 0)
+-		return sysfs_emit(buf, "%08x", da_tokens[i].value);
+-	return 0;
++	return sysfs_emit(buf, "%08x", data->token->value);
+ }
+ 
+ static struct attribute_group smbios_attribute_group = {
+@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ {
+ 	char *location_name;
+ 	char *value_name;
+-	size_t size;
+ 	int ret;
+ 	int i, j;
+ 
+-	/* (number of tokens  + 1 for null terminated */
+-	size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+-	token_location_attrs = kzalloc(size, GFP_KERNEL);
+-	if (!token_location_attrs)
++	token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
++	if (!token_entries)
+ 		return -ENOMEM;
+-	token_value_attrs = kzalloc(size, GFP_KERNEL);
+-	if (!token_value_attrs)
+-		goto out_allocate_value;
+ 
+ 	/* need to store both location and value + terminator*/
+-	size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+-	token_attrs = kzalloc(size, GFP_KERNEL);
++	token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
+ 	if (!token_attrs)
+ 		goto out_allocate_attrs;
+ 
+@@ -496,27 +474,32 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 		/* skip empty */
+ 		if (da_tokens[i].tokenID == 0)
+ 			continue;
++
++		token_entries[i].token = &da_tokens[i];
++
+ 		/* add location */
+ 		location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ 					  da_tokens[i].tokenID);
+ 		if (location_name == NULL)
+ 			goto out_unwind_strings;
+-		sysfs_attr_init(&token_location_attrs[i].attr);
+-		token_location_attrs[i].attr.name = location_name;
+-		token_location_attrs[i].attr.mode = 0444;
+-		token_location_attrs[i].show = location_show;
+-		token_attrs[j++] = &token_location_attrs[i].attr;
++
++		sysfs_attr_init(&token_entries[i].location_attr.attr);
++		token_entries[i].location_attr.attr.name = location_name;
++		token_entries[i].location_attr.attr.mode = 0444;
++		token_entries[i].location_attr.show = location_show;
++		token_attrs[j++] = &token_entries[i].location_attr.attr;
+ 
+ 		/* add value */
+ 		value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ 				       da_tokens[i].tokenID);
+ 		if (value_name == NULL)
+ 			goto loop_fail_create_value;
+-		sysfs_attr_init(&token_value_attrs[i].attr);
+-		token_value_attrs[i].attr.name = value_name;
+-		token_value_attrs[i].attr.mode = 0444;
+-		token_value_attrs[i].show = value_show;
+-		token_attrs[j++] = &token_value_attrs[i].attr;
++
++		sysfs_attr_init(&token_entries[i].value_attr.attr);
++		token_entries[i].value_attr.attr.name = value_name;
++		token_entries[i].value_attr.attr.mode = 0444;
++		token_entries[i].value_attr.show = value_show;
++		token_attrs[j++] = &token_entries[i].value_attr.attr;
+ 		continue;
+ 
+ loop_fail_create_value:
+@@ -532,14 +515,12 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 
+ out_unwind_strings:
+ 	while (i--) {
+-		kfree(token_location_attrs[i].attr.name);
+-		kfree(token_value_attrs[i].attr.name);
++		kfree(token_entries[i].location_attr.attr.name);
++		kfree(token_entries[i].value_attr.attr.name);
+ 	}
+ 	kfree(token_attrs);
+ out_allocate_attrs:
+-	kfree(token_value_attrs);
+-out_allocate_value:
+-	kfree(token_location_attrs);
++	kfree(token_entries);
+ 
+ 	return -ENOMEM;
+ }
+@@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev)
+ 	sysfs_remove_group(&pdev->dev.kobj,
+ 				&smbios_attribute_group);
+ 	for (i = 0; i < da_num_tokens; i++) {
+-		kfree(token_location_attrs[i].attr.name);
+-		kfree(token_value_attrs[i].attr.name);
++		kfree(token_entries[i].location_attr.attr.name);
++		kfree(token_entries[i].value_attr.attr.name);
+ 	}
+ 	kfree(token_attrs);
+-	kfree(token_value_attrs);
+-	kfree(token_location_attrs);
++	kfree(token_entries);
+ }
+ 
+ static int __init dell_smbios_init(void)
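
The rework above replaces the name-based match_attribute() scan with the standard container_of() idiom: each device_attribute is embedded in a per-token structure, so a show() callback recovers its data in O(1) with no string comparisons. A minimal standalone sketch of the pattern (names illustrative):

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/sysfs.h>

struct example_entry {
	struct device_attribute value_attr;	/* embedded attribute */
	u32 value;				/* per-entry payload */
};

static ssize_t example_value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	/* Recover the containing entry from the embedded member. */
	struct example_entry *e = container_of(attr, struct example_entry,
					       value_attr);

	return sysfs_emit(buf, "%08x\n", e->value);
}
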
+diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+index 9dddf227a3a6b..1510d5ddae3de 100644
+--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
++++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
+@@ -114,6 +114,18 @@ static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+ 
++static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32 idx)
++{
++	struct ti_sci_pm_domain *pd;
++
++	list_for_each_entry(pd, &pd_provider->pd_list, node) {
++		if (pd->idx == idx)
++			return true;
++	}
++
++	return false;
++}
++
+ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -149,8 +161,14 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+ 				break;
+ 
+ 			if (args.args_count >= 1 && args.np == dev->of_node) {
+-				if (args.args[0] > max_id)
++				if (args.args[0] > max_id) {
+ 					max_id = args.args[0];
++				} else {
++					if (ti_sci_pm_idx_exists(pd_provider, args.args[0])) {
++						index++;
++						continue;
++					}
++				}
+ 
+ 				pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ 				if (!pd) {
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 7513018c9f9ac..2067b0120d083 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ 	}
+ 
+ 	if (info->verify(info, pin, func, chan)) {
+-		pr_err("driver cannot use function %u on pin %u\n", func, chan);
++		pr_err("driver cannot use function %u and channel %u on pin %u\n",
++		       func, chan, pin);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
+index 5de69e0bb0f99..196c1c8b578ce 100644
+--- a/drivers/ras/amd/atl/internal.h
++++ b/drivers/ras/amd/atl/internal.h
+@@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
+ 
+ int get_df_system_info(void);
+ int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
+-int get_addr_hash_mi300(void);
++int get_umc_info_mi300(void);
+ 
+ int get_address_map(struct addr_ctx *ctx);
+ 
+diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
+index 701349e849428..6979fa3d4fe25 100644
+--- a/drivers/ras/amd/atl/system.c
++++ b/drivers/ras/amd/atl/system.c
+@@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg)
+ 	if (reg == DF_FUNC0_ID_MI300) {
+ 		df_cfg.flags.heterogeneous = 1;
+ 
+-		if (get_addr_hash_mi300())
++		if (get_umc_info_mi300())
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
+index 59b6169093f77..a1b4accf7b965 100644
+--- a/drivers/ras/amd/atl/umc.c
++++ b/drivers/ras/amd/atl/umc.c
+@@ -68,6 +68,8 @@ struct xor_bits {
+ };
+ 
+ #define NUM_BANK_BITS	4
++#define NUM_COL_BITS	5
++#define NUM_SID_BITS	2
+ 
+ static struct {
+ 	/* UMC::CH::AddrHashBank */
+@@ -80,7 +82,22 @@ static struct {
+ 	u8		bank_xor;
+ } addr_hash;
+ 
++static struct {
++	u8 bank[NUM_BANK_BITS];
++	u8 col[NUM_COL_BITS];
++	u8 sid[NUM_SID_BITS];
++	u8 num_row_lo;
++	u8 num_row_hi;
++	u8 row_lo;
++	u8 row_hi;
++	u8 pc;
++} bit_shifts;
++
+ #define MI300_UMC_CH_BASE	0x90000
++#define MI300_ADDR_CFG		(MI300_UMC_CH_BASE + 0x30)
++#define MI300_ADDR_SEL		(MI300_UMC_CH_BASE + 0x40)
++#define MI300_COL_SEL_LO	(MI300_UMC_CH_BASE + 0x50)
++#define MI300_ADDR_SEL_2	(MI300_UMC_CH_BASE + 0xA4)
+ #define MI300_ADDR_HASH_BANK0	(MI300_UMC_CH_BASE + 0xC8)
+ #define MI300_ADDR_HASH_PC	(MI300_UMC_CH_BASE + 0xE0)
+ #define MI300_ADDR_HASH_PC2	(MI300_UMC_CH_BASE + 0xE4)
+@@ -90,17 +107,42 @@ static struct {
+ #define ADDR_HASH_ROW_XOR	GENMASK(31, 14)
+ #define ADDR_HASH_BANK_XOR	GENMASK(5, 0)
+ 
++#define ADDR_CFG_NUM_ROW_LO	GENMASK(11, 8)
++#define ADDR_CFG_NUM_ROW_HI	GENMASK(15, 12)
++
++#define ADDR_SEL_BANK0		GENMASK(3, 0)
++#define ADDR_SEL_BANK1		GENMASK(7, 4)
++#define ADDR_SEL_BANK2		GENMASK(11, 8)
++#define ADDR_SEL_BANK3		GENMASK(15, 12)
++#define ADDR_SEL_BANK4		GENMASK(20, 16)
++#define ADDR_SEL_ROW_LO		GENMASK(27, 24)
++#define ADDR_SEL_ROW_HI		GENMASK(31, 28)
++
++#define COL_SEL_LO_COL0		GENMASK(3, 0)
++#define COL_SEL_LO_COL1		GENMASK(7, 4)
++#define COL_SEL_LO_COL2		GENMASK(11, 8)
++#define COL_SEL_LO_COL3		GENMASK(15, 12)
++#define COL_SEL_LO_COL4		GENMASK(19, 16)
++
++#define ADDR_SEL_2_BANK5	GENMASK(4, 0)
++#define ADDR_SEL_2_CHAN		GENMASK(15, 12)
++
+ /*
+  * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
+- * for hashing. Do this during module init, since the values will not
+- * change during run time.
++ * for hashing.
++ *
++ * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH::ColSelLo registers to
++ * get the values needed to reconstruct the normalized address. Apply additional
++ * offsets to the raw register values, as needed.
++ *
++ * Do this during module init, since the values will not change during run time.
+  *
+  * These registers are instantiated for each UMC across each AMD Node.
+  * However, they should be identically programmed due to the fixed hardware
+  * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
+  * single global structure for simplicity.
+  */
+-int get_addr_hash_mi300(void)
++int get_umc_info_mi300(void)
+ {
+ 	u32 temp;
+ 	int ret;
+@@ -130,6 +172,44 @@ int get_addr_hash_mi300(void)
+ 
+ 	addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+ 
++	ret = amd_smn_read(0, MI300_ADDR_CFG, &temp);
++	if (ret)
++		return ret;
++
++	bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp);
++	bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp);
++
++	ret = amd_smn_read(0, MI300_ADDR_SEL, &temp);
++	if (ret)
++		return ret;
++
++	bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp);
++	bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp);
++	bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp);
++	bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp);
++	/* Use BankBit4 for the SID0 position. */
++	bit_shifts.sid[0]  = 5 + FIELD_GET(ADDR_SEL_BANK4, temp);
++	bit_shifts.row_lo  = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp);
++	bit_shifts.row_hi  = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp);
++
++	ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp);
++	if (ret)
++		return ret;
++
++	bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp);
++	bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp);
++	bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp);
++	bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp);
++	bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp);
++
++	ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp);
++	if (ret)
++		return ret;
++
++	/* Use BankBit5 for the SID1 position. */
++	bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp);
++	bit_shifts.pc	  = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp);
++
+ 	return 0;
+ }
+ 
+@@ -146,9 +226,6 @@ int get_addr_hash_mi300(void)
+  * The MCA address format is as follows:
+  *	MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
+  *
+- * The normalized address format is fixed in hardware and is as follows:
+- *	NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
+- *
+  * Additionally, the PC and Bank bits may be hashed. This must be accounted for before
+  * reconstructing the normalized address.
+  */
+@@ -158,18 +235,10 @@ int get_addr_hash_mi300(void)
+ #define MI300_UMC_MCA_PC	BIT(25)
+ #define MI300_UMC_MCA_SID	GENMASK(27, 26)
+ 
+-#define MI300_NA_COL_1_0	GENMASK(6, 5)
+-#define MI300_NA_PC		BIT(7)
+-#define MI300_NA_COL_3_2	GENMASK(9, 8)
+-#define MI300_NA_BANK_3_2	GENMASK(11, 10)
+-#define MI300_NA_BANK_1_0	GENMASK(13, 12)
+-#define MI300_NA_COL_4		BIT(14)
+-#define MI300_NA_ROW		GENMASK(28, 15)
+-#define MI300_NA_SID		GENMASK(30, 29)
+-
+ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
+ {
+-	u16 i, col, row, bank, pc, sid, temp;
++	u16 i, col, row, bank, pc, sid;
++	u32 temp;
+ 
+ 	col  = FIELD_GET(MI300_UMC_MCA_COL,  addr);
+ 	bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
+@@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
+ 
+ 	/* Calculate hash for PC bit. */
+ 	if (addr_hash.pc.xor_enable) {
+-		/* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
+-		bank |= sid << 5;
+-
+ 		temp  = bitwise_xor_bits(col  & addr_hash.pc.col_xor);
+ 		temp ^= bitwise_xor_bits(row  & addr_hash.pc.row_xor);
+-		temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
++		/* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */
++		temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor);
+ 		pc   ^= temp;
+-
+-		/* Drop SID bits for the sake of debug printing later. */
+-		bank &= 0x1F;
+ 	}
+ 
+ 	/* Reconstruct the normalized address starting with NA[4:0] = 0 */
+ 	addr  = 0;
+ 
+-	/* NA[6:5] = Column[1:0] */
+-	temp  = col & 0x3;
+-	addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
+-
+-	/* NA[7] = PC */
+-	addr |= FIELD_PREP(MI300_NA_PC, pc);
+-
+-	/* NA[9:8] = Column[3:2] */
+-	temp  = (col >> 2) & 0x3;
+-	addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
++	/* Column bits */
++	for (i = 0; i < NUM_COL_BITS; i++) {
++		temp  = (col >> i) & 0x1;
++		addr |= temp << bit_shifts.col[i];
++	}
+ 
+-	/* NA[11:10] = Bank[3:2] */
+-	temp  = (bank >> 2) & 0x3;
+-	addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
++	/* Bank bits */
++	for (i = 0; i < NUM_BANK_BITS; i++) {
++		temp  = (bank >> i) & 0x1;
++		addr |= temp << bit_shifts.bank[i];
++	}
+ 
+-	/* NA[13:12] = Bank[1:0] */
+-	temp  = bank & 0x3;
+-	addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
++	/* Row lo bits */
++	for (i = 0; i < bit_shifts.num_row_lo; i++) {
++		temp  = (row >> i) & 0x1;
++		addr |= temp << (i + bit_shifts.row_lo);
++	}
+ 
+-	/* NA[14] = Column[4] */
+-	temp  = (col >> 4) & 0x1;
+-	addr |= FIELD_PREP(MI300_NA_COL_4, temp);
++	/* Row hi bits */
++	for (i = 0; i < bit_shifts.num_row_hi; i++) {
++		temp  = (row >> (i + bit_shifts.num_row_lo)) & 0x1;
++		addr |= temp << (i + bit_shifts.row_hi);
++	}
+ 
+-	/* NA[28:15] = Row[13:0] */
+-	addr |= FIELD_PREP(MI300_NA_ROW, row);
++	/* PC bit */
++	addr |= pc << bit_shifts.pc;
+ 
+-	/* NA[30:29] = SID[1:0] */
+-	addr |= FIELD_PREP(MI300_NA_SID, sid);
++	/* SID bits */
++	for (i = 0; i < NUM_SID_BITS; i++) {
++		temp  = (sid >> i) & 0x1;
++		addr |= temp << bit_shifts.sid[i];
++	}
+ 
+ 	pr_debug("Addr=0x%016lx", addr);
+ 	pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
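
Every reconstruction loop above follows the same pattern: take bit i of a decoded field and deposit it at the register-programmed position bit_shifts.*[i]. The patch open-codes it per field; a generic helper expressing the pattern would look like this (illustrative):

/* Deposit bit i of @val at output position @shifts[i], for @nbits bits. */
static unsigned long scatter_bits(u32 val, const u8 *shifts,
				  unsigned int nbits)
{
	unsigned long out = 0;
	unsigned int i;

	for (i = 0; i < nbits; i++)
		out |= (unsigned long)((val >> i) & 0x1) << shifts[i];

	return out;
}

/* usage: addr |= scatter_bits(col, bit_shifts.col, NUM_COL_BITS); */
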
+diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+index ad3415a3851b2..50e486bcfa103 100644
+--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
++++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
+@@ -103,12 +103,14 @@ struct k3_r5_soc_data {
+  * @dev: cached device pointer
+  * @mode: Mode to configure the Cluster - Split or LockStep
+  * @cores: list of R5 cores within the cluster
++ * @core_transition: wait queue to sync core state changes
+  * @soc_data: SoC-specific feature data for a R5FSS
+  */
+ struct k3_r5_cluster {
+ 	struct device *dev;
+ 	enum cluster_mode mode;
+ 	struct list_head cores;
++	wait_queue_head_t core_transition;
+ 	const struct k3_r5_soc_data *soc_data;
+ };
+ 
+@@ -128,6 +130,7 @@ struct k3_r5_cluster {
+  * @atcm_enable: flag to control ATCM enablement
+  * @btcm_enable: flag to control BTCM enablement
+  * @loczrama: flag to dictate which TCM is at device address 0x0
++ * @released_from_reset: flag to signal when core is out of reset
+  */
+ struct k3_r5_core {
+ 	struct list_head elem;
+@@ -144,6 +147,7 @@ struct k3_r5_core {
+ 	u32 atcm_enable;
+ 	u32 btcm_enable;
+ 	u32 loczrama;
++	bool released_from_reset;
+ };
+ 
+ /**
+@@ -460,6 +464,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
+ 			ret);
+ 		return ret;
+ 	}
++	core->released_from_reset = true;
++	wake_up_interruptible(&cluster->core_transition);
+ 
+ 	/*
+ 	 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+@@ -542,7 +548,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+ 	struct device *dev = kproc->dev;
+-	struct k3_r5_core *core;
++	struct k3_r5_core *core0, *core;
+ 	u32 boot_addr;
+ 	int ret;
+ 
+@@ -568,6 +574,16 @@ static int k3_r5_rproc_start(struct rproc *rproc)
+ 				goto unroll_core_run;
+ 		}
+ 	} else {
++		/* do not allow core 1 to start before core 0 */
++		core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
++					 elem);
++		if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
++			dev_err(dev, "%s: can not start core 1 before core 0\n",
++				__func__);
++			ret = -EPERM;
++			goto put_mbox;
++		}
++
+ 		ret = k3_r5_core_run(core);
+ 		if (ret)
+ 			goto put_mbox;
+@@ -613,7 +629,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ {
+ 	struct k3_r5_rproc *kproc = rproc->priv;
+ 	struct k3_r5_cluster *cluster = kproc->cluster;
+-	struct k3_r5_core *core = kproc->core;
++	struct device *dev = kproc->dev;
++	struct k3_r5_core *core1, *core = kproc->core;
+ 	int ret;
+ 
+ 	/* halt all applicable cores */
+@@ -626,6 +643,16 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
+ 			}
+ 		}
+ 	} else {
++		/* do not allow core 0 to stop before core 1 */
++		core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
++					elem);
++		if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
++			dev_err(dev, "%s: can not stop core 0 before core 1\n",
++				__func__);
++			ret = -EPERM;
++			goto out;
++		}
++
+ 		ret = k3_r5_core_halt(core);
+ 		if (ret)
+ 			goto out;
+@@ -1140,6 +1167,12 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+ 		return ret;
+ 	}
+ 
++	/*
++	 * Skip the waiting mechanism for sequential power-on of cores if the
++	 * core has already been booted by another entity.
++	 */
++	core->released_from_reset = c_state;
++
+ 	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ 				     &stat);
+ 	if (ret < 0) {
+@@ -1280,6 +1313,26 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+ 		    cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ 		    cluster->mode == CLUSTER_MODE_SINGLECORE)
+ 			break;
++
++		/*
++		 * R5 cores must be powered on sequentially; within a cluster,
++		 * core0 should be in a higher power state than core1. So wait
++		 * for the current core to power up before proceeding to the
++		 * next core, with a 2 sec timeout for each core.
++		 *
++		 * This waiting mechanism is necessary because
++		 * rproc_auto_boot_callback() for core1 can be called before
++		 * core0 due to thread execution order.
++		 */
++		ret = wait_event_interruptible_timeout(cluster->core_transition,
++						       core->released_from_reset,
++						       msecs_to_jiffies(2000));
++		if (ret <= 0) {
++			dev_err(dev,
++				"Timed out waiting for %s core to power up!\n",
++				rproc->name);
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -1709,6 +1762,7 @@ static int k3_r5_probe(struct platform_device *pdev)
+ 	cluster->dev = dev;
+ 	cluster->soc_data = data;
+ 	INIT_LIST_HEAD(&cluster->cores);
++	init_waitqueue_head(&cluster->core_transition);
+ 
+ 	ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ 	if (ret < 0 && ret != -EINVAL) {
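
The synchronization added above is a standard waitqueue handshake: the core coming out of reset sets a flag and wakes the queue, and the next core's init path sleeps on that flag with a timeout. Reduced to its essentials (illustrative sketch; the -ETIMEDOUT mapping is a choice of this sketch, the patch returns ret directly):

#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(core_transition);
static bool released_from_reset;

/* producer: called once a core is released from reset */
static void mark_core_released(void)
{
	released_from_reset = true;
	wake_up_interruptible(&core_transition);
}

/* consumer: the next core waits up to 2 s before powering on */
static int wait_for_previous_core(void)
{
	long ret = wait_event_interruptible_timeout(core_transition,
						    released_from_reset,
						    msecs_to_jiffies(2000));

	return ret > 0 ? 0 : (ret ? ret : -ETIMEDOUT);
}
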
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 55d590b919476..6a3db7032c695 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -2158,10 +2158,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(persistent_id);
+ 
++/**
++ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_supported attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_supported_show(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++
++	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
++}
++static DEVICE_ATTR_RO(sas_ncq_prio_supported);
++
++/**
++ * sas_ncq_prio_enable_show - show whether sending prioritized I/O commands is enabled
++ * @dev: pointer to embedded device
++ * @attr: sas_ncq_prio_enable attribute descriptor
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read/write' sdev attribute, only works with SATA devices
++ */
++static ssize_t
++sas_ncq_prio_enable_show(struct device *dev,
++				 struct device_attribute *attr, char *buf)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++	struct mpi3mr_sdev_priv_data *sdev_priv_data =  sdev->hostdata;
++
++	if (!sdev_priv_data)
++		return 0;
++
++	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
++}
++
++static ssize_t
++sas_ncq_prio_enable_store(struct device *dev,
++				  struct device_attribute *attr,
++				  const char *buf, size_t count)
++{
++	struct scsi_device *sdev = to_scsi_device(dev);
++	struct mpi3mr_sdev_priv_data *sdev_priv_data =  sdev->hostdata;
++	bool ncq_prio_enable = 0;
++
++	if (kstrtobool(buf, &ncq_prio_enable))
++		return -EINVAL;
++
++	if (!sas_ata_ncq_prio_supported(sdev))
++		return -EINVAL;
++
++	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
++
++	return strlen(buf);
++}
++static DEVICE_ATTR_RW(sas_ncq_prio_enable);
++
+ static struct attribute *mpi3mr_dev_attrs[] = {
+ 	&dev_attr_sas_address.attr,
+ 	&dev_attr_device_handle.attr,
+ 	&dev_attr_persistent_id.attr,
++	&dev_attr_sas_ncq_prio_supported.attr,
++	&dev_attr_sas_ncq_prio_enable.attr,
+ 	NULL,
+ };
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 1b492e9a3e55e..86f553c617568 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ 	if (ioc->facts.MaxDevHandle % 8)
+ 		ioc->pd_handles_sz++;
++	/*
++	 * pd_handles_sz should provide at least the minimal room needed by
++	 * set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
++	 */
++	ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
++
+ 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ 	    GFP_KERNEL);
+ 	if (!ioc->pd_handles) {
+@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+ 	if (ioc->facts.MaxDevHandle % 8)
+ 		ioc->pend_os_device_add_sz++;
++
++	/*
++	 * pend_os_device_add_sz should provide at least the minimal room needed
++	 * by set_bit()/test_bit(), otherwise an out-of-bounds access may occur.
++	 */
++	ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
++					   sizeof(unsigned long));
+ 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ 	    GFP_KERNEL);
+ 	if (!ioc->pend_os_device_add) {
+@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+ 		if (ioc->facts.MaxDevHandle % 8)
+ 			pd_handles_sz++;
+ 
++		/*
++		 * pd_handles should provide at least the minimal room needed by
++		 * set_bit()/test_bit(), otherwise an out-of-bounds access may
++		 * occur.
++		 */
++		pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
+ 		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+ 		    GFP_KERNEL);
+ 		if (!pd_handles) {
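
The ALIGN() additions matter because these buffers are used as bitmaps: set_bit()/test_bit() operate on whole unsigned long words, so a byte-granular allocation must be rounded up to a word multiple or accesses to the last word can overrun the buffer. The sizing logic condensed into one illustrative helper (equivalently, BITS_TO_LONGS(max_handle) * sizeof(unsigned long)):

/* Bytes for a bitmap of @max_handle bits, word-aligned for the
 * set_bit()/test_bit() accessors.
 */
static size_t handle_bitmap_bytes(u32 max_handle)
{
	size_t sz = max_handle / 8;

	if (max_handle % 8)
		sz++;

	return ALIGN(sz, sizeof(unsigned long));
}
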
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index bf100a4ebfc36..fe1e96fda284b 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -2048,9 +2048,6 @@ void
+ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ 	struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
+ 
+-/* NCQ Prio Handling Check */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+-
+ void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+ void mpt3sas_init_debugfs(void);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index 1c9fd26195b81..87784c96249a7 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
+ {
+ 	struct scsi_device *sdev = to_scsi_device(dev);
+ 
+-	return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
++	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+ }
+ static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+ 
+@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
+ 	if (kstrtobool(buf, &ncq_prio_enable))
+ 		return -EINVAL;
+ 
+-	if (!scsih_ncq_prio_supp(sdev))
++	if (!sas_ata_ncq_prio_supported(sdev))
+ 		return -EINVAL;
+ 
+ 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index ef8ee93005eae..7e923e02491fd 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -12573,29 +12573,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
+ 	return PCI_ERS_RESULT_RECOVERED;
+ }
+ 
+-/**
+- * scsih_ncq_prio_supp - Check for NCQ command priority support
+- * @sdev: scsi device struct
+- *
+- * This is called when a user indicates they would like to enable
+- * ncq command priorities. This works only on SATA devices.
+- */
+-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+-{
+-	struct scsi_vpd *vpd;
+-	bool ncq_prio_supp = false;
+-
+-	rcu_read_lock();
+-	vpd = rcu_dereference(sdev->vpd_pg89);
+-	if (!vpd || vpd->len < 214)
+-		goto out;
+-
+-	ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+-out:
+-	rcu_read_unlock();
+-
+-	return ncq_prio_supp;
+-}
+ /*
+  * The pci device ids are defined in mpi/mpi2_cnfg.h.
+  */
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index f0464db3f9de9..ee69bd35889d1 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -673,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
+ 		sdev->use_10_for_rw = 0;
+ 
+ 		sdev->cdl_supported = 1;
++
++		/*
++		 * If the device supports CDL, make sure that the current drive
++		 * feature status is consistent with the user controlled
++		 * cdl_enable state.
++		 */
++		scsi_cdl_enable(sdev, sdev->cdl_enable);
+ 	} else {
+ 		sdev->cdl_supported = 0;
+ 	}
+diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
+index d704c484a251c..7fdd2b61fe855 100644
+--- a/drivers/scsi/scsi_transport_sas.c
++++ b/drivers/scsi/scsi_transport_sas.c
+@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+ }
+ EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+ 
++/**
++ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
++ * @sdev: SCSI device
++ *
++ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
++ * Information). Since this VPD page is implemented only for ATA devices,
++ * this function always returns false for SCSI devices.
++ */
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
++{
++	struct scsi_vpd *vpd;
++	bool ncq_prio_supported = false;
++
++	rcu_read_lock();
++	vpd = rcu_dereference(sdev->vpd_pg89);
++	if (vpd && vpd->len >= 214)
++		ncq_prio_supported = (vpd->data[213] >> 4) & 1;
++	rcu_read_unlock();
++
++	return ncq_prio_supported;
++}
++EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
++
+ /*
+  * SAS Phy attributes
+  */
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index caac482fff2ff..4f7b9b5b9c5b4 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3572,16 +3572,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ 
+ static void sd_read_block_zero(struct scsi_disk *sdkp)
+ {
+-	unsigned int buf_len = sdkp->device->sector_size;
+-	char *buffer, cmd[10] = { };
++	struct scsi_device *sdev = sdkp->device;
++	unsigned int buf_len = sdev->sector_size;
++	u8 *buffer, cmd[16] = { };
+ 
+ 	buffer = kmalloc(buf_len, GFP_KERNEL);
+ 	if (!buffer)
+ 		return;
+ 
+-	cmd[0] = READ_10;
+-	put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+-	put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
++	if (sdev->use_16_for_rw) {
++		cmd[0] = READ_16;
++		put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
++		put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
++	} else {
++		cmd[0] = READ_10;
++		put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
++		put_unaligned_be16(1, &cmd[7]);	/* Transfer 1 logical block */
++	}
+ 
+ 	scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ 			 SD_TIMEOUT, sdkp->max_retries, NULL);
+diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
+index 674a350cc6769..fa068b34b040c 100644
+--- a/drivers/spmi/hisi-spmi-controller.c
++++ b/drivers/spmi/hisi-spmi-controller.c
+@@ -300,7 +300,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&spmi_controller->lock);
+ 
+-	ctrl->nr = spmi_controller->channel;
+ 	ctrl->dev.parent = pdev->dev.parent;
+ 	ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+ 
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 38b7d02384d7c..258482036f1e9 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -936,9 +936,17 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	if (ret)
+ 		goto out_cdev_type;
+ 
++	/*
++	 * The cooling device's current state is only needed for debug
++	 * initialization below, so a failure to get it does not cause
++	 * the entire cooling device initialization to fail.  However,
++	 * the debug support will not work for the device if its initial state
++	 * cannot be determined, and drivers are responsible for ensuring
++	 * that this does not happen.
++	 */
+ 	ret = cdev->ops->get_cur_state(cdev, &current_state);
+ 	if (ret)
+-		goto out_cdev_type;
++		current_state = ULONG_MAX;
+ 
+ 	thermal_cooling_device_setup_sysfs(cdev);
+ 
+@@ -953,7 +961,8 @@ __thermal_cooling_device_register(struct device_node *np,
+ 		return ERR_PTR(ret);
+ 	}
+ 
+-	thermal_debug_cdev_add(cdev, current_state);
++	if (current_state <= cdev->max_state)
++		thermal_debug_cdev_add(cdev, current_state);
+ 
+ 	/* Add 'this' new cdev to the global cdev list */
+ 	mutex_lock(&thermal_list_lock);
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index e324cd8997193..0754fe76edde4 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
+ 	debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
+ 	debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
+ 	debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
+-	if (independent_voltage_margins(usb4) ||
+-	    (supports_time(usb4) && independent_time_margins(usb4)))
++	if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
++	    (supports_time(usb4) &&
++	     independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
+ 		debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+ }
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index f252d0b5a434e..5e9ca4376d686 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ 	else if (ldata->raw || (L_EXTPROC(tty) && !preops))
+ 		n_tty_receive_buf_raw(tty, cp, fp, count);
+ 	else if (tty->closing && !L_EXTPROC(tty)) {
+-		if (la_count > 0)
++		if (la_count > 0) {
+ 			n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
+-		if (count > la_count)
+-			n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
++			cp += la_count;
++			if (fp)
++				fp += la_count;
++			count -= la_count;
++		}
++		if (count > 0)
++			n_tty_receive_buf_closing(tty, cp, fp, count, false);
+ 	} else {
+-		if (la_count > 0)
++		if (la_count > 0) {
+ 			n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
+-		if (count > la_count)
+-			n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
++			cp += la_count;
++			if (fp)
++				fp += la_count;
++			count -= la_count;
++		}
++		if (count > 0)
++			n_tty_receive_buf_standard(tty, cp, fp, count, false);
+ 
+ 		flush_echoes(tty);
+ 		if (tty->ops->flush_chars)
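
The underlying bug in the old code: the second call reused the original cp/fp base pointers, so after handling the la_count lookahead bytes it processed the first count - la_count bytes again and never reached the tail of the buffer. The fixed shape, with process() standing in for the receive helpers:

if (la_count > 0) {
	process(tty, cp, fp, la_count, true);	/* lookahead portion */
	cp += la_count;
	if (fp)
		fp += la_count;
	count -= la_count;
}
if (count > 0)
	process(tty, cp, fp, count, false);	/* remainder */
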
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 1300c92b8702a..94d680e4b5353 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -55,6 +55,7 @@
+ #define DW_UART_QUIRK_SKIP_SET_RATE	BIT(2)
+ #define DW_UART_QUIRK_IS_DMA_FC		BIT(3)
+ #define DW_UART_QUIRK_APMC0D08		BIT(4)
++#define DW_UART_QUIRK_CPR_VALUE		BIT(5)
+ 
+ static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
+ {
+@@ -445,6 +446,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
+ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
+ {
+ 	unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
++	u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
++
++	if (quirks & DW_UART_QUIRK_CPR_VALUE)
++		data->data.cpr_value = cpr_value;
+ 
+ #ifdef CONFIG_64BIT
+ 	if (quirks & DW_UART_QUIRK_OCTEON) {
+@@ -727,8 +732,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
+ 
+ static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
+ 	.usr_reg = DW_UART_USR,
+-	.cpr_val = 0x00012f32,
+-	.quirks = DW_UART_QUIRK_IS_DMA_FC,
++	.cpr_value = 0x00012f32,
++	.quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
+ };
+ 
+ static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
+diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
+index 3e33ddf7bc800..5a2520943dfd5 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.c
++++ b/drivers/tty/serial/8250/8250_dwlib.c
+@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
+ void dw8250_setup_port(struct uart_port *p)
+ {
+ 	struct dw8250_port_data *pd = p->private_data;
+-	struct dw8250_data *data = to_dw8250_data(pd);
+ 	struct uart_8250_port *up = up_to_u8250p(p);
+ 	u32 reg, old_dlf;
+ 
+@@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p)
+ 
+ 	reg = dw8250_readl_ext(p, DW_UART_CPR);
+ 	if (!reg) {
+-		reg = data->pdata->cpr_val;
++		reg = pd->cpr_value;
+ 		dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
+ 	}
+ 	if (!reg)
+diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
+index f13e91f2cace9..794a9014cdac1 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.h
++++ b/drivers/tty/serial/8250/8250_dwlib.h
+@@ -19,6 +19,7 @@ struct dw8250_port_data {
+ 	struct uart_8250_dma	dma;
+ 
+ 	/* Hardware configuration */
++	u32			cpr_value;
+ 	u8			dlf_size;
+ 
+ 	/* RS485 variables */
+@@ -27,7 +28,7 @@ struct dw8250_port_data {
+ 
+ struct dw8250_platform_data {
+ 	u8 usr_reg;
+-	u32 cpr_val;
++	u32 cpr_value;
+ 	unsigned int quirks;
+ };
+ 
+diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
+index f1a51b00b1b9d..ba96fa913e7f9 100644
+--- a/drivers/tty/serial/8250/8250_pxa.c
++++ b/drivers/tty/serial/8250/8250_pxa.c
+@@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
+ 	uart.port.iotype = UPIO_MEM32;
+ 	uart.port.regshift = 2;
+ 	uart.port.fifosize = 64;
++	uart.tx_loadsz = 32;
+ 	uart.dl_write = serial_pxa_dl_write;
+ 
+ 	ret = serial8250_register_8250_port(&uart);
+diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
+index 7e3a1c7b097c3..66fd117d8aea8 100644
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -63,6 +63,13 @@ static int serial_port_runtime_suspend(struct device *dev)
+ 	if (port->flags & UPF_DEAD)
+ 		return 0;
+ 
++	/*
++	 * Nothing to do on pm_runtime_force_suspend(), see
++	 * DEFINE_RUNTIME_DEV_PM_OPS.
++	 */
++	if (!pm_runtime_enabled(dev))
++		return 0;
++
+ 	uart_port_lock_irqsave(port, &flags);
+ 	if (!port_dev->tx_enabled) {
+ 		uart_port_unlock_irqrestore(port, flags);
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 005d63ab1f441..8944548c30fa1 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -634,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ 	struct ufs_hw_queue *hwq;
+ 	unsigned long flags;
+-	int err = FAILED;
++	int err;
+ 
+ 	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+ 		dev_err(hba->dev,
+ 			"%s: skip abort. cmd at tag %d already completed.\n",
+ 			__func__, tag);
+-		goto out;
++		return FAILED;
+ 	}
+ 
+ 	/* Skip task abort in case previous aborts failed and report failure */
+ 	if (lrbp->req_abort_skip) {
+ 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+ 			__func__, tag);
+-		goto out;
++		return FAILED;
+ 	}
+ 
+ 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+@@ -659,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 		 */
+ 		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+ 			__func__, hwq->id, tag);
+-		goto out;
++		return FAILED;
+ 	}
+ 
+ 	/*
+@@ -667,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+ 	 * in the completion queue either. Query the device to see if
+ 	 * the command is being processed in the device.
+ 	 */
+-	if (ufshcd_try_to_abort_task(hba, tag)) {
++	err = ufshcd_try_to_abort_task(hba, tag);
++	if (err) {
+ 		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+ 		lrbp->req_abort_skip = true;
+-		goto out;
++		return FAILED;
+ 	}
+ 
+-	err = SUCCESS;
+ 	spin_lock_irqsave(&hwq->cq_lock, flags);
+ 	if (ufshcd_cmd_inflight(lrbp->cmd))
+ 		ufshcd_release_scsi_cmd(hba, lrbp);
+ 	spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ 
+-out:
+-	return err;
++	return SUCCESS;
+ }
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 1322a9c318cff..ce1abd5d725ad 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -1392,7 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ 	 * make sure that there are no outstanding requests when
+ 	 * clock scaling is in progress
+ 	 */
+-	ufshcd_scsi_block_requests(hba);
++	blk_mq_quiesce_tagset(&hba->host->tag_set);
+ 	mutex_lock(&hba->wb_mutex);
+ 	down_write(&hba->clk_scaling_lock);
+ 
+@@ -1401,7 +1401,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
+ 		ret = -EBUSY;
+ 		up_write(&hba->clk_scaling_lock);
+ 		mutex_unlock(&hba->wb_mutex);
+-		ufshcd_scsi_unblock_requests(hba);
++		blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ 		goto out;
+ 	}
+ 
+@@ -1422,7 +1422,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
+ 
+ 	mutex_unlock(&hba->wb_mutex);
+ 
+-	ufshcd_scsi_unblock_requests(hba);
++	blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ 	ufshcd_release(hba);
+ }
+ 
+diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
+index 3a9a0dd4be706..949eca0adebea 100644
+--- a/drivers/usb/Makefile
++++ b/drivers/usb/Makefile
+@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD)	+= host/
+ obj-$(CONFIG_USB_FSL_USB2)	+= host/
+ obj-$(CONFIG_USB_FOTG210_HCD)	+= host/
+ obj-$(CONFIG_USB_MAX3421_HCD)	+= host/
++obj-$(CONFIG_USB_XEN_HCD)	+= host/
+ 
+ obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
+ 
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index c553decb54610..6830be4419e20 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
+ 			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
+ 			goto sw; /* halt is cleared in work */
+ 		default:
+-			dev_err(&desc->intf->dev,
++			dev_err_ratelimited(&desc->intf->dev,
+ 				"nonzero urb status received: %d\n", status);
+ 			break;
+ 		}
+ 	}
+ 
+ 	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
+-		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
++		dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ 			urb->actual_length);
+ 		goto exit;
+ 	}
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index c0e005670d67d..fb1aa0d4fc285 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ 	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
+ 	struct usb_anchor *anchor = urb->anchor;
+ 	int status = urb->unlinked;
++	unsigned long flags;
+ 
+ 	urb->hcpriv = NULL;
+ 	if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+@@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ 	/* pass ownership to the completion handler */
+ 	urb->status = status;
+ 	/*
+-	 * This function can be called in task context inside another remote
+-	 * coverage collection section, but kcov doesn't support that kind of
+-	 * recursion yet. Only collect coverage in softirq context for now.
++	 * Only collect coverage in the softirq context and disable interrupts
++	 * to avoid scenarios with nested remote coverage collection sections
++	 * that KCOV does not support.
++	 * See the comment next to kcov_remote_start_usb_softirq() for details.
+ 	 */
+-	kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
++	flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ 	urb->complete(urb);
+-	kcov_remote_stop_softirq();
++	kcov_remote_stop_softirq(flags);
+ 
+ 	usb_anchor_resume_wakeups(anchor);
+ 	atomic_dec(&urb->use_count);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 93b6976480188..b271ec916926b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -36,6 +36,7 @@
+ 
+ #define PCI_VENDOR_ID_ETRON		0x1b6f
+ #define PCI_DEVICE_ID_EJ168		0x7023
++#define PCI_DEVICE_ID_EJ188		0x7052
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+@@ -402,6 +403,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
++			pdev->device == PCI_DEVICE_ID_EJ188) {
++		xhci->quirks |= XHCI_RESET_ON_RESUME;
++		xhci->quirks |= XHCI_BROKEN_STREAMS;
++	}
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ 	    pdev->device == 0x0014) {
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 575f0fd9c9f11..850846c206ed4 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1034,13 +1034,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 				break;
+ 			case TD_DIRTY: /* TD is cached, clear it */
+ 			case TD_HALTED:
++			case TD_CLEARING_CACHE_DEFERRED:
++				if (cached_td) {
++					if (cached_td->urb->stream_id != td->urb->stream_id) {
++						/* Multiple streams case, defer move dq */
++						xhci_dbg(xhci,
++							 "Move dq deferred: stream %u URB %p\n",
++							 td->urb->stream_id, td->urb);
++						td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
++						break;
++					}
++
++					/* Should never happen, but clear the TD if it does */
++					xhci_warn(xhci,
++						  "Found multiple active URBs %p and %p in stream %u?\n",
++						  td->urb, cached_td->urb,
++						  td->urb->stream_id);
++					td_to_noop(xhci, ring, cached_td, false);
++					cached_td->cancel_status = TD_CLEARED;
++				}
++
+ 				td->cancel_status = TD_CLEARING_CACHE;
+-				if (cached_td)
+-					/* FIXME  stream case, several stopped rings */
+-					xhci_dbg(xhci,
+-						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
+-						 td->urb->stream_id, td->urb,
+-						 cached_td->urb->stream_id, cached_td->urb);
+ 				cached_td = td;
+ 				break;
+ 			}
+@@ -1060,10 +1074,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ 	if (err) {
+ 		/* Failed to move past cached td, just set cached TDs to no-op */
+ 		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
+-			if (td->cancel_status != TD_CLEARING_CACHE)
++			/*
++			 * Deferred TDs need to have the deq pointer set after the above command
++			 * completes, so if that failed we just give up on all of them (and
++			 * complain loudly since this could cause issues due to caching).
++			 */
++			if (td->cancel_status != TD_CLEARING_CACHE &&
++			    td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
+ 				continue;
+-			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+-				 td->urb);
++			xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
++				  td->urb);
+ 			td_to_noop(xhci, ring, td, false);
+ 			td->cancel_status = TD_CLEARED;
+ 		}
+@@ -1350,6 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	struct xhci_ep_ctx *ep_ctx;
+ 	struct xhci_slot_ctx *slot_ctx;
+ 	struct xhci_td *td, *tmp_td;
++	bool deferred = false;
+ 
+ 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+@@ -1436,6 +1457,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+ 				 __func__, td->urb);
+ 			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
++		} else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
++			deferred = true;
+ 		} else {
+ 			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+ 				 __func__, td->urb, td->cancel_status);
+@@ -1445,8 +1468,17 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+ 	ep->ep_state &= ~SET_DEQ_PENDING;
+ 	ep->queued_deq_seg = NULL;
+ 	ep->queued_deq_ptr = NULL;
+-	/* Restart any rings with pending URBs */
+-	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++
++	if (deferred) {
++		/* We have more streams to clear */
++		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
++			 __func__);
++		xhci_invalidate_cancelled_tds(ep);
++	} else {
++		/* Restart any rings with pending URBs */
++		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
++		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
++	}
+ }
+ 
+ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+@@ -2535,9 +2567,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 		goto finish_td;
+ 	case COMP_STOPPED_LENGTH_INVALID:
+ 		/* stopped on ep trb with invalid length, exclude it */
+-		ep_trb_len	= 0;
+-		remaining	= 0;
+-		break;
++		td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
++		goto finish_td;
+ 	case COMP_USB_TRANSACTION_ERROR:
+ 		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
+ 		    (ep->err_count++ > MAX_SOFT_RETRY) ||
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 31566e82bbd39..069a187540a0c 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status {
+ 	TD_DIRTY = 0,
+ 	TD_HALTED,
+ 	TD_CLEARING_CACHE,
++	TD_CLEARING_CACHE_DEFERRED,
+ 	TD_CLEARED,
+ };
+ 
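The xhci-ring changes above implement per-stream deferral: a Set TR Dequeue command addresses one stream ring at a time, so when cancelled TDs span several streams only one stream's TDs are marked TD_CLEARING_CACHE for the in-flight command, the rest are parked as TD_CLEARING_CACHE_DEFERRED, and xhci_handle_cmd_set_deq() re-runs the invalidation until nothing is deferred. A minimal sketch of the classification step, with hypothetical types rather than the driver's own:

#include <stddef.h>

enum cancel_state {
	CS_DIRTY, CS_CLEARING_CACHE, CS_CLEARING_CACHE_DEFERRED, CS_CLEARED
};

struct td_s {
	unsigned int stream_id;
	enum cancel_state state;
	struct td_s *next;
};

/* Queue at most one stream's TDs behind the current command; defer the
 * rest so each remaining stream gets its own Set TR Dequeue pass later. */
static void classify_cancelled(struct td_s *list)
{
	struct td_s *cached = NULL;

	for (struct td_s *td = list; td; td = td->next) {
		if (cached && cached->stream_id != td->stream_id) {
			td->state = CS_CLEARING_CACHE_DEFERRED;
			continue;
		}
		td->state = CS_CLEARING_CACHE;
		cached = td;
	}
}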
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index 115f05a6201a1..40d34cc28344a 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -105,6 +105,8 @@ struct alauda_info {
+ 	unsigned char sense_key;
+ 	unsigned long sense_asc;	/* additional sense code */
+ 	unsigned long sense_ascq;	/* additional sense code qualifier */
++
++	bool media_initialized;
+ };
+ 
+ #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
+@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
+ 	}
+ 
+ 	/* Check for media change */
+-	if (status[0] & 0x08) {
++	if (status[0] & 0x08 || !info->media_initialized) {
+ 		usb_stor_dbg(us, "Media change detected\n");
+ 		alauda_free_maps(&MEDIA_INFO(us));
+-		alauda_init_media(us);
+-
++		rc = alauda_init_media(us);
++		if (rc == USB_STOR_TRANSPORT_GOOD)
++			info->media_initialized = true;
+ 		info->sense_key = UNIT_ATTENTION;
+ 		info->sense_asc = 0x28;
+ 		info->sense_ascq = 0x00;
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 8a1af08f71b64..5d4da962acc87 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
+ 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
+ 	caps.role = TYPEC_SOURCE;
+ 
+-	if (cap)
++	if (cap) {
+ 		usb_power_delivery_unregister_capabilities(cap);
++		port->partner_source_caps = NULL;
++	}
+ 
+ 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
+ 	if (IS_ERR(cap))
+@@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+ 		port->tcpc->set_bist_data(port->tcpc, false);
+ 
+ 	switch (port->state) {
++	case TOGGLING:
+ 	case ERROR_RECOVERY:
+ 	case PORT_RESET:
+ 	case PORT_RESET_WAIT_OFF:
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 4cba80b34387c..459514da16601 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1290,7 +1290,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 				struct btrfs_chunk_map *map)
+ {
+ 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+-	struct btrfs_device *device = map->stripes[zone_idx].dev;
++	struct btrfs_device *device;
+ 	int dev_replace_is_ongoing = 0;
+ 	unsigned int nofs_flag;
+ 	struct blk_zone zone;
+@@ -1298,7 +1298,11 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 
+ 	info->physical = map->stripes[zone_idx].physical;
+ 
++	down_read(&dev_replace->rwsem);
++	device = map->stripes[zone_idx].dev;
++
+ 	if (!device->bdev) {
++		up_read(&dev_replace->rwsem);
+ 		info->alloc_offset = WP_MISSING_DEV;
+ 		return 0;
+ 	}
+@@ -1308,6 +1312,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 		__set_bit(zone_idx, active);
+ 
+ 	if (!btrfs_dev_is_sequential(device, info->physical)) {
++		up_read(&dev_replace->rwsem);
+ 		info->alloc_offset = WP_CONVENTIONAL;
+ 		return 0;
+ 	}
+@@ -1315,11 +1320,9 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 	/* This zone will be used for allocation, so mark this zone non-empty. */
+ 	btrfs_dev_clear_zone_empty(device, info->physical);
+ 
+-	down_read(&dev_replace->rwsem);
+ 	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ 	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+ 		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
+-	up_read(&dev_replace->rwsem);
+ 
+ 	/*
+ 	 * The group is mapped to a sequential zone. Get the zone write pointer
+@@ -1330,6 +1333,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 	ret = btrfs_get_dev_zone(device, info->physical, &zone);
+ 	memalloc_nofs_restore(nofs_flag);
+ 	if (ret) {
++		up_read(&dev_replace->rwsem);
+ 		if (ret != -EIO && ret != -EOPNOTSUPP)
+ 			return ret;
+ 		info->alloc_offset = WP_MISSING_DEV;
+@@ -1341,6 +1345,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+ 			zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
+ 			device->devid);
++		up_read(&dev_replace->rwsem);
+ 		return -EIO;
+ 	}
+ 
+@@ -1368,6 +1373,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ 		break;
+ 	}
+ 
++	up_read(&dev_replace->rwsem);
++
+ 	return 0;
+ }
+ 
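The btrfs hunks above widen the dev_replace read lock: map->stripes[zone_idx].dev is now loaded and used entirely under the rwsem, so a concurrent device replace cannot swap the stripe device between the load and its last use. A generic userspace sketch of the pattern, with a pthread rwlock standing in for the kernel rwsem and hypothetical names:

#include <pthread.h>

struct device;
int query_zone(struct device *dev);	/* assumed, defined elsewhere */

static pthread_rwlock_t replace_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct device *stripe_dev;	/* a writer may swap this */

static int load_zone_info(void)
{
	int ret;

	/* Hold the lock across *every* use of the pointer, not just the
	 * call that first needed it; dropping it early reopens the race. */
	pthread_rwlock_rdlock(&replace_lock);
	ret = query_zone(stripe_dev);
	pthread_rwlock_unlock(&replace_lock);
	return ret;
}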
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 6465e25742309..06cdf1a8a16f6 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
++void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ {
+ 	struct xarray *xa = &cache->reqs;
+ 	struct cachefiles_req *req;
+@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+ 	xa_for_each(xa, index, req) {
+ 		req->error = -EIO;
+ 		complete(&req->done);
++		__xa_erase(xa, index);
+ 	}
+ 	xa_unlock(xa);
+ 
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index d33169f0018b1..6845a90cdfcce 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
+ 	int				ondemand_id;
+ 	enum cachefiles_object_state	state;
+ 	struct cachefiles_object	*object;
++	spinlock_t			lock;
+ };
+ 
+ /*
+@@ -138,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+ struct cachefiles_req {
+ 	struct cachefiles_object *object;
+ 	struct completion done;
++	refcount_t ref;
+ 	int error;
+ 	struct cachefiles_msg msg;
+ };
+@@ -186,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
+  * daemon.c
+  */
+ extern const struct file_operations cachefiles_daemon_fops;
++extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
+ extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
+ extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
+ 
+@@ -424,6 +427,8 @@ do {							\
+ 	pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);	\
+ 	fscache_io_error((___cache)->cache);		\
+ 	set_bit(CACHEFILES_DEAD, &(___cache)->flags);	\
++	if (cachefiles_in_ondemand_mode(___cache))	\
++		cachefiles_flush_reqs(___cache);	\
+ } while (0)
+ 
+ #define cachefiles_io_error_obj(object, FMT, ...)			\
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 4ba42f1fa3b40..89f118d68d125 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -4,19 +4,40 @@
+ #include <linux/uio.h>
+ #include "internal.h"
+ 
++struct ondemand_anon_file {
++	struct file *file;
++	int fd;
++};
++
++static inline void cachefiles_req_put(struct cachefiles_req *req)
++{
++	if (refcount_dec_and_test(&req->ref))
++		kfree(req);
++}
++
+ static int cachefiles_ondemand_fd_release(struct inode *inode,
+ 					  struct file *file)
+ {
+ 	struct cachefiles_object *object = file->private_data;
+-	struct cachefiles_cache *cache = object->volume->cache;
+-	struct cachefiles_ondemand_info *info = object->ondemand;
+-	int object_id = info->ondemand_id;
++	struct cachefiles_cache *cache;
++	struct cachefiles_ondemand_info *info;
++	int object_id;
+ 	struct cachefiles_req *req;
+-	XA_STATE(xas, &cache->reqs, 0);
++	XA_STATE(xas, NULL, 0);
++
++	if (!object)
++		return 0;
++
++	info = object->ondemand;
++	cache = object->volume->cache;
++	xas.xa = &cache->reqs;
+ 
+ 	xa_lock(&cache->reqs);
++	spin_lock(&info->lock);
++	object_id = info->ondemand_id;
+ 	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+ 	cachefiles_ondemand_set_object_close(object);
++	spin_unlock(&info->lock);
+ 
+ 	/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
+ 	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+@@ -116,6 +137,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ {
+ 	struct cachefiles_req *req;
+ 	struct fscache_cookie *cookie;
++	struct cachefiles_ondemand_info *info;
+ 	char *pid, *psize;
+ 	unsigned long id;
+ 	long size;
+@@ -166,6 +188,33 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 		goto out;
+ 	}
+ 
++	info = req->object->ondemand;
++	spin_lock(&info->lock);
++	/*
++	 * The anonymous fd was closed before copen? Fail the request.
++	 *
++	 *             t1             |             t2
++	 * ---------------------------------------------------------
++	 *                             cachefiles_ondemand_copen
++	 *                             req = xa_erase(&cache->reqs, id)
++	 * // Anon fd is maliciously closed.
++	 * cachefiles_ondemand_fd_release
++	 * xa_lock(&cache->reqs)
++	 * cachefiles_ondemand_set_object_close(object)
++	 * xa_unlock(&cache->reqs)
++	 *                             cachefiles_ondemand_set_object_open
++	 *                             // No one will ever close it again.
++	 * cachefiles_ondemand_daemon_read
++	 * cachefiles_ondemand_select_req
++	 *
++	 * The daemon gets a read req whose fd is already closed, so it
++	 * can't issue a cread ioctl with it and hangs.
++	 */
++	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
++		spin_unlock(&info->lock);
++		req->error = -EBADFD;
++		goto out;
++	}
+ 	cookie = req->object->cookie;
+ 	cookie->object_size = size;
+ 	if (size)
+@@ -175,6 +224,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	trace_cachefiles_ondemand_copen(req->object, id, size);
+ 
+ 	cachefiles_ondemand_set_object_open(req->object);
++	spin_unlock(&info->lock);
+ 	wake_up_all(&cache->daemon_pollwq);
+ 
+ out:
+@@ -205,14 +255,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
+ 	return 0;
+ }
+ 
+-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
++static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
++				      struct ondemand_anon_file *anon_file)
+ {
+ 	struct cachefiles_object *object;
+ 	struct cachefiles_cache *cache;
+ 	struct cachefiles_open *load;
+-	struct file *file;
+ 	u32 object_id;
+-	int ret, fd;
++	int ret;
+ 
+ 	object = cachefiles_grab_object(req->object,
+ 			cachefiles_obj_get_ondemand_fd);
+@@ -224,35 +274,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+ 	if (ret < 0)
+ 		goto err;
+ 
+-	fd = get_unused_fd_flags(O_WRONLY);
+-	if (fd < 0) {
+-		ret = fd;
++	anon_file->fd = get_unused_fd_flags(O_WRONLY);
++	if (anon_file->fd < 0) {
++		ret = anon_file->fd;
+ 		goto err_free_id;
+ 	}
+ 
+-	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
+-				  object, O_WRONLY);
+-	if (IS_ERR(file)) {
+-		ret = PTR_ERR(file);
++	anon_file->file = anon_inode_getfile("[cachefiles]",
++				&cachefiles_ondemand_fd_fops, object, O_WRONLY);
++	if (IS_ERR(anon_file->file)) {
++		ret = PTR_ERR(anon_file->file);
+ 		goto err_put_fd;
+ 	}
+ 
+-	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+-	fd_install(fd, file);
++	spin_lock(&object->ondemand->lock);
++	if (object->ondemand->ondemand_id > 0) {
++		spin_unlock(&object->ondemand->lock);
++		/* Pair with check in cachefiles_ondemand_fd_release(). */
++		anon_file->file->private_data = NULL;
++		ret = -EEXIST;
++		goto err_put_file;
++	}
++
++	anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
+ 
+ 	load = (void *)req->msg.data;
+-	load->fd = fd;
++	load->fd = anon_file->fd;
+ 	object->ondemand->ondemand_id = object_id;
++	spin_unlock(&object->ondemand->lock);
+ 
+ 	cachefiles_get_unbind_pincount(cache);
+ 	trace_cachefiles_ondemand_open(object, &req->msg, load);
+ 	return 0;
+ 
++err_put_file:
++	fput(anon_file->file);
++	anon_file->file = NULL;
+ err_put_fd:
+-	put_unused_fd(fd);
++	put_unused_fd(anon_file->fd);
++	anon_file->fd = ret;
+ err_free_id:
+ 	xa_erase(&cache->ondemand_ids, object_id);
+ err:
++	spin_lock(&object->ondemand->lock);
++	/* Avoid marking an opened object as closed. */
++	if (object->ondemand->ondemand_id <= 0)
++		cachefiles_ondemand_set_object_close(object);
++	spin_unlock(&object->ondemand->lock);
+ 	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
+ 	return ret;
+ }
+@@ -299,9 +367,9 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ {
+ 	struct cachefiles_req *req;
+ 	struct cachefiles_msg *msg;
+-	unsigned long id = 0;
+ 	size_t n;
+ 	int ret = 0;
++	struct ondemand_anon_file anon_file;
+ 	XA_STATE(xas, &cache->reqs, cache->req_id_next);
+ 
+ 	xa_lock(&cache->reqs);
+@@ -330,42 +398,45 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 
+ 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+ 	cache->req_id_next = xas.xa_index + 1;
++	refcount_inc(&req->ref);
++	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
+ 	xa_unlock(&cache->reqs);
+ 
+-	id = xas.xa_index;
+-
+ 	if (msg->opcode == CACHEFILES_OP_OPEN) {
+-		ret = cachefiles_ondemand_get_fd(req);
+-		if (ret) {
+-			cachefiles_ondemand_set_object_close(req->object);
+-			goto error;
+-		}
++		ret = cachefiles_ondemand_get_fd(req, &anon_file);
++		if (ret)
++			goto out;
+ 	}
+ 
+-	msg->msg_id = id;
++	msg->msg_id = xas.xa_index;
+ 	msg->object_id = req->object->ondemand->ondemand_id;
+ 
+-	if (copy_to_user(_buffer, msg, n) != 0) {
++	if (copy_to_user(_buffer, msg, n) != 0)
+ 		ret = -EFAULT;
+-		goto err_put_fd;
+-	}
+ 
+-	/* CLOSE request has no reply */
+-	if (msg->opcode == CACHEFILES_OP_CLOSE) {
+-		xa_erase(&cache->reqs, id);
+-		complete(&req->done);
++	if (msg->opcode == CACHEFILES_OP_OPEN) {
++		if (ret < 0) {
++			fput(anon_file.file);
++			put_unused_fd(anon_file.fd);
++			goto out;
++		}
++		fd_install(anon_file.fd, anon_file.file);
+ 	}
+-
+-	return n;
+-
+-err_put_fd:
+-	if (msg->opcode == CACHEFILES_OP_OPEN)
+-		close_fd(((struct cachefiles_open *)msg->data)->fd);
+-error:
+-	xa_erase(&cache->reqs, id);
+-	req->error = ret;
+-	complete(&req->done);
+-	return ret;
++out:
++	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
++	/* Remove the request on error; CLOSE requests have no reply */
++	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
++		xas_reset(&xas);
++		xas_lock(&xas);
++		if (xas_load(&xas) == req) {
++			req->error = ret;
++			complete(&req->done);
++			xas_store(&xas, NULL);
++		}
++		xas_unlock(&xas);
++	}
++	cachefiles_req_put(req);
++	return ret ? ret : n;
+ }
+ 
+ typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
+@@ -395,6 +466,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		goto out;
+ 	}
+ 
++	refcount_set(&req->ref, 1);
+ 	req->object = object;
+ 	init_completion(&req->done);
+ 	req->msg.opcode = opcode;
+@@ -456,7 +528,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 	wake_up_all(&cache->daemon_pollwq);
+ 	wait_for_completion(&req->done);
+ 	ret = req->error;
+-	kfree(req);
++	cachefiles_req_put(req);
+ 	return ret;
+ out:
+ 	/* Reset the object to close state in error handling path.
+@@ -578,6 +650,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
+ 		return -ENOMEM;
+ 
+ 	object->ondemand->object = object;
++	spin_lock_init(&object->ondemand->lock);
+ 	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
+ 	return 0;
+ }
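Two fixes land in ondemand.c above: requests gain a refcount so the submitter (which sleeps in cachefiles_ondemand_send_req()) and the daemon reader each hold the request independently, and copen re-checks ondemand_id under the new per-object spinlock so a maliciously closed anonymous fd fails the request instead of leaving the daemon stuck. The refcount half, reduced to a userspace sketch with C11 atomics standing in for refcount_t:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int ref;		/* stands in for refcount_t */
	int error;
};

static struct req *req_alloc(void)
{
	struct req *req = calloc(1, sizeof(*req));

	if (req)
		atomic_init(&req->ref, 1);	/* submitter's reference */
	return req;
}

static void req_get(struct req *req)	/* reader takes its own ref */
{
	atomic_fetch_add_explicit(&req->ref, 1, memory_order_relaxed);
}

static void req_put(struct req *req)
{
	/* acq_rel so the last dropper observes all prior writes */
	if (atomic_fetch_sub_explicit(&req->ref, 1,
				      memory_order_acq_rel) == 1)
		free(req);
}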
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 0fb7afac298e1..9987055293b35 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+ 
+       size_check:
+ 	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
++		int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
++
+ 		printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+-				     ea_buf->xattr, ea_size, 1);
++				     ea_buf->xattr, size, 1);
+ 		ea_release(inode, ea_buf);
+ 		rc = -EIO;
+ 		goto clean_up;
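The jfs one-liner above is a defensive clamp: the two sizes disagree by definition at that point, and one of them comes from possibly corrupted on-disk metadata, so the hex dump is bounded by the smaller of the two. The same clamp in isolation, as a hypothetical helper:

#include <stdio.h>

/* Dump at most the bytes we can safely read, no matter what the
 * untrusted size field claims. */
static void dump_bounded(const unsigned char *buf, int held, int claimed)
{
	int n = held < claimed ? held : claimed;

	for (int i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	putchar('\n');
}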
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ac505671efbdb..bdd6cb33a3708 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1802,9 +1802,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+ 		if (parent != READ_ONCE(dentry->d_parent))
+ 			return -ECHILD;
+ 	} else {
+-		/* Wait for unlink to complete */
++		/* Wait for unlink to complete - see unblock_revalidate() */
+ 		wait_var_event(&dentry->d_fsdata,
+-			       dentry->d_fsdata != NFS_FSDATA_BLOCKED);
++			       smp_load_acquire(&dentry->d_fsdata)
++			       != NFS_FSDATA_BLOCKED);
+ 		parent = dget_parent(dentry);
+ 		ret = reval(d_inode(parent), dentry, flags);
+ 		dput(parent);
+@@ -1817,6 +1818,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+ 	return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
+ }
+ 
++static void block_revalidate(struct dentry *dentry)
++{
++	/* old devname - just in case */
++	kfree(dentry->d_fsdata);
++
++	/* Any new reference that could lead to an open
++	 * will take ->d_lock in lookup_open() -> d_lookup().
++	 * Holding this lock ensures we cannot race with
++	 * __nfs_lookup_revalidate() and removes any need
++	 * for further barriers.
++	 */
++	lockdep_assert_held(&dentry->d_lock);
++
++	dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++}
++
++static void unblock_revalidate(struct dentry *dentry)
++{
++	/* store_release ensures wait_var_event() sees the update */
++	smp_store_release(&dentry->d_fsdata, NULL);
++	wake_up_var(&dentry->d_fsdata);
++}
++
+ /*
+  * A weaker form of d_revalidate for revalidating just the d_inode(dentry)
+  * when we don't really care about the dentry name. This is called when a
+@@ -2501,15 +2525,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
+ 		spin_unlock(&dentry->d_lock);
+ 		goto out;
+ 	}
+-	/* old devname */
+-	kfree(dentry->d_fsdata);
+-	dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++	block_revalidate(dentry);
+ 
+ 	spin_unlock(&dentry->d_lock);
+ 	error = nfs_safe_remove(dentry);
+ 	nfs_dentry_remove_handle_error(dir, dentry, error);
+-	dentry->d_fsdata = NULL;
+-	wake_up_var(&dentry->d_fsdata);
++	unblock_revalidate(dentry);
+ out:
+ 	trace_nfs_unlink_exit(dir, dentry, error);
+ 	return error;
+@@ -2616,8 +2637,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
+ {
+ 	struct dentry *new_dentry = data->new_dentry;
+ 
+-	new_dentry->d_fsdata = NULL;
+-	wake_up_var(&new_dentry->d_fsdata);
++	unblock_revalidate(new_dentry);
+ }
+ 
+ /*
+@@ -2679,11 +2699,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 		if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
+ 		    WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
+ 			goto out;
+-		if (new_dentry->d_fsdata) {
+-			/* old devname */
+-			kfree(new_dentry->d_fsdata);
+-			new_dentry->d_fsdata = NULL;
+-		}
+ 
+ 		spin_lock(&new_dentry->d_lock);
+ 		if (d_count(new_dentry) > 2) {
+@@ -2705,7 +2720,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 			new_dentry = dentry;
+ 			new_inode = NULL;
+ 		} else {
+-			new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
++			block_revalidate(new_dentry);
+ 			must_unblock = true;
+ 			spin_unlock(&new_dentry->d_lock);
+ 		}
+@@ -2717,6 +2732,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ 	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
+ 				must_unblock ? nfs_unblock_rename : NULL);
+ 	if (IS_ERR(task)) {
++		if (must_unblock)
++			unblock_revalidate(new_dentry);
+ 		error = PTR_ERR(task);
+ 		goto out;
+ 	}
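The new helpers make the blocking protocol explicit: block_revalidate() publishes NFS_FSDATA_BLOCKED under ->d_lock, and unblock_revalidate() clears it with smp_store_release() so the smp_load_acquire() in the waiter's condition observes everything written before the unblock. A userspace sketch of that release/acquire pairing, with C11 atomics in place of the kernel primitives and a busy-wait in place of wait_var_event():

#include <stdatomic.h>
#include <stddef.h>

#define BLOCKED ((void *)1)

static _Atomic(void *) fsdata = BLOCKED;

static void unblock(void)
{
	/* release: everything before this store is visible to acquirers */
	atomic_store_explicit(&fsdata, NULL, memory_order_release);
	/* the kernel would wake_up_var(&fsdata) here */
}

static void wait_unblocked(void)
{
	/* acquire: pairs with the release store above */
	while (atomic_load_explicit(&fsdata, memory_order_acquire) == BLOCKED)
		;	/* wait_var_event() sleeps instead of spinning */
}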
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c93c12063b3af..3a816c4a6d5e2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4023,6 +4023,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
+ 	}
+ }
+ 
++static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
++				   struct nfs4_pathname *path2)
++{
++	int i;
++
++	if (path1->ncomponents != path2->ncomponents)
++		return false;
++	for (i = 0; i < path1->ncomponents; i++) {
++		if (path1->components[i].len != path2->components[i].len)
++			return false;
++		if (memcmp(path1->components[i].data, path2->components[i].data,
++				path1->components[i].len))
++			return false;
++	}
++	return true;
++}
++
+ static int _nfs4_discover_trunking(struct nfs_server *server,
+ 				   struct nfs_fh *fhandle)
+ {
+@@ -4056,9 +4073,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+ 	if (status)
+ 		goto out_free_3;
+ 
+-	for (i = 0; i < locations->nlocations; i++)
++	for (i = 0; i < locations->nlocations; i++) {
++		if (!_is_same_nfs4_pathname(&locations->fs_path,
++					&locations->locations[i].rootpath))
++			continue;
+ 		test_fs_location_for_trunking(&locations->locations[i], clp,
+ 					      server);
++	}
+ out_free_3:
+ 	kfree(locations->fattr);
+ out_free_2:
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index 40fecf7b224f2..0b75305fb5f53 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -573,7 +573,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
+ 		_fh_update(fhp, exp, dentry);
+ 	if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
+ 		fh_put(fhp);
+-		return nfserr_opnotsupp;
++		return nfserr_stale;
+ 	}
+ 
+ 	return 0;
+@@ -599,7 +599,7 @@ fh_update(struct svc_fh *fhp)
+ 
+ 	_fh_update(fhp, fhp->fh_export, dentry);
+ 	if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
+-		return nfserr_opnotsupp;
++		return nfserr_stale;
+ 	return 0;
+ out_bad:
+ 	printk(KERN_ERR "fh_update: fh not verified!\n");
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 0da8e7bd32616..ccc57038a9779 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1936,6 +1936,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ 
+ 	inode_lock(inode);
+ 
++	/* Wait for all existing dio workers; newcomers will block on i_rwsem */
++	inode_dio_wait(inode);
+ 	/*
+ 	 * This prevents concurrent writes on other nodes
+ 	 */
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 9221a33f917b8..4d1ea8703fcdf 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
+ 	fe->i_last_eb_blk = 0;
+ 	strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ 	fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
+-	ktime_get_real_ts64(&ts);
++	ktime_get_coarse_real_ts64(&ts);
+ 	fe->i_atime = fe->i_ctime = fe->i_mtime =
+ 		cpu_to_le64(ts.tv_sec);
+ 	fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
+@@ -797,6 +797,7 @@ static int ocfs2_link(struct dentry *old_dentry,
+ 	ocfs2_set_links_count(fe, inode->i_nlink);
+ 	fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ 	fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
++	ocfs2_update_inode_fsync_trans(handle, inode, 0);
+ 	ocfs2_journal_dirty(handle, fe_bh);
+ 
+ 	err = ocfs2_add_entry(handle, dentry, inode,
+@@ -993,6 +994,7 @@ static int ocfs2_unlink(struct inode *dir,
+ 		drop_nlink(inode);
+ 	drop_nlink(inode);
+ 	ocfs2_set_links_count(fe, inode->i_nlink);
++	ocfs2_update_inode_fsync_trans(handle, inode, 0);
+ 	ocfs2_journal_dirty(handle, fe_bh);
+ 
+ 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index 1fb213f379a5b..d06607a1f137a 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
+ 		/* leave now if filled buffer already */
+ 		if (!iov_iter_count(iter))
+ 			return acc;
++
++		cond_resched();
+ 	}
+ 
+ 	list_for_each_entry(m, &vmcore_list, list) {
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index b6c5a8ea38879..e7e07891781b3 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -630,6 +630,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
+ 		return name;
+ 	}
+ 
++	if (*name == '\\') {
++		pr_err("leading slash is not allowed in the path name\n");
++		kfree(name);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	ksmbd_conv_path_to_unix(name);
+ 	ksmbd_strip_last_slash(name);
+ 	return name;
+@@ -2361,7 +2367,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ 			if (rc > 0) {
+ 				rc = ksmbd_vfs_remove_xattr(idmap,
+ 							    path,
+-							    attr_name);
++							    attr_name,
++							    get_write);
+ 
+ 				if (rc < 0) {
+ 					ksmbd_debug(SMB,
+@@ -2376,7 +2383,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ 		} else {
+ 			rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
+ 						le16_to_cpu(eabuf->EaValueLength),
+-						0, true);
++						0, get_write);
+ 			if (rc < 0) {
+ 				ksmbd_debug(SMB,
+ 					    "ksmbd_vfs_setxattr is failed(%d)\n",
+@@ -2468,7 +2475,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
+ 		    !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
+ 			     STREAM_PREFIX_LEN)) {
+ 			err = ksmbd_vfs_remove_xattr(idmap, path,
+-						     name);
++						     name, true);
+ 			if (err)
+ 				ksmbd_debug(SMB, "remove xattr failed : %s\n",
+ 					    name);
+@@ -2842,20 +2849,11 @@ int smb2_open(struct ksmbd_work *work)
+ 	}
+ 
+ 	if (req->NameLength) {
+-		if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
+-		    *(char *)req->Buffer == '\\') {
+-			pr_err("not allow directory name included leading slash\n");
+-			rc = -EINVAL;
+-			goto err_out2;
+-		}
+-
+ 		name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
+ 				     le16_to_cpu(req->NameLength),
+ 				     work->conn->local_nls);
+ 		if (IS_ERR(name)) {
+ 			rc = PTR_ERR(name);
+-			if (rc != -ENOMEM)
+-				rc = -ENOENT;
+ 			name = NULL;
+ 			goto err_out2;
+ 		}
+diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
+index 51b1b0bed616e..9e859ba010cf1 100644
+--- a/fs/smb/server/vfs.c
++++ b/fs/smb/server/vfs.c
+@@ -1058,16 +1058,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+ }
+ 
+ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+-			   const struct path *path, char *attr_name)
++			   const struct path *path, char *attr_name,
++			   bool get_write)
+ {
+ 	int err;
+ 
+-	err = mnt_want_write(path->mnt);
+-	if (err)
+-		return err;
++	if (get_write == true) {
++		err = mnt_want_write(path->mnt);
++		if (err)
++			return err;
++	}
+ 
+ 	err = vfs_removexattr(idmap, path->dentry, attr_name);
+-	mnt_drop_write(path->mnt);
++
++	if (get_write == true)
++		mnt_drop_write(path->mnt);
+ 
+ 	return err;
+ }
+@@ -1380,7 +1385,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
+ 		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+ 
+ 		if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
+-			err = ksmbd_vfs_remove_xattr(idmap, path, name);
++			err = ksmbd_vfs_remove_xattr(idmap, path, name, true);
+ 			if (err)
+ 				ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
+ 		}
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index cfe1c8092f230..cb76f4b5bafe8 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -114,7 +114,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
+ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
+ 				size_t *xattr_stream_name_size, int s_type);
+ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
+-			   const struct path *path, char *attr_name);
++			   const struct path *path, char *attr_name,
++			   bool get_write);
+ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
+ 			       unsigned int flags, struct path *parent_path,
+ 			       struct path *path, bool caseless);
+diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
+index 6cb599cd287ee..8b2e37c8716ed 100644
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -254,7 +254,8 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
+ 		ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
+ 		err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
+ 					     &filp->f_path,
+-					     fp->stream.name);
++					     fp->stream.name,
++					     true);
+ 		if (err)
+ 			pr_err("remove xattr failed : %s\n",
+ 			       fp->stream.name);
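The three ksmbd files change together: ksmbd_vfs_remove_xattr() grows a get_write flag so the smb2_set_ea() path, which already holds write access to the mount, passes its own get_write down instead of taking mnt_want_write() a second time, while standalone callers pass true. A sketch of the convention, with hypothetical names and assumed helper signatures:

#include <stdbool.h>

struct mnt;
int mnt_want_write(struct mnt *m);	/* assumed kernel-style helpers */
void mnt_drop_write(struct mnt *m);
int do_removexattr(struct mnt *m, const char *name);

/* get_write tells the helper whether it must take write access itself
 * or whether the caller already holds it. */
static int remove_xattr(struct mnt *m, const char *name, bool get_write)
{
	int err = 0;

	if (get_write)
		err = mnt_want_write(m);
	if (err)
		return err;

	err = do_removexattr(m, name);

	if (get_write)
		mnt_drop_write(m);
	return err;
}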
+diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
+index 55a40a730b10c..129d0f54ba62f 100644
+--- a/fs/tracefs/event_inode.c
++++ b/fs/tracefs/event_inode.c
+@@ -305,33 +305,60 @@ static const struct file_operations eventfs_file_operations = {
+ 	.llseek		= generic_file_llseek,
+ };
+ 
+-/*
+- * On a remount of tracefs, if UID or GID options are set, then
+- * the mount point inode permissions should be used.
+- * Reset the saved permission flags appropriately.
+- */
+-void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
++static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid,
++			      bool update_gid, kgid_t gid, int level)
+ {
+-	struct eventfs_inode *ei = ti->private;
++	struct eventfs_inode *ei_child;
+ 
+-	if (!ei)
++	/* Update events/<system>/<event> */
++	if (WARN_ON_ONCE(level > 3))
+ 		return;
+ 
+-	if (update_uid)
++	if (update_uid) {
+ 		ei->attr.mode &= ~EVENTFS_SAVE_UID;
++		ei->attr.uid = uid;
++	}
+ 
+-	if (update_gid)
++	if (update_gid) {
+ 		ei->attr.mode &= ~EVENTFS_SAVE_GID;
++		ei->attr.gid = gid;
++	}
++
++	list_for_each_entry(ei_child, &ei->children, list) {
++		eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1);
++	}
+ 
+ 	if (!ei->entry_attrs)
+ 		return;
+ 
+ 	for (int i = 0; i < ei->nr_entries; i++) {
+-		if (update_uid)
++		if (update_uid) {
+ 			ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
+-		if (update_gid)
++			ei->entry_attrs[i].uid = uid;
++		}
++		if (update_gid) {
+ 			ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
++			ei->entry_attrs[i].gid = gid;
++		}
+ 	}
++
++}
++
++/*
++ * On a remount of tracefs, if UID or GID options are set, then
++ * the mount point inode permissions should be used.
++ * Reset the saved permission flags appropriately.
++ */
++void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
++{
++	struct eventfs_inode *ei = ti->private;
++
++	/* Only the events directory does the updates */
++	if (!ei || !ei->is_events || ei->is_freed)
++		return;
++
++	eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid,
++			  update_gid, ti->vfs_inode.i_gid, 0);
+ }
+ 
+ /* Return the evenfs_inode of the "events" directory */
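eventfs_set_attrs() is new above: instead of only clearing the EVENTFS_SAVE_UID/GID flags on the top-level events inode, a remount now pushes the mount's uid/gid down the whole events/<system>/<event> tree, and the level guard caps the walk at that known depth. The skeleton of the walk, with hypothetical types:

#include <assert.h>
#include <stddef.h>

struct node {
	unsigned int uid, gid;
	struct node **children;
	size_t nr_children;
};

static void push_ids(struct node *n, unsigned int uid, unsigned int gid,
		     int level)
{
	assert(level <= 3);	/* events/<system>/<event> only */

	n->uid = uid;
	n->gid = gid;
	for (size_t i = 0; i < n->nr_children; i++)
		push_ids(n->children[i], uid, gid, level + 1);
}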
+diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
+index 4453906105ca1..c2f5a855512f3 100644
+--- a/include/drm/bridge/aux-bridge.h
++++ b/include/drm/bridge/aux-bridge.h
+@@ -33,7 +33,7 @@ static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct devic
+ 	return NULL;
+ }
+ 
+-static inline int devm_drm_dp_hpd_bridge_add(struct auxiliary_device *adev)
++static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+ {
+ 	return 0;
+ }
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 9b19d8bd520af..6c9145abc7e2b 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -851,7 +851,6 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ 			     struct drm_dp_mst_topology_state *mst_state,
+ 			     struct drm_dp_mst_atomic_payload *payload);
+ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+-			     struct drm_atomic_state *state,
+ 			     struct drm_dp_mst_atomic_payload *payload);
+ void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ 				 struct drm_dp_mst_topology_state *mst_state,
+diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
+deleted file mode 100644
+index d21838835abda..0000000000000
+--- a/include/linux/amd-pstate.h
++++ /dev/null
+@@ -1,127 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * linux/include/linux/amd-pstate.h
+- *
+- * Copyright (C) 2022 Advanced Micro Devices, Inc.
+- *
+- * Author: Meng Li <li.meng@amd.com>
+- */
+-
+-#ifndef _LINUX_AMD_PSTATE_H
+-#define _LINUX_AMD_PSTATE_H
+-
+-#include <linux/pm_qos.h>
+-
+-#define AMD_CPPC_EPP_PERFORMANCE		0x00
+-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE	0x80
+-#define AMD_CPPC_EPP_BALANCE_POWERSAVE		0xBF
+-#define AMD_CPPC_EPP_POWERSAVE			0xFF
+-
+-/*********************************************************************
+- *                        AMD P-state INTERFACE                       *
+- *********************************************************************/
+-/**
+- * struct  amd_aperf_mperf
+- * @aperf: actual performance frequency clock count
+- * @mperf: maximum performance frequency clock count
+- * @tsc:   time stamp counter
+- */
+-struct amd_aperf_mperf {
+-	u64 aperf;
+-	u64 mperf;
+-	u64 tsc;
+-};
+-
+-/**
+- * struct amd_cpudata - private CPU data for AMD P-State
+- * @cpu: CPU number
+- * @req: constraint request to apply
+- * @cppc_req_cached: cached performance request hints
+- * @highest_perf: the maximum performance an individual processor may reach,
+- *		  assuming ideal conditions
+- *		  For platforms that do not support the preferred core feature, the
+- *		  highest_pef may be configured with 166 or 255, to avoid max frequency
+- *		  calculated wrongly. we take the fixed value as the highest_perf.
+- * @nominal_perf: the maximum sustained performance level of the processor,
+- *		  assuming ideal operating conditions
+- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+- *			   savings are achieved
+- * @lowest_perf: the absolute lowest performance level of the processor
+- * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
+- * 		  priority.
+- * @max_freq: the frequency that mapped to highest_perf
+- * @min_freq: the frequency that mapped to lowest_perf
+- * @nominal_freq: the frequency that mapped to nominal_perf
+- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
+- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+- * @prev: Last Aperf/Mperf/tsc count value read from register
+- * @freq: current cpu frequency value
+- * @boost_supported: check whether the Processor or SBIOS supports boost mode
+- * @hw_prefcore: check whether HW supports preferred core featue.
+- * 		  Only when hw_prefcore and early prefcore param are true,
+- * 		  AMD P-State driver supports preferred core featue.
+- * @epp_policy: Last saved policy used to set energy-performance preference
+- * @epp_cached: Cached CPPC energy-performance preference value
+- * @policy: Cpufreq policy value
+- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
+- *
+- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
+- * represents all the attributes and goals that AMD P-State requests at runtime.
+- */
+-struct amd_cpudata {
+-	int	cpu;
+-
+-	struct	freq_qos_request req[2];
+-	u64	cppc_req_cached;
+-
+-	u32	highest_perf;
+-	u32	nominal_perf;
+-	u32	lowest_nonlinear_perf;
+-	u32	lowest_perf;
+-	u32     prefcore_ranking;
+-	u32     min_limit_perf;
+-	u32     max_limit_perf;
+-	u32     min_limit_freq;
+-	u32     max_limit_freq;
+-
+-	u32	max_freq;
+-	u32	min_freq;
+-	u32	nominal_freq;
+-	u32	lowest_nonlinear_freq;
+-
+-	struct amd_aperf_mperf cur;
+-	struct amd_aperf_mperf prev;
+-
+-	u64	freq;
+-	bool	boost_supported;
+-	bool	hw_prefcore;
+-
+-	/* EPP feature related attributes*/
+-	s16	epp_policy;
+-	s16	epp_cached;
+-	u32	policy;
+-	u64	cppc_cap1_cached;
+-	bool	suspended;
+-};
+-
+-/*
+- * enum amd_pstate_mode - driver working mode of amd pstate
+- */
+-enum amd_pstate_mode {
+-	AMD_PSTATE_UNDEFINED = 0,
+-	AMD_PSTATE_DISABLE,
+-	AMD_PSTATE_PASSIVE,
+-	AMD_PSTATE_ACTIVE,
+-	AMD_PSTATE_GUIDED,
+-	AMD_PSTATE_MAX,
+-};
+-
+-static const char * const amd_pstate_mode_string[] = {
+-	[AMD_PSTATE_UNDEFINED]   = "undefined",
+-	[AMD_PSTATE_DISABLE]     = "disable",
+-	[AMD_PSTATE_PASSIVE]     = "passive",
+-	[AMD_PSTATE_ACTIVE]      = "active",
+-	[AMD_PSTATE_GUIDED]      = "guided",
+-	NULL,
+-};
+-#endif /* _LINUX_AMD_PSTATE_H */
+diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
+index 956bcba5dbf2f..2f9d36b72bd89 100644
+--- a/include/linux/atomic/atomic-arch-fallback.h
++++ b/include/linux/atomic/atomic-arch-fallback.h
+@@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+ 
+ /**
+  * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: int value to add
++ * @i: int value to subtract
+  * @v: pointer to atomic_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+ 
+ /**
+  * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: s64 value to add
++ * @i: s64 value to subtract
+  * @v: pointer to atomic64_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
+ }
+ 
+ #endif /* _LINUX_ATOMIC_FALLBACK_H */
+-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
++// b565db590afeeff0d7c9485ccbca5bb6e155749f
+diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
+index debd487fe9716..9409a6ddf3e0d 100644
+--- a/include/linux/atomic/atomic-instrumented.h
++++ b/include/linux/atomic/atomic-instrumented.h
+@@ -1349,7 +1349,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+ 
+ /**
+  * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: int value to add
++ * @i: int value to subtract
+  * @v: pointer to atomic_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -2927,7 +2927,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+ 
+ /**
+  * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: s64 value to add
++ * @i: s64 value to subtract
+  * @v: pointer to atomic64_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -4505,7 +4505,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+ 
+ /**
+  * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: long value to add
++ * @i: long value to subtract
+  * @v: pointer to atomic_long_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
+ 
+ 
+ #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+-// ce5b65e0f1f8a276268b667194581d24bed219d4
++// 8829b337928e9508259079d32581775ececd415b
+diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
+index 3ef844b3ab8a3..f86b29d908775 100644
+--- a/include/linux/atomic/atomic-long.h
++++ b/include/linux/atomic/atomic-long.h
+@@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+ 
+ /**
+  * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+- * @i: long value to add
++ * @i: long value to subtract
+  * @v: pointer to atomic_long_t
+  *
+  * Atomically updates @v to (@v - @i) with full ordering.
+@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
+ }
+ 
+ #endif /* _LINUX_ATOMIC_LONG_H */
+-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
++// eadf183c3600b8b92b91839dd3be6bcc560c752d
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index ac333ea81d319..327d7f43c1fba 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -653,7 +653,7 @@ struct io_kiocb {
+ 	struct io_rsrc_node		*rsrc_node;
+ 
+ 	atomic_t			refs;
+-	atomic_t			poll_refs;
++	bool				cancel_seq_set;
+ 	struct io_task_work		io_task_work;
+ 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ 	struct hlist_node		hash_node;
+@@ -662,6 +662,7 @@ struct io_kiocb {
+ 	/* opcode allocated if it needs to store data for async defer */
+ 	void				*async_data;
+ 	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
++	atomic_t			poll_refs;
+ 	struct io_kiocb			*link;
+ 	/* custom credentials, valid IFF REQ_F_CREDS is set */
+ 	const struct cred		*creds;
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 2e925b5eba534..3b67d59a36bf9 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -1537,7 +1537,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ static inline struct iommu_sva *
+ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+ {
+-	return NULL;
++	return ERR_PTR(-ENODEV);
+ }
+ 
+ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index b851ba415e03f..1068a7318d897 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -55,21 +55,47 @@ static inline void kcov_remote_start_usb(u64 id)
+ 
+ /*
+  * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+- * work around for kcov's lack of nested remote coverage sections support in
+- * task context. Adding support for nested sections is tracked in:
+- * https://bugzilla.kernel.org/show_bug.cgi?id=210337
++ * workaround for KCOV's lack of nested remote coverage sections support.
++ *
++ * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
++ *
++ * kcov_remote_start_usb_softirq():
++ *
++ * 1. Only collects coverage when called in the softirq context. This allows
++ *    avoiding nested remote coverage collection sections in the task context.
++ *    For example, USB/IP calls usb_hcd_giveback_urb() in the task context
++ *    within an existing remote coverage collection section. Thus, KCOV should
++ *    not attempt to start collecting coverage within the coverage collection
++ *    section in __usb_hcd_giveback_urb() in this case.
++ *
++ * 2. Disables interrupts for the duration of the coverage collection section.
++ *    This allows avoiding nested remote coverage collection sections in the
++ *    softirq context (a softirq might occur during the execution of a work in
++ *    the BH workqueue, which runs with in_serving_softirq() > 0).
++ *    For example, usb_giveback_urb_bh() runs in the BH workqueue with
++ *    interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
++ *    the middle of its remote coverage collection section, and the interrupt
++ *    handler might invoke __usb_hcd_giveback_urb() again.
+  */
+ 
+-static inline void kcov_remote_start_usb_softirq(u64 id)
++static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+ {
+-	if (in_serving_softirq())
++	unsigned long flags = 0;
++
++	if (in_serving_softirq()) {
++		local_irq_save(flags);
+ 		kcov_remote_start_usb(id);
++	}
++
++	return flags;
+ }
+ 
+-static inline void kcov_remote_stop_softirq(void)
++static inline void kcov_remote_stop_softirq(unsigned long flags)
+ {
+-	if (in_serving_softirq())
++	if (in_serving_softirq()) {
+ 		kcov_remote_stop();
++		local_irq_restore(flags);
++	}
+ }
+ 
+ #ifdef CONFIG_64BIT
+@@ -103,8 +129,11 @@ static inline u64 kcov_common_handle(void)
+ }
+ static inline void kcov_remote_start_common(u64 id) {}
+ static inline void kcov_remote_start_usb(u64 id) {}
+-static inline void kcov_remote_start_usb_softirq(u64 id) {}
+-static inline void kcov_remote_stop_softirq(void) {}
++static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
++{
++	return 0;
++}
++static inline void kcov_remote_stop_softirq(unsigned long flags) {}
+ 
+ #endif /* CONFIG_KCOV */
+ #endif /* _LINUX_KCOV_H */
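The call sites are not part of this hunk, so the following is only an inferred usage sketch: the flags value returned by kcov_remote_start_usb_softirq() must be carried to kcov_remote_stop_softirq() so the stop side restores the exact interrupt state. Here complete_urb() and bus_id are placeholders; the comment above names __usb_hcd_giveback_urb() as the real caller.

unsigned long flags;

flags = kcov_remote_start_usb_softirq(bus_id);	/* may disable IRQs */
complete_urb();					/* covered section */
kcov_remote_stop_softirq(flags);		/* restores IRQ state */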
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 060835bb82d52..f31bd304df451 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -461,10 +461,8 @@ static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) {
+ 
+ extern bool kexec_file_dbg_print;
+ 
+-#define kexec_dprintk(fmt, ...)					\
+-	printk("%s" fmt,					\
+-	       kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG,	\
+-	       ##__VA_ARGS__)
++#define kexec_dprintk(fmt, arg...) \
++        do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+ 
+ #else /* !CONFIG_KEXEC_CORE */
+ struct pt_regs;
+diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
+index fb724c65c77bc..5ce0cd76956e0 100644
+--- a/include/linux/pse-pd/pse.h
++++ b/include/linux/pse-pd/pse.h
+@@ -114,14 +114,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
+ 					 struct netlink_ext_ack *extack,
+ 					 struct pse_control_status *status)
+ {
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ static inline int pse_ethtool_set_config(struct pse_control *psec,
+ 					 struct netlink_ext_ack *extack,
+ 					 const struct pse_control_config *config)
+ {
+-	return -ENOTSUPP;
++	return -EOPNOTSUPP;
+ }
+ 
+ #endif
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 970101184cf10..17b2a7baa4d8f 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -2113,18 +2113,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+ {
+ 	u16 max_latency;
+ 
+-	if (min > max || min < 6 || max > 3200)
++	if (min > max) {
++		BT_WARN("min %d > max %d", min, max);
+ 		return -EINVAL;
++	}
++
++	if (min < 6) {
++		BT_WARN("min %d < 6", min);
++		return -EINVAL;
++	}
++
++	if (max > 3200) {
++		BT_WARN("max %d > 3200", max);
++		return -EINVAL;
++	}
++
++	if (to_multiplier < 10) {
++		BT_WARN("to_multiplier %d < 10", to_multiplier);
++		return -EINVAL;
++	}
+ 
+-	if (to_multiplier < 10 || to_multiplier > 3200)
++	if (to_multiplier > 3200) {
++		BT_WARN("to_multiplier %d > 3200", to_multiplier);
+ 		return -EINVAL;
++	}
+ 
+-	if (max >= to_multiplier * 8)
++	if (max >= to_multiplier * 8) {
++		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
+ 		return -EINVAL;
++	}
+ 
+ 	max_latency = (to_multiplier * 4 / max) - 1;
+-	if (latency > 499 || latency > max_latency)
++	if (latency > 499) {
++		BT_WARN("latency %d > 499", latency);
+ 		return -EINVAL;
++	}
++
++	if (latency > max_latency) {
++		BT_WARN("latency %d > max_latency %d", latency, max_latency);
++		return -EINVAL;
++	}
+ 
+ 	return 0;
+ }
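A worked example of the bounds above: with to_multiplier = 100 (a 1 s supervision timeout, in units of 10 ms) and max = 24 (a 30 ms interval, in units of 1.25 ms), max_latency = (100 * 4 / 24) - 1 = 15. A request with latency = 20 previously returned a bare -EINVAL; it now also logs "latency 20 > max_latency 15", which is the point of splitting the combined checks apart.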
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index c286cc2e766ee..cc666da626517 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -363,9 +363,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+ 
+ /* Variant of pskb_inet_may_pull().
+  */
+-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
++static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
++					 bool inner_proto_inherit)
+ {
+-	int nhlen = 0, maclen = ETH_HLEN;
++	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
+ 	__be16 type = skb->protocol;
+ 
+ 	/* Essentially this is skb_protocol(skb, true)
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 3bfb80bad1739..b45d57b5968af 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -13,6 +13,7 @@ enum rtnl_link_flags {
+ 	RTNL_FLAG_DOIT_UNLOCKED		= BIT(0),
+ 	RTNL_FLAG_BULK_DEL_SUPPORTED	= BIT(1),
+ 	RTNL_FLAG_DUMP_UNLOCKED		= BIT(2),
++	RTNL_FLAG_DUMP_SPLIT_NLM_DONE	= BIT(3),	/* legacy behavior */
+ };
+ 
+ enum rtnl_kinds {
+diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
+index 0e75b9277c8c6..e3b6ce3cbf883 100644
+--- a/include/scsi/scsi_transport_sas.h
++++ b/include/scsi/scsi_transport_sas.h
+@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
+ void sas_disable_tlr(struct scsi_device *);
+ void sas_enable_tlr(struct scsi_device *);
+ 
++bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
++
+ extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
+ extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
+ void sas_rphy_free(struct sas_rphy *);
+diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
+index cf4b98b9a9edc..7d931db02b934 100644
+--- a/include/trace/events/cachefiles.h
++++ b/include/trace/events/cachefiles.h
+@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
+ 	cachefiles_obj_see_withdrawal,
+ 	cachefiles_obj_get_ondemand_fd,
+ 	cachefiles_obj_put_ondemand_fd,
++	cachefiles_obj_get_read_req,
++	cachefiles_obj_put_read_req,
+ };
+ 
+ enum fscache_why_object_killed {
+@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
+ 	EM(cachefiles_obj_see_lookup_cookie,	"SEE lookup_cookie")	\
+ 	EM(cachefiles_obj_see_lookup_failed,	"SEE lookup_failed")	\
+ 	EM(cachefiles_obj_see_withdraw_cookie,	"SEE withdraw_cookie")	\
+-	E_(cachefiles_obj_see_withdrawal,	"SEE withdrawal")
++	EM(cachefiles_obj_see_withdrawal,	"SEE withdrawal")	\
++	EM(cachefiles_obj_get_ondemand_fd,      "GET ondemand_fd")	\
++	EM(cachefiles_obj_put_ondemand_fd,      "PUT ondemand_fd")	\
++	EM(cachefiles_obj_get_read_req,		"GET read_req")		\
++	E_(cachefiles_obj_put_read_req,		"PUT read_req")
+ 
+ #define cachefiles_coherency_traces					\
+ 	EM(cachefiles_coherency_check_aux,	"BAD aux ")		\
+diff --git a/io_uring/cancel.h b/io_uring/cancel.h
+index 76b32e65c03cd..b33995e00ba90 100644
+--- a/io_uring/cancel.h
++++ b/io_uring/cancel.h
+@@ -27,10 +27,10 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
+ 
+ static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+ {
+-	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
++	if (req->cancel_seq_set && sequence == req->work.cancel_seq)
+ 		return true;
+ 
+-	req->flags |= REQ_F_CANCEL_SEQ;
++	req->cancel_seq_set = true;
+ 	req->work.cancel_seq = sequence;
+ 	return false;
+ }
+diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
+index 318ed067dbf64..8a99aabcac2c3 100644
+--- a/io_uring/io-wq.c
++++ b/io_uring/io-wq.c
+@@ -25,10 +25,10 @@
+ #define WORKER_IDLE_TIMEOUT	(5 * HZ)
+ 
+ enum {
+-	IO_WORKER_F_UP		= 1,	/* up and active */
+-	IO_WORKER_F_RUNNING	= 2,	/* account as running */
+-	IO_WORKER_F_FREE	= 4,	/* worker on free list */
+-	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
++	IO_WORKER_F_UP		= 0,	/* up and active */
++	IO_WORKER_F_RUNNING	= 1,	/* account as running */
++	IO_WORKER_F_FREE	= 2,	/* worker on free list */
++	IO_WORKER_F_BOUND	= 3,	/* is doing bounded work */
+ };
+ 
+ enum {
+@@ -44,7 +44,8 @@ enum {
+  */
+ struct io_worker {
+ 	refcount_t ref;
+-	unsigned flags;
++	int create_index;
++	unsigned long flags;
+ 	struct hlist_nulls_node nulls_node;
+ 	struct list_head all_list;
+ 	struct task_struct *task;
+@@ -58,7 +59,6 @@ struct io_worker {
+ 
+ 	unsigned long create_state;
+ 	struct callback_head create_work;
+-	int create_index;
+ 
+ 	union {
+ 		struct rcu_head rcu;
+@@ -165,7 +165,7 @@ static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
+ 
+ static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
+ {
+-	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
++	return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
+ }
+ 
+ static void io_worker_ref_put(struct io_wq *wq)
+@@ -225,7 +225,7 @@ static void io_worker_exit(struct io_worker *worker)
+ 	wait_for_completion(&worker->ref_done);
+ 
+ 	raw_spin_lock(&wq->lock);
+-	if (worker->flags & IO_WORKER_F_FREE)
++	if (test_bit(IO_WORKER_F_FREE, &worker->flags))
+ 		hlist_nulls_del_rcu(&worker->nulls_node);
+ 	list_del_rcu(&worker->all_list);
+ 	raw_spin_unlock(&wq->lock);
+@@ -410,7 +410,7 @@ static void io_wq_dec_running(struct io_worker *worker)
+ 	struct io_wq_acct *acct = io_wq_get_acct(worker);
+ 	struct io_wq *wq = worker->wq;
+ 
+-	if (!(worker->flags & IO_WORKER_F_UP))
++	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ 		return;
+ 
+ 	if (!atomic_dec_and_test(&acct->nr_running))
+@@ -430,8 +430,8 @@ static void io_wq_dec_running(struct io_worker *worker)
+  */
+ static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
+ {
+-	if (worker->flags & IO_WORKER_F_FREE) {
+-		worker->flags &= ~IO_WORKER_F_FREE;
++	if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
++		clear_bit(IO_WORKER_F_FREE, &worker->flags);
+ 		raw_spin_lock(&wq->lock);
+ 		hlist_nulls_del_init_rcu(&worker->nulls_node);
+ 		raw_spin_unlock(&wq->lock);
+@@ -444,8 +444,8 @@ static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
+ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
+ 	__must_hold(wq->lock)
+ {
+-	if (!(worker->flags & IO_WORKER_F_FREE)) {
+-		worker->flags |= IO_WORKER_F_FREE;
++	if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
++		set_bit(IO_WORKER_F_FREE, &worker->flags);
+ 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
+ 	}
+ }
+@@ -634,7 +634,8 @@ static int io_wq_worker(void *data)
+ 	bool exit_mask = false, last_timeout = false;
+ 	char buf[TASK_COMM_LEN];
+ 
+-	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
++	set_mask_bits(&worker->flags, 0,
++		      BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));
+ 
+ 	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
+ 	set_task_comm(current, buf);
+@@ -698,11 +699,11 @@ void io_wq_worker_running(struct task_struct *tsk)
+ 
+ 	if (!worker)
+ 		return;
+-	if (!(worker->flags & IO_WORKER_F_UP))
++	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ 		return;
+-	if (worker->flags & IO_WORKER_F_RUNNING)
++	if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
+ 		return;
+-	worker->flags |= IO_WORKER_F_RUNNING;
++	set_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ 	io_wq_inc_running(worker);
+ }
+ 
+@@ -716,12 +717,12 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
+ 
+ 	if (!worker)
+ 		return;
+-	if (!(worker->flags & IO_WORKER_F_UP))
++	if (!test_bit(IO_WORKER_F_UP, &worker->flags))
+ 		return;
+-	if (!(worker->flags & IO_WORKER_F_RUNNING))
++	if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
+ 		return;
+ 
+-	worker->flags &= ~IO_WORKER_F_RUNNING;
++	clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ 	io_wq_dec_running(worker);
+ }
+ 
+@@ -735,7 +736,7 @@ static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
+ 	raw_spin_lock(&wq->lock);
+ 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
+ 	list_add_tail_rcu(&worker->all_list, &wq->all_list);
+-	worker->flags |= IO_WORKER_F_FREE;
++	set_bit(IO_WORKER_F_FREE, &worker->flags);
+ 	raw_spin_unlock(&wq->lock);
+ 	wake_up_new_task(tsk);
+ }
+@@ -841,7 +842,7 @@ static bool create_io_worker(struct io_wq *wq, int index)
+ 	init_completion(&worker->ref_done);
+ 
+ 	if (index == IO_WQ_ACCT_BOUND)
+-		worker->flags |= IO_WORKER_F_BOUND;
++		set_bit(IO_WORKER_F_BOUND, &worker->flags);
+ 
+ 	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
+ 	if (!IS_ERR(tsk)) {
+@@ -927,8 +928,12 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
+ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+ {
+ 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
+-	struct io_cb_cancel_data match;
+-	unsigned work_flags = work->flags;
++	unsigned long work_flags = work->flags;
++	struct io_cb_cancel_data match = {
++		.fn		= io_wq_work_match_item,
++		.data		= work,
++		.cancel_all	= false,
++	};
+ 	bool do_create;
+ 
+ 	/*
+@@ -966,10 +971,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
+ 		raw_spin_unlock(&wq->lock);
+ 
+ 		/* fatal condition, failed to create the first worker */
+-		match.fn		= io_wq_work_match_item,
+-		match.data		= work,
+-		match.cancel_all	= false,
+-
+ 		io_acct_cancel_pending_work(wq, acct, &match);
+ 	}
+ }
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index c170a2b8d2cf2..8a216b1d6dc1c 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2211,6 +2211,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	req->file = NULL;
+ 	req->rsrc_node = NULL;
+ 	req->task = current;
++	req->cancel_seq_set = false;
+ 
+ 	if (unlikely(opcode >= IORING_OP_LAST)) {
+ 		req->opcode = 0;
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 4818b79231ddb..87f9aa7cf9255 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -250,6 +250,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
+ 
+ 		ret = io_run_task_work_sig(ctx);
+ 		if (ret < 0) {
++			__set_current_state(TASK_RUNNING);
+ 			mutex_lock(&ctx->uring_lock);
+ 			if (list_empty(&ctx->rsrc_ref_list))
+ 				ret = 0;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index cb61d8880dbe0..52ffe33356418 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -2985,6 +2985,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
+ void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
+ {
++	WARN_ON(ops->dealloc && ops->dealloc_deferred);
+ 	atomic64_set(&link->refcnt, 1);
+ 	link->type = type;
+ 	link->id = 0;
+@@ -3043,16 +3044,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
+ /* bpf_link_free is guaranteed to be called from process context */
+ static void bpf_link_free(struct bpf_link *link)
+ {
++	const struct bpf_link_ops *ops = link->ops;
+ 	bool sleepable = false;
+ 
+ 	bpf_link_free_id(link->id);
+ 	if (link->prog) {
+ 		sleepable = link->prog->sleepable;
+ 		/* detach BPF program, clean up used resources */
+-		link->ops->release(link);
++		ops->release(link);
+ 		bpf_prog_put(link->prog);
+ 	}
+-	if (link->ops->dealloc_deferred) {
++	if (ops->dealloc_deferred) {
+ 		/* schedule BPF link deallocation; if underlying BPF program
+ 		 * is sleepable, we need to first wait for RCU tasks trace
+ 		 * sync, then go through "classic" RCU grace period
+@@ -3061,9 +3063,8 @@ static void bpf_link_free(struct bpf_link *link)
+ 			call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+ 		else
+ 			call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
+-	}
+-	if (link->ops->dealloc)
+-		link->ops->dealloc(link);
++	} else if (ops->dealloc)
++		ops->dealloc(link);
+ }
+ 
+ static void bpf_link_put_deferred(struct work_struct *work)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 724e6d7e128f3..4082d0161b2b2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5365,6 +5365,7 @@ int perf_event_release_kernel(struct perf_event *event)
+ again:
+ 	mutex_lock(&event->child_mutex);
+ 	list_for_each_entry(child, &event->child_list, child_list) {
++		void *var = NULL;
+ 
+ 		/*
+ 		 * Cannot change, child events are not migrated, see the
+@@ -5405,11 +5406,23 @@ int perf_event_release_kernel(struct perf_event *event)
+ 			 * this can't be the last reference.
+ 			 */
+ 			put_event(event);
++		} else {
++			var = &ctx->refcount;
+ 		}
+ 
+ 		mutex_unlock(&event->child_mutex);
+ 		mutex_unlock(&ctx->mutex);
+ 		put_ctx(ctx);
++
++		if (var) {
++			/*
++			 * If perf_event_free_task() has deleted all events from the
++			 * ctx while the child_mutex got released above, make sure to
++			 * notify about the preceding put_ctx().
++			 */
++			smp_mb(); /* pairs with wait_var_event() */
++			wake_up_var(var);
++		}
+ 		goto again;
+ 	}
+ 	mutex_unlock(&event->child_mutex);
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 4ba5fd3d73ae2..383fd43ac6122 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -89,7 +89,7 @@ find $cpio_dir -type f -print0 |
+ 
+ # Create archive and try to normalize metadata for reproducibility.
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+-    --owner=0 --group=0 --sort=name --numeric-owner \
++    --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
+     -I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
+ 
+ echo $headers_md5 > kernel/kheaders.md5
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index 7ade20e952321..415201ca0c7e4 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -218,6 +218,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+ 	 */
+ 	do {
+ 		clear_thread_flag(TIF_SIGPENDING);
++		clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ 		rc = kernel_wait4(-1, NULL, __WALL, NULL);
+ 	} while (rc != -ECHILD);
+ 
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index d88b13076b794..a47bcf71defcf 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -178,26 +178,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
+ 	}
+ }
+ 
+-#ifdef CONFIG_NO_HZ_FULL
+-static void giveup_do_timer(void *info)
+-{
+-	int cpu = *(unsigned int *)info;
+-
+-	WARN_ON(tick_do_timer_cpu != smp_processor_id());
+-
+-	tick_do_timer_cpu = cpu;
+-}
+-
+-static void tick_take_do_timer_from_boot(void)
+-{
+-	int cpu = smp_processor_id();
+-	int from = tick_do_timer_boot_cpu;
+-
+-	if (from >= 0 && from != cpu)
+-		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
+-}
+-#endif
+-
+ /*
+  * Setup the tick device
+  */
+@@ -221,19 +201,25 @@ static void tick_setup_device(struct tick_device *td,
+ 			tick_next_period = ktime_get();
+ #ifdef CONFIG_NO_HZ_FULL
+ 			/*
+-			 * The boot CPU may be nohz_full, in which case set
+-			 * tick_do_timer_boot_cpu so the first housekeeping
+-			 * secondary that comes up will take do_timer from
+-			 * us.
++			 * The boot CPU may be nohz_full, in which case the
++			 * first housekeeping secondary will take do_timer()
++			 * from it.
+ 			 */
+ 			if (tick_nohz_full_cpu(cpu))
+ 				tick_do_timer_boot_cpu = cpu;
+ 
+-		} else if (tick_do_timer_boot_cpu != -1 &&
+-						!tick_nohz_full_cpu(cpu)) {
+-			tick_take_do_timer_from_boot();
++		} else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
+ 			tick_do_timer_boot_cpu = -1;
+-			WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
++			/*
++			 * The boot CPU will stay in periodic (NOHZ disabled)
++			 * mode until clocksource_done_booting(), called after
++			 * smp_init(), selects a high resolution clocksource and
++			 * timekeeping_notify() kicks the NOHZ stuff alive.
++			 *
++			 * So this WRITE_ONCE can only race with the READ_ONCE
++			 * check in tick_periodic() but this race is harmless.
++			 */
++			WRITE_ONCE(tick_do_timer_cpu, cpu);
+ #endif
+ 		}
+ 
+diff --git a/mm/memblock.c b/mm/memblock.c
+index d09136e040d3c..08e9806b1cf91 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1339,6 +1339,10 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
+ 	int start_rgn, end_rgn;
+ 	int i, ret;
+ 
++	if (WARN_ONCE(nid == MAX_NUMNODES,
++		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
++		nid = NUMA_NO_NODE;
++
+ 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ 	if (ret)
+ 		return ret;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 18da1c2b08c3d..7751bd78fbcb2 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2550,6 +2550,13 @@ int unpoison_memory(unsigned long pfn)
+ 		goto unlock_mutex;
+ 	}
+ 
++	if (is_huge_zero_page(&folio->page)) {
++		unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
++				 pfn, &unpoison_rs);
++		ret = -EOPNOTSUPP;
++		goto unlock_mutex;
++	}
++
+ 	if (!PageHWPoison(p)) {
+ 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
+ 				 pfn, &unpoison_rs);
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 9169efb2f43aa..5fff5930e4deb 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ {
+ 	struct sk_buff *skb;
+ 	struct sock *newsk;
++	ax25_dev *ax25_dev;
+ 	DEFINE_WAIT(wait);
+ 	struct sock *sk;
++	ax25_cb *ax25;
+ 	int err = 0;
+ 
+ 	if (sock->state != SS_UNCONNECTED)
+@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
+ 	kfree_skb(skb);
+ 	sk_acceptq_removed(sk);
+ 	newsock->state = SS_CONNECTED;
++	ax25 = sk_to_ax25(newsk);
++	ax25_dev = ax25->ax25_dev;
++	netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
++	ax25_dev_hold(ax25_dev);
+ 
+ out:
+ 	release_sock(sk);
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index c9d55b99a7a57..67ae6b8c52989 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -193,7 +193,7 @@ void __exit ax25_dev_free(void)
+ 	list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+ 		netdev_put(s->dev, &s->dev_tracker);
+ 		list_del(&s->list);
+-		kfree(s);
++		ax25_dev_put(s);
+ 	}
+ 	spin_unlock_bh(&ax25_dev_lock);
+ }
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 64f794d198cdc..7bfa6b59ba87e 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -1194,7 +1194,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+ 
+ 	cp.own_addr_type = own_addr_type;
+ 	cp.channel_map = hdev->le_adv_channel_map;
+-	cp.handle = instance;
++	cp.handle = adv ? adv->handle : instance;
+ 
+ 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
+ 		cp.primary_phy = HCI_ADV_PHY_1M;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 4a633c1b68825..9394a158d1b1a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4009,8 +4009,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
+ 				status = L2CAP_CS_AUTHOR_PEND;
+ 				chan->ops->defer(chan);
+ 			} else {
+-				l2cap_state_change(chan, BT_CONNECT2);
+-				result = L2CAP_CR_PEND;
++				l2cap_state_change(chan, BT_CONFIG);
++				result = L2CAP_CR_SUCCESS;
+ 				status = L2CAP_CS_NO_INFO;
+ 			}
+ 		} else {
+@@ -4645,13 +4645,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ 
+ 	memset(&rsp, 0, sizeof(rsp));
+ 
+-	if (max > hcon->le_conn_max_interval) {
+-		BT_DBG("requested connection interval exceeds current bounds.");
+-		err = -EINVAL;
+-	} else {
+-		err = hci_check_conn_params(min, max, latency, to_multiplier);
+-	}
+-
++	err = hci_check_conn_params(min, max, latency, to_multiplier);
+ 	if (err)
+ 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ 	else
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 61efeadaff8db..4cd29fb490f7c 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -719,10 +719,16 @@ static void
+ __bpf_prog_test_run_raw_tp(void *data)
+ {
+ 	struct bpf_raw_tp_test_run_info *info = data;
++	struct bpf_trace_run_ctx run_ctx = {};
++	struct bpf_run_ctx *old_run_ctx;
++
++	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ 
+ 	rcu_read_lock();
+ 	info->retval = bpf_prog_run(info->prog, info->ctx);
+ 	rcu_read_unlock();
++
++	bpf_reset_run_ctx(old_run_ctx);
+ }
+ 
+ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
+index 3c66141d34d62..1820f09ff59ce 100644
+--- a/net/bridge/br_mst.c
++++ b/net/bridge/br_mst.c
+@@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
+ }
+ EXPORT_SYMBOL_GPL(br_mst_get_state);
+ 
+-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
++static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
++				  struct net_bridge_vlan *v,
+ 				  u8 state)
+ {
+-	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+-
+ 	if (br_vlan_get_state(v) == state)
+ 		return;
+ 
+@@ -103,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 	int err = 0;
+ 
+ 	rcu_read_lock();
+-	vg = nbp_vlan_group(p);
++	vg = nbp_vlan_group_rcu(p);
+ 	if (!vg)
+ 		goto out;
+ 
+@@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
+ 		if (v->brvlan->msti != msti)
+ 			continue;
+ 
+-		br_mst_vlan_set_state(p, v, state);
++		br_mst_vlan_set_state(vg, v, state);
+ 	}
+ 
+ out:
+@@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
+ 		 * it.
+ 		 */
+ 		if (v != pv && v->brvlan->msti == msti) {
+-			br_mst_vlan_set_state(pv->port, pv, v->state);
++			br_mst_vlan_set_state(vg, pv, v->state);
+ 			return;
+ 		}
+ 	}
+ 
+ 	/* Otherwise, start out in a new MSTI with all ports disabled. */
+-	return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
++	return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
+ }
+ 
+ int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 8ba6a4e4be266..74e6f9746fb30 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ /* Process one rtnetlink message. */
+ 
++static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
++{
++	rtnl_dumpit_func dumpit = cb->data;
++	int err;
++
++	/* Previous iteration has already finished, avoid calling ->dumpit()
++	 * again, it may not expect to be called after it reached the end.
++	 */
++	if (!dumpit)
++		return 0;
++
++	err = dumpit(skb, cb);
++
++	/* Old dump handlers used to send NLM_DONE in a separate recvmsg().
++	 * Some applications which parse netlink manually depend on this.
++	 */
++	if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
++		if (err < 0 && err != -EMSGSIZE)
++			return err;
++		if (!err)
++			cb->data = NULL;
++
++		return skb->len;
++	}
++	return err;
++}
++
++static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++				const struct nlmsghdr *nlh,
++				struct netlink_dump_control *control)
++{
++	if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
++		WARN_ON(control->data);
++		control->data = control->dump;
++		control->dump = rtnl_dumpit;
++	}
++
++	return netlink_dump_start(ssk, skb, nlh, control);
++}
++
+ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 			     struct netlink_ext_ack *extack)
+ {
+@@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 				.module		= owner,
+ 				.flags		= flags,
+ 			};
+-			err = netlink_dump_start(rtnl, skb, nlh, &c);
++			err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
+ 			/* netlink_dump_start() will keep a reference on
+ 			 * module if dump is still in progress.
+ 			 */
+@@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void)
+ 	register_netdevice_notifier(&rtnetlink_dev_notifier);
+ 
+ 	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
+-		      rtnl_dump_ifinfo, 0);
++		      rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
+ 	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
+ 	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
+ 	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 8598466a38057..01be07b485fad 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1639,19 +1639,23 @@ void sock_map_close(struct sock *sk, long timeout)
+ 
+ 	lock_sock(sk);
+ 	rcu_read_lock();
+-	psock = sk_psock_get(sk);
+-	if (unlikely(!psock)) {
+-		rcu_read_unlock();
+-		release_sock(sk);
+-		saved_close = READ_ONCE(sk->sk_prot)->close;
+-	} else {
++	psock = sk_psock(sk);
++	if (likely(psock)) {
+ 		saved_close = psock->saved_close;
+ 		sock_map_remove_links(sk, psock);
++		psock = sk_psock_get(sk);
++		if (unlikely(!psock))
++			goto no_psock;
+ 		rcu_read_unlock();
+ 		sk_psock_stop(psock);
+ 		release_sock(sk);
+ 		cancel_delayed_work_sync(&psock->work);
+ 		sk_psock_put(sk, psock);
++	} else {
++		saved_close = READ_ONCE(sk->sk_prot)->close;
++no_psock:
++		rcu_read_unlock();
++		release_sock(sk);
+ 	}
+ 
+ 	/* Make sure we do not recurse. This is a bug.
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 5a55270aa86e8..e645d751a5e89 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
+ 	const struct ethtool_ops *ops = dev->ethtool_ops;
+ 	int n_stats, ret;
+ 
+-	if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
++	if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
+ 		return -EOPNOTSUPP;
+ 
+ 	n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 8382cc998bff8..84b5d1ccf716a 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -2801,7 +2801,7 @@ void __init devinet_init(void)
+ 	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
+ 	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
+ 	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
+-		      RTNL_FLAG_DUMP_UNLOCKED);
++		      RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
+ 	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
+ 		      inet_netconf_dump_devconf,
+ 		      RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index c484b1c0fc00a..7ad2cafb92763 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1050,11 +1050,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
+ 			e++;
+ 		}
+ 	}
+-
+-	/* Don't let NLM_DONE coalesce into a message, even if it could.
+-	 * Some user space expects NLM_DONE in a separate recv().
+-	 */
+-	err = skb->len;
+ out:
+ 
+ 	cb->args[1] = e;
+@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
+ 	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
+ 	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
+ 	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
+-		      RTNL_FLAG_DUMP_UNLOCKED);
++		      RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 66d77faca64f6..77ee1eda3fd86 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1159,6 +1159,9 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ 
+ 			process_backlog++;
+ 
++#ifdef CONFIG_SKB_DECRYPTED
++			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
++#endif
+ 			tcp_skb_entail(sk, skb);
+ 			copy = size_goal;
+ 
+@@ -2637,6 +2640,10 @@ void tcp_set_state(struct sock *sk, int state)
+ 		if (oldstate != TCP_ESTABLISHED)
+ 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ 		break;
++	case TCP_CLOSE_WAIT:
++		if (oldstate == TCP_SYN_RECV)
++			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
++		break;
+ 
+ 	case TCP_CLOSE:
+ 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
+@@ -2648,7 +2655,7 @@ void tcp_set_state(struct sock *sk, int state)
+ 			inet_put_port(sk);
+ 		fallthrough;
+ 	default:
+-		if (oldstate == TCP_ESTABLISHED)
++		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+ 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ 	}
+ 
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index d1ad20ce1c8c7..f96f68cf7961c 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -483,8 +483,12 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ {
+ 	const struct tcp_sock *tp = tcp_sk(sk);
+ 	const int timeout = TCP_RTO_MAX * 2;
+-	u32 rcv_delta;
++	s32 rcv_delta;
+ 
++	/* Note: timer interrupt might have been delayed by at least one jiffy,
++	 * and tp->rcv_tstamp might very well have been written recently.
++	 * rcv_delta can thus be negative.
++	 */
+ 	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+ 	if (rcv_delta <= timeout)
+ 		return false;
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index 7563f8c6aa87c..bf7120ecea1eb 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -351,9 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		goto drop;
+ 
+ 	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+-		preempt_disable();
++		local_bh_disable();
+ 		dst = dst_cache_get(&ilwt->cache);
+-		preempt_enable();
++		local_bh_enable();
+ 
+ 		if (unlikely(!dst)) {
+ 			struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -373,9 +373,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 				goto drop;
+ 			}
+ 
+-			preempt_disable();
++			local_bh_disable();
+ 			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+-			preempt_enable();
++			local_bh_enable();
+ 		}
+ 
+ 		skb_dst_drop(skb);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index c1f62352a4814..1ace4ac3ee04c 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -965,6 +965,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 	if (!fib6_nh->rt6i_pcpu)
+ 		return;
+ 
++	rcu_read_lock();
+ 	/* release the reference to this fib entry from
+ 	 * all of its cached pcpu routes
+ 	 */
+@@ -973,7 +974,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 		struct rt6_info *pcpu_rt;
+ 
+ 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+-		pcpu_rt = *ppcpu_rt;
++
++		/* Paired with xchg() in rt6_get_pcpu_route() */
++		pcpu_rt = READ_ONCE(*ppcpu_rt);
+ 
+ 		/* only dropping the 'from' reference if the cached route
+ 		 * is using 'match'. The cached pcpu_rt->from only changes
+@@ -987,6 +990,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
+ 			fib6_info_release(from);
+ 		}
+ 	}
++	rcu_read_unlock();
+ }
+ 
+ struct fib6_nh_pcpu_arg {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f090e7bcb784f..8f8c8fcfd1c21 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1409,6 +1409,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
+ 		struct rt6_info *prev, **p;
+ 
+ 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
++		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
+ 		prev = xchg(p, NULL);
+ 		if (prev) {
+ 			dst_dev_put(&prev->dst);
+@@ -6342,12 +6343,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+ 	if (!write)
+ 		return -EINVAL;
+ 
+-	net = (struct net *)ctl->extra1;
+-	delay = net->ipv6.sysctl.flush_delay;
+ 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ 	if (ret)
+ 		return ret;
+ 
++	net = (struct net *)ctl->extra1;
++	delay = net->ipv6.sysctl.flush_delay;
+ 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
+ 	return 0;
+ }
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index a75df2ec8db0d..098632adc9b5a 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+-	preempt_enable();
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
+-			preempt_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+-			preempt_enable();
+ 		}
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
++	local_bh_enable();
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
+@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+-	preempt_enable();
++	local_bh_enable();
+ 
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 			goto drop;
+ 		}
+ 
+-		preempt_disable();
++		local_bh_disable();
+ 		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+-		preempt_enable();
++		local_bh_enable();
+ 	}
+ 
+ 	skb_dst_drop(skb);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 5873b3c3562ed..2b2eda5a2894d 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1438,7 +1438,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 	 */
+ 
+ 	newsk->sk_gso_type = SKB_GSO_TCPV6;
+-	ip6_dst_store(newsk, dst, NULL, NULL);
+ 	inet6_sk_rx_dst_set(newsk, skb);
+ 
+ 	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
+@@ -1449,6 +1448,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 
+ 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ 
++	ip6_dst_store(newsk, dst, NULL, NULL);
++
+ 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+ 	newnp->saddr = ireq->ir_v6_loc_addr;
+ 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 07abaf7820c56..51dc2d9dd6b84 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4012,7 +4012,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ 		goto out;
+ 	}
+ 
+-	link_data->csa_chanreq = chanreq; 
++	link_data->csa_chanreq = chanreq;
+ 	link_conf->csa_active = true;
+ 
+ 	if (params->block_tx &&
+@@ -4023,7 +4023,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ 	}
+ 
+ 	cfg80211_ch_switch_started_notify(sdata->dev,
+-					  &link_data->csa_chanreq.oper, 0,
++					  &link_data->csa_chanreq.oper, link_id,
+ 					  params->count, params->block_tx);
+ 
+ 	if (changed) {
+diff --git a/net/mac80211/he.c b/net/mac80211/he.c
+index 9f5ffdc9db284..ecbb042dd0433 100644
+--- a/net/mac80211/he.c
++++ b/net/mac80211/he.c
+@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
+ 
+ 	if (!he_spr_ie_elem)
+ 		return;
++
++	he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
+ 	data = he_spr_ie_elem->optional;
+ 
+ 	if (he_spr_ie_elem->he_sr_control &
+ 	    IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+-		data++;
++		he_obss_pd->non_srg_max_offset = *data++;
++
+ 	if (he_spr_ie_elem->he_sr_control &
+ 	    IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+-		he_obss_pd->max_offset = *data++;
+ 		he_obss_pd->min_offset = *data++;
++		he_obss_pd->max_offset = *data++;
++		memcpy(he_obss_pd->bss_color_bitmap, data, 8);
++		data += 8;
++		memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
+ 		he_obss_pd->enable = true;
+ 	}
+ }
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index a6b62169f0848..c0a5c75cddcb9 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+  */
+ void mesh_path_flush_pending(struct mesh_path *mpath)
+ {
++	struct ieee80211_sub_if_data *sdata = mpath->sdata;
++	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++	struct mesh_preq_queue *preq, *tmp;
+ 	struct sk_buff *skb;
+ 
+ 	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+ 		mesh_path_discard_frame(mpath->sdata, skb);
++
++	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
++	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
++		if (ether_addr_equal(mpath->dst, preq->dst)) {
++			list_del(&preq->list);
++			kfree(preq);
++			--ifmsh->preq_queue_len;
++		}
++	}
++	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ }
+ 
+ /**
+diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
+index 55e5497f89781..055a60e90979b 100644
+--- a/net/mac80211/parse.c
++++ b/net/mac80211/parse.c
+@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
+ 		if (params->mode < IEEE80211_CONN_MODE_HE)
+ 			break;
+ 		if (len >= sizeof(*elems->he_spr) &&
+-		    len >= ieee80211_he_spr_size(data))
++		    len >= ieee80211_he_spr_size(data) - 1)
+ 			elems->he_spr = data;
+ 		break;
+ 	case WLAN_EID_EXT_HE_6GHZ_CAPA:
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index da5fdd6f5c852..aa22f09e6d145 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 	skb_queue_head_init(&pending);
+ 
+ 	/* sync with ieee80211_tx_h_unicast_ps_buf */
+-	spin_lock(&sta->ps_lock);
++	spin_lock_bh(&sta->ps_lock);
+ 	/* Send all buffered frames to the station */
+ 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ 		int count = skb_queue_len(&pending), tmp;
+@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ 	 */
+ 	clear_sta_flag(sta, WLAN_STA_PSPOLL);
+ 	clear_sta_flag(sta, WLAN_STA_UAPSD);
+-	spin_unlock(&sta->ps_lock);
++	spin_unlock_bh(&sta->ps_lock);
+ 
+ 	atomic_dec(&ps->num_sta_ps);
+ 
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 5c17d39146ea2..8bf7ed6d63d15 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -676,6 +676,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	unsigned int add_addr_accept_max;
+ 	struct mptcp_addr_info remote;
+ 	unsigned int subflows_max;
++	bool sf_created = false;
+ 	int i, nr;
+ 
+ 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
+@@ -703,15 +704,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	if (nr == 0)
+ 		return;
+ 
+-	msk->pm.add_addr_accepted++;
+-	if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+-	    msk->pm.subflows >= subflows_max)
+-		WRITE_ONCE(msk->pm.accept_addr, false);
+-
+ 	spin_unlock_bh(&msk->pm.lock);
+ 	for (i = 0; i < nr; i++)
+-		__mptcp_subflow_connect(sk, &addrs[i], &remote);
++		if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
++			sf_created = true;
+ 	spin_lock_bh(&msk->pm.lock);
++
++	if (sf_created) {
++		msk->pm.add_addr_accepted++;
++		if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
++		    msk->pm.subflows >= subflows_max)
++			WRITE_ONCE(msk->pm.accept_addr, false);
++	}
+ }
+ 
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+@@ -813,10 +817,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+ 			spin_lock_bh(&msk->pm.lock);
+ 
+ 			removed = true;
+-			__MPTCP_INC_STATS(sock_net(sk), rm_type);
++			if (rm_type == MPTCP_MIB_RMSUBFLOW)
++				__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		}
+ 		if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ 			__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
++		else if (rm_type == MPTCP_MIB_RMADDR)
++			__MPTCP_INC_STATS(sock_net(sk), rm_type);
+ 		if (!removed)
+ 			continue;
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 965eb69dc5de3..68e4c086483a3 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2907,9 +2907,14 @@ void mptcp_set_state(struct sock *sk, int state)
+ 		if (oldstate != TCP_ESTABLISHED)
+ 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
+ 		break;
+-
++	case TCP_CLOSE_WAIT:
++		/* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
++		 * MPTCP "accepted" sockets will be created later on. So no
++		 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
++		 */
++		break;
+ 	default:
+-		if (oldstate == TCP_ESTABLISHED)
++		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
+ 			MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
+ 	}
+ 
+@@ -3726,6 +3731,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+ 	WRITE_ONCE(msk->write_seq, subflow->idsn);
+ 	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
++	WRITE_ONCE(msk->snd_una, subflow->idsn);
+ 	if (likely(!__mptcp_check_fallback(msk)))
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
+ 
+diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
+index 374412ed780b6..ef0f8f73826f5 100644
+--- a/net/ncsi/internal.h
++++ b/net/ncsi/internal.h
+@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
+ 	spinlock_t          lock;            /* Protect the NCSI device    */
+ 	unsigned int        package_probe_id;/* Current ID during probe    */
+ 	unsigned int        package_num;     /* Number of packages         */
++	unsigned int        channel_probe_id;/* Current channel ID during probe */
+ 	struct list_head    packages;        /* List of packages           */
+ 	struct ncsi_channel *hot_channel;    /* Channel was ever active    */
+ 	struct ncsi_request requests[256];   /* Request table              */
+@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
+ 	bool                multi_package;   /* Enable multiple packages   */
+ 	bool                mlx_multi_host;  /* Enable multi host Mellanox */
+ 	u32                 package_whitelist; /* Packages to configure    */
++	unsigned char       channel_count;     /* Num of channels to probe   */
+ };
+ 
+ struct ncsi_cmd_arg {
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index 745c788f1d1df..5ecf611c88200 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		break;
+ 	case ncsi_dev_state_suspend_gls:
+-		ndp->pending_req_num = np->channel_num;
++		ndp->pending_req_num = 1;
+ 
+ 		nca.type = NCSI_PKT_CMD_GLS;
+ 		nca.package = np->id;
++		nca.channel = ndp->channel_probe_id;
++		ret = ncsi_xmit_cmd(&nca);
++		if (ret)
++			goto error;
++		ndp->channel_probe_id++;
+ 
+-		nd->state = ncsi_dev_state_suspend_dcnt;
+-		NCSI_FOR_EACH_CHANNEL(np, nc) {
+-			nca.channel = nc->id;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
++		if (ndp->channel_probe_id == ndp->channel_count) {
++			ndp->channel_probe_id = 0;
++			nd->state = ncsi_dev_state_suspend_dcnt;
+ 		}
+ 
+ 		break;
+@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ {
+ 	struct ncsi_dev *nd = &ndp->ndev;
+ 	struct ncsi_package *np;
+-	struct ncsi_channel *nc;
+ 	struct ncsi_cmd_arg nca;
+ 	unsigned char index;
+ 	int ret;
+@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		nd->state = ncsi_dev_state_probe_cis;
+ 		break;
+-	case ncsi_dev_state_probe_cis:
+-		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
+-
+-		/* Clear initial state */
+-		nca.type = NCSI_PKT_CMD_CIS;
+-		nca.package = ndp->active_package->id;
+-		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
+-			nca.channel = index;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
+-		}
+-
+-		nd->state = ncsi_dev_state_probe_gvi;
+-		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+-			nd->state = ncsi_dev_state_probe_keep_phy;
+-		break;
+ 	case ncsi_dev_state_probe_keep_phy:
+ 		ndp->pending_req_num = 1;
+ 
+@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 
+ 		nd->state = ncsi_dev_state_probe_gvi;
+ 		break;
++	case ncsi_dev_state_probe_cis:
+ 	case ncsi_dev_state_probe_gvi:
+ 	case ncsi_dev_state_probe_gc:
+ 	case ncsi_dev_state_probe_gls:
+ 		np = ndp->active_package;
+-		ndp->pending_req_num = np->channel_num;
++		ndp->pending_req_num = 1;
+ 
+-		/* Retrieve version, capability or link status */
+-		if (nd->state == ncsi_dev_state_probe_gvi)
++		/* Clear initial state, or retrieve version, capability or link status */
++		if (nd->state == ncsi_dev_state_probe_cis)
++			nca.type = NCSI_PKT_CMD_CIS;
++		else if (nd->state == ncsi_dev_state_probe_gvi)
+ 			nca.type = NCSI_PKT_CMD_GVI;
+ 		else if (nd->state == ncsi_dev_state_probe_gc)
+ 			nca.type = NCSI_PKT_CMD_GC;
+@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 			nca.type = NCSI_PKT_CMD_GLS;
+ 
+ 		nca.package = np->id;
+-		NCSI_FOR_EACH_CHANNEL(np, nc) {
+-			nca.channel = nc->id;
+-			ret = ncsi_xmit_cmd(&nca);
+-			if (ret)
+-				goto error;
+-		}
++		nca.channel = ndp->channel_probe_id;
+ 
+-		if (nd->state == ncsi_dev_state_probe_gvi)
++		ret = ncsi_xmit_cmd(&nca);
++		if (ret)
++			goto error;
++
++		if (nd->state == ncsi_dev_state_probe_cis) {
++			nd->state = ncsi_dev_state_probe_gvi;
++			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
++				nd->state = ncsi_dev_state_probe_keep_phy;
++		} else if (nd->state == ncsi_dev_state_probe_gvi) {
+ 			nd->state = ncsi_dev_state_probe_gc;
+-		else if (nd->state == ncsi_dev_state_probe_gc)
++		} else if (nd->state == ncsi_dev_state_probe_gc) {
+ 			nd->state = ncsi_dev_state_probe_gls;
+-		else
++		} else {
++			nd->state = ncsi_dev_state_probe_cis;
++			ndp->channel_probe_id++;
++		}
++
++		if (ndp->channel_probe_id == ndp->channel_count) {
++			ndp->channel_probe_id = 0;
+ 			nd->state = ncsi_dev_state_probe_dp;
++		}
+ 		break;
+ 	case ncsi_dev_state_probe_dp:
+ 		ndp->pending_req_num = 1;
+@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ 		ndp->requests[i].ndp = ndp;
+ 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
+ 	}
++	ndp->channel_count = NCSI_RESERVED_CHANNEL;
+ 
+ 	spin_lock_irqsave(&ncsi_dev_lock, flags);
+ 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
+@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
+ 
+ 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
+ 		ndp->package_probe_id = 0;
++		ndp->channel_probe_id = 0;
+ 		nd->state = ncsi_dev_state_probe;
+ 		schedule_work(&ndp->work);
+ 		return 0;
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index bee290d0f48b6..e28be33bdf2c4 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ 	struct ncsi_rsp_gc_pkt *rsp;
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
+ 	struct ncsi_channel *nc;
++	struct ncsi_package *np;
+ 	size_t size;
+ 
+ 	/* Find the channel */
+ 	rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
+ 	ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+-				      NULL, &nc);
++				      &np, &nc);
+ 	if (!nc)
+ 		return -ENODEV;
+ 
+@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
+ 	 */
+ 	nc->vlan_filter.bitmap = U64_MAX;
+ 	nc->vlan_filter.n_vids = rsp->vlan_cnt;
++	np->ndp->channel_count = rsp->channel_cnt;
+ 
+ 	return 0;
+ }
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 3184cc6be4c9d..c7ae4d9bf3d24 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1172,23 +1172,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ 				    .len = IPSET_MAXNAMELEN - 1 },
+ };
+ 
++/* In order to return quickly when destroying a single set, it is split
++ * into two stages:
++ * - Cancel garbage collector
++ * - Destroy the set itself via call_rcu()
++ */
++
+ static void
+-ip_set_destroy_set(struct ip_set *set)
++ip_set_destroy_set_rcu(struct rcu_head *head)
+ {
+-	pr_debug("set: %s\n",  set->name);
++	struct ip_set *set = container_of(head, struct ip_set, rcu);
+ 
+-	/* Must call it without holding any lock */
+ 	set->variant->destroy(set);
+ 	module_put(set->type->me);
+ 	kfree(set);
+ }
+ 
+ static void
+-ip_set_destroy_set_rcu(struct rcu_head *head)
++_destroy_all_sets(struct ip_set_net *inst)
+ {
+-	struct ip_set *set = container_of(head, struct ip_set, rcu);
++	struct ip_set *set;
++	ip_set_id_t i;
++	bool need_wait = false;
+ 
+-	ip_set_destroy_set(set);
++	/* First cancel gc's: set:list sets are flushed as well */
++	for (i = 0; i < inst->ip_set_max; i++) {
++		set = ip_set(inst, i);
++		if (set) {
++			set->variant->cancel_gc(set);
++			if (set->type->features & IPSET_TYPE_NAME)
++				need_wait = true;
++		}
++	}
++	/* Must wait for flush to be really finished  */
++	if (need_wait)
++		rcu_barrier();
++	for (i = 0; i < inst->ip_set_max; i++) {
++		set = ip_set(inst, i);
++		if (set) {
++			ip_set(inst, i) = NULL;
++			set->variant->destroy(set);
++			module_put(set->type->me);
++			kfree(set);
++		}
++	}
+ }
+ 
+ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+@@ -1202,11 +1229,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (unlikely(protocol_min_failed(attr)))
+ 		return -IPSET_ERR_PROTOCOL;
+ 
+-
+ 	/* Commands are serialized and references are
+ 	 * protected by the ip_set_ref_lock.
+ 	 * External systems (i.e. xt_set) must call
+-	 * ip_set_put|get_nfnl_* functions, that way we
++	 * ip_set_nfnl_get_* functions, that way we
+ 	 * can safely check references here.
+ 	 *
+ 	 * list:set timer can only decrement the reference
+@@ -1214,8 +1240,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 	 * without holding the lock.
+ 	 */
+ 	if (!attr[IPSET_ATTR_SETNAME]) {
+-		/* Must wait for flush to be really finished in list:set */
+-		rcu_barrier();
+ 		read_lock_bh(&ip_set_ref_lock);
+ 		for (i = 0; i < inst->ip_set_max; i++) {
+ 			s = ip_set(inst, i);
+@@ -1226,15 +1250,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 		}
+ 		inst->is_destroyed = true;
+ 		read_unlock_bh(&ip_set_ref_lock);
+-		for (i = 0; i < inst->ip_set_max; i++) {
+-			s = ip_set(inst, i);
+-			if (s) {
+-				ip_set(inst, i) = NULL;
+-				/* Must cancel garbage collectors */
+-				s->variant->cancel_gc(s);
+-				ip_set_destroy_set(s);
+-			}
+-		}
++		_destroy_all_sets(inst);
+ 		/* Modified by ip_set_destroy() only, which is serialized */
+ 		inst->is_destroyed = false;
+ 	} else {
+@@ -1255,12 +1271,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
+ 		features = s->type->features;
+ 		ip_set(inst, i) = NULL;
+ 		read_unlock_bh(&ip_set_ref_lock);
++		/* Must cancel garbage collectors */
++		s->variant->cancel_gc(s);
+ 		if (features & IPSET_TYPE_NAME) {
+ 			/* Must wait for flush to be really finished  */
+ 			rcu_barrier();
+ 		}
+-		/* Must cancel garbage collectors */
+-		s->variant->cancel_gc(s);
+ 		call_rcu(&s->rcu, ip_set_destroy_set_rcu);
+ 	}
+ 	return 0;
+@@ -2365,30 +2381,25 @@ ip_set_net_init(struct net *net)
+ }
+ 
+ static void __net_exit
+-ip_set_net_exit(struct net *net)
++ip_set_net_pre_exit(struct net *net)
+ {
+ 	struct ip_set_net *inst = ip_set_pernet(net);
+ 
+-	struct ip_set *set = NULL;
+-	ip_set_id_t i;
+-
+ 	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
++}
+ 
+-	nfnl_lock(NFNL_SUBSYS_IPSET);
+-	for (i = 0; i < inst->ip_set_max; i++) {
+-		set = ip_set(inst, i);
+-		if (set) {
+-			ip_set(inst, i) = NULL;
+-			set->variant->cancel_gc(set);
+-			ip_set_destroy_set(set);
+-		}
+-	}
+-	nfnl_unlock(NFNL_SUBSYS_IPSET);
++static void __net_exit
++ip_set_net_exit(struct net *net)
++{
++	struct ip_set_net *inst = ip_set_pernet(net);
++
++	_destroy_all_sets(inst);
+ 	kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ }
+ 
+ static struct pernet_operations ip_set_net_ops = {
+ 	.init	= ip_set_net_init,
++	.pre_exit = ip_set_net_pre_exit,
+ 	.exit   = ip_set_net_exit,
+ 	.id	= &ip_set_net_id,
+ 	.size	= sizeof(struct ip_set_net),
+diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
+index 54e2a1dd7f5f5..bfae7066936bb 100644
+--- a/net/netfilter/ipset/ip_set_list_set.c
++++ b/net/netfilter/ipset/ip_set_list_set.c
+@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
+ 	struct set_elem *e;
+ 	int ret;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
+ 	struct set_elem *e;
+ 	int ret;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 	struct list_set *map = set->data;
+ 	struct set_adt_elem *d = value;
+ 	struct set_elem *e, *next, *prev = NULL;
+-	int ret;
++	int ret = 0;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	rcu_read_lock();
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 
+ 		if (d->before == 0) {
+ 			ret = 1;
++			goto out;
+ 		} else if (d->before > 0) {
+ 			next = list_next_entry(e, list);
+ 			ret = !list_is_last(&e->list, &map->members) &&
+@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 		} else {
+ 			ret = prev && prev->id == d->refid;
+ 		}
+-		return ret;
++		goto out;
+ 	}
+-	return 0;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static void
+@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ 
+ 	/* Find where to add the new entry */
+ 	n = prev = next = NULL;
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_rcu(e, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ {
+ 	struct list_set *map = set->data;
+ 	struct set_adt_elem *d = value;
+-	struct set_elem *e, *next, *prev = NULL;
++	struct set_elem *e, *n, *next, *prev = NULL;
+ 
+-	list_for_each_entry(e, &map->members, list) {
++	list_for_each_entry_safe(e, n, &map->members, list) {
+ 		if (SET_WITH_TIMEOUT(set) &&
+ 		    ip_set_timeout_expired(ext_timeout(e, set)))
+ 			continue;
+@@ -424,14 +428,8 @@ static void
+ list_set_destroy(struct ip_set *set)
+ {
+ 	struct list_set *map = set->data;
+-	struct set_elem *e, *n;
+ 
+-	list_for_each_entry_safe(e, n, &map->members, list) {
+-		list_del(&e->list);
+-		ip_set_put_byindex(map->net, e->id);
+-		ip_set_ext_destroy(set, e);
+-		kfree(e);
+-	}
++	WARN_ON_ONCE(!list_empty(&map->members));
+ 	kfree(map);
+ 
+ 	set->data = NULL;
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index ba0d3683a45d3..9139ce38ea7b9 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
+ 	struct nft_meta *priv = nft_expr_priv(expr);
+ 	unsigned int len;
+ 
++	if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
++		return -EINVAL;
++
+ 	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+ 	switch (priv->key) {
+ 	case NFT_META_PROTOCOL:
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 0c43d748e23ae..50429cbd42da4 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -650,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
+ 	struct nft_payload *priv = nft_expr_priv(expr);
+ 	u32 base;
+ 
++	if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
++	    !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
++		return -EINVAL;
++
+ 	base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ 	switch (base) {
+ 	case NFT_PAYLOAD_TUN_HEADER:
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 4a2c763e2d116..10b1491d55809 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -673,6 +673,7 @@ struct Qdisc noop_qdisc = {
+ 		.qlen = 0,
+ 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
+ 	},
++	.owner = -1,
+ };
+ EXPORT_SYMBOL(noop_qdisc);
+ 
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 79e93a19d5fab..06e03f5cd7ce1 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
+ 
+-	removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
++	removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
+ 			  GFP_KERNEL);
+ 	if (!removed)
+ 		return -ENOMEM;
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 5c3f8a278fc2f..0b150b13bee7a 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
+ {
+ 	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
+ 
+-	if (!qopt && !dev->num_tc) {
+-		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+-		return -EINVAL;
+-	}
+-
+-	/* If num_tc is already set, it means that the user already
+-	 * configured the mqprio part
+-	 */
+-	if (dev->num_tc)
++	if (!qopt) {
++		if (!dev->num_tc) {
++			NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
++			return -EINVAL;
++		}
+ 		return 0;
++	}
+ 
+ 	/* taprio imposes that traffic classes map 1:n to tx queues */
+ 	if (qopt->num_tc > dev->num_tx_queues) {
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 4b52b3b159c0e..5f9f3d4c1df5f 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -460,29 +460,11 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+ static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
+ 				     unsigned long mask)
+ {
+-	struct net *nnet = sock_net(nsk);
+-
+ 	nsk->sk_userlocks = osk->sk_userlocks;
+-	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
++	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ 		nsk->sk_sndbuf = osk->sk_sndbuf;
+-	} else {
+-		if (mask == SK_FLAGS_SMC_TO_CLC)
+-			WRITE_ONCE(nsk->sk_sndbuf,
+-				   READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
+-		else
+-			WRITE_ONCE(nsk->sk_sndbuf,
+-				   2 * READ_ONCE(nnet->smc.sysctl_wmem));
+-	}
+-	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
++	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ 		nsk->sk_rcvbuf = osk->sk_rcvbuf;
+-	} else {
+-		if (mask == SK_FLAGS_SMC_TO_CLC)
+-			WRITE_ONCE(nsk->sk_rcvbuf,
+-				   READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
+-		else
+-			WRITE_ONCE(nsk->sk_rcvbuf,
+-				   2 * READ_ONCE(nnet->smc.sysctl_rmem));
+-	}
+ }
+ 
+ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index c7af0220f82f4..369310909fc98 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+ 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+ 	/* slack space should prevent this ever happening: */
+-	if (unlikely(snd_buf->len > snd_buf->buflen))
++	if (unlikely(snd_buf->len > snd_buf->buflen)) {
++		status = -EIO;
+ 		goto wrap_failed;
++	}
+ 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+ 	 * done anyway, so it's safe to put the request on the wire: */
+ 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 439c531744a27..68a58bc07cf23 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+ 
+-static inline int unix_recvq_full(const struct sock *sk)
+-{
+-	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+-}
+-
+ static inline int unix_recvq_full_lockless(const struct sock *sk)
+ {
+-	return skb_queue_len_lockless(&sk->sk_receive_queue) >
+-		READ_ONCE(sk->sk_max_ack_backlog);
++	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+ 
+ struct sock *unix_peer_get(struct sock *s)
+@@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
+ 	return 0;
+ }
+ 
+-static int unix_writable(const struct sock *sk)
++static int unix_writable(const struct sock *sk, unsigned char state)
+ {
+-	return sk->sk_state != TCP_LISTEN &&
+-	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
++	return state != TCP_LISTEN &&
++		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
+ }
+ 
+ static void unix_write_space(struct sock *sk)
+@@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)
+ 	struct socket_wq *wq;
+ 
+ 	rcu_read_lock();
+-	if (unix_writable(sk)) {
++	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
+ 		wq = rcu_dereference(sk->sk_wq);
+ 		if (skwq_has_sleeper(wq))
+ 			wake_up_interruptible_sync_poll(&wq->wait,
+@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
+ 			sk_error_report(other);
+ 		}
+ 	}
+-	other->sk_state = TCP_CLOSE;
+ }
+ 
+ static void unix_sock_destructor(struct sock *sk)
+@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 	u->path.dentry = NULL;
+ 	u->path.mnt = NULL;
+ 	state = sk->sk_state;
+-	sk->sk_state = TCP_CLOSE;
++	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 
+ 	skpair = unix_peer(sk);
+ 	unix_peer(sk) = NULL;
+@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 			unix_state_lock(skpair);
+ 			/* No more writes */
+ 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+-			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
++			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
+ 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
+ 			unix_state_unlock(skpair);
+ 			skpair->sk_state_change(skpair);
+@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
+ 	if (backlog > sk->sk_max_ack_backlog)
+ 		wake_up_interruptible_all(&u->peer_wait);
+ 	sk->sk_max_ack_backlog	= backlog;
+-	sk->sk_state		= TCP_LISTEN;
++	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
++
+ 	/* set credentials so connect can copy them */
+ 	init_peercred(sk);
+ 	err = 0;
+@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
+ 	sk->sk_hash		= unix_unbound_hash(sk);
+ 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
+ 	sk->sk_write_space	= unix_write_space;
+-	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
++	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);
+ 	sk->sk_destruct		= unix_sock_destructor;
+ 	u = unix_sk(sk);
+ 	u->inflight = 0;
+@@ -1402,7 +1396,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 		if (err)
+ 			goto out_unlock;
+ 
+-		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
++		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
++		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
+ 	} else {
+ 		/*
+ 		 *	1003.1g breaking connected state with AF_UNSPEC
+@@ -1419,13 +1414,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 		unix_peer(sk) = other;
+ 		if (!other)
+-			sk->sk_state = TCP_CLOSE;
++			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
+ 
+ 		unix_state_double_unlock(sk, other);
+ 
+-		if (other != old_peer)
++		if (other != old_peer) {
+ 			unix_dgram_disconnected(sk, old_peer);
++
++			unix_state_lock(old_peer);
++			if (!unix_peer(old_peer))
++				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
++			unix_state_unlock(old_peer);
++		}
++
+ 		sock_put(old_peer);
+ 	} else {
+ 		unix_peer(sk) = other;
+@@ -1473,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	struct sk_buff *skb = NULL;
+ 	long timeo;
+ 	int err;
+-	int st;
+ 
+ 	err = unix_validate_addr(sunaddr, addr_len);
+ 	if (err)
+@@ -1538,7 +1539,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	if (other->sk_shutdown & RCV_SHUTDOWN)
+ 		goto out_unlock;
+ 
+-	if (unix_recvq_full(other)) {
++	if (unix_recvq_full_lockless(other)) {
+ 		err = -EAGAIN;
+ 		if (!timeo)
+ 			goto out_unlock;
+@@ -1563,9 +1564,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	   Well, and we have to recheck the state after socket locked.
+ 	 */
+-	st = sk->sk_state;
+-
+-	switch (st) {
++	switch (READ_ONCE(sk->sk_state)) {
+ 	case TCP_CLOSE:
+ 		/* This is ok... continue with connect */
+ 		break;
+@@ -1580,7 +1579,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	unix_state_lock_nested(sk, U_LOCK_SECOND);
+ 
+-	if (sk->sk_state != st) {
++	if (sk->sk_state != TCP_CLOSE) {
+ 		unix_state_unlock(sk);
+ 		unix_state_unlock(other);
+ 		sock_put(other);
+@@ -1632,7 +1631,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	copy_peercred(sk, other);
+ 
+ 	sock->state	= SS_CONNECTED;
+-	sk->sk_state	= TCP_ESTABLISHED;
++	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ 	sock_hold(newsk);
+ 
+ 	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
+@@ -1705,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+ 		goto out;
+ 
+ 	err = -EINVAL;
+-	if (sk->sk_state != TCP_LISTEN)
++	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
+ 		goto out;
+ 
+ 	/* If socket state is TCP_LISTEN it cannot change (for now...),
+@@ -2009,7 +2008,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	}
+ 
+ 	err = -EMSGSIZE;
+-	if (len > sk->sk_sndbuf - 32)
++	if (len > READ_ONCE(sk->sk_sndbuf) - 32)
+ 		goto out;
+ 
+ 	if (len > SKB_MAX_ALLOC) {
+@@ -2091,7 +2090,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			unix_peer(sk) = NULL;
+ 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
+ 
+-			sk->sk_state = TCP_CLOSE;
++			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
+ 			unix_state_unlock(sk);
+ 
+ 			unix_dgram_disconnected(sk, other);
+@@ -2268,7 +2267,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	}
+ 
+ 	if (msg->msg_namelen) {
+-		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
++		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ 		goto out_err;
+ 	} else {
+ 		err = -ENOTCONN;
+@@ -2289,7 +2288,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 						   &err, 0);
+ 		} else {
+ 			/* Keep two messages in the pipe so it schedules better */
+-			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
++			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
+ 
+ 			/* allow fallback to order-0 allocations */
+ 			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
+@@ -2382,7 +2381,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	if (err)
+ 		return err;
+ 
+-	if (sk->sk_state != TCP_ESTABLISHED)
++	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ 		return -ENOTCONN;
+ 
+ 	if (msg->msg_namelen)
+@@ -2396,7 +2395,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
+ {
+ 	struct sock *sk = sock->sk;
+ 
+-	if (sk->sk_state != TCP_ESTABLISHED)
++	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
+ 		return -ENOTCONN;
+ 
+ 	return unix_dgram_recvmsg(sock, msg, size, flags);
+@@ -2673,18 +2672,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 		if (skb == u->oob_skb) {
+ 			if (copied) {
+ 				skb = NULL;
+-			} else if (sock_flag(sk, SOCK_URGINLINE)) {
+-				if (!(flags & MSG_PEEK)) {
++			} else if (!(flags & MSG_PEEK)) {
++				if (sock_flag(sk, SOCK_URGINLINE)) {
+ 					WRITE_ONCE(u->oob_skb, NULL);
+ 					consume_skb(skb);
++				} else {
++					__skb_unlink(skb, &sk->sk_receive_queue);
++					WRITE_ONCE(u->oob_skb, NULL);
++					unlinked_skb = skb;
++					skb = skb_peek(&sk->sk_receive_queue);
+ 				}
+-			} else if (flags & MSG_PEEK) {
+-				skb = NULL;
+-			} else {
+-				__skb_unlink(skb, &sk->sk_receive_queue);
+-				WRITE_ONCE(u->oob_skb, NULL);
+-				unlinked_skb = skb;
+-				skb = skb_peek(&sk->sk_receive_queue);
++			} else if (!sock_flag(sk, SOCK_URGINLINE)) {
++				skb = skb_peek_next(skb, &sk->sk_receive_queue);
+ 			}
+ 		}
+ 
+@@ -2701,7 +2700,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ 
+ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
+ {
+-	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
++	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
+ 		return -ENOTCONN;
+ 
+ 	return unix_read_skb(sk, recv_actor);
+@@ -2725,7 +2724,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ 	size_t size = state->size;
+ 	unsigned int last_len;
+ 
+-	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -3056,7 +3055,7 @@ long unix_inq_len(struct sock *sk)
+ 	struct sk_buff *skb;
+ 	long amount = 0;
+ 
+-	if (sk->sk_state == TCP_LISTEN)
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
+ 		return -EINVAL;
+ 
+ 	spin_lock(&sk->sk_receive_queue.lock);
+@@ -3168,12 +3167,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
+ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+ {
+ 	struct sock *sk = sock->sk;
++	unsigned char state;
+ 	__poll_t mask;
+ 	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
+ 	shutdown = READ_ONCE(sk->sk_shutdown);
++	state = READ_ONCE(sk->sk_state);
+ 
+ 	/* exceptional events? */
+ 	if (READ_ONCE(sk->sk_err))
+@@ -3195,14 +3196,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
+ 
+ 	/* Connection-based need to check for termination and startup */
+ 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
+-	    sk->sk_state == TCP_CLOSE)
++	    state == TCP_CLOSE)
+ 		mask |= EPOLLHUP;
+ 
+ 	/*
+ 	 * we set writable also when the other side has shut down the
+ 	 * connection. This prevents stuck sockets.
+ 	 */
+-	if (unix_writable(sk))
++	if (unix_writable(sk, state))
+ 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+ 
+ 	return mask;
+@@ -3213,12 +3214,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ {
+ 	struct sock *sk = sock->sk, *other;
+ 	unsigned int writable;
++	unsigned char state;
+ 	__poll_t mask;
+ 	u8 shutdown;
+ 
+ 	sock_poll_wait(file, sock, wait);
+ 	mask = 0;
+ 	shutdown = READ_ONCE(sk->sk_shutdown);
++	state = READ_ONCE(sk->sk_state);
+ 
+ 	/* exceptional events? */
+ 	if (READ_ONCE(sk->sk_err) ||
+@@ -3238,19 +3241,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 
+ 	/* Connection-based need to check for termination and startup */
+-	if (sk->sk_type == SOCK_SEQPACKET) {
+-		if (sk->sk_state == TCP_CLOSE)
+-			mask |= EPOLLHUP;
+-		/* connection hasn't started yet? */
+-		if (sk->sk_state == TCP_SYN_SENT)
+-			return mask;
+-	}
++	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
++		mask |= EPOLLHUP;
+ 
+ 	/* No write status requested, avoid expensive OUT tests. */
+ 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+ 		return mask;
+ 
+-	writable = unix_writable(sk);
++	writable = unix_writable(sk, state);
+ 	if (writable) {
+ 		unix_state_lock(sk);
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index ae39538c5042b..937edf4afed41 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+ 	u32 *buf;
+ 	int i;
+ 
+-	if (sk->sk_state == TCP_LISTEN) {
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ 		spin_lock(&sk->sk_receive_queue.lock);
+ 
+ 		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+ {
+ 	struct unix_diag_rqlen rql;
+ 
+-	if (sk->sk_state == TCP_LISTEN) {
+-		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
++	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
++		rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
+ 		rql.udiag_wqueue = sk->sk_max_ack_backlog;
+ 	} else {
+ 		rql.udiag_rqueue = (u32) unix_inq_len(sk);
+@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ 	rep = nlmsg_data(nlh);
+ 	rep->udiag_family = AF_UNIX;
+ 	rep->udiag_type = sk->sk_type;
+-	rep->udiag_state = sk->sk_state;
++	rep->udiag_state = READ_ONCE(sk->sk_state);
+ 	rep->pad = 0;
+ 	rep->udiag_ino = sk_ino;
+ 	sock_diag_save_cookie(sk, rep->udiag_cookie);
+@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+ 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+ 		goto out_nlmsg_trim;
+ 
+-	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
++	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
+ 		goto out_nlmsg_trim;
+ 
+ 	if ((req->udiag_show & UDIAG_SHOW_UID) &&
+@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 		sk_for_each(sk, &net->unx.table.buckets[slot]) {
+ 			if (num < s_num)
+ 				goto next;
+-			if (!(req->udiag_states & (1 << sk->sk_state)))
++			if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
+ 				goto next;
+ 			if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+ 					 NETLINK_CB(cb->skb).portid,
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 3fb1b637352a9..4b1f45e3070e0 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
+ 	if (wk) {
+ 		list_del_init(&wk->entry);
+ 		if (!list_empty(&rdev->wiphy_work_list))
+-			schedule_work(work);
++			queue_work(system_unbound_wq, work);
+ 		spin_unlock_irq(&rdev->wiphy_work_lock);
+ 
+ 		wk->func(&rdev->wiphy, wk);
+diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
+index e106dcea39778..c569c37da3175 100644
+--- a/net/wireless/pmsr.c
++++ b/net/wireless/pmsr.c
+@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.burst_period = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
+ 		out->ftm.burst_period =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
++			nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ 
+ 	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
+ 	if (out->ftm.asap && !capa->ftm.asap) {
+@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.num_bursts_exp = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
+ 		out->ftm.num_bursts_exp =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ 
+ 	if (capa->ftm.max_bursts_exponent >= 0 &&
+ 	    out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
+@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.burst_duration = 15;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
+ 		out->ftm.burst_duration =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ 
+ 	out->ftm.ftms_per_burst = 0;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
+@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ 	out->ftm.ftmr_retries = 3;
+ 	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
+ 		out->ftm.ftmr_retries =
+-			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
++			nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ 
+ 	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
+ 	if (out->ftm.request_lci && !capa->ftm.request_lci) {
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 9b0dbcd6cf79a..ecea8c08e2701 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2128,7 +2128,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
+ 	struct ieee80211_he_operation *he_oper;
+ 
+ 	tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
+-	if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
++	if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 &&
++	    tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) {
+ 		const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+ 
+ 		he_oper = (void *)&tmp->data[1];
+diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
+index 565511a3f461e..62f26618f6747 100644
+--- a/net/wireless/sysfs.c
++++ b/net/wireless/sysfs.c
+@@ -5,7 +5,7 @@
+  *
+  * Copyright 2005-2006	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2006	Johannes Berg <johannes@sipsolutions.net>
+- * Copyright (C) 2020-2021, 2023 Intel Corporation
++ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
+  */
+ 
+ #include <linux/device.h>
+@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
+ 	if (rdev->wiphy.registered && rdev->ops->resume)
+ 		ret = rdev_resume(rdev);
+ 	rdev->suspended = false;
+-	schedule_work(&rdev->wiphy_work);
++	queue_work(system_unbound_wq, &rdev->wiphy_work);
+ 	wiphy_unlock(&rdev->wiphy);
+ 
+ 	if (ret)
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 2bde8a3546313..082c6f9c5416e 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ {
+ 	struct cfg80211_registered_device *rdev;
+ 	struct wireless_dev *wdev;
++	int ret;
+ 
+ 	wdev = dev->ieee80211_ptr;
+ 	if (!wdev)
+@@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
+ 
+ 	memset(sinfo, 0, sizeof(*sinfo));
+ 
+-	return rdev_get_station(rdev, dev, mac_addr, sinfo);
++	wiphy_lock(&rdev->wiphy);
++	ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
++	wiphy_unlock(&rdev->wiphy);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(cfg80211_get_station);
+ 
+diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
+index d3760f7749d4e..96615e50836b0 100644
+--- a/scripts/atomic/kerneldoc/sub_and_test
++++ b/scripts/atomic/kerneldoc/sub_and_test
+@@ -1,7 +1,7 @@
+ cat <<EOF
+ /**
+  * ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
+- * @i: ${int} value to add
++ * @i: ${int} value to subtract
+  * @v: pointer to ${atomic}_t
+  *
+  * Atomically updates @v to (@v - @i) with ${desc_order} ordering.
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 2f5b91da5afa9..c27c762e68807 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1652,10 +1652,11 @@ static void read_symbols(const char *modname)
+ 			namespace = get_next_modinfo(&info, "import_ns",
+ 						     namespace);
+ 		}
++
++		if (extra_warn && !get_modinfo(&info, "description"))
++			warn("missing MODULE_DESCRIPTION() in %s\n", modname);
+ 	}
+ 
+-	if (extra_warn && !get_modinfo(&info, "description"))
+-		warn("missing MODULE_DESCRIPTION() in %s\n", modname);
+ 	for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+ 		symname = remove_dot(info.strtab + sym->st_name);
+ 
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index b37d043d5748c..1856981e33df3 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -245,8 +245,8 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file,
+ 	const char *audit_cause = "failed";
+ 	struct inode *inode = file_inode(file);
+ 	struct inode *real_inode = d_real_inode(file_dentry(file));
+-	const char *filename = file->f_path.dentry->d_name.name;
+ 	struct ima_max_digest_data hash;
++	struct name_snapshot filename;
+ 	struct kstat stat;
+ 	int result = 0;
+ 	int length;
+@@ -317,9 +317,13 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file,
+ 		if (file->f_flags & O_DIRECT)
+ 			audit_cause = "failed(directio)";
+ 
++		take_dentry_name_snapshot(&filename, file->f_path.dentry);
++
+ 		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+-				    filename, "collect_data", audit_cause,
+-				    result, 0);
++				    filename.name.name, "collect_data",
++				    audit_cause, result, 0);
++
++		release_dentry_name_snapshot(&filename);
+ 	}
+ 	return result;
+ }
+@@ -432,6 +436,7 @@ void ima_audit_measurement(struct ima_iint_cache *iint,
+  */
+ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ {
++	struct name_snapshot filename;
+ 	char *pathname = NULL;
+ 
+ 	*pathbuf = __getname();
+@@ -445,7 +450,10 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+ 	}
+ 
+ 	if (!pathname) {
+-		strscpy(namebuf, path->dentry->d_name.name, NAME_MAX);
++		take_dentry_name_snapshot(&filename, path->dentry);
++		strscpy(namebuf, filename.name.name, NAME_MAX);
++		release_dentry_name_snapshot(&filename);
++
+ 		pathname = namebuf;
+ 	}
+ 
+diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
+index 6cd0add524cdc..3b2cb8f1002e6 100644
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -483,7 +483,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 				     bool size_limit)
+ {
+ 	const char *cur_filename = NULL;
++	struct name_snapshot filename;
+ 	u32 cur_filename_len = 0;
++	bool snapshot = false;
++	int ret;
+ 
+ 	BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+ 
+@@ -496,7 +499,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 	}
+ 
+ 	if (event_data->file) {
+-		cur_filename = event_data->file->f_path.dentry->d_name.name;
++		take_dentry_name_snapshot(&filename,
++					  event_data->file->f_path.dentry);
++		snapshot = true;
++		cur_filename = filename.name.name;
+ 		cur_filename_len = strlen(cur_filename);
+ 	} else
+ 		/*
+@@ -505,8 +511,13 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
+ 		 */
+ 		cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+ out:
+-	return ima_write_template_field_data(cur_filename, cur_filename_len,
+-					     DATA_FMT_STRING, field_data);
++	ret = ima_write_template_field_data(cur_filename, cur_filename_len,
++					    DATA_FMT_STRING, field_data);
++
++	if (snapshot)
++		release_dentry_name_snapshot(&filename);
++
++	return ret;
+ }
+ 
+ /*
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index c15559432d3d5..3e43e68a63393 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -950,6 +950,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ 	bool allow_parent1, allow_parent2;
+ 	access_mask_t access_request_parent1, access_request_parent2;
+ 	struct path mnt_dir;
++	struct dentry *old_parent;
+ 	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
+ 		     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
+ 
+@@ -997,9 +998,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
+ 	mnt_dir.mnt = new_dir->mnt;
+ 	mnt_dir.dentry = new_dir->mnt->mnt_root;
+ 
++	/*
++	 * old_dentry may be the root of the common mount point and
++	 * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
++	 * OPEN_TREE_CLONE).  We do not need to call dget(old_parent) because
++	 * we keep a reference to old_dentry.
++	 */
++	old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
++						      old_dentry->d_parent;
++
+ 	/* new_dir->dentry is equal to new_dentry->d_parent */
+-	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
+-						old_dentry->d_parent,
++	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
+ 						&layer_masks_parent1);
+ 	allow_parent2 = collect_domain_accesses(
+ 		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 37088cc0ff1b5..2e7148d667bd2 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -3806,7 +3806,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	itrace_parse_synth_opts(opt, "i0ns", 0);
++	itrace_parse_synth_opts(opt, "i0nse", 0);
+ 	symbol_conf.nanosecs = true;
+ 	return 0;
+ }
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 3684e6009b635..ef314a5797e30 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1466,6 +1466,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ 	char *endptr;
+ 	bool period_type_set = false;
+ 	bool period_set = false;
++	bool iy = false;
+ 
+ 	synth_opts->set = true;
+ 
+@@ -1484,6 +1485,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ 		switch (*p++) {
+ 		case 'i':
+ 		case 'y':
++			iy = true;
+ 			if (p[-1] == 'y')
+ 				synth_opts->cycles = true;
+ 			else
+@@ -1649,7 +1651,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ 		}
+ 	}
+ out:
+-	if (synth_opts->instructions || synth_opts->cycles) {
++	if (iy) {
+ 		if (!period_type_set)
+ 			synth_opts->period_type =
+ 					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
+diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
+index 35ee41e435ab3..0d0ca63de8629 100644
+--- a/tools/testing/cxl/test/mem.c
++++ b/tools/testing/cxl/test/mem.c
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/platform_device.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/vmalloc.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/sizes.h>
+diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
+index 5af9ba8a4645b..c1ce39874e2b5 100644
+--- a/tools/testing/selftests/alsa/Makefile
++++ b/tools/testing/selftests/alsa/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ 
+-CFLAGS += $(shell pkg-config --cflags alsa)
++CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
+ LDLIBS += $(shell pkg-config --libs alsa)
+ ifeq ($(LDLIBS),)
+ LDLIBS += -lasound
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+index d3a79da215c8b..5f72abe6fa79b 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: Generic dynamic event - check if duplicate events are caught
+-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
++# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
+ 
+ echo 0 > events/enable
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+index 3f74c09c56b62..118247b8dd84d 100644
+--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
++++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+@@ -10,7 +10,6 @@ fail() { #msg
+ }
+ 
+ sample_events() {
+-    echo > trace
+     echo 1 > events/kmem/kmem_cache_free/enable
+     echo 1 > tracing_on
+     ls > /dev/null
+@@ -22,6 +21,7 @@ echo 0 > tracing_on
+ echo 0 > events/enable
+ 
+ echo "Get the most frequently calling function"
++echo > trace
+ sample_events
+ 
+ target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
+@@ -32,7 +32,16 @@ echo > trace
+ 
+ echo "Test event filter function name"
+ echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
++
++sample_events
++max_retry=10
++while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
+ sample_events
++max_retry=$((max_retry - 1))
++if [ $max_retry -eq 0 ]; then
++	exit_fail
++fi
++done
+ 
+ hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+ misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
+@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
+ 
+ echo "Test event filter function address"
+ echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
++echo > trace
++sample_events
++max_retry=10
++while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
+ sample_events
++max_retry=$((max_retry - 1))
++if [ $max_retry -eq 0 ]; then
++	exit_fail
++fi
++done
+ 
+ hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+ misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+index 1f6981ef7afa0..ba19b81cef39a 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+@@ -30,7 +30,8 @@ find_dot_func() {
+ 	fi
+ 
+ 	grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
+-		if grep -s $f available_filter_functions; then
++		cnt=`grep -s $f available_filter_functions | wc -l`;
++		if [ $cnt -eq 1 ]; then
+ 			echo $f
+ 			break
+ 		fi
+diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+index 7f3ca5c78df12..215c6cb539b4a 100644
+--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
++++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+@@ -360,7 +360,7 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns)
+ 
+ int main(int argc, char *argv[])
+ {
+-	const char *test_name;
++	char *test_name;
+ 	int c, ret;
+ 
+ 	while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
+diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
+index 326a893647bab..e140558e6f53f 100644
+--- a/tools/testing/selftests/mm/compaction_test.c
++++ b/tools/testing/selftests/mm/compaction_test.c
+@@ -82,13 +82,16 @@ int prereq(void)
+ 	return -1;
+ }
+ 
+-int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
++int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
++		     unsigned long initial_nr_hugepages)
+ {
+ 	unsigned long nr_hugepages_ul;
+ 	int fd, ret = -1;
+ 	int compaction_index = 0;
+-	char initial_nr_hugepages[20] = {0};
+ 	char nr_hugepages[20] = {0};
++	char init_nr_hugepages[20] = {0};
++
++	sprintf(init_nr_hugepages, "%lu", initial_nr_hugepages);
+ 
+ 	/* We want to test with 80% of available memory. Else, OOM killer comes
+ 	   in to play */
+@@ -102,23 +105,6 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ 		goto out;
+ 	}
+ 
+-	if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
+-		ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
+-			       strerror(errno));
+-		goto close_fd;
+-	}
+-
+-	lseek(fd, 0, SEEK_SET);
+-
+-	/* Start with the initial condition of 0 huge pages*/
+-	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+-		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
+-			       strerror(errno));
+-		goto close_fd;
+-	}
+-
+-	lseek(fd, 0, SEEK_SET);
+-
+ 	/* Request a large number of huge pages. The Kernel will allocate
+ 	   as much as it can */
+ 	if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
+@@ -146,8 +132,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ 
+ 	lseek(fd, 0, SEEK_SET);
+ 
+-	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+-	    != strlen(initial_nr_hugepages)) {
++	if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages))
++	    != strlen(init_nr_hugepages)) {
+ 		ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
+ 			       strerror(errno));
+ 		goto close_fd;
+@@ -171,6 +157,41 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size)
+ 	return ret;
+ }
+ 
++int set_zero_hugepages(unsigned long *initial_nr_hugepages)
++{
++	int fd, ret = -1;
++	char nr_hugepages[20] = {0};
++
++	fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
++	if (fd < 0) {
++		ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
++		goto out;
++	}
++	if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
++		ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
++		goto close_fd;
++	}
++
++	lseek(fd, 0, SEEK_SET);
++
++	/* Start with the initial condition of 0 huge pages */
++	if (write(fd, "0", sizeof(char)) != sizeof(char)) {
++		ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
++			       strerror(errno));
++		goto close_fd;
++	}
++
++	*initial_nr_hugepages = strtoul(nr_hugepages, NULL, 10);
++	ret = 0;
++
++ close_fd:
++	close(fd);
++
++ out:
++	return ret;
++}
+ 
+ int main(int argc, char **argv)
+ {
+@@ -181,14 +202,19 @@ int main(int argc, char **argv)
+ 	unsigned long mem_free = 0;
+ 	unsigned long hugepage_size = 0;
+ 	long mem_fragmentable_MB = 0;
++	unsigned long initial_nr_hugepages;
+ 
+ 	ksft_print_header();
+ 
+ 	if (prereq() || geteuid())
+-		return ksft_exit_skip("Prerequisites unsatisfied\n");
++		ksft_exit_skip("Prerequisites unsatisfied\n");
+ 
+ 	ksft_set_plan(1);
+ 
++	/* Start the test without hugepages reducing mem_free */
++	if (set_zero_hugepages(&initial_nr_hugepages))
++		ksft_exit_fail();
++
+ 	lim.rlim_cur = RLIM_INFINITY;
+ 	lim.rlim_max = RLIM_INFINITY;
+ 	if (setrlimit(RLIMIT_MEMLOCK, &lim))
+@@ -232,8 +258,9 @@ int main(int argc, char **argv)
+ 		entry = entry->next;
+ 	}
+ 
+-	if (check_compaction(mem_free, hugepage_size) == 0)
+-		return ksft_exit_pass();
++	if (check_compaction(mem_free, hugepage_size,
++			     initial_nr_hugepages) == 0)
++		ksft_exit_pass();
+ 
+-	return ksft_exit_fail();
++	ksft_exit_fail();
+ }
+diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
+index 363bf5f801be5..fe078d6e18064 100644
+--- a/tools/testing/selftests/mm/cow.c
++++ b/tools/testing/selftests/mm/cow.c
+@@ -1779,5 +1779,5 @@ int main(int argc, char **argv)
+ 	if (err)
+ 		ksft_exit_fail_msg("%d out of %d tests failed\n",
+ 				   err, ksft_test_num());
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
+index ad168d35b23b7..d7eaca5bbe9b1 100644
+--- a/tools/testing/selftests/mm/gup_longterm.c
++++ b/tools/testing/selftests/mm/gup_longterm.c
+@@ -456,5 +456,5 @@ int main(int argc, char **argv)
+ 	if (err)
+ 		ksft_exit_fail_msg("%d out of %d tests failed\n",
+ 				   err, ksft_test_num());
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
+index 7821cf45c323b..bdeaac67ff9aa 100644
+--- a/tools/testing/selftests/mm/gup_test.c
++++ b/tools/testing/selftests/mm/gup_test.c
+@@ -229,7 +229,7 @@ int main(int argc, char **argv)
+ 			break;
+ 		}
+ 		ksft_test_result_skip("Please run this test as root\n");
+-		return ksft_exit_pass();
++		ksft_exit_pass();
+ 	}
+ 
+ 	p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
+@@ -268,5 +268,5 @@ int main(int argc, char **argv)
+ 
+ 	free(tid);
+ 
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
+index d615767e396be..508287560c455 100644
+--- a/tools/testing/selftests/mm/ksm_functional_tests.c
++++ b/tools/testing/selftests/mm/ksm_functional_tests.c
+@@ -646,5 +646,5 @@ int main(int argc, char **argv)
+ 	if (err)
+ 		ksft_exit_fail_msg("%d out of %d tests failed\n",
+ 				   err, ksft_test_num());
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
+index 17bcb07f19f34..ef7d911da13e0 100644
+--- a/tools/testing/selftests/mm/madv_populate.c
++++ b/tools/testing/selftests/mm/madv_populate.c
+@@ -307,5 +307,5 @@ int main(int argc, char **argv)
+ 	if (err)
+ 		ksft_exit_fail_msg("%d out of %d tests failed\n",
+ 				   err, ksft_test_num());
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
+index 301abb99e027e..b8a7efe9204ea 100644
+--- a/tools/testing/selftests/mm/mkdirty.c
++++ b/tools/testing/selftests/mm/mkdirty.c
+@@ -375,5 +375,5 @@ int main(void)
+ 	if (err)
+ 		ksft_exit_fail_msg("%d out of %d tests failed\n",
+ 				   err, ksft_test_num());
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
+index d59517ed3d48b..2d785aca72a5c 100644
+--- a/tools/testing/selftests/mm/pagemap_ioctl.c
++++ b/tools/testing/selftests/mm/pagemap_ioctl.c
+@@ -1484,7 +1484,7 @@ int main(int argc, char *argv[])
+ 	ksft_print_header();
+ 
+ 	if (init_uffd())
+-		return ksft_exit_pass();
++		ksft_exit_pass();
+ 
+ 	ksft_set_plan(115);
+ 
+@@ -1660,5 +1660,5 @@ int main(int argc, char *argv[])
+ 	userfaultfd_tests();
+ 
+ 	close(pagemap_fd);
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
+index 7dbfa53d93a05..d9dbf879748b2 100644
+--- a/tools/testing/selftests/mm/soft-dirty.c
++++ b/tools/testing/selftests/mm/soft-dirty.c
+@@ -209,5 +209,5 @@ int main(int argc, char **argv)
+ 
+ 	close(pagemap_fd);
+ 
+-	return ksft_exit_pass();
++	ksft_exit_pass();
+ }
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 1b5722e6166e5..2290125f6df40 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2358,9 +2358,10 @@ remove_tests()
+ 	if reset "remove invalid addresses"; then
+ 		pm_nl_set_limits $ns1 3 3
+ 		pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
++		# broadcast IP: no packet for this address will be received on ns1
++		pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
+ 		pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
+-		pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
+-		pm_nl_set_limits $ns2 3 3
++		pm_nl_set_limits $ns2 2 2
+ 		addr_nr_ns1=-3 speed=10 \
+ 			run_tests $ns1 $ns2 10.0.1.1
+ 		chk_join_nr 1 1 1
+diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
+index 7093fd5333beb..7bd80ee2a5b48 100644
+--- a/tools/tracing/rtla/src/timerlat_aa.c
++++ b/tools/tracing/rtla/src/timerlat_aa.c
+@@ -16,6 +16,9 @@ enum timelat_state {
+ 	TIMERLAT_WAITING_THREAD,
+ };
+ 
++/* Used to fill spaces in the output */
++static const char *spaces  = "                                                         ";
++
+ #define MAX_COMM		24
+ 
+ /*
+@@ -274,14 +277,17 @@ static int timerlat_aa_nmi_handler(struct trace_seq *s, struct tep_record *recor
+ 		taa_data->prev_irq_timstamp = start;
+ 
+ 		trace_seq_reset(taa_data->prev_irqs_seq);
+-		trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s	\t\t\t%9.2f us\n",
+-			 "nmi", ns_to_usf(duration));
++		trace_seq_printf(taa_data->prev_irqs_seq, "  %24s %.*s %9.2f us\n",
++				 "nmi",
++				 24, spaces,
++				 ns_to_usf(duration));
+ 		return 0;
+ 	}
+ 
+ 	taa_data->thread_nmi_sum += duration;
+-	trace_seq_printf(taa_data->nmi_seq, "	%24s	\t\t\t%9.2f us\n",
+-		 "nmi", ns_to_usf(duration));
++	trace_seq_printf(taa_data->nmi_seq, "  %24s %.*s %9.2f us\n",
++			 "nmi",
++			 24, spaces, ns_to_usf(duration));
+ 
+ 	return 0;
+ }
+@@ -323,8 +329,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
+ 		taa_data->prev_irq_timstamp = start;
+ 
+ 		trace_seq_reset(taa_data->prev_irqs_seq);
+-		trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s:%-3llu	\t\t%9.2f us\n",
+-				 desc, vector, ns_to_usf(duration));
++		trace_seq_printf(taa_data->prev_irqs_seq, "  %24s:%-3llu %.*s %9.2f us\n",
++				 desc, vector,
++				 15, spaces,
++				 ns_to_usf(duration));
+ 		return 0;
+ 	}
+ 
+@@ -372,8 +380,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
+ 	 * IRQ interference.
+ 	 */
+ 	taa_data->thread_irq_sum += duration;
+-	trace_seq_printf(taa_data->irqs_seq, "	%24s:%-3llu	\t	%9.2f us\n",
+-			 desc, vector, ns_to_usf(duration));
++	trace_seq_printf(taa_data->irqs_seq, "  %24s:%-3llu %.*s %9.2f us\n",
++			 desc, vector,
++			 24, spaces,
++			 ns_to_usf(duration));
+ 
+ 	return 0;
+ }
+@@ -408,8 +418,10 @@ static int timerlat_aa_softirq_handler(struct trace_seq *s, struct tep_record *r
+ 
+ 	taa_data->thread_softirq_sum += duration;
+ 
+-	trace_seq_printf(taa_data->softirqs_seq, "\t%24s:%-3llu	\t	%9.2f us\n",
+-			 softirq_name[vector], vector, ns_to_usf(duration));
++	trace_seq_printf(taa_data->softirqs_seq, "  %24s:%-3llu %.*s %9.2f us\n",
++			 softirq_name[vector], vector,
++			 24, spaces,
++			 ns_to_usf(duration));
+ 	return 0;
+ }
+ 
+@@ -452,8 +464,10 @@ static int timerlat_aa_thread_handler(struct trace_seq *s, struct tep_record *re
+ 	} else {
+ 		taa_data->thread_thread_sum += duration;
+ 
+-		trace_seq_printf(taa_data->threads_seq, "\t%24s:%-3llu	\t\t%9.2f us\n",
+-			 comm, pid, ns_to_usf(duration));
++		trace_seq_printf(taa_data->threads_seq, "  %24s:%-12llu %.*s %9.2f us\n",
++				 comm, pid,
++				 15, spaces,
++				 ns_to_usf(duration));
+ 	}
+ 
+ 	return 0;
+@@ -482,7 +496,8 @@ static int timerlat_aa_stack_handler(struct trace_seq *s, struct tep_record *rec
+ 			function = tep_find_function(taa_ctx->tool->trace.tep, caller[i]);
+ 			if (!function)
+ 				break;
+-			trace_seq_printf(taa_data->stack_seq, "\t\t-> %s\n", function);
++			trace_seq_printf(taa_data->stack_seq, " %.*s -> %s\n",
++					 14, spaces, function);
+ 		}
+ 	}
+ 	return 0;
+@@ -568,23 +583,24 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay;
+ 	if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) {
+ 		if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time)
+-			printf("  Previous IRQ interference:	\t\t up to  %9.2f us\n",
+-				ns_to_usf(taa_data->prev_irq_duration));
++			printf("  Previous IRQ interference: %.*s up to  %9.2f us\n",
++			       16, spaces,
++			       ns_to_usf(taa_data->prev_irq_duration));
+ 	}
+ 
+ 	/*
+ 	 * The delay that the IRQ suffered before starting.
+ 	 */
+-	printf("  IRQ handler delay:		%16s	%9.2f us (%.2f %%)\n",
+-		(ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
+-		ns_to_usf(taa_data->timer_irq_start_delay),
+-		ns_to_per(total, taa_data->timer_irq_start_delay));
++	printf("  IRQ handler delay: %.*s %16s  %9.2f us (%.2f %%)\n", 16, spaces,
++	       (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
++	       ns_to_usf(taa_data->timer_irq_start_delay),
++	       ns_to_per(total, taa_data->timer_irq_start_delay));
+ 
+ 	/*
+ 	 * Timerlat IRQ.
+ 	 */
+-	printf("  IRQ latency:	\t\t\t\t	%9.2f us\n",
+-		ns_to_usf(taa_data->tlat_irq_latency));
++	printf("  IRQ latency: %.*s %9.2f us\n", 40, spaces,
++	       ns_to_usf(taa_data->tlat_irq_latency));
+ 
+ 	if (irq) {
+ 		/*
+@@ -595,15 +611,16 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 		 * so it will be displayed, it is the key.
+ 		 */
+ 		printf("  Blocking thread:\n");
+-		printf("	%24s:%-9llu\n",
+-			taa_data->run_thread_comm, taa_data->run_thread_pid);
++		printf(" %.*s %24s:%-9llu\n", 6, spaces, taa_data->run_thread_comm,
++		       taa_data->run_thread_pid);
+ 	} else  {
+ 		/*
+ 		 * The duration of the IRQ handler that handled the timerlat IRQ.
+ 		 */
+-		printf("  Timerlat IRQ duration:	\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->timer_irq_duration),
+-			ns_to_per(total, taa_data->timer_irq_duration));
++		printf("  Timerlat IRQ duration: %.*s %9.2f us (%.2f %%)\n",
++		       30, spaces,
++		       ns_to_usf(taa_data->timer_irq_duration),
++		       ns_to_per(total, taa_data->timer_irq_duration));
+ 
+ 		/*
+ 		 * The amount of time that the current thread postponed the scheduler.
+@@ -611,13 +628,13 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 		 * Recalling that it is net from NMI/IRQ/Softirq interference, so there
+ 		 * is no need to compute values here.
+ 		 */
+-		printf("  Blocking thread:	\t\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->thread_blocking_duration),
+-			ns_to_per(total, taa_data->thread_blocking_duration));
++		printf("  Blocking thread: %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++		       ns_to_usf(taa_data->thread_blocking_duration),
++		       ns_to_per(total, taa_data->thread_blocking_duration));
+ 
+-		printf("	%24s:%-9llu		%9.2f us\n",
+-			taa_data->run_thread_comm, taa_data->run_thread_pid,
+-			ns_to_usf(taa_data->thread_blocking_duration));
++		printf(" %.*s %24s:%-9llu %.*s %9.2f us\n", 6, spaces,
++		       taa_data->run_thread_comm, taa_data->run_thread_pid,
++		       12, spaces, ns_to_usf(taa_data->thread_blocking_duration));
+ 	}
+ 
+ 	/*
+@@ -629,9 +646,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	 * NMIs can happen during the IRQ, so they are always possible.
+ 	 */
+ 	if (taa_data->thread_nmi_sum)
+-		printf("  NMI interference	\t\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->thread_nmi_sum),
+-			ns_to_per(total, taa_data->thread_nmi_sum));
++		printf("  NMI interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++		       ns_to_usf(taa_data->thread_nmi_sum),
++		       ns_to_per(total, taa_data->thread_nmi_sum));
+ 
+ 	/*
+ 	 * If it is an IRQ latency, the other factors can be skipped.
+@@ -643,9 +660,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	 * Prints the interference caused by IRQs to the thread latency.
+ 	 */
+ 	if (taa_data->thread_irq_sum) {
+-		printf("  IRQ interference	\t\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->thread_irq_sum),
+-			ns_to_per(total, taa_data->thread_irq_sum));
++		printf("  IRQ interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
++		       ns_to_usf(taa_data->thread_irq_sum),
++		       ns_to_per(total, taa_data->thread_irq_sum));
+ 
+ 		trace_seq_do_printf(taa_data->irqs_seq);
+ 	}
+@@ -654,9 +671,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	 * Prints the interference caused by Softirqs to the thread latency.
+ 	 */
+ 	if (taa_data->thread_softirq_sum) {
+-		printf("  Softirq interference	\t\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->thread_softirq_sum),
+-			ns_to_per(total, taa_data->thread_softirq_sum));
++		printf("  Softirq interference %.*s %9.2f us (%.2f %%)\n", 32, spaces,
++		       ns_to_usf(taa_data->thread_softirq_sum),
++		       ns_to_per(total, taa_data->thread_softirq_sum));
+ 
+ 		trace_seq_do_printf(taa_data->softirqs_seq);
+ 	}
+@@ -670,9 +687,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	 * timer handling latency.
+ 	 */
+ 	if (taa_data->thread_thread_sum) {
+-		printf("  Thread interference	\t\t\t	%9.2f us (%.2f %%)\n",
+-			ns_to_usf(taa_data->thread_thread_sum),
+-			ns_to_per(total, taa_data->thread_thread_sum));
++		printf("  Thread interference %.*s %9.2f us (%.2f %%)\n", 33, spaces,
++		       ns_to_usf(taa_data->thread_thread_sum),
++		       ns_to_per(total, taa_data->thread_thread_sum));
+ 
+ 		trace_seq_do_printf(taa_data->threads_seq);
+ 	}
+@@ -682,8 +699,8 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
+ 	 */
+ print_total:
+ 	printf("------------------------------------------------------------------------\n");
+-	printf("  %s latency:	\t\t\t	%9.2f us (100%%)\n", irq ? "IRQ" : "Thread",
+-		ns_to_usf(total));
++	printf("  %s latency: %.*s %9.2f us (100%%)\n", irq ? "   IRQ" : "Thread",
++	       37, spaces, ns_to_usf(total));
+ }
+ 
+ static int timerlat_auto_analysis_collect_trace(struct timerlat_aa_context *taa_ctx)
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 8a3fa64319c6d..2665e0bb5f1ee 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -212,6 +212,8 @@ static void timerlat_top_header(struct osnoise_tool *top)
+ 	trace_seq_printf(s, "\n");
+ }
+ 
++static const char *no_value = "        -";
++
+ /*
+  * timerlat_top_print - prints the output of a given CPU
+  */
+@@ -239,10 +241,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ 	trace_seq_printf(s, "%3d #%-9d |", cpu, cpu_data->irq_count);
+ 
+ 	if (!cpu_data->irq_count) {
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - |");
++		trace_seq_printf(s, "%s %s %s %s |", no_value, no_value, no_value, no_value);
+ 	} else {
+ 		trace_seq_printf(s, "%9llu ", cpu_data->cur_irq / params->output_divisor);
+ 		trace_seq_printf(s, "%9llu ", cpu_data->min_irq / params->output_divisor);
+@@ -251,10 +250,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ 	}
+ 
+ 	if (!cpu_data->thread_count) {
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        -\n");
++		trace_seq_printf(s, "%s %s %s %s", no_value, no_value, no_value, no_value);
+ 	} else {
+ 		trace_seq_printf(s, "%9llu ", cpu_data->cur_thread / divisor);
+ 		trace_seq_printf(s, "%9llu ", cpu_data->min_thread / divisor);
+@@ -271,10 +267,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
+ 	trace_seq_printf(s, " |");
+ 
+ 	if (!cpu_data->user_count) {
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        - ");
+-		trace_seq_printf(s, "        -\n");
++		trace_seq_printf(s, "%s %s %s %s\n", no_value, no_value, no_value, no_value);
+ 	} else {
+ 		trace_seq_printf(s, "%9llu ", cpu_data->cur_user / divisor);
+ 		trace_seq_printf(s, "%9llu ", cpu_data->min_user / divisor);



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-06-27 12:31 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-06-27 12:31 UTC (permalink / raw
  To: gentoo-commits

commit:     3122325dd9d49ca1ade622f019c91cbcd64ccf8f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 27 12:31:46 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 27 12:31:46 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3122325d

Linux patch 6.9.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     6 +-
 1006_linux-6.9.7.patch | 11106 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11111 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 9c96fb59..5ba3f04f 100644
--- a/0000_README
+++ b/0000_README
@@ -67,8 +67,12 @@ Patch:  1005_linux-6.9.6.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.6
 
+Patch:  1006_linux-6.9.7.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.7
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
-From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.
 
 Patch:  1700_sparc-address-warray-bound-warnings.patch

diff --git a/1006_linux-6.9.7.patch b/1006_linux-6.9.7.patch
new file mode 100644
index 00000000..a905db13
--- /dev/null
+++ b/1006_linux-6.9.7.patch
@@ -0,0 +1,11106 @@
+diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+index aa51d278cb67b..83e77ee4894ef 100644
+--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
++++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+@@ -48,8 +48,8 @@ properties:
+       - 3
+ 
+   dma-channels:
+-    minItems: 1
+-    maxItems: 64
++    minimum: 1
++    maximum: 64
+ 
+   clocks:
+     minItems: 1
+diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+index b1c13bab24722..b2d19cfb87add 100644
+--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
++++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+@@ -77,7 +77,7 @@ required:
+   - clocks
+ 
+ allOf:
+-  - $ref: i2c-controller.yaml
++  - $ref: /schemas/i2c/i2c-controller.yaml#
+   - if:
+       properties:
+         compatible:
+diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+index ab151c9db2191..580003cdfff59 100644
+--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
++++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+@@ -21,7 +21,7 @@ description: |
+   google,cros-ec-spi or google,cros-ec-i2c.
+ 
+ allOf:
+-  - $ref: i2c-controller.yaml#
++  - $ref: /schemas/i2c/i2c-controller.yaml#
+ 
+ properties:
+   compatible:
+diff --git a/Makefile b/Makefile
+index 8da63744745be..17dc3e55323e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
+index d804404464737..05d7a462ea25a 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
+@@ -85,7 +85,7 @@ led-user {
+ 		};
+ 	};
+ 
+-	panel {
++	panel_dpi: panel {
+ 		compatible = "sii,43wvf1g";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_display_power>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+index c84e9b0525276..151e9cee3c87e 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
++++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+@@ -10,8 +10,6 @@
+ /plugin/;
+ 
+ &{/} {
+-	/delete-node/ panel;
+-
+ 	hdmi: connector-hdmi {
+ 		compatible = "hdmi-connector";
+ 		label = "hdmi";
+@@ -82,6 +80,10 @@ sii9022_out: endpoint {
+ 	};
+ };
+ 
++&panel_dpi {
++	status = "disabled";
++};
++
+ &tve {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 6f0811587142d..1b1880c607cfe 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -6,6 +6,7 @@
+ #include <dt-bindings/phy/phy-imx8-pcie.h>
+ #include <dt-bindings/pwm/pwm.h>
+ #include "imx8mm.dtsi"
++#include "imx8mm-overdrive.dtsi"
+ 
+ / {
+ 	chosen {
+@@ -929,7 +930,7 @@ pinctrl_gpio8: gpio8grp {
+ 	/* Verdin GPIO_9_DSI (pulled-up as active-low) */
+ 	pinctrl_gpio_9_dsi: gpio9dsigrp {
+ 		fsl,pins =
+-			<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15		0x146>;	/* SODIMM 17 */
++			<MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15		0x1c6>;	/* SODIMM 17 */
+ 	};
+ 
+ 	/* Verdin GPIO_10_DSI (pulled-up as active-low) */
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+index 43f1d45ccc96f..f5115f9e8c473 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+@@ -254,7 +254,7 @@ tc_bridge: bridge@f {
+ 				  <&clk IMX8MP_CLK_CLKOUT2>,
+ 				  <&clk IMX8MP_AUDIO_PLL2_OUT>;
+ 		assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+-		assigned-clock-rates = <13000000>, <13000000>, <156000000>;
++		assigned-clock-rates = <13000000>, <13000000>, <208000000>;
+ 		reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ 		status = "disabled";
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+index f5491a608b2f3..3c063f8a9383e 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+@@ -183,7 +183,7 @@ &uart3 {
+ 
+ 	bluetooth {
+ 		compatible = "brcm,bcm4330-bt";
+-		shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
++		shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+index 77ac0efdfaada..c5585bfc4efe6 100644
+--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
++++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+@@ -36,7 +36,7 @@ reg_usdhc2_vmmc: usdhc2-vmmc {
+ 		regulator-name = "SD1_SPWR";
+ 		regulator-min-microvolt = <3000000>;
+ 		regulator-max-microvolt = <3000000>;
+-		gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
++		gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index 9921ea13ab489..a7cb571d74023 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -175,7 +175,6 @@ &usdhc2 {
+ 	vmmc-supply = <&reg_usdhc2_vmmc>;
+ 	bus-width = <4>;
+ 	status = "okay";
+-	no-sdio;
+ 	no-mmc;
+ };
+ 
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index 2c30d617e1802..8d39b863251b2 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1585,6 +1585,7 @@ CONFIG_INTERCONNECT_QCOM_SC8180X=y
+ CONFIG_INTERCONNECT_QCOM_SC8280XP=y
+ CONFIG_INTERCONNECT_QCOM_SDM845=y
+ CONFIG_INTERCONNECT_QCOM_SDX75=y
++CONFIG_INTERCONNECT_QCOM_SM6115=y
+ CONFIG_INTERCONNECT_QCOM_SM8150=m
+ CONFIG_INTERCONNECT_QCOM_SM8250=y
+ CONFIG_INTERCONNECT_QCOM_SM8350=m
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index 9e8999592f3af..af3b206fa4239 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -1036,18 +1036,18 @@
+  * Permission Indirection Extension (PIE) permission encodings.
+  * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
+  */
+-#define PIE_NONE_O	0x0
+-#define PIE_R_O		0x1
+-#define PIE_X_O		0x2
+-#define PIE_RX_O	0x3
+-#define PIE_RW_O	0x5
+-#define PIE_RWnX_O	0x6
+-#define PIE_RWX_O	0x7
+-#define PIE_R		0x8
+-#define PIE_GCS		0x9
+-#define PIE_RX		0xa
+-#define PIE_RW		0xc
+-#define PIE_RWX		0xe
++#define PIE_NONE_O	UL(0x0)
++#define PIE_R_O		UL(0x1)
++#define PIE_X_O		UL(0x2)
++#define PIE_RX_O	UL(0x3)
++#define PIE_RW_O	UL(0x5)
++#define PIE_RWnX_O	UL(0x6)
++#define PIE_RWX_O	UL(0x7)
++#define PIE_R		UL(0x8)
++#define PIE_GCS		UL(0x9)
++#define PIE_RX		UL(0xa)
++#define PIE_RW		UL(0xc)
++#define PIE_RWX		UL(0xe)
+ 
+ #define PIRx_ELx_PERM(idx, perm)	((perm) << ((idx) * 4))
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index f20941f83a077..ce3bcff34b09b 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -355,7 +355,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ 
+ 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ 		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
+-			vgic_v3_free_redist_region(rdreg);
++			vgic_v3_free_redist_region(kvm, rdreg);
+ 		INIT_LIST_HEAD(&dist->rd_regions);
+ 	} else {
+ 		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index c15ee1df036a2..dad60b1e21d4d 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -919,8 +919,19 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
+ 	return ret;
+ }
+ 
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ {
++	struct kvm_vcpu *vcpu;
++	unsigned long c;
++
++	lockdep_assert_held(&kvm->arch.config_lock);
++
++	/* Garbage collect the region */
++	kvm_for_each_vcpu(c, vcpu, kvm) {
++		if (vcpu->arch.vgic_cpu.rdreg == rdreg)
++			vcpu->arch.vgic_cpu.rdreg = NULL;
++	}
++
+ 	list_del(&rdreg->list);
+ 	kfree(rdreg);
+ }
+@@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+ 
+ 		mutex_lock(&kvm->arch.config_lock);
+ 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+-		vgic_v3_free_redist_region(rdreg);
++		vgic_v3_free_redist_region(kvm, rdreg);
+ 		mutex_unlock(&kvm->arch.config_lock);
+ 		return ret;
+ 	}
+diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
+index 0c2b82de8fa3c..08b4c09a0886d 100644
+--- a/arch/arm64/kvm/vgic/vgic.h
++++ b/arch/arm64/kvm/vgic/vgic.h
+@@ -317,7 +317,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ 
+ struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
+ 							   u32 index);
+-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
++void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
+ 
+ bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
+ 
+diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
+index 834cffcfbce32..7ba4b98076de1 100644
+--- a/arch/csky/kernel/probes/ftrace.c
++++ b/arch/csky/kernel/probes/ftrace.c
+@@ -12,6 +12,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe_ctlblk *kcb;
+ 	struct pt_regs *regs;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 54ad04dacdee9..d1b94ce58f793 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -138,7 +138,7 @@ config LOONGARCH
+ 	select HAVE_LIVEPATCH
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
+-	select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
++	select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
+ 	select HAVE_PCI
+ 	select HAVE_PERF_EVENTS
+ 	select HAVE_PERF_REGS
+@@ -257,6 +257,9 @@ config AS_HAS_EXPLICIT_RELOCS
+ config AS_HAS_FCSR_CLASS
+ 	def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
+ 
++config AS_HAS_THIN_ADD_SUB
++	def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
++
+ config AS_HAS_LSX_EXTENSION
+ 	def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
+ 
+diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
+index 98d60630c3d4b..8b2ce5b5d43e8 100644
+--- a/arch/loongarch/Kconfig.debug
++++ b/arch/loongarch/Kconfig.debug
+@@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
+ 
+ config UNWINDER_ORC
+ 	bool "ORC unwinder"
++	depends on HAVE_OBJTOOL
+ 	select OBJTOOL
+ 	help
+ 	  This option enables the ORC (Oops Rewind Capability) unwinder for
+diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
+index 21447fb1efc77..d78330916bd18 100644
+--- a/arch/loongarch/include/asm/hw_breakpoint.h
++++ b/arch/loongarch/include/asm/hw_breakpoint.h
+@@ -75,6 +75,8 @@ do {								\
+ #define CSR_MWPC_NUM		0x3f
+ 
+ #define CTRL_PLV_ENABLE		0x1e
++#define CTRL_PLV0_ENABLE	0x02
++#define CTRL_PLV3_ENABLE	0x10
+ 
+ #define MWPnCFG3_LoadEn		8
+ #define MWPnCFG3_StoreEn	9
+@@ -101,7 +103,7 @@ struct perf_event;
+ struct perf_event_attr;
+ 
+ extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+-				  int *gen_len, int *gen_type, int *offset);
++				  int *gen_len, int *gen_type);
+ extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+ extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+ 				    const struct perf_event_attr *attr,
+diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
+index 73858c9029cc9..bff058317062e 100644
+--- a/arch/loongarch/kernel/ftrace_dyn.c
++++ b/arch/loongarch/kernel/ftrace_dyn.c
+@@ -287,6 +287,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe *p;
+ 	struct kprobe_ctlblk *kcb;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
+index fc55c4de2a11f..621ad7634df71 100644
+--- a/arch/loongarch/kernel/hw_breakpoint.c
++++ b/arch/loongarch/kernel/hw_breakpoint.c
+@@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+ static int hw_breakpoint_control(struct perf_event *bp,
+ 				 enum hw_breakpoint_ops ops)
+ {
+-	u32 ctrl;
++	u32 ctrl, privilege;
+ 	int i, max_slots, enable;
++	struct pt_regs *regs;
+ 	struct perf_event **slots;
+ 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ 
++	if (arch_check_bp_in_kernelspace(info))
++		privilege = CTRL_PLV0_ENABLE;
++	else
++		privilege = CTRL_PLV3_ENABLE;
++
++	/* Whether bp belongs to a task. */
++	if (bp->hw.target)
++		regs = task_pt_regs(bp->hw.target);
++
+ 	if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ 		/* Breakpoint */
+ 		slots = this_cpu_ptr(bp_on_reg);
+@@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
+ 	switch (ops) {
+ 	case HW_BREAKPOINT_INSTALL:
+ 		/* Set the FWPnCFG/MWPnCFG 1~4 register. */
+-		write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+-		write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+-		write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+-		write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+-		write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+-		write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ 		if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+-			write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
++			write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
++			write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
++			write_wb_reg(CSR_CFG_ASID, i, 0, 0);
++			write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
+ 		} else {
++			write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
++			write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
++			write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ 			ctrl = encode_ctrl_reg(info->ctrl);
+-			write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
++			write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
+ 		}
+ 		enable = csr_read64(LOONGARCH_CSR_CRMD);
+ 		csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
++		if (bp->hw.target)
++			regs->csr_prmd |= CSR_PRMD_PWE;
+ 		break;
+ 	case HW_BREAKPOINT_UNINSTALL:
+ 		/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
+-		write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+-		write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+-		write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+-		write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+-		write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+-		write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+-		write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+-		write_wb_reg(CSR_CFG_ASID, i, 1, 0);
++		if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
++			write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
++			write_wb_reg(CSR_CFG_MASK, i, 0, 0);
++			write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
++			write_wb_reg(CSR_CFG_ASID, i, 0, 0);
++		} else {
++			write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
++			write_wb_reg(CSR_CFG_MASK, i, 1, 0);
++			write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
++			write_wb_reg(CSR_CFG_ASID, i, 1, 0);
++		}
++		if (bp->hw.target)
++			regs->csr_prmd &= ~CSR_PRMD_PWE;
+ 		break;
+ 	}
+ 
+@@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+  * to generic breakpoint descriptions.
+  */
+ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+-			   int *gen_len, int *gen_type, int *offset)
++			   int *gen_len, int *gen_type)
+ {
+ 	/* Type */
+ 	switch (ctrl.type) {
+@@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!ctrl.len)
+-		return -EINVAL;
+-
+-	*offset = __ffs(ctrl.len);
+-
+ 	/* Len */
+ 	switch (ctrl.len) {
+ 	case LOONGARCH_BREAKPOINT_LEN_1:
+@@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ 			     struct arch_hw_breakpoint *hw)
+ {
+ 	int ret;
+-	u64 alignment_mask, offset;
++	u64 alignment_mask;
+ 
+ 	/* Build the arch_hw_breakpoint. */
+ 	ret = arch_build_bp_info(bp, attr, hw);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
+-		alignment_mask = 0x7;
+-	else
++	if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ 		alignment_mask = 0x3;
+-	offset = hw->address & alignment_mask;
+-
+-	hw->address &= ~alignment_mask;
+-	hw->ctrl.len <<= offset;
++		hw->address &= ~alignment_mask;
++	}
+ 
+ 	return 0;
+ }
+@@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
+ 	slots = this_cpu_ptr(bp_on_reg);
+ 
+ 	for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
+-		bp = slots[i];
+-		if (bp == NULL)
+-			continue;
+-		perf_bp_event(bp, regs);
++		if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
++			bp = slots[i];
++			if (bp == NULL)
++				continue;
++			perf_bp_event(bp, regs);
++			csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
++			update_bp_registers(regs, 0, 0);
++		}
+ 	}
+-	update_bp_registers(regs, 0, 0);
+ }
+ NOKPROBE_SYMBOL(breakpoint_handler);
+ 
+@@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
+ 	slots = this_cpu_ptr(wp_on_reg);
+ 
+ 	for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
+-		wp = slots[i];
+-		if (wp == NULL)
+-			continue;
+-		perf_bp_event(wp, regs);
++		if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
++			wp = slots[i];
++			if (wp == NULL)
++				continue;
++			perf_bp_event(wp, regs);
++			csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
++			update_bp_registers(regs, 0, 1);
++		}
+ 	}
+-	update_bp_registers(regs, 0, 1);
+ }
+ NOKPROBE_SYMBOL(watchpoint_handler);
+ 
+diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
+index c114c5ef13325..200109de1971a 100644
+--- a/arch/loongarch/kernel/ptrace.c
++++ b/arch/loongarch/kernel/ptrace.c
+@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ 				     struct arch_hw_breakpoint_ctrl ctrl,
+ 				     struct perf_event_attr *attr)
+ {
+-	int err, len, type, offset;
++	int err, len, type;
+ 
+-	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
++	err = arch_bp_generic_fields(ctrl, &len, &type);
+ 	if (err)
+ 		return err;
+ 
+-	switch (note_type) {
+-	case NT_LOONGARCH_HW_BREAK:
+-		if ((type & HW_BREAKPOINT_X) != type)
+-			return -EINVAL;
+-		break;
+-	case NT_LOONGARCH_HW_WATCH:
+-		if ((type & HW_BREAKPOINT_RW) != type)
+-			return -EINVAL;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+ 	attr->bp_len	= len;
+ 	attr->bp_type	= type;
+-	attr->bp_addr	+= offset;
+ 
+ 	return 0;
+ }
+@@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
+ 		return PTR_ERR(bp);
+ 
+ 	attr = bp->attr;
+-	decode_ctrl_reg(uctrl, &ctrl);
+-	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+-	if (err)
+-		return err;
++
++	switch (note_type) {
++	case NT_LOONGARCH_HW_BREAK:
++		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
++		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
++		break;
++	case NT_LOONGARCH_HW_WATCH:
++		decode_ctrl_reg(uctrl, &ctrl);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (uctrl & CTRL_PLV_ENABLE) {
++		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
++		if (err)
++			return err;
++		attr.disabled = 0;
++	} else {
++		attr.disabled = 1;
++	}
+ 
+ 	return modify_user_hw_breakpoint(bp, &attr);
+ }
+@@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
+ 	struct perf_event *bp;
+ 	struct perf_event_attr attr;
+ 
++	/* Kernel-space address cannot be monitored by user-space */
++	if ((unsigned long)addr >= XKPRANGE)
++		return -EINVAL;
++
+ 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ 	if (IS_ERR(bp))
+ 		return PTR_ERR(bp);
+diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
+index ec180ab92eaa8..66a8ba19c2872 100644
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
+ 	 * RAC flush causes kernel panics on BCM6358 when booting from TP1
+ 	 * because the bootloader is not initializing it properly.
+ 	 */
+-	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
++	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
++				  !!BMIPS_GET_CBR();
+ }
+ 
+ static void bcm6368_quirks(void)
+diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
+index 30e86861c206c..b1ee3c48e84ba 100644
+--- a/arch/mips/include/asm/mipsmtregs.h
++++ b/arch/mips/include/asm/mipsmtregs.h
+@@ -322,7 +322,7 @@ static inline void ehb(void)
+ 	"	.set	push				\n"	\
+ 	"	.set	"MIPS_ISA_LEVEL"		\n"	\
+ 	_ASM_SET_MFTC0							\
+-	"	mftc0	$1, " #rt ", " #sel "		\n"	\
++	"	mftc0	%0, " #rt ", " #sel "		\n"	\
+ 	_ASM_UNSET_MFTC0						\
+ 	"	.set	pop				\n"	\
+ 	: "=r" (__res));						\
+diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
+index 874ed6df97683..34b9323bdabb0 100644
+--- a/arch/mips/pci/ops-rc32434.c
++++ b/arch/mips/pci/ops-rc32434.c
+@@ -112,8 +112,8 @@ static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ 	 * gives them time to settle
+ 	 */
+ 	if (where == PCI_VENDOR_ID) {
+-		if (ret == 0xffffffff || ret == 0x00000000 ||
+-		    ret == 0x0000ffff || ret == 0xffff0000) {
++		if (*val == 0xffffffff || *val == 0x00000000 ||
++		    *val == 0x0000ffff || *val == 0xffff0000) {
+ 			if (delay > 4)
+ 				return 0;
+ 			delay *= 2;
+diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
+old mode 100644
+new mode 100755
+index 2583e318e8c6b..b080c7c6cc463
+--- a/arch/mips/pci/pcie-octeon.c
++++ b/arch/mips/pci/pcie-octeon.c
+@@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
+ {
+ 	union cvmx_pcie_address pcie_addr;
+ 	union cvmx_pciercx_cfg006 pciercx_cfg006;
++	union cvmx_pciercx_cfg032 pciercx_cfg032;
+ 
+ 	pciercx_cfg006.u32 =
+ 	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
+ 	if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
+ 		return 0;
+ 
++	pciercx_cfg032.u32 =
++		cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
++	if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
++		return 0;
++
+ 	pcie_addr.u64 = 0;
+ 	pcie_addr.config.upper = 2;
+ 	pcie_addr.config.io = 1;
+diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
+index 621a4b386ae4f..c91f9c2e61ed2 100644
+--- a/arch/parisc/kernel/ftrace.c
++++ b/arch/parisc/kernel/ftrace.c
+@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe *p;
+ 	int bit;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
+index e1094f08f713a..e9fe73aac8b61 100644
+--- a/arch/powerpc/crypto/.gitignore
++++ b/arch/powerpc/crypto/.gitignore
+@@ -1,3 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ aesp10-ppc.S
++aesp8-ppc.S
+ ghashp10-ppc.S
++ghashp8-ppc.S
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 51172625fa3a5..7a8495660c2f8 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -524,7 +524,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+  * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
+  */
+ #define PLPAR_HCALL_BUFSIZE 4
+-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+ 
+ /**
+  * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+@@ -538,7 +538,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+  * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+  * calculate hypervisor call statistics.
+  */
+-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
+ 
+ /**
+  * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+@@ -549,8 +549,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+  * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+  */
+ #define PLPAR_HCALL9_BUFSIZE 9
+-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
++long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
++long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+ 
+ /* pseries hcall tracing */
+ extern struct static_key hcall_tracepoint_key;
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index 08c550ed49be6..ba2e13bb879dc 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -585,12 +585,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_inw(port)		_rec_inw(port)
+ #define __do_inl(port)		_rec_inl(port)
+ #else /* CONFIG_PPC32 */
+-#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+-#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port);
+-#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port);
++#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
++#define __do_inb(port)		readb((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inw(port)		readw((PCI_IO_ADDR)(_IO_BASE + port));
++#define __do_inl(port)		readl((PCI_IO_ADDR)(_IO_BASE + port));
+ #endif /* !CONFIG_PPC32 */
+ 
+ #ifdef CONFIG_EEH
+@@ -606,12 +606,12 @@ __do_out_asm(_rec_outl, "stwbrx")
+ #define __do_writesw(a, b, n)	_outsw(PCI_FIX_ADDR(a),(b),(n))
+ #define __do_writesl(a, b, n)	_outsl(PCI_FIX_ADDR(a),(b),(n))
+ 
+-#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+-#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+-#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
++#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
++#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
++#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+ 
+ #define __do_memset_io(addr, c, n)	\
+ 				_memset_io(PCI_FIX_ADDR(addr), c, n)
+diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
+index 072ebe7f290ba..f8208c027148f 100644
+--- a/arch/powerpc/kernel/kprobes-ftrace.c
++++ b/arch/powerpc/kernel/kprobes-ftrace.c
+@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+ 	struct pt_regs *regs;
+ 	int bit;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(nip, parent_nip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
+index 7142ec42e889f..a69dfa610aa85 100644
+--- a/arch/riscv/kernel/probes/ftrace.c
++++ b/arch/riscv/kernel/probes/ftrace.c
+@@ -11,6 +11,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe_ctlblk *kcb;
+ 	int bit;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
+index c46381ea04ecb..7f6f8c438c265 100644
+--- a/arch/s390/kernel/ftrace.c
++++ b/arch/s390/kernel/ftrace.c
+@@ -296,6 +296,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe *p;
+ 	int bit;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
+index eb8fcede9e3bf..e8e3dbe7f1730 100644
+--- a/arch/x86/include/asm/cpu_device_id.h
++++ b/arch/x86/include/asm/cpu_device_id.h
+@@ -2,6 +2,39 @@
+ #ifndef _ASM_X86_CPU_DEVICE_ID
+ #define _ASM_X86_CPU_DEVICE_ID
+ 
++/*
++ * Can't use <linux/bitfield.h> because it generates expressions that
++ * cannot be used in structure initializers. Bitfield construction
++ * here must match the union in struct cpuinfo_x86:
++ *	union {
++ *		struct {
++ *			__u8	x86_model;
++ *			__u8	x86;
++ *			__u8	x86_vendor;
++ *			__u8	x86_reserved;
++ *		};
++ *		__u32		x86_vfm;
++ *	};
++ */
++#define VFM_MODEL_BIT	0
++#define VFM_FAMILY_BIT	8
++#define VFM_VENDOR_BIT	16
++#define VFM_RSVD_BIT	24
++
++#define	VFM_MODEL_MASK	GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
++#define	VFM_FAMILY_MASK	GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
++#define	VFM_VENDOR_MASK	GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
++
++#define VFM_MODEL(vfm)	(((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
++#define VFM_FAMILY(vfm)	(((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
++#define VFM_VENDOR(vfm)	(((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
++
++#define	VFM_MAKE(_vendor, _family, _model) (	\
++	((_model) << VFM_MODEL_BIT) |		\
++	((_family) << VFM_FAMILY_BIT) |		\
++	((_vendor) << VFM_VENDOR_BIT)		\
++)
++
+ /*
+  * Declare drivers belonging to specific x86 CPUs
+  * Similar in spirit to pci_device_id and related PCI functions
+@@ -20,6 +53,9 @@
+ #define X86_CENTAUR_FAM6_C7_D		0xd
+ #define X86_CENTAUR_FAM6_NANO		0xf
+ 
++/* x86_cpu_id::flags */
++#define X86_CPU_ID_FLAG_ENTRY_VALID	BIT(0)
++
+ #define X86_STEPPINGS(mins, maxs)    GENMASK(maxs, mins)
+ /**
+  * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+@@ -46,6 +82,18 @@
+ 	.model		= _model,					\
+ 	.steppings	= _steppings,					\
+ 	.feature	= _feature,					\
++	.flags		= X86_CPU_ID_FLAG_ENTRY_VALID,			\
++	.driver_data	= (unsigned long) _data				\
++}
++
++#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
++						    _steppings, _feature, _data) { \
++	.vendor		= _vendor,					\
++	.family		= _family,					\
++	.model		= _model,					\
++	.steppings	= _steppings,					\
++	.feature	= _feature,					\
++	.flags		= X86_CPU_ID_FLAG_ENTRY_VALID,			\
+ 	.driver_data	= (unsigned long) _data				\
+ }
+ 
+@@ -164,6 +212,56 @@
+ 	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ 						     steppings, X86_FEATURE_ANY, data)
+ 
++/**
++ * X86_MATCH_VFM - Match encoded vendor/family/model
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * Stepping and feature are set to wildcards
++ */
++#define X86_MATCH_VFM(vfm, data)			\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		X86_STEPPING_ANY, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @steppings:	Bitmask of steppings to match
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * feature is set to wildcard
++ */
++#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data)	\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		steppings, X86_FEATURE_ANY, data)
++
++/**
++ * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
++ * @vfm:	Encoded 8-bits each for vendor, family, model
++ * @feature:	A X86_FEATURE bit
++ * @data:	Driver specific data or NULL. The internal storage
++ *		format is unsigned long. The supplied value, pointer
++ *		etc. is cast to unsigned long internally.
++ *
++ * Steppings is set to wildcard
++ */
++#define X86_MATCH_VFM_FEATURE(vfm, feature, data)	\
++	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(	\
++		VFM_VENDOR(vfm),			\
++		VFM_FAMILY(vfm),			\
++		VFM_MODEL(vfm),				\
++		X86_STEPPING_ANY, feature, data)
++
+ /*
+  * Match specific microcode revisions.
+  *
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 1dc600fa3ba53..481096177500e 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
+ 				   struct efi_memory_map_data *data);
+ extern void __efi_memmap_free(u64 phys, unsigned long size,
+ 			      unsigned long flags);
+-#define __efi_memmap_free __efi_memmap_free
+ 
+ extern int __init efi_memmap_install(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
+index ad6776081e60d..ae71b8ef909c9 100644
+--- a/arch/x86/kernel/cpu/match.c
++++ b/arch/x86/kernel/cpu/match.c
+@@ -39,9 +39,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
+ 	const struct x86_cpu_id *m;
+ 	struct cpuinfo_x86 *c = &boot_cpu_data;
+ 
+-	for (m = match;
+-	     m->vendor | m->family | m->model | m->steppings | m->feature;
+-	     m++) {
++	for (m = match; m->flags & X86_CPU_ID_FLAG_ENTRY_VALID; m++) {
+ 		if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
+ 			continue;
+ 		if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index c34a35ec0f031..2ce5f4913c820 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -508,7 +508,8 @@ void free_rmid(u32 closid, u32 rmid)
+ 	 * allows architectures that ignore the closid parameter to avoid an
+ 	 * unnecessary check.
+ 	 */
+-	if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
++	if (!resctrl_arch_mon_capable() ||
++	    idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
+ 						RESCTRL_RESERVED_RMID))
+ 		return;
+ 
+diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
+index dd2ec14adb77b..15af7e98e161a 100644
+--- a/arch/x86/kernel/kprobes/ftrace.c
++++ b/arch/x86/kernel/kprobes/ftrace.c
+@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 	struct kprobe_ctlblk *kcb;
+ 	int bit;
+ 
++	if (unlikely(kprobe_ftrace_disabled))
++		return;
++
+ 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ 	if (bit < 0)
+ 		return;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 91478b769af08..4dbd9d99f9ea0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10677,13 +10677,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
+ 
+ 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ 
++	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
++
+ 	if (irqchip_split(vcpu->kvm))
+ 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
+-	else {
+-		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+-		if (ioapic_in_kernel(vcpu->kvm))
+-			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+-	}
++	else if (ioapic_in_kernel(vcpu->kvm))
++		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+ 
+ 	if (is_guest_mode(vcpu))
+ 		vcpu->arch.load_eoi_exitmap_pending = true;
+diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
+index 4ef20b49eb5e7..6ed1935504b96 100644
+--- a/arch/x86/platform/efi/memmap.c
++++ b/arch/x86/platform/efi/memmap.c
+@@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
+  */
+ int __init efi_memmap_install(struct efi_memory_map_data *data)
+ {
++	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
++	unsigned long flags = efi.memmap.flags;
++	u64 phys = efi.memmap.phys_map;
++	int ret;
++
+ 	efi_memmap_unmap();
+ 
+ 	if (efi_enabled(EFI_PARAVIRT))
+ 		return 0;
+ 
+-	return __efi_memmap_init(data);
++	ret = __efi_memmap_init(data);
++	if (ret)
++		return ret;
++
++	__efi_memmap_free(phys, size, flags);
++	return 0;
+ }
+ 
+ /**
+diff --git a/block/ioctl.c b/block/ioctl.c
+index f505f9c341eb0..2639ce9df3852 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -33,7 +33,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
+ 	if (op == BLKPG_DEL_PARTITION)
+ 		return bdev_del_partition(disk, p.pno);
+ 
+-	if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
++	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
+ 		return -EINVAL;
+ 	/* Check that the partition is aligned to the block size */
+ 	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
+diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
+index ddd072cbc738d..2133085deda77 100644
+--- a/drivers/acpi/acpica/acevents.h
++++ b/drivers/acpi/acpica/acevents.h
+@@ -191,6 +191,10 @@ void
+ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ 			    acpi_adr_space_type space_id, u32 function);
+ 
++void
++acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
++				  acpi_adr_space_type space_id);
++
+ acpi_status
+ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
+ 
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index 18fdf2bc2d499..dc6004daf624b 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
+ 
+ /* Local prototypes */
+ 
+-static void
+-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+-				  acpi_adr_space_type space_id);
+-
+ static acpi_status
+ acpi_ev_reg_run(acpi_handle obj_handle,
+ 		u32 level, void *context, void **return_value);
+@@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
+  *
+  ******************************************************************************/
+ 
+-static void
++void
+ acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
+ 				  acpi_adr_space_type space_id)
+ {
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index 3197e6303c5b0..624361a5f34d8 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
+ }
+ 
+ ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
++
++/*******************************************************************************
++ *
++ * FUNCTION:    acpi_execute_orphan_reg_method
++ *
++ * PARAMETERS:  device          - Handle for the device
++ *              space_id        - The address space ID
++ *
++ * RETURN:      Status
++ *
++ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
++ *              device. This is a _REG method that has no corresponding region
++ *              within the device's scope.
++ *
++ ******************************************************************************/
++acpi_status
++acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
++{
++	struct acpi_namespace_node *node;
++	acpi_status status;
++
++	ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
++
++	/* Parameter validation */
++
++	if (!device) {
++		return_ACPI_STATUS(AE_BAD_PARAMETER);
++	}
++
++	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
++	if (ACPI_FAILURE(status)) {
++		return_ACPI_STATUS(status);
++	}
++
++	/* Convert and validate the device handle */
++
++	node = acpi_ns_validate_handle(device);
++	if (node) {
++
++		/*
++		 * If an "orphan" _REG method is present in the device's scope
++		 * for the given address space ID, run it.
++		 */
++
++		acpi_ev_execute_orphan_reg_method(node, space_id);
++	} else {
++		status = AE_BAD_PARAMETER;
++	}
++
++	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
++	return_ACPI_STATUS(status);
++}
++
++ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 8907b8bf42672..c49b9f8de723d 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 	struct acpi_mem_mapping *mm = mem_info->cur_mm;
+ 	u32 length;
+ 	acpi_size map_length;
+-	acpi_size page_boundary_map_length;
+ #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ 	u32 remainder;
+ #endif
+@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
+ 		map_length = (acpi_size)
+ 		    ((mem_info->address + mem_info->length) - address);
+ 
+-		/*
+-		 * If mapping the entire remaining portion of the region will cross
+-		 * a page boundary, just map up to the page boundary, do not cross.
+-		 * On some systems, crossing a page boundary while mapping regions
+-		 * can cause warnings if the pages have different attributes
+-		 * due to resource management.
+-		 *
+-		 * This has the added benefit of constraining a single mapping to
+-		 * one page, which is similar to the original code that used a 4k
+-		 * maximum window.
+-		 */
+-		page_boundary_map_length = (acpi_size)
+-		    (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
+-		if (page_boundary_map_length == 0) {
+-			page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
+-		}
+-
+-		if (map_length > page_boundary_map_length) {
+-			map_length = page_boundary_map_length;
+-		}
++		if (map_length > ACPI_DEFAULT_PAGE_SIZE)
++			map_length = ACPI_DEFAULT_PAGE_SIZE;
+ 
+ 		/* Create a new mapping starting at the address given */
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 02255795b800d..1cec29ab64ce8 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1482,13 +1482,14 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
+ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ 			       bool call_reg)
+ {
++	acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+ 	acpi_status status;
+ 
+ 	acpi_ec_start(ec, false);
+ 
+ 	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ 		acpi_ec_enter_noirq(ec);
+-		status = acpi_install_address_space_handler_no_reg(ec->handle,
++		status = acpi_install_address_space_handler_no_reg(scope_handle,
+ 								   ACPI_ADR_SPACE_EC,
+ 								   &acpi_ec_space_handler,
+ 								   NULL, ec);
+@@ -1497,11 +1498,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ 			return -ENODEV;
+ 		}
+ 		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+-		ec->address_space_handler_holder = ec->handle;
+ 	}
+ 
+ 	if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
+-		acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
++		acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
++		if (scope_handle != ec->handle)
++			acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
++
+ 		set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
+ 	}
+ 
+@@ -1553,10 +1556,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ 
+ static void ec_remove_handlers(struct acpi_ec *ec)
+ {
++	acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
++
+ 	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ 		if (ACPI_FAILURE(acpi_remove_address_space_handler(
+-					ec->address_space_handler_holder,
+-					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
++						scope_handle,
++						ACPI_ADR_SPACE_EC,
++						&acpi_ec_space_handler)))
+ 			pr_err("failed to remove space handler\n");
+ 		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ 	}
+@@ -1595,14 +1601,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca
+ {
+ 	int ret;
+ 
+-	ret = ec_install_handlers(ec, device, call_reg);
+-	if (ret)
+-		return ret;
+-
+ 	/* First EC capable of handling transactions */
+ 	if (!first_ec)
+ 		first_ec = ec;
+ 
++	ret = ec_install_handlers(ec, device, call_reg);
++	if (ret) {
++		if (ec == first_ec)
++			first_ec = NULL;
++
++		return ret;
++	}
++
+ 	pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
+ 		ec->data_addr);
+ 
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index ca72a0dc57151..a96d1bc662a37 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -185,7 +185,6 @@ enum acpi_ec_event_state {
+ 
+ struct acpi_ec {
+ 	acpi_handle handle;
+-	acpi_handle address_space_handler_holder;
+ 	int gpe;
+ 	int irq;
+ 	unsigned long command_addr;
+@@ -302,6 +301,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
+ void acpi_mipi_scan_crs_csi2(void);
+ void acpi_mipi_init_crs_csi2_swnodes(void);
+ void acpi_mipi_crs_csi2_cleanup(void);
++#ifdef CONFIG_X86
+ bool acpi_graph_ignore_port(acpi_handle handle);
++#else
++static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
++#endif
+ 
+ #endif /* _ACPI_INTERNAL_H_ */
+diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
+index d05413a0672a9..0ab13751f0dbc 100644
+--- a/drivers/acpi/mipi-disco-img.c
++++ b/drivers/acpi/mipi-disco-img.c
+@@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
+ 		acpi_mipi_del_crs_csi2(csi2);
+ }
+ 
+-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
+-	{
+-		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
+-		},
+-	},
+-	{ }
++#ifdef CONFIG_X86
++#include <asm/cpu_device_id.h>
++#include <asm/intel-family.h>
++
++/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
++static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
++	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
++	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
++	{}
+ };
+ 
+ static const char *strnext(const char *s1, const char *s2)
+@@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
+ 	static bool dmi_tested, ignore_port;
+ 
+ 	if (!dmi_tested) {
+-		ignore_port = dmi_first_match(dmi_ignore_port_nodes);
++		if (dmi_name_in_vendors("Dell Inc.") &&
++		    x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
++			ignore_port = true;
++
+ 		dmi_tested = true;
+ 	}
+ 
+@@ -794,3 +803,4 @@ bool acpi_graph_ignore_port(acpi_handle handle)
+ 	kfree(orig_path);
+ 	return false;
+ }
++#endif
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 6cc8572759a3d..b5bf8b81a050a 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -517,6 +517,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"),
+ 		},
+ 	},
++	{
++		/* Asus Vivobook Pro N6506MV */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
++		},
++	},
+ 	{
+ 		/* LG Electronics 17U70P */
+ 		.matches = {
+@@ -533,6 +540,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
+  * to have a working keyboard.
+  */
+ static const struct dmi_system_id irq1_edge_low_force_override[] = {
++	{
++		/* XMG APEX 17 (M23) */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
++		},
++	},
+ 	{
+ 		/* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */
+ 		.matches = {
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 9fdcc620c6524..2cc3821b2b16e 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -497,6 +497,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "82BK"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Lenovo Slim 7 16ARH7 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "82UX"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Lenovo ThinkPad X131e (3371 AMD version) */
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 7507a7706898c..448e0d14fd7bd 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -256,9 +256,10 @@ bool force_storage_d3(void)
+ #define ACPI_QUIRK_SKIP_I2C_CLIENTS				BIT(0)
+ #define ACPI_QUIRK_UART1_SKIP					BIT(1)
+ #define ACPI_QUIRK_UART1_TTY_UART2_SKIP				BIT(2)
+-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY			BIT(3)
+-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY			BIT(4)
+-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS			BIT(5)
++#define ACPI_QUIRK_PNP_UART1_SKIP				BIT(3)
++#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY			BIT(4)
++#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY			BIT(5)
++#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS			BIT(6)
+ 
+ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 	/*
+@@ -338,6 +339,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_PNP_UART1_SKIP |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
+ 	{
+@@ -436,14 +438,18 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
+ 	if (ret)
+ 		return 0;
+ 
+-	/* to not match on PNP enumerated debug UARTs */
+-	if (!dev_is_platform(controller_parent))
+-		return 0;
+-
+ 	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ 	if (dmi_id)
+ 		quirks = (unsigned long)dmi_id->driver_data;
+ 
++	if (!dev_is_platform(controller_parent)) {
++		/* PNP enumerated UARTs */
++		if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
++			*skip = true;
++
++		return 0;
++	}
++
+ 	if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
+ 		*skip = true;
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 07d66d2c5f0dd..5eb38fbbbecdb 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1735,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
+ 	if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ 		return;
+ 
++	/* If no LPM states are supported by the HBA, do not bother with LPM */
++	if ((ap->host->flags & ATA_HOST_NO_PART) &&
++	    (ap->host->flags & ATA_HOST_NO_SSC) &&
++	    (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
++		ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
++		return;
++	}
++
+ 	/* user modified policy via module param */
+ 	if (mobile_lpm_policy != -1) {
+ 		policy = mobile_lpm_policy;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 9d4ec9273bf95..1ddd3e5497896 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -588,7 +588,10 @@ static inline int was_interrupted(int result)
+ 	return result == -ERESTARTSYS || result == -EINTR;
+ }
+ 
+-/* always call with the tx_lock held */
++/*
++ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
++ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
++ */
+ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ {
+ 	struct request *req = blk_mq_rq_from_pdu(cmd);
+@@ -605,6 +608,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 	u32 nbd_cmd_flags = 0;
+ 	int sent = nsock->sent, skip = 0;
+ 
++	lockdep_assert_held(&cmd->lock);
++	lockdep_assert_held(&nsock->tx_lock);
++
+ 	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
+ 
+ 	type = req_to_nbd_cmd_type(req);
+@@ -669,7 +675,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 				nsock->sent = sent;
+ 			}
+ 			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+-			return BLK_STS_RESOURCE;
++			return (__force int)BLK_STS_RESOURCE;
+ 		}
+ 		dev_err_ratelimited(disk_to_dev(nbd->disk),
+ 			"Send control failed (result %d)\n", result);
+@@ -710,7 +716,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 					nsock->pending = req;
+ 					nsock->sent = sent;
+ 					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
+-					return BLK_STS_RESOURCE;
++					return (__force int)BLK_STS_RESOURCE;
+ 				}
+ 				dev_err(disk_to_dev(nbd->disk),
+ 					"Send data failed (result %d)\n",
+@@ -1007,7 +1013,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
+ 	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
+ }
+ 
+-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
++static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ {
+ 	struct request *req = blk_mq_rq_from_pdu(cmd);
+ 	struct nbd_device *nbd = cmd->nbd;
+@@ -1015,18 +1021,20 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ 	struct nbd_sock *nsock;
+ 	int ret;
+ 
++	lockdep_assert_held(&cmd->lock);
++
+ 	config = nbd_get_config_unlocked(nbd);
+ 	if (!config) {
+ 		dev_err_ratelimited(disk_to_dev(nbd->disk),
+ 				    "Socks array is empty\n");
+-		return -EINVAL;
++		return BLK_STS_IOERR;
+ 	}
+ 
+ 	if (index >= config->num_connections) {
+ 		dev_err_ratelimited(disk_to_dev(nbd->disk),
+ 				    "Attempted send on invalid socket\n");
+ 		nbd_config_put(nbd);
+-		return -EINVAL;
++		return BLK_STS_IOERR;
+ 	}
+ 	cmd->status = BLK_STS_OK;
+ again:
+@@ -1049,7 +1057,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ 			 */
+ 			sock_shutdown(nbd);
+ 			nbd_config_put(nbd);
+-			return -EIO;
++			return BLK_STS_IOERR;
+ 		}
+ 		goto again;
+ 	}
+@@ -1062,7 +1070,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ 	blk_mq_start_request(req);
+ 	if (unlikely(nsock->pending && nsock->pending != req)) {
+ 		nbd_requeue_cmd(cmd);
+-		ret = 0;
++		ret = BLK_STS_OK;
+ 		goto out;
+ 	}
+ 	/*
+@@ -1081,19 +1089,19 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ 				    "Request send failed, requeueing\n");
+ 		nbd_mark_nsock_dead(nbd, nsock, 1);
+ 		nbd_requeue_cmd(cmd);
+-		ret = 0;
++		ret = BLK_STS_OK;
+ 	}
+ out:
+ 	mutex_unlock(&nsock->tx_lock);
+ 	nbd_config_put(nbd);
+-	return ret;
++	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+ }
+ 
+ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 			const struct blk_mq_queue_data *bd)
+ {
+ 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+-	int ret;
++	blk_status_t ret;
+ 
+ 	/*
+ 	 * Since we look at the bio's to send the request over the network we
+@@ -1113,10 +1121,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	 * appropriate.
+ 	 */
+ 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
+-	if (ret < 0)
+-		ret = BLK_STS_IOERR;
+-	else if (!ret)
+-		ret = BLK_STS_OK;
+ 	mutex_unlock(&cmd->lock);
+ 
+ 	return ret;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 88262d3a93923..ce97b336fbfb8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -3,7 +3,6 @@
+  * Copyright (c) 2008-2009 Atheros Communications Inc.
+  */
+ 
+-
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+@@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+  * for AR3012
+  */
+ static const struct usb_device_id ath3k_blist_tbl[] = {
+-
+ 	/* Atheros AR3012 with sflash firmware*/
+ 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+@@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size,
+ #define TIMEGAP_USEC_MAX	100
+ 
+ static int ath3k_load_firmware(struct usb_device *udev,
+-				const struct firmware *firmware)
++			       const struct firmware *firmware)
+ {
+ 	u8 *send_buf;
+ 	int len = 0;
+@@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+ 		err = usb_bulk_msg(udev, pipe, send_buf, size,
+-					&len, 3000);
++				   &len, 3000);
+ 
+-		if (err || (len != size)) {
++		if (err || len != size) {
+ 			ath3k_log_failed_loading(err, len, size, count);
+ 			goto error;
+ 		}
+@@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
+ }
+ 
+ static int ath3k_get_version(struct usb_device *udev,
+-			struct ath3k_version *version)
++			     struct ath3k_version *version)
+ {
+ 	return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
+ 				    USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+@@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev,
+ }
+ 
+ static int ath3k_load_fwfile(struct usb_device *udev,
+-		const struct firmware *firmware)
++			     const struct firmware *firmware)
+ {
+ 	u8 *send_buf;
+ 	int len = 0;
+@@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+ 		err = usb_bulk_msg(udev, pipe, send_buf, size,
+-					&len, 3000);
+-		if (err || (len != size)) {
++				   &len, 3000);
++		if (err || len != size) {
+ 			ath3k_log_failed_loading(err, len, size, count);
+ 			kfree(send_buf);
+ 			return err;
+@@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ 	}
+ 
+ 	switch (fw_version.ref_clock) {
+-
+ 	case ATH3K_XTAL_FREQ_26M:
+ 		clk_value = 26;
+ 		break;
+@@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ 	}
+ 
+ 	snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
+-		le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
++		 le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+ 
+ 	ret = request_firmware(&firmware, filename, &udev->dev);
+ 	if (ret < 0) {
+@@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
+ }
+ 
+ static int ath3k_probe(struct usb_interface *intf,
+-			const struct usb_device_id *id)
++		       const struct usb_device_id *id)
+ {
+ 	const struct firmware *firmware;
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+@@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf,
+ 	if (ret < 0) {
+ 		if (ret == -ENOENT)
+ 			BT_ERR("Firmware file \"%s\" not found",
+-							ATH3K_FIRMWARE);
++			       ATH3K_FIRMWARE);
+ 		else
+ 			BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+-							ATH3K_FIRMWARE, ret);
++			       ATH3K_FIRMWARE, ret);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 6c989d859b396..6af175e6c08ac 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -1462,6 +1462,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ 
+ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+ {
++	struct amd_cpudata *cpudata = policy->driver_data;
++
++	if (cpudata) {
++		kfree(cpudata);
++		policy->driver_data = NULL;
++	}
++
+ 	pr_debug("CPU %d exiting\n", policy->cpu);
+ 	return 0;
+ }
+diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
+index 92f0a1d9b4a6b..13e413533f082 100644
+--- a/drivers/crypto/hisilicon/qm.c
++++ b/drivers/crypto/hisilicon/qm.c
+@@ -2893,12 +2893,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
+ 	hisi_qm_set_state(qm, QM_NOT_READY);
+ 	up_write(&qm->qps_lock);
+ 
++	qm_remove_uacce(qm);
+ 	qm_irqs_unregister(qm);
+ 	hisi_qm_pci_uninit(qm);
+-	if (qm->use_sva) {
+-		uacce_remove(qm->uacce);
+-		qm->uacce = NULL;
+-	}
+ }
+ EXPORT_SYMBOL_GPL(hisi_qm_uninit);
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 93a972fcbf638..0558f98e221f6 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -481,8 +481,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
+ 
+ 	if (ctx->pbuf_supported)
+ 		sec_free_pbuf_resource(dev, qp_ctx->res);
+-	if (ctx->alg_type == SEC_AEAD)
++	if (ctx->alg_type == SEC_AEAD) {
+ 		sec_free_mac_resource(dev, qp_ctx->res);
++		sec_free_aiv_resource(dev, qp_ctx->res);
++	}
+ }
+ 
+ static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index 0df09bd794088..2773f05adb7d2 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -1045,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev)
+ 
+ 	return cxl_flit_size(pdev) * MEGA / bw;
+ }
++
++static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
++{
++	struct cxl_port *port = data;
++	struct cxl_decoder *cxld;
++	struct cxl_hdm *cxlhdm;
++	void __iomem *hdm;
++	u32 ctrl;
++
++	if (!is_endpoint_decoder(dev))
++		return 0;
++
++	cxld = to_cxl_decoder(dev);
++	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
++		return 0;
++
++	cxlhdm = dev_get_drvdata(&port->dev);
++	hdm = cxlhdm->regs.hdm_decoder;
++	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
++
++	return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
++}
++
++bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
++{
++	return device_for_each_child(&port->dev, port,
++				     __cxl_endpoint_decoder_reset_detected);
++}
++EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 036d17db68e00..72fa477407689 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -891,6 +891,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
+ 			     struct access_coordinate *c1,
+ 			     struct access_coordinate *c2);
+ 
++bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
++
+ /*
+  * Unit test builds overrides this to __weak, find the 'strong' version
+  * of these symbols in tools/testing/cxl/.
+diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
+index 2ff361e756d66..659f9d46b154c 100644
+--- a/drivers/cxl/pci.c
++++ b/drivers/cxl/pci.c
+@@ -957,11 +957,33 @@ static void cxl_error_resume(struct pci_dev *pdev)
+ 		 dev->driver ? "successful" : "failed");
+ }
+ 
++static void cxl_reset_done(struct pci_dev *pdev)
++{
++	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
++	struct cxl_memdev *cxlmd = cxlds->cxlmd;
++	struct device *dev = &pdev->dev;
++
++	/*
++	 * FLR does not expect to touch the HDM decoders and related
++	 * registers.  SBR, however, will wipe all device configurations.
++	 * Issue a warning if there was an active decoder before the reset
++	 * that no longer exists.
++	 */
++	guard(device)(&cxlmd->dev);
++	if (cxlmd->endpoint &&
++	    cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
++		dev_crit(dev, "SBR happened without memory regions removal.\n");
++		dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
++		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
++	}
++}
++
+ static const struct pci_error_handlers cxl_error_handlers = {
+ 	.error_detected	= cxl_error_detected,
+ 	.slot_reset	= cxl_slot_reset,
+ 	.resume		= cxl_error_resume,
+ 	.cor_error_detected	= cxl_cor_error_detected,
++	.reset_done	= cxl_reset_done,
+ };
+ 
+ static struct pci_driver cxl_pci_driver = {
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 002a5ec806207..9fc99cfbef08c 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -394,7 +394,7 @@ config LS2X_APB_DMA
+ 
+ config MCF_EDMA
+ 	tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+-	depends on M5441x || COMPILE_TEST
++	depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 	help
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index a86a81ff0caa6..321446fdddbd7 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
+ 		kfree(desc);
+ 		return NULL;
+ 	}
++	desc->nr_hw_descs = num;
+ 
+ 	return desc;
+ }
+@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
+ static void axi_desc_put(struct axi_dma_desc *desc)
+ {
+ 	struct axi_dma_chan *chan = desc->chan;
+-	int count = atomic_read(&chan->descs_allocated);
++	int count = desc->nr_hw_descs;
+ 	struct axi_dma_hw_desc *hw_desc;
+ 	int descs_put;
+ 
+@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
+ 		/* Remove the completed descriptor from issued list before completing */
+ 		list_del(&vd->node);
+ 		vchan_cookie_complete(vd);
+-
+-		/* Submit queued descriptors after processing the completed ones */
+-		axi_chan_start_first_queued(chan);
+ 	}
+ 
+ out:
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+index 454904d996540..ac571b413b21c 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+@@ -104,6 +104,7 @@ struct axi_dma_desc {
+ 	u32				completed_blocks;
+ 	u32				length;
+ 	u32				period_len;
++	u32				nr_hw_descs;
+ };
+ 
+ struct axi_dma_chan_config {
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index 8dc029c865515..fc049c9c9892e 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
+ 
+ 	spin_unlock(&irq_entry->list_lock);
+ 
+-	list_for_each_entry(desc, &flist, list) {
++	list_for_each_entry_safe(desc, n, &flist, list) {
+ 		/*
+ 		 * Check against the original status as ABORT is software defined
+ 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ 		 */
++		list_del(&desc->list);
++
+ 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
+ 			idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
+ 			continue;
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 9c364e92cb828..e8f45a7fded43 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -534,18 +534,6 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
+ 	return err;
+ }
+ 
+-static int ioat_register(struct ioatdma_device *ioat_dma)
+-{
+-	int err = dma_async_device_register(&ioat_dma->dma_dev);
+-
+-	if (err) {
+-		ioat_disable_interrupts(ioat_dma);
+-		dma_pool_destroy(ioat_dma->completion_pool);
+-	}
+-
+-	return err;
+-}
+-
+ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+ {
+ 	struct dma_device *dma = &ioat_dma->dma_dev;
+@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ 		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ 	}
+ 
+-	err = ioat_register(ioat_dma);
++	err = dma_async_device_register(&ioat_dma->dma_dev);
+ 	if (err)
+-		return err;
++		goto err_disable_interrupts;
+ 
+ 	ioat_kobject_add(ioat_dma, &ioat_ktype);
+ 
+@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ 
+ 	/* disable relaxed ordering */
+ 	err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
+-	if (err)
+-		return pcibios_err_to_errno(err);
++	if (err) {
++		err = pcibios_err_to_errno(err);
++		goto err_disable_interrupts;
++	}
+ 
+ 	/* clear relaxed ordering enable */
+ 	val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ 	err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
+-	if (err)
+-		return pcibios_err_to_errno(err);
++	if (err) {
++		err = pcibios_err_to_errno(err);
++		goto err_disable_interrupts;
++	}
+ 
+ 	if (ioat_dma->cap & IOAT_CAP_DPS)
+ 		writeb(ioat_pending_level + 1,
+ 		       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+ 
+ 	return 0;
++
++err_disable_interrupts:
++	ioat_disable_interrupts(ioat_dma);
++	dma_pool_destroy(ioat_dma->completion_pool);
++	return err;
+ }
+ 
+ static void ioat_shutdown(struct pci_dev *pdev)
+@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	void __iomem * const *iomap;
+ 	struct device *dev = &pdev->dev;
+ 	struct ioatdma_device *device;
++	unsigned int i;
++	u8 version;
+ 	int err;
+ 
+ 	err = pcim_enable_device(pdev);
+@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (!iomap)
+ 		return -ENOMEM;
+ 
++	version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
++	if (version < IOAT_VER_3_0)
++		return -ENODEV;
++
+ 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ 	if (err)
+ 		return err;
+@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	pci_set_master(pdev);
+ 	pci_set_drvdata(pdev, device);
+ 
+-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
++	device->version = version;
+ 	if (device->version >= IOAT_VER_3_4)
+ 		ioat_dca_enabled = 0;
+-	if (device->version >= IOAT_VER_3_0) {
+-		if (is_skx_ioat(pdev))
+-			device->version = IOAT_VER_3_2;
+-		err = ioat3_dma_probe(device, ioat_dca_enabled);
+-	} else
+-		return -ENODEV;
+ 
++	if (is_skx_ioat(pdev))
++		device->version = IOAT_VER_3_2;
++
++	err = ioat3_dma_probe(device, ioat_dca_enabled);
+ 	if (err) {
++		for (i = 0; i < IOAT_MAX_CHANS; i++)
++			kfree(device->idx[i]);
++		kfree(device);
+ 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ 		return -ENODEV;
+ 	}
+@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
+ static void __exit ioat_exit_module(void)
+ {
+ 	pci_unregister_driver(&ioat_pci_driver);
++	kmem_cache_destroy(ioat_sed_cache);
+ 	kmem_cache_destroy(ioat_cache);
+ }
+ module_exit(ioat_exit_module);
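
The ioat3_dma_probe() rework above replaces scattered early returns with the usual goto-unwind idiom: every failure past a given step jumps to one label that releases everything acquired so far, so no path can leave interrupts enabled or pools allocated. A generic runnable sketch of the idiom, with labels in reverse order of acquisition (the mlx5_ib_stage_init_init hunk further down makes the same ordering correction):

  #include <stdlib.h>

  struct ctx {
          char *a;
          char *b;
  };

  static int ctx_setup(struct ctx *c)
  {
          int err = -1;

          c->a = malloc(16);
          if (!c->a)
                  return -1;

          c->b = malloc(16);
          if (!c->b)
                  goto err_free_a;

          if (0)                  /* pretend a later step failed */
                  goto err_free_b;

          return 0;               /* success: ctx keeps ownership */

  err_free_b:
          free(c->b);
  err_free_a:
          free(c->a);             /* unwind in reverse acquisition order */
          return err;
  }

  int main(void)
  {
          struct ctx c;

          if (ctx_setup(&c))
                  return 1;
          free(c.b);
          free(c.a);
          return 0;
  }
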
+diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
+index c9b93055dc9d3..f0a399cf45b2a 100644
+--- a/drivers/dma/ti/k3-udma-glue.c
++++ b/drivers/dma/ti/k3-udma-glue.c
+@@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
+ 
+ 	ret = of_k3_udma_glue_parse(udmax_np, common);
+ 	if (ret)
+-		goto out_put_spec;
++		return ret;
+ 
+ 	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
+-
+-out_put_spec:
+-	of_node_put(udmax_np);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
+index 313b217388fe9..ae6e06057aa1d 100644
+--- a/drivers/dma/xilinx/xdma.c
++++ b/drivers/dma/xilinx/xdma.c
+@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+ 	u32 st;
+ 	bool repeat_tx;
+ 
++	spin_lock(&xchan->vchan.lock);
++
+ 	if (xchan->stop_requested)
+ 		complete(&xchan->last_interrupt);
+ 
+-	spin_lock(&xchan->vchan.lock);
+-
+ 	/* get submitted request */
+ 	vd = vchan_next_desc(&xchan->vchan);
+ 	if (!vd)
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index 3365944f79654..34109fd86c55d 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -15,10 +15,6 @@
+ #include <asm/early_ioremap.h>
+ #include <asm/efi.h>
+ 
+-#ifndef __efi_memmap_free
+-#define __efi_memmap_free(phys, size, flags) do { } while (0)
+-#endif
+-
+ /**
+  * __efi_memmap_init - Common code for mapping the EFI memory map
+  * @data: EFI memory map data
+@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
+-		__efi_memmap_free(efi.memmap.phys_map,
+-				  efi.memmap.desc_size * efi.memmap.nr_map,
+-				  efi.memmap.flags);
+-
+ 	map.phys_map = data->phys_map;
+ 	map.nr_map = data->size / data->desc_size;
+ 	map.map_end = map.map + data->size;
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index d9629ff878619..2328ca58bba61 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
+ 
+ static int psci_system_suspend(unsigned long unused)
+ {
++	int err;
+ 	phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ 
+-	return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
++	err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ 			      pa_cpu_resume, 0, 0);
++	return psci_to_linux_errno(err);
+ }
+ 
+ static int psci_system_suspend_enter(suspend_state_t state)
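
The psci_system_suspend() fix matters because invoke_psci_fn() returns a PSCI status code, not a Linux errno, so the suspend core previously saw raw firmware values. A userspace sketch of the kind of translation psci_to_linux_errno() performs (the PSCI_RET values and the exact mapping here are assumptions; the real table lives in drivers/firmware/psci/psci.c):

  #include <errno.h>
  #include <stdio.h>

  /* PSCI return codes per the PSCI spec (numeric values assumed here) */
  #define PSCI_RET_SUCCESS          0
  #define PSCI_RET_NOT_SUPPORTED   -1
  #define PSCI_RET_INVALID_PARAMS  -2
  #define PSCI_RET_DENIED          -3

  static int psci_status_to_errno(int status)
  {
          switch (status) {
          case PSCI_RET_SUCCESS:         return 0;
          case PSCI_RET_NOT_SUPPORTED:   return -EOPNOTSUPP;
          case PSCI_RET_INVALID_PARAMS:  return -EINVAL;
          case PSCI_RET_DENIED:          return -EPERM;
          default:                       return -EINVAL;
          }
  }

  int main(void)
  {
          printf("%d\n", psci_status_to_errno(PSCI_RET_NOT_SUPPORTED));
          return 0;
  }
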
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index be4629cdac049..08b9dfb653355 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ 	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
+ 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+ 	unsigned int ndw;
+-	signed long r;
++	int r;
+ 	uint32_t seq;
+ 
+-	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
+-	    !down_read_trylock(&adev->reset_domain->sem)) {
++	/*
++	 * A GPU reset should flush all TLBs anyway, so no need to do
++	 * this while one is ongoing.
++	 */
++	if (!down_read_trylock(&adev->reset_domain->sem))
++		return 0;
+ 
++	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
+ 		if (adev->gmc.flush_tlb_needs_extra_type_2)
+ 			adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+ 								 2, all_hub,
+@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ 		adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+ 							 flush_type, all_hub,
+ 							 inst);
+-		return 0;
+-	}
++		r = 0;
++	} else {
++		/* 2 dwords flush + 8 dwords fence */
++		ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ 
+-	/* 2 dwords flush + 8 dwords fence */
+-	ndw = kiq->pmf->invalidate_tlbs_size + 8;
++		if (adev->gmc.flush_tlb_needs_extra_type_2)
++			ndw += kiq->pmf->invalidate_tlbs_size;
+ 
+-	if (adev->gmc.flush_tlb_needs_extra_type_2)
+-		ndw += kiq->pmf->invalidate_tlbs_size;
++		if (adev->gmc.flush_tlb_needs_extra_type_0)
++			ndw += kiq->pmf->invalidate_tlbs_size;
+ 
+-	if (adev->gmc.flush_tlb_needs_extra_type_0)
+-		ndw += kiq->pmf->invalidate_tlbs_size;
++		spin_lock(&adev->gfx.kiq[inst].ring_lock);
++		amdgpu_ring_alloc(ring, ndw);
++		if (adev->gmc.flush_tlb_needs_extra_type_2)
++			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ 
+-	spin_lock(&adev->gfx.kiq[inst].ring_lock);
+-	amdgpu_ring_alloc(ring, ndw);
+-	if (adev->gmc.flush_tlb_needs_extra_type_2)
+-		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
++		if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
++			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ 
+-	if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+-		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
++		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
++		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
++		if (r) {
++			amdgpu_ring_undo(ring);
++			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
++			goto error_unlock_reset;
++		}
+ 
+-	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+-	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+-	if (r) {
+-		amdgpu_ring_undo(ring);
++		amdgpu_ring_commit(ring);
+ 		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+-		goto error_unlock_reset;
+-	}
+-
+-	amdgpu_ring_commit(ring);
+-	spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+-	r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
+-	if (r < 1) {
+-		dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+-		r = -ETIME;
+-		goto error_unlock_reset;
++		if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
++			dev_err(adev->dev, "timeout waiting for kiq fence\n");
++			r = -ETIME;
++		}
+ 	}
+-	r = 0;
+ 
+ error_unlock_reset:
+ 	up_read(&adev->reset_domain->sem);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d6e71aa808d88..f866a02f4f489 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9149,9 +9149,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 
+ 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
+ 
+-	if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+-		dc_allow_idle_optimizations(dm->dc, false);
+-
+ 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ 	drm_dp_mst_atomic_wait_for_dependencies(state);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+index 6083b1dcf050a..a72e849eced3f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+@@ -1340,16 +1340,27 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
+ 	 * Powering up the hardware requires notifying PMFW and DMCUB.
+ 	 * Clearing the driver's idle-allow state requires a DMCUB command.
+ 	 * DMCUB commands require the DMCUB to be powered up and restored.
+-	 *
+-	 * Exit out early to prevent an infinite loop of DMCUB commands
+-	 * triggering exit low power - use software state to track this.
+ 	 */
+-	dc_dmub_srv->idle_allowed = allow_idle;
+ 
+-	if (!allow_idle)
++	if (!allow_idle) {
+ 		dc_dmub_srv_exit_low_power_state(dc);
+-	else
++		/*
++		 * Idle is considered fully exited only after the sequence above
++		 * fully completes. If we have a race of two threads exiting
++		 * at the same time, it is safe to perform the sequence
++		 * twice, as long as we're not re-entering.
++		 *
++		 * Infinite command submission is avoided by using the
++		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
++		 */
++		dc_dmub_srv->idle_allowed = false;
++	} else {
++		/* Mark idle as notified prior to the actual submission to
++		 * prevent multiple entries. */
++		dc_dmub_srv->idle_allowed = true;
++
+ 		dc_dmub_srv_notify_idle(dc, allow_idle);
++	}
+ }
+ 
+ bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index a14f99f4f14a5..eb6c6ba64c0b9 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -1373,3 +1373,75 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ 					triggers, params->num_frames);
+ }
++
++static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
++{
++	/* Calculate the average pixel count per TU and flag streams under ~2.00
++	 * so the divider policy below can decline them and avoid empty TUs. This
++	 * is only required for DPIA tunneling, as empty TUs
++	 * are legal to generate for native DP links. Assume TU size 64 as there
++	 * is currently no scenario where it's reprogrammed from HW default.
++	 * MTPs have no such limitation, so this does not affect MST use cases.
++	 */
++	unsigned int pix_clk_mhz;
++	unsigned int symclk_mhz;
++	unsigned int avg_pix_per_tu_x1000;
++	unsigned int tu_size_bytes = 64;
++	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
++	struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
++	const struct dc *dc = pipe_ctx->stream->link->dc;
++
++	if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
++		return false;
++
++	// Not necessary for MST configurations
++	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
++		return false;
++
++	pix_clk_mhz = timing->pix_clk_100hz / 10000;
++
++	// If this is true, can't block due to dynamic ODM
++	if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
++		return false;
++
++	switch (link_settings->link_rate) {
++	case LINK_RATE_LOW:
++		symclk_mhz = 162;
++		break;
++	case LINK_RATE_HIGH:
++		symclk_mhz = 270;
++		break;
++	case LINK_RATE_HIGH2:
++		symclk_mhz = 540;
++		break;
++	case LINK_RATE_HIGH3:
++		symclk_mhz = 810;
++		break;
++	default:
++		// We shouldn't be tunneling any other rates; something is wrong
++		ASSERT(0);
++		return false;
++	}
++
++	avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
++		/ (symclk_mhz * link_settings->lane_count);
++
++	// Add small empirically-decided margin to account for potential jitter
++	return (avg_pix_per_tu_x1000 < 2020);
++}
++
++bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
++{
++	struct dc *dc = pipe_ctx->stream->ctx->dc;
++
++	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
++		return false;
++
++	if (should_avoid_empty_tu(pipe_ctx))
++		return false;
++
++	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
++		dc->debug.enable_dp_dig_pixel_rate_div_policy)
++		return true;
++
++	return false;
++}
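
Worked example for the threshold above: an HBR2 link (symclk 540 MHz) on 4 lanes carrying a 65 MHz pixel clock gives 1000 * 65 * 64 / (540 * 4) = 1925, under the 2020 cutoff, so the divider policy is declined for that DPIA stream. The arithmetic, checked:

  #include <stdio.h>

  int main(void)
  {
          unsigned int pix_clk_mhz = 65, tu_size_bytes = 64;
          unsigned int symclk_mhz = 540, lane_count = 4;   /* HBR2 x4 */
          unsigned int avg_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
                                   / (symclk_mhz * lane_count);

          printf("avg pix/TU x1000 = %u -> avoid empty TUs: %s\n",
                 avg_x1000, avg_x1000 < 2020 ? "yes" : "no");
          return 0;
  }
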
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+index c354efa6c1b2f..91f5d1136a2e6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+@@ -93,4 +93,6 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ 		int num_pipes, const struct dc_static_screen_params *params);
+ 
++bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
++
+ #endif /* __DC_HWSS_DCN35_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+index a93073055e7bf..6c8da59b7956d 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
+ 	.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ 	.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+ 	.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+-	.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
++	.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
+ 	.dsc_pg_control = dcn35_dsc_pg_control,
+ 	.dsc_pg_status = dcn32_dsc_pg_status,
+ 	.enable_plane = dcn35_enable_plane,
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index 5cb4725c773f6..c8586cb7d0fec 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
+ 
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ 		if (table[i].ulSupportedSCLK != 0) {
++			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++				continue;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ 				table[i].usVoltageID;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index e583515f9b25a..950f86fb13511 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -431,6 +431,10 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+ 	struct intel_encoder *encoder = &intel_dig_port->base;
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
++	/* eDP MSO is not compatible with joiner */
++	if (intel_dp->mso_link_count)
++		return false;
++
+ 	return DISPLAY_VER(dev_priv) >= 12 ||
+ 		(DISPLAY_VER(dev_priv) == 11 &&
+ 		 encoder->port != PORT_A);
+diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
+index fbc43f243c54d..6d000504e1a4e 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.c
++++ b/drivers/gpu/drm/lima/lima_bcast.c
+@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
+ 
+ }
+ 
++int lima_bcast_mask_irq(struct lima_ip *ip)
++{
++	bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
++	bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
++	return 0;
++}
++
++int lima_bcast_reset(struct lima_ip *ip)
++{
++	return lima_bcast_hw_init(ip);
++}
++
+ int lima_bcast_init(struct lima_ip *ip)
+ {
+ 	int i;
+diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
+index 465ee587bceb2..cd08841e47879 100644
+--- a/drivers/gpu/drm/lima/lima_bcast.h
++++ b/drivers/gpu/drm/lima/lima_bcast.h
+@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
+ 
+ void lima_bcast_enable(struct lima_device *dev, int num_pp);
+ 
++int lima_bcast_mask_irq(struct lima_ip *ip);
++int lima_bcast_reset(struct lima_ip *ip);
++
+ #endif
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 6b354e2fb61d4..e15295071533b 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -233,6 +233,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+ 	lima_sched_pipe_task_done(pipe);
+ }
+ 
++static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++	struct lima_ip *ip = pipe->processor[0];
++
++	gp_write(LIMA_GP_INT_MASK, 0);
++}
++
+ static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+ {
+ 	struct lima_ip *ip = pipe->processor[0];
+@@ -365,6 +372,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
+ 	pipe->task_error = lima_gp_task_error;
+ 	pipe->task_mmu_error = lima_gp_task_mmu_error;
+ 	pipe->task_recover = lima_gp_task_recover;
++	pipe->task_mask_irq = lima_gp_task_mask_irq;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index d0d2db0ef1ce8..a4a2ffe6527c2 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -429,6 +429,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+ 
+ 		lima_pp_hard_reset(ip);
+ 	}
++
++	if (pipe->bcast_processor)
++		lima_bcast_reset(pipe->bcast_processor);
+ }
+ 
+ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+@@ -437,6 +440,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+ 		lima_sched_pipe_task_done(pipe);
+ }
+ 
++static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
++{
++	int i;
++
++	for (i = 0; i < pipe->num_processor; i++) {
++		struct lima_ip *ip = pipe->processor[i];
++
++		pp_write(LIMA_PP_INT_MASK, 0);
++	}
++
++	if (pipe->bcast_processor)
++		lima_bcast_mask_irq(pipe->bcast_processor);
++}
++
+ static struct kmem_cache *lima_pp_task_slab;
+ static int lima_pp_task_slab_refcnt;
+ 
+@@ -468,6 +485,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
+ 	pipe->task_fini = lima_pp_task_fini;
+ 	pipe->task_error = lima_pp_task_error;
+ 	pipe->task_mmu_error = lima_pp_task_mmu_error;
++	pipe->task_mask_irq = lima_pp_task_mask_irq;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index 00b19adfc8881..bbf3f8feab944 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -422,12 +422,21 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
+ 	 */
+ 	for (i = 0; i < pipe->num_processor; i++)
+ 		synchronize_irq(pipe->processor[i]->irq);
++	if (pipe->bcast_processor)
++		synchronize_irq(pipe->bcast_processor->irq);
+ 
+ 	if (dma_fence_is_signaled(task->fence)) {
+ 		DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
+ 		return DRM_GPU_SCHED_STAT_NOMINAL;
+ 	}
+ 
++	/*
++	 * The task might still finish while this timeout handler runs.
++	 * To prevent a race condition on its completion, mask all irqs
++	 * on the running core until the next hard reset completes.
++	 */
++	pipe->task_mask_irq(pipe);
++
+ 	if (!pipe->error)
+ 		DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
+ 
+diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
+index 6bd4f3b701091..85b23ba901d53 100644
+--- a/drivers/gpu/drm/lima/lima_sched.h
++++ b/drivers/gpu/drm/lima/lima_sched.h
+@@ -80,6 +80,7 @@ struct lima_sched_pipe {
+ 	void (*task_error)(struct lima_sched_pipe *pipe);
+ 	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ 	int (*task_recover)(struct lima_sched_pipe *pipe);
++	void (*task_mask_irq)(struct lima_sched_pipe *pipe);
+ 
+ 	struct work_struct recover_work;
+ };
+diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
+index 21d27e6235f39..b11f7c5bbcbe9 100644
+--- a/drivers/gpu/drm/radeon/sumo_dpm.c
++++ b/drivers/gpu/drm/radeon/sumo_dpm.c
+@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+ 
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
+ 		if (table[i].ulSupportedSCLK != 0) {
++			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
++				continue;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
+ 				table[i].usVoltageID;
+ 			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
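
Both this radeon sumo_dpm hunk and the amdgpu kv_dpm hunk earlier apply the same guard: usVoltageIndex comes from firmware-provided tables and indexes a fixed-size entries[] array, so an out-of-range value has to be skipped rather than written through. The pattern, reduced to a runnable check (sizes hypothetical):

  #include <stdio.h>

  #define MAX_VOLTAGES 4

  struct entry {
          unsigned int vid;
  };

  int main(void)
  {
          struct entry entries[MAX_VOLTAGES] = {0};
          unsigned int idx_from_bios = 7;      /* untrusted firmware value */

          if (idx_from_bios >= MAX_VOLTAGES) { /* guard before the write */
                  printf("skipping bogus index %u\n", idx_from_bios);
                  return 0;
          }
          entries[idx_from_bios].vid = 1;
          return 0;
  }
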
+diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
+index 0d2a2dd13f112..a38f59b4356d4 100644
+--- a/drivers/gpu/drm/xe/xe_guc.c
++++ b/drivers/gpu/drm/xe/xe_guc.c
+@@ -643,8 +643,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
+ 	struct xe_device *xe = guc_to_xe(guc);
+ 	int err;
+ 
+-	guc_enable_irq(guc);
+-
+ 	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
+ 		struct xe_gt *gt = guc_to_gt(guc);
+ 		struct xe_tile *tile = gt_to_tile(gt);
+@@ -652,6 +650,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
+ 		err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+ 		if (err)
+ 			return err;
++	} else {
++		guc_enable_irq(guc);
+ 	}
+ 
+ 	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 78cdfb8b9a7ae..88cbb2fe6ac8c 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev,
+ 	if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ 		return asus_e1239t_event(drvdata, data, size);
+ 
+-	if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
++	/*
++	 * Skip these report IDs; the device emits a continuous stream associated
++	 * with the AURA mode it is in, which looks like an 'echo'.
++	 */
++	if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
++		return -1;
++	if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+ 		/*
+-		 * Skip these report ID, the device emits a continuous stream associated
+-		 * with the AURA mode it is in which looks like an 'echo'.
++		 * G713 and G733 send these codes on some keypresses; depending on
++		 * the key pressed, they can trigger a shutdown event if not caught.
+ 		*/
+-		if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
+-				report->id == FEATURE_KBD_LED_REPORT_ID2) {
++		if (data[0] == 0x02 && data[1] == 0x30) {
+ 			return -1;
+-		/* Additional report filtering */
+-		} else if (report->id == FEATURE_KBD_REPORT_ID) {
+-			/*
+-			 * G14 and G15 send these codes on some keypresses with no
+-			 * discernable reason for doing so. We'll filter them out to avoid
+-			 * unmapped warning messages later.
+-			*/
+-			if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
+-					data[1] == 0x8a || data[1] == 0x9e) {
+-				return -1;
+-			}
+-		}
+-		if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+-			/*
+-			 * G713 and G733 send these codes on some keypresses, depending on
+-			 * the key pressed it can trigger a shutdown event if not caught.
+-			*/
+-			if(data[0] == 0x02 && data[1] == 0x30) {
+-				return -1;
+-			}
+ 		}
+-
+ 	}
+ 
+ 	if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
+@@ -1250,6 +1234,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		rdesc[205] = 0x01;
+ 	}
+ 
++	/* match many more n-key devices */
++	if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
++		for (int i = 0; i < *rsize - 15; i++) {
++			/* the offset from the 0x5a report item to the count is always 14 */
++			if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
++			    rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
++				hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
++				rdesc[i + 15] = 0x01;
++				break;
++			}
++		}
++	}
++
+ 	return rdesc;
+ }
+ 
+@@ -1319,4 +1316,4 @@ static struct hid_driver asus_driver = {
+ };
+ module_hid_driver(asus_driver);
+ 
+-MODULE_LICENSE("GPL");
+\ No newline at end of file
++MODULE_LICENSE("GPL");
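
In the descriptor fixup above, 0x85 is the HID Report ID item prefix and 0x95 the Report Count item prefix; the scan locates the 0x5a report and rewrites its count from 5 to 1 at the fixed 14-byte offset. A standalone model of the same scan over a toy descriptor (bytes invented for illustration):

  #include <stdio.h>

  int main(void)
  {
          /* toy bytes: Report ID 0x5a, 12 filler bytes, Report Count 5 */
          unsigned char rdesc[] = {
                  0x05, 0x01,
                  0x85, 0x5a,             /* 0x85 = Report ID item */
                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0x95, 0x05,             /* 0x95 = Report Count item */
          };
          unsigned int rsize = sizeof(rdesc);

          for (unsigned int i = 0; i + 15 < rsize; i++) {
                  if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
                      rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
                          rdesc[i + 15] = 0x01;   /* shrink count to 1 */
                          printf("patched at offset %u\n", i);
                          break;
                  }
          }
          return 0;
  }
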
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 8376fb5e2d0b4..68b0f39deaa9a 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -823,6 +823,7 @@
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+ #define USB_DEVICE_ID_LOGITECH_T651	0xb00c
+ #define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD	0xb309
++#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD	0xbb00
+ #define USB_DEVICE_ID_LOGITECH_C007	0xc007
+ #define USB_DEVICE_ID_LOGITECH_C077	0xc077
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 04a014cd2a2f6..56fc78841f245 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2081,6 +2081,12 @@ static const struct hid_device_id mt_devices[] = {
+ 			   USB_VENDOR_ID_LENOVO,
+ 			   USB_DEVICE_ID_LENOVO_X12_TAB) },
+ 
++	/* Logitech devices */
++	{ .driver_data = MT_CLS_NSMU,
++		HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_LOGITECH,
++			USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
++
+ 	/* MosArt panels */
+ 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 6d72e4e126dde..36e8f6196a87b 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -99,6 +99,7 @@ struct lpi2c_imx_struct {
+ 	__u8			*rx_buf;
+ 	__u8			*tx_buf;
+ 	struct completion	complete;
++	unsigned long		rate_per;
+ 	unsigned int		msglen;
+ 	unsigned int		delivered;
+ 	unsigned int		block_data;
+@@ -212,9 +213,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
+ 
+ 	lpi2c_imx_set_mode(lpi2c_imx);
+ 
+-	clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk);
+-	if (!clk_rate)
+-		return -EINVAL;
++	clk_rate = lpi2c_imx->rate_per;
+ 
+ 	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
+ 		filt = 0;
+@@ -611,6 +610,20 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * Lock the parent clock rate so that it does not have to be queried
++	 * on each transfer.
++	 */
++	ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
++	if (ret)
++		return dev_err_probe(&pdev->dev, ret,
++				     "can't lock I2C peripheral clock rate\n");
++
++	lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
++	if (!lpi2c_imx->rate_per)
++		return dev_err_probe(&pdev->dev, -EINVAL,
++				     "can't get I2C peripheral clock rate\n");
++
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_get_noresume(&pdev->dev);
+diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
+index e106af83cef4d..350ccfbe86340 100644
+--- a/drivers/i2c/busses/i2c-ocores.c
++++ b/drivers/i2c/busses/i2c-ocores.c
+@@ -442,8 +442,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
+ 	oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
+ 
+ 	/* Init the device */
+-	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+ 	oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
++	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 9dca451ed5221..6974922e5609a 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
+ 	struct	bnxt_re_sqp_entries *sqp_tbl;
+ };
+ 
+-#define BNXT_RE_MIN_MSIX		2
+-#define BNXT_RE_MAX_MSIX		9
+ #define BNXT_RE_AEQ_IDX			0
+ #define BNXT_RE_NQ_IDX			1
+ #define BNXT_RE_GEN_P5_MAX_VF		64
+@@ -168,7 +166,7 @@ struct bnxt_re_dev {
+ 	struct bnxt_qplib_rcfw		rcfw;
+ 
+ 	/* NQ */
+-	struct bnxt_qplib_nq		nq[BNXT_RE_MAX_MSIX];
++	struct bnxt_qplib_nq		nq[BNXT_MAX_ROCE_MSIX];
+ 
+ 	/* Device Resources */
+ 	struct bnxt_qplib_dev_attr	dev_attr;
+diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
+index b70b13484f097..13a49d8fd49d5 100644
+--- a/drivers/infiniband/hw/mana/mr.c
++++ b/drivers/infiniband/hw/mana/mr.c
+@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ 		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
+ 		  start, iova, length, access_flags);
+ 
++	access_flags &= ~IB_ACCESS_OPTIONAL;
+ 	if (access_flags & ~VALID_MR_FLAGS)
+ 		return ERR_PTR(-EINVAL);
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c2b557e642906..9fb8a544236d7 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3760,10 +3760,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ 	spin_lock_init(&dev->dm.lock);
+ 	dev->dm.dev = mdev;
+ 	return 0;
+-err:
+-	mlx5r_macsec_dealloc_gids(dev);
+ err_mp:
+ 	mlx5_ib_cleanup_multiport_master(dev);
++err:
++	mlx5r_macsec_dealloc_gids(dev);
+ 	return err;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index ecc111ed5d86e..d3c1f63791a2b 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
+ 	MLX5_SET(mkc, mkc, access_mode_4_2,
+ 		(ent->rb_key.access_mode >> 2) & 0x7);
++	MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
+ 
+ 	MLX5_SET(mkc, mkc, translations_octword_size,
+ 		 get_mkc_octo_size(ent->rb_key.access_mode,
+@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
+ 			new = &((*new)->rb_left);
+ 		if (cmp < 0)
+ 			new = &((*new)->rb_right);
+-		if (cmp == 0) {
+-			mutex_unlock(&cache->rb_lock);
++		if (cmp == 0)
+ 			return -EEXIST;
+-		}
+ 	}
+ 
+ 	/* Add new node and rebalance tree. */
+@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ 	}
+ 	mr->mmkey.cache_ent = ent;
+ 	mr->mmkey.type = MLX5_MKEY_MR;
++	mr->mmkey.rb_key = ent->rb_key;
++	mr->mmkey.cacheable = true;
+ 	init_waitqueue_head(&mr->mmkey.wait);
+ 	return mr;
+ }
+@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ 	mr->ibmr.pd = pd;
+ 	mr->umem = umem;
+ 	mr->page_shift = order_base_2(page_size);
+-	mr->mmkey.cacheable = true;
+ 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
+ 
+ 	return mr;
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index a056ea835da54..84be0c3d56995 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ 	int err;
+ 	struct mlx5_srq_attr in = {};
+ 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
++	__u32 max_sge_sz =  MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
++			    sizeof(struct mlx5_wqe_data_seg);
+ 
+ 	if (init_attr->srq_type != IB_SRQT_BASIC &&
+ 	    init_attr->srq_type != IB_SRQT_XRC &&
+ 	    init_attr->srq_type != IB_SRQT_TM)
+ 		return -EOPNOTSUPP;
+ 
+-	/* Sanity check SRQ size before proceeding */
+-	if (init_attr->attr.max_wr >= max_srq_wqes) {
+-		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
+-			    init_attr->attr.max_wr,
+-			    max_srq_wqes);
++	/* Sanity check SRQ and sge size before proceeding */
++	if (init_attr->attr.max_wr >= max_srq_wqes ||
++	    init_attr->attr.max_sge > max_sge_sz) {
++		mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
++			    init_attr->attr.max_wr, max_srq_wqes,
++			    init_attr->attr.max_sge, max_sge_sz);
+ 		return -EINVAL;
+ 	}
+ 
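
The new SRQ cap follows from the hardware limit: each scatter entry occupies one mlx5_wqe_data_seg, a 16-byte struct (32-bit byte_count, 32-bit lkey, 64-bit address), so a device reporting a 512-byte max receive WQE admits at most 32 SGEs. The derivation as code (the 512 is an assumed cap, not one queried from a device):

  #include <stdint.h>
  #include <stdio.h>

  struct wqe_data_seg {           /* mirrors the mlx5_wqe_data_seg layout */
          uint32_t byte_count;
          uint32_t lkey;
          uint64_t addr;
  };

  int main(void)
  {
          unsigned int max_wqe_sz_rq = 512;   /* assumed HW cap */
          unsigned int max_sge = max_wqe_sz_rq / sizeof(struct wqe_data_seg);

          printf("max_sge = %u\n", max_sge);  /* 512 / 16 = 32 */
          return 0;
  }
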
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 963382f625d71..fa2b87c749292 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
+ 	 * receive buffer later. For rmda operations additional
+ 	 * length checks are performed in check_rkey.
+ 	 */
++	if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
++		unsigned int payload = payload_size(pkt);
++		unsigned int recv_buffer_len = 0;
++		int i;
++
++		for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
++			recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
++		if (payload + 40 > recv_buffer_len) {
++			rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
++			return RESPST_ERR_LENGTH;
++		}
++	}
++
+ 	if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
+ 					     (qp_type(qp) == IB_QPT_UC))) {
+ 		unsigned int mtu = qp->mtu;
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index a49784e5156c5..a7e9510666e23 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+ 	int i;
+ 
+ 	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+-		memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
++		memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
+ 		p += sge->length;
+ 	}
+ }
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 41f93c3ab160d..3afec8714cf28 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -3402,7 +3402,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
+ 	smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+ 
+ 	/* Add callback to free MSIs on teardown */
+-	devm_add_action(dev, arm_smmu_free_msis, dev);
++	devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
+ }
+ 
+ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
+diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
+index e994db4f4d914..61750cc98d705 100644
+--- a/drivers/media/pci/intel/ipu-bridge.c
++++ b/drivers/media/pci/intel/ipu-bridge.c
+@@ -15,6 +15,8 @@
+ #include <media/ipu-bridge.h>
+ #include <media/v4l2-fwnode.h>
+ 
++#define ADEV_DEV(adev) ACPI_PTR(&((adev)->dev))
++
+ /*
+  * 92335fcf-3203-4472-af93-7b4453ac29da
+  *
+@@ -87,6 +89,7 @@ static const char * const ipu_vcm_types[] = {
+ 	"lc898212axb",
+ };
+ 
++#if IS_ENABLED(CONFIG_ACPI)
+ /*
+  * Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
+  * instead of device and driver match to probe IVSC device.
+@@ -100,13 +103,13 @@ static const struct acpi_device_id ivsc_acpi_ids[] = {
+ 
+ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+ {
+-	acpi_handle handle = acpi_device_handle(adev);
+-	struct acpi_device *consumer, *ivsc_adev;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
+ 		const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
++		struct acpi_device *consumer, *ivsc_adev;
+ 
++		acpi_handle handle = acpi_device_handle(adev);
+ 		for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
+ 			/* camera sensor depends on IVSC in DSDT if it exists */
+ 			for_each_acpi_consumer_dev(ivsc_adev, consumer)
+@@ -118,6 +121,12 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
+ 
+ 	return NULL;
+ }
++#else
++static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
++{
++	return NULL;
++}
++#endif
+ 
+ static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
+ {
+@@ -163,7 +172,7 @@ static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
+ 		csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ 		if (!csi_dev) {
+ 			acpi_dev_put(adev);
+-			dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
++			dev_err(ADEV_DEV(adev), "Failed to find MEI CSI dev\n");
+ 			return -ENODEV;
+ 		}
+ 
+@@ -182,24 +191,25 @@ static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ 	acpi_status status;
+ 	int ret = 0;
+ 
+-	status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
++	status = acpi_evaluate_object(ACPI_PTR(adev->handle),
++				      id, NULL, &buffer);
+ 	if (ACPI_FAILURE(status))
+ 		return -ENODEV;
+ 
+ 	obj = buffer.pointer;
+ 	if (!obj) {
+-		dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
++		dev_err(ADEV_DEV(adev), "Couldn't locate ACPI buffer\n");
+ 		return -ENODEV;
+ 	}
+ 
+ 	if (obj->type != ACPI_TYPE_BUFFER) {
+-		dev_err(&adev->dev, "Not an ACPI buffer\n");
++		dev_err(ADEV_DEV(adev), "Not an ACPI buffer\n");
+ 		ret = -ENODEV;
+ 		goto out_free_buff;
+ 	}
+ 
+ 	if (obj->buffer.length > size) {
+-		dev_err(&adev->dev, "Given buffer is too small\n");
++		dev_err(ADEV_DEV(adev), "Given buffer is too small\n");
+ 		ret = -EINVAL;
+ 		goto out_free_buff;
+ 	}
+@@ -220,7 +230,7 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ 	case IPU_SENSOR_ROTATION_INVERTED:
+ 		return 180;
+ 	default:
+-		dev_warn(&adev->dev,
++		dev_warn(ADEV_DEV(adev),
+ 			 "Unknown rotation %d. Assume 0 degree rotation\n",
+ 			 ssdb->degree);
+ 		return 0;
+@@ -230,12 +240,14 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
+ {
+ 	enum v4l2_fwnode_orientation orientation;
+-	struct acpi_pld_info *pld;
+-	acpi_status status;
++	struct acpi_pld_info *pld = NULL;
++	acpi_status status = AE_ERROR;
+ 
++#if IS_ENABLED(CONFIG_ACPI)
+ 	status = acpi_get_physical_device_location(adev->handle, &pld);
++#endif
+ 	if (ACPI_FAILURE(status)) {
+-		dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
++		dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
+ 		return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ 	}
+ 
+@@ -253,7 +265,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
+ 		orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ 		break;
+ 	default:
+-		dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
++		dev_warn(ADEV_DEV(adev), "Unknown _PLD panel val %d\n",
++			 pld->panel);
+ 		orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ 		break;
+ 	}
+@@ -272,12 +285,12 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
+ 		return ret;
+ 
+ 	if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
+-		dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
++		dev_warn(ADEV_DEV(adev), "Unknown VCM type %d\n", ssdb.vcmtype);
+ 		ssdb.vcmtype = 0;
+ 	}
+ 
+ 	if (ssdb.lanes > IPU_MAX_LANES) {
+-		dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
++		dev_err(ADEV_DEV(adev), "Number of lanes in SSDB is invalid\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -465,8 +478,14 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
+ 						sensor->ipu_properties);
+ 
+ 	if (sensor->csi_dev) {
++		const char *device_hid = "";
++
++#if IS_ENABLED(CONFIG_ACPI)
++		device_hid = acpi_device_hid(sensor->ivsc_adev);
++#endif
++
+ 		snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
+-			 acpi_device_hid(sensor->ivsc_adev), sensor->link);
++			 device_hid, sensor->link);
+ 
+ 		nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
+ 						     sensor->ivsc_properties);
+@@ -631,11 +650,15 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ {
+ 	struct fwnode_handle *fwnode, *primary;
+ 	struct ipu_sensor *sensor;
+-	struct acpi_device *adev;
++	struct acpi_device *adev = NULL;
+ 	int ret;
+ 
++#if IS_ENABLED(CONFIG_ACPI)
+ 	for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+-		if (!adev->status.enabled)
++#else
++	while (true) {
++#endif
++		if (!ACPI_PTR(adev->status.enabled))
+ 			continue;
+ 
+ 		if (bridge->n_sensors >= IPU_MAX_PORTS) {
+@@ -671,7 +694,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ 			goto err_free_swnodes;
+ 		}
+ 
+-		sensor->adev = acpi_dev_get(adev);
++		sensor->adev = ACPI_PTR(acpi_dev_get(adev));
+ 
+ 		primary = acpi_fwnode_handle(adev);
+ 		primary->secondary = fwnode;
+@@ -727,11 +750,16 @@ static int ipu_bridge_ivsc_is_ready(void)
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
++#if IS_ENABLED(CONFIG_ACPI)
+ 		const struct ipu_sensor_config *cfg =
+ 			&ipu_supported_sensors[i];
+ 
+ 		for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
+-			if (!sensor_adev->status.enabled)
++#else
++		while (true) {
++			sensor_adev = NULL;
++#endif
++			if (!ACPI_PTR(sensor_adev->status.enabled))
+ 				continue;
+ 
+ 			adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
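
The ipu-bridge changes above follow the standard compile-out pattern: when CONFIG_ACPI is disabled, the ACPI-walking helpers are replaced by stubs with identical signatures so every caller builds unchanged. A minimal standalone illustration (HAVE_FEATURE stands in for IS_ENABLED(CONFIG_ACPI); toggle it to see both sides compile):

  #include <stdio.h>

  #define HAVE_FEATURE 0   /* stands in for IS_ENABLED(CONFIG_ACPI) */

  struct dev {
          const char *name;
  };

  #if HAVE_FEATURE
  static struct dev *find_companion(void)
  {
          static struct dev d = { "real-acpi-dev" };

          return &d;          /* a real lookup would walk ACPI tables */
  }
  #else
  static struct dev *find_companion(void)
  {
          return NULL;        /* stub: same signature, no ACPI available */
  }
  #endif

  int main(void)
  {
          struct dev *d = find_companion();

          printf("%s\n", d ? d->name : "no companion");
          return 0;
  }
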
+diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+index 6bbe55de6ce9a..ff23b225db705 100644
+--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
++++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+@@ -79,6 +79,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
+ 	}
+ 
+ 	fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
++	if (!fw)
++		return ERR_PTR(-ENOMEM);
+ 	fw->type = SCP;
+ 	fw->ops = &mtk_vcodec_rproc_msg;
+ 	fw->scp = scp;
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index e10ae94cf7711..5ccb1a3a149d8 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -185,7 +185,12 @@
+ #define RTL8366RB_LED_BLINKRATE_222MS		0x0004
+ #define RTL8366RB_LED_BLINKRATE_446MS		0x0005
+ 
++/* LED trigger event for each group */
+ #define RTL8366RB_LED_CTRL_REG			0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
++	(4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group)	\
++	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+ #define RTL8366RB_LED_OFF			0x0
+ #define RTL8366RB_LED_DUP_COL			0x1
+ #define RTL8366RB_LED_LINK_ACT			0x2
+@@ -202,6 +207,11 @@
+ #define RTL8366RB_LED_LINK_TX			0xd
+ #define RTL8366RB_LED_MASTER			0xe
+ #define RTL8366RB_LED_FORCE			0xf
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LED_FORCE. Otherwise, they are ignored.
++ */
+ #define RTL8366RB_LED_0_1_CTRL_REG		0x0432
+ #define RTL8366RB_LED_1_OFFSET			6
+ #define RTL8366RB_LED_2_3_CTRL_REG		0x0433
+@@ -1001,28 +1011,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
+ 	 */
+ 	if (priv->leds_disabled) {
+ 		/* Turn everything off */
+-		regmap_update_bits(priv->map,
+-				   RTL8366RB_LED_0_1_CTRL_REG,
+-				   0x0FFF, 0);
+-		regmap_update_bits(priv->map,
+-				   RTL8366RB_LED_2_3_CTRL_REG,
+-				   0x0FFF, 0);
+ 		regmap_update_bits(priv->map,
+ 				   RTL8366RB_INTERRUPT_CONTROL_REG,
+ 				   RTL8366RB_P4_RGMII_LED,
+ 				   0);
+-		val = RTL8366RB_LED_OFF;
+-	} else {
+-		/* TODO: make this configurable per LED */
+-		val = RTL8366RB_LED_FORCE;
+-	}
+-	for (i = 0; i < 4; i++) {
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_CTRL_REG,
+-					 0xf << (i * 4),
+-					 val << (i * 4));
+-		if (ret)
+-			return ret;
++
++		for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
++			val = RTL8366RB_LED_OFF << RTL8366RB_LED_CTRL_OFFSET(i);
++			ret = regmap_update_bits(priv->map,
++						 RTL8366RB_LED_CTRL_REG,
++						 RTL8366RB_LED_CTRL_MASK(i),
++						 val);
++			if (ret)
++				return ret;
++		}
+ 	}
+ 
+ 	ret = rtl8366_reset_vlan(priv);
+@@ -1167,52 +1169,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ 	}
+ }
+ 
+-static void rb8366rb_set_port_led(struct realtek_priv *priv,
+-				  int port, bool enable)
+-{
+-	u16 val = enable ? 0x3f : 0;
+-	int ret;
+-
+-	if (priv->leds_disabled)
+-		return;
+-
+-	switch (port) {
+-	case 0:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_0_1_CTRL_REG,
+-					 0x3F, val);
+-		break;
+-	case 1:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_0_1_CTRL_REG,
+-					 0x3F << RTL8366RB_LED_1_OFFSET,
+-					 val << RTL8366RB_LED_1_OFFSET);
+-		break;
+-	case 2:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_2_3_CTRL_REG,
+-					 0x3F, val);
+-		break;
+-	case 3:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_LED_2_3_CTRL_REG,
+-					 0x3F << RTL8366RB_LED_3_OFFSET,
+-					 val << RTL8366RB_LED_3_OFFSET);
+-		break;
+-	case 4:
+-		ret = regmap_update_bits(priv->map,
+-					 RTL8366RB_INTERRUPT_CONTROL_REG,
+-					 RTL8366RB_P4_RGMII_LED,
+-					 enable ? RTL8366RB_P4_RGMII_LED : 0);
+-		break;
+-	default:
+-		dev_err(priv->dev, "no LED for port %d\n", port);
+-		return;
+-	}
+-	if (ret)
+-		dev_err(priv->dev, "error updating LED on port %d\n", port);
+-}
+-
+ static int
+ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ 		      struct phy_device *phy)
+@@ -1226,7 +1182,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ 	if (ret)
+ 		return ret;
+ 
+-	rb8366rb_set_port_led(priv, port, true);
+ 	return 0;
+ }
+ 
+@@ -1241,8 +1196,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
+ 				 BIT(port));
+ 	if (ret)
+ 		return;
+-
+-	rb8366rb_set_port_led(priv, port, false);
+ }
+ 
+ static int
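
The new helpers make the register layout explicit: each of the four LED groups owns a 4-bit trigger field in RTL8366RB_LED_CTRL_REG, so group 2, for instance, sits at offset 8 with mask 0x0f00. A quick check of the macro expansion:

  #include <stdio.h>

  #define LED_CTRL_OFFSET(g)  (4 * (g))
  #define LED_CTRL_MASK(g)    (0xf << LED_CTRL_OFFSET(g))

  int main(void)
  {
          for (int g = 0; g < 4; g++)
                  printf("group %d: offset %d mask 0x%04x\n",
                         g, LED_CTRL_OFFSET(g), LED_CTRL_MASK(g));
          return 0;
  }
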
+diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
+index d2e876805393b..a9c1702431efb 100644
+--- a/drivers/net/dsa/realtek/rtl83xx.c
++++ b/drivers/net/dsa/realtek/rtl83xx.c
+@@ -290,16 +290,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+  * rtl83xx_remove() - Cleanup a realtek switch driver
+  * @priv: realtek_priv pointer
+  *
+- * If a method is provided, this function asserts the hard reset of the switch
+- * in order to avoid leaking traffic when the driver is gone.
++ * Placeholder for common cleanup procedures.
+  *
+- * Context: Might sleep if priv->gdev->chip->can_sleep.
++ * Context: Any
+  * Return: nothing
+  */
+ void rtl83xx_remove(struct realtek_priv *priv)
+ {
+-	/* leave the device reset asserted */
+-	rtl83xx_reset_assert(priv);
+ }
+ EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+ 
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index 933e619b3a313..4c6e07aa4bbb5 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
+ 		idx * io_cq->cdesc_entry_size_in_bytes);
+ }
+ 
+-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+-					   u16 *first_cdesc_idx)
++static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
++				    u16 *first_cdesc_idx,
++				    u16 *num_descs)
+ {
++	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
+ 	struct ena_eth_io_rx_cdesc_base *cdesc;
+-	u16 count = 0, head_masked;
+ 	u32 last = 0;
+ 
+ 	do {
++		u32 status;
++
+ 		cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ 		if (!cdesc)
+ 			break;
++		status = READ_ONCE(cdesc->status);
+ 
+ 		ena_com_cq_inc_head(io_cq);
++		if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
++		    ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
++			struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
++
++			netdev_err(dev->net_device,
++				   "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
++				   count, io_cq->qid, cdesc->req_id);
++			return -EFAULT;
++		}
+ 		count++;
+-		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+-		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
++		last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
++			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ 	} while (!last);
+ 
+ 	if (last) {
+ 		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
+-		count += io_cq->cur_rx_pkt_cdesc_count;
+ 
+ 		head_masked = io_cq->head & (io_cq->q_depth - 1);
+ 
++		*num_descs = count;
+ 		io_cq->cur_rx_pkt_cdesc_count = 0;
+ 		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
+ 
+@@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ 			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ 			   io_cq->qid, *first_cdesc_idx, count);
+ 	} else {
+-		io_cq->cur_rx_pkt_cdesc_count += count;
+-		count = 0;
++		io_cq->cur_rx_pkt_cdesc_count = count;
++		*num_descs = 0;
+ 	}
+ 
+-	return count;
++	return 0;
+ }
+ 
+ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+@@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ 	u16 cdesc_idx = 0;
+ 	u16 nb_hw_desc;
+ 	u16 i = 0;
++	int rc;
+ 
+ 	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+ 
+-	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
++	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
++	if (unlikely(rc != 0))
++		return -EFAULT;
++
+ 	if (nb_hw_desc == 0) {
+ 		ena_rx_ctx->descs = nb_hw_desc;
+ 		return 0;
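
The refactor above changes the function's contract: the completed-descriptor count moves to an out-parameter so the return value can carry a real error, letting the caller distinguish "no complete packet yet" (count 0, return 0) from "malformed descriptor stream" (-EFAULT, which triggers a device reset). A sketch of the signature split (names hypothetical):

  #include <stdio.h>

  /* before: u16 get_pkt(...) -- 0 meant both "nothing yet" and "broken" */
  /* after: count goes via out-param, return value is reserved for errors */
  static int get_pkt(int have_bad_desc, unsigned short *num_descs)
  {
          if (have_bad_desc) {
                  *num_descs = 0;
                  return -1;      /* stands in for -EFAULT: reset the device */
          }
          *num_descs = 0;         /* nothing completed yet; not an error */
          return 0;
  }

  int main(void)
  {
          unsigned short n;

          printf("ok=%d bad=%d\n", get_pkt(0, &n), get_pkt(1, &n));
          return 0;
  }
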
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index be5acfa41ee0c..8db05f7544f90 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1347,6 +1347,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 	if (rc == -ENOSPC) {
+ 		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+ 		ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
++	} else if (rc == -EFAULT) {
++		ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
+ 	} else {
+ 		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+ 				  &rx_ring->syncp);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+index 2c3d6a77ea79f..a2efebafd686a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
++++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {
+ 	ENA_REGS_RESET_GENERIC                      = 13,
+ 	ENA_REGS_RESET_MISS_INTERRUPT               = 14,
+ 	ENA_REGS_RESET_SUSPECTED_POLL_STARVATION    = 15,
++	ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED	    = 16,
+ };
+ 
+ /* ena_registers offsets */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 2c2ee79c4d779..0fab62a56f3b3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -730,9 +730,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ tx_dma_error:
+-	if (BNXT_TX_PTP_IS_SET(lflags))
+-		atomic_inc(&bp->ptp_cfg->tx_avail);
+-
+ 	last_frag = i;
+ 
+ 	/* start back at beginning and unmap skb */
+@@ -754,6 +751,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ tx_free:
+ 	dev_kfree_skb_any(skb);
+ tx_kick_pending:
++	if (BNXT_TX_PTP_IS_SET(lflags))
++		atomic_inc(&bp->ptp_cfg->tx_avail);
+ 	if (txr->kick_pending)
+ 		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+ 	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
+index 4df561d64bc38..6e7d58243c7cb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
+@@ -1329,6 +1329,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ 
+ 	for (i = 0; i < count; i++) {
+ 		bool last = false;
++		int try_cnt = 0;
+ 		int status;
+ 
+ 		bh = (struct ice_buf_hdr *)(bufs + start + i);
+@@ -1336,8 +1337,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ 		if (indicate_last)
+ 			last = ice_is_last_download_buffer(bh, i, count);
+ 
+-		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+-					     &offset, &info, NULL);
++		while (1) {
++			status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
++						     last, &offset, &info,
++						     NULL);
++			if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
++			    hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
++				break;
++
++			try_cnt++;
++
++			if (try_cnt == 5)
++				break;
++
++			msleep(20);
++		}
++
++		if (try_cnt)
++			dev_dbg(ice_hw_to_dev(hw),
++				"ice_aq_download_pkg number of retries: %d\n",
++				try_cnt);
+ 
+ 		/* Save AQ status from download package */
+ 		if (status) {
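
The download loop above is a bounded retry: only the two transient admin-queue statuses (ICE_AQ_RC_ENOSEC, ICE_AQ_RC_EBADSIG) re-arm it, it sleeps 20 ms between attempts, and it gives up after five tries so persistently failing firmware cannot wedge the probe. The shape of the idiom, standalone:

  #include <stdio.h>
  #include <unistd.h>

  static int flaky_op(int *attempts)
  {
          return ++(*attempts) < 3 ? -11 : 0;   /* -11 ~ a transient EAGAIN */
  }

  int main(void)
  {
          int attempts = 0, status, tries = 0;

          while (1) {
                  status = flaky_op(&attempts);
                  if (status != -11)        /* only transient errors retry */
                          break;
                  if (++tries == 5)         /* hard cap on retries */
                          break;
                  usleep(20 * 1000);        /* back off 20 ms, like msleep(20) */
          }
          printf("status %d after %d retries\n", status, tries);
          return 0;
  }
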
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 10fef2e726b39..61eef3259cbaa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -803,6 +803,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
+ 	}
+ 
+ 	switch (vsi->port_info->phy.link_info.link_speed) {
++	case ICE_AQ_LINK_SPEED_200GB:
++		speed = "200 G";
++		break;
+ 	case ICE_AQ_LINK_SPEED_100GB:
+ 		speed = "100 G";
+ 		break;
+@@ -5451,7 +5454,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
+ 	 */
+ 	disabled = ice_service_task_stop(pf);
+ 
+-	ice_unplug_aux_dev(pf);
++	ice_deinit_rdma(pf);
+ 
+ 	/* Already suspended?, then there is nothing to do */
+ 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
+@@ -5531,6 +5534,11 @@ static int __maybe_unused ice_resume(struct device *dev)
+ 	if (ret)
+ 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ 
++	ret = ice_init_rdma(pf);
++	if (ret)
++		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
++			ret);
++
+ 	clear_bit(ICE_DOWN, pf->state);
+ 	/* Now perform PF reset and rebuild */
+ 	reset_type = ICE_RESET_PFR;
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index b4ea935e83005..1472385eb68eb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1825,7 +1825,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
+ 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+-	    lkup_type == ICE_SW_LKUP_DFLT) {
++	    lkup_type == ICE_SW_LKUP_DFLT ||
++	    lkup_type == ICE_SW_LKUP_LAST) {
+ 		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
+ 	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
+ 		if (opc == ice_aqc_opc_alloc_res)
+@@ -2759,7 +2760,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC ||
+ 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+-	    lkup_type == ICE_SW_LKUP_DFLT)
++	    lkup_type == ICE_SW_LKUP_DFLT ||
++	    lkup_type == ICE_SW_LKUP_LAST)
+ 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ 	else if (lkup_type == ICE_SW_LKUP_VLAN)
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 23adf53c2aa1c..cebc79a710ec2 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4013,7 +4013,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
+ 			}
+ 		}
+ 
+-		skb = build_skb(data, frag_size);
++		if (frag_size)
++			skb = build_skb(data, frag_size);
++		else
++			skb = slab_build_skb(data);
+ 		if (!skb) {
+ 			netdev_warn(port->dev, "skb build failed\n");
+ 			goto err_drop_frame;
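
The mvpp2 hunk above dispatches on the buffer's origin: a non-zero frag_size means a page-fragment buffer that build_skb() can wrap, while zero signals kmalloc'ed memory that must go through slab_build_skb(). A kernel-context sketch of the same dispatch; rx_wrap_buf() is a hypothetical helper, not part of the mvpp2 driver:

#include <linux/skbuff.h>

/* Wrap an RX buffer in an skb based on how it was allocated: page-frag
 * buffers carry their truesize in frag_size, slab buffers pass zero.
 */
static struct sk_buff *rx_wrap_buf(void *data, unsigned int frag_size)
{
	if (frag_size)
		return build_skb(data, frag_size);	/* page-frag backed */
	return slab_build_skb(data);			/* kmalloc'ed buffer */
}
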
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+index 5664f768cb0cd..64a97a0a10ed6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+                otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+                otx2_devlink.o qos_sq.o qos.o
+-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
++rvu_nicvf-y := otx2_vf.o
+ 
+ rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+ rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+ 
+ ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+index 28fb643d2917f..aa01110f04a33 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_config);
+ 
+ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+ {
+@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
+ 
+ static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+ {
+@@ -260,6 +262,7 @@ int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_update);
+ 
+ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+ {
+@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(otx2_pfc_txschq_stop);
+ 
+ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+ {
+@@ -321,6 +325,7 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+ 	mutex_unlock(&pfvf->mbox.lock);
+ 	return err;
+ }
++EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
+ 
+ void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ 			       bool pfc_enable)
+@@ -385,6 +390,7 @@ void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ 			 "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ 			 qidx, err);
+ }
++EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
+ 
+ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+ {
+@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(otx2_dcbnl_set_ops);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+index 4e1130496573e..05956bf03c05d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+@@ -112,6 +112,7 @@ int otx2_register_dl(struct otx2_nic *pfvf)
+ 	devlink_free(dl);
+ 	return err;
+ }
++EXPORT_SYMBOL(otx2_register_dl);
+ 
+ void otx2_unregister_dl(struct otx2_nic *pfvf)
+ {
+@@ -123,3 +124,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
+ 				  ARRAY_SIZE(otx2_dl_params));
+ 	devlink_free(dl);
+ }
++EXPORT_SYMBOL(otx2_unregister_dl);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index f828d32737af0..04a49b9b545f3 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -1171,8 +1171,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+ 
+ 	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ 		/* Insert vlan tag before giving pkt to tso */
+-		if (skb_vlan_tag_present(skb))
++		if (skb_vlan_tag_present(skb)) {
+ 			skb = __vlan_hwaccel_push_inside(skb);
++			if (!skb)
++				return true;
++		}
+ 		otx2_sq_append_tso(pfvf, sq, skb, qidx);
+ 		return true;
+ 	}
+diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+index 8a6ae171e375b..def932035cba4 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -1148,8 +1148,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ 	if (netdev->phydev)
+ 		phy_ethtool_get_wol(netdev->phydev, wol);
+ 
+-	wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+-		WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
++	if (wol->supported != adapter->phy_wol_supported)
++		netif_warn(adapter, drv, adapter->netdev,
++			   "PHY changed its supported WOL! old=%x, new=%x\n",
++			   adapter->phy_wol_supported, wol->supported);
++
++	wol->supported |= MAC_SUPPORTED_WAKES;
+ 
+ 	if (adapter->is_pci11x1x)
+ 		wol->supported |= WAKE_MAGICSECURE;
+@@ -1164,7 +1168,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ {
+ 	struct lan743x_adapter *adapter = netdev_priv(netdev);
+ 
++	/* WAKE_MAGICSECURE is a modifier of, and is only valid together with,
++	 * WAKE_MAGIC
++	 */
++	if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
++		return -EINVAL;
++
++	if (netdev->phydev) {
++		struct ethtool_wolinfo phy_wol;
++		int ret;
++
++		phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
++
++		/* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
++		 * for PHYs that do not support WAKE_MAGICSECURE
++		 */
++		if (wol->wolopts & WAKE_MAGICSECURE &&
++		    !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
++			phy_wol.wolopts &= ~WAKE_MAGIC;
++
++		ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
++		if (ret && (ret != -EOPNOTSUPP))
++			return ret;
++
++		if (ret == -EOPNOTSUPP)
++			adapter->phy_wolopts = 0;
++		else
++			adapter->phy_wolopts = phy_wol.wolopts;
++	} else {
++		adapter->phy_wolopts = 0;
++	}
++
+ 	adapter->wolopts = 0;
++	wol->wolopts &= ~adapter->phy_wolopts;
+ 	if (wol->wolopts & WAKE_UCAST)
+ 		adapter->wolopts |= WAKE_UCAST;
+ 	if (wol->wolopts & WAKE_MCAST)
+@@ -1185,10 +1221,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ 		memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
+ 	}
+ 
++	wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
+ 	device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+ 
+-	return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+-			: -ENETDOWN;
++	return 0;
+ }
+ #endif /* CONFIG_PM */
+ 
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 75a988c0bd794..ecde3582e3dee 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -3111,6 +3111,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
+ 		if (ret)
+ 			goto close_tx;
+ 	}
++
++#ifdef CONFIG_PM
++	if (adapter->netdev->phydev) {
++		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++
++		phy_ethtool_get_wol(netdev->phydev, &wol);
++		adapter->phy_wol_supported = wol.supported;
++		adapter->phy_wolopts = wol.wolopts;
++	}
++#endif
++
+ 	return 0;
+ 
+ close_tx:
+@@ -3568,7 +3579,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+ 
+ 	/* clear wake settings */
+ 	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
+-	pmtctl |= PMT_CTL_WUPS_MASK_;
++	pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
+ 	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
+ 		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
+ 		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
+@@ -3580,10 +3591,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
+ 
+ 	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
+ 
+-	if (adapter->wolopts & WAKE_PHY) {
+-		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
++	if (adapter->phy_wolopts)
+ 		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
+-	}
++
+ 	if (adapter->wolopts & WAKE_MAGIC) {
+ 		wucsr |= MAC_WUCSR_MPEN_;
+ 		macrx |= MAC_RX_RXEN_;
+@@ -3679,7 +3689,7 @@ static int lan743x_pm_suspend(struct device *dev)
+ 	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
+ 	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
+ 
+-	if (adapter->wolopts)
++	if (adapter->wolopts || adapter->phy_wolopts)
+ 		lan743x_pm_set_wol(adapter);
+ 
+ 	if (adapter->is_pci11x1x) {
+@@ -3703,6 +3713,7 @@ static int lan743x_pm_resume(struct device *dev)
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct lan743x_adapter *adapter = netdev_priv(netdev);
++	u32 data;
+ 	int ret;
+ 
+ 	pci_set_power_state(pdev, PCI_D0);
+@@ -3721,6 +3732,30 @@ static int lan743x_pm_resume(struct device *dev)
+ 		return ret;
+ 	}
+ 
++	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
++	netif_dbg(adapter, drv, adapter->netdev,
++		  "Wakeup source : 0x%08X\n", ret);
++
++	/* Clear the wol configuration and status bits. Note that
++	 * the status bits are "Write One to Clear (W1C)"
++	 */
++	data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
++	       MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
++	       MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
++	lan743x_csr_write(adapter, MAC_WUCSR, data);
++
++	data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
++	       MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
++	lan743x_csr_write(adapter, MAC_WUCSR2, data);
++
++	data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
++	       MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
++	       MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
++	       MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
++	       MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
++	       MAC_WK_SRC_WK_FR_SAVED_;
++	lan743x_csr_write(adapter, MAC_WK_SRC, data);
++
+ 	/* Open the netdev if it was in the running state when we suspended.
+ 	 * For instance, this is true when the system wakes up after
+ 	 * pm-suspend, but false when the system wakes up via the suspend GUI menu
+@@ -3729,9 +3764,6 @@ static int lan743x_pm_resume(struct device *dev)
+ 		lan743x_netdev_open(netdev);
+ 
+ 	netif_device_attach(netdev);
+-	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+-	netif_info(adapter, drv, adapter->netdev,
+-		   "Wakeup source : 0x%08X\n", ret);
+ 
+ 	return 0;
+ }
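
The resume path above clears MAC_WUCSR, MAC_WUCSR2 and MAC_WK_SRC by writing ones, since those status bits are Write One to Clear. A tiny simulation of the W1C idiom, with the register modeled as a plain variable:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg = 0xA5;		/* pending status bits */

static void reg_write_w1c(uint32_t val)
{
	reg &= ~val;			/* hardware clears the 1s written */
}

int main(void)
{
	reg_write_w1c(0xFF);		/* clear all status bits at once */
	printf("reg=%#x\n", reg);	/* -> reg=0 */
	return 0;
}
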
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index 645bc048e52ef..3b2585a384e2c 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -61,6 +61,7 @@
+ #define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_		BIT(18)
+ #define PMT_CTL_GPIO_WAKEUP_EN_			BIT(15)
+ #define PMT_CTL_EEE_WAKEUP_EN_			BIT(13)
++#define PMT_CTL_RES_CLR_WKP_MASK_		GENMASK(9, 8)
+ #define PMT_CTL_READY_				BIT(7)
+ #define PMT_CTL_ETH_PHY_RST_			BIT(4)
+ #define PMT_CTL_WOL_EN_				BIT(3)
+@@ -227,12 +228,31 @@
+ #define MAC_WUCSR				(0x140)
+ #define MAC_MP_SO_EN_				BIT(21)
+ #define MAC_WUCSR_RFE_WAKE_EN_			BIT(14)
++#define MAC_WUCSR_EEE_TX_WAKE_			BIT(13)
++#define MAC_WUCSR_EEE_RX_WAKE_			BIT(11)
++#define MAC_WUCSR_RFE_WAKE_FR_			BIT(9)
++#define MAC_WUCSR_PFDA_FR_			BIT(7)
++#define MAC_WUCSR_WUFR_				BIT(6)
++#define MAC_WUCSR_MPR_				BIT(5)
++#define MAC_WUCSR_BCAST_FR_			BIT(4)
+ #define MAC_WUCSR_PFDA_EN_			BIT(3)
+ #define MAC_WUCSR_WAKE_EN_			BIT(2)
+ #define MAC_WUCSR_MPEN_				BIT(1)
+ #define MAC_WUCSR_BCST_EN_			BIT(0)
+ 
+ #define MAC_WK_SRC				(0x144)
++#define MAC_WK_SRC_ETH_PHY_WK_			BIT(17)
++#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_		BIT(16)
++#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_		BIT(15)
++#define MAC_WK_SRC_EEE_TX_WK_			BIT(14)
++#define MAC_WK_SRC_EEE_RX_WK_			BIT(13)
++#define MAC_WK_SRC_RFE_FR_WK_			BIT(12)
++#define MAC_WK_SRC_PFDA_FR_WK_			BIT(11)
++#define MAC_WK_SRC_MP_FR_WK_			BIT(10)
++#define MAC_WK_SRC_BCAST_FR_WK_			BIT(9)
++#define MAC_WK_SRC_WU_FR_WK_			BIT(8)
++#define MAC_WK_SRC_WK_FR_SAVED_			BIT(7)
++
+ #define MAC_MP_SO_HI				(0x148)
+ #define MAC_MP_SO_LO				(0x14C)
+ 
+@@ -295,6 +315,10 @@
+ #define RFE_INDX(index)			(0x580 + (index << 2))
+ 
+ #define MAC_WUCSR2			(0x600)
++#define MAC_WUCSR2_NS_RCD_		BIT(7)
++#define MAC_WUCSR2_ARP_RCD_		BIT(6)
++#define MAC_WUCSR2_IPV6_TCPSYN_RCD_	BIT(5)
++#define MAC_WUCSR2_IPV4_TCPSYN_RCD_	BIT(4)
+ 
+ #define SGMII_ACC			(0x720)
+ #define SGMII_ACC_SGMII_BZY_		BIT(31)
+@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
+ 	LINK_2500_SLAVE
+ };
+ 
++#define MAC_SUPPORTED_WAKES  (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
++			      WAKE_MAGIC | WAKE_ARP)
+ struct lan743x_adapter {
+ 	struct net_device       *netdev;
+ 	struct mii_bus		*mdiobus;
+@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
+ #ifdef CONFIG_PM
+ 	u32			wolopts;
+ 	u8			sopass[SOPASS_MAX];
++	u32			phy_wolopts;
++	u32			phy_wol_supported;
+ #endif
+ 	struct pci_dev		*pdev;
+ 	struct lan743x_csr      csr;
+diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
+index ff3b89e9028e9..ad06da0fdaa04 100644
+--- a/drivers/net/ethernet/qualcomm/qca_debug.c
++++ b/drivers/net/ethernet/qualcomm/qca_debug.c
+@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
+ 
+ 	seq_printf(s, "IRQ              : %d\n",
+ 		   qca->spi_dev->irq);
+-	seq_printf(s, "INTR REQ         : %u\n",
+-		   qca->intr_req);
+-	seq_printf(s, "INTR SVC         : %u\n",
+-		   qca->intr_svc);
++	seq_printf(s, "INTR             : %lx\n",
++		   qca->intr);
+ 
+ 	seq_printf(s, "SPI max speed    : %lu\n",
+ 		   (unsigned long)qca->spi_dev->max_speed_hz);
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 5799ecc88a875..8f7ce6b51a1c9 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -35,6 +35,8 @@
+ 
+ #define MAX_DMA_BURST_LEN 5000
+ 
++#define SPI_INTR 0
++
+ /*   Modules parameters     */
+ #define QCASPI_CLK_SPEED_MIN 1000000
+ #define QCASPI_CLK_SPEED_MAX 16000000
+@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
+ 			continue;
+ 		}
+ 
+-		if ((qca->intr_req == qca->intr_svc) &&
++		if (!test_bit(SPI_INTR, &qca->intr) &&
+ 		    !qca->txr.skb[qca->txr.head])
+ 			schedule();
+ 
+ 		set_current_state(TASK_RUNNING);
+ 
+-		netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
+-			   qca->intr_req - qca->intr_svc,
++		netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
++			   qca->intr,
+ 			   qca->txr.skb[qca->txr.head]);
+ 
+ 		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
+@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
+ 			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
+ 		}
+ 
+-		if (qca->intr_svc != qca->intr_req) {
+-			qca->intr_svc = qca->intr_req;
++		if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
+ 			start_spi_intr_handling(qca, &intr_cause);
+ 
+ 			if (intr_cause & SPI_INT_CPU_ON) {
+@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
+ {
+ 	struct qcaspi *qca = data;
+ 
+-	qca->intr_req++;
++	set_bit(SPI_INTR, &qca->intr);
+ 	if (qca->spi_thread)
+ 		wake_up_process(qca->spi_thread);
+ 
+@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
+ 	if (!qca)
+ 		return -EINVAL;
+ 
+-	qca->intr_req = 1;
+-	qca->intr_svc = 0;
++	set_bit(SPI_INTR, &qca->intr);
+ 	qca->sync = QCASPI_SYNC_UNKNOWN;
+ 	qcafrm_fsm_init_spi(&qca->frm_handle);
+ 
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index d59cb2352ceec..8f4808695e820 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -81,8 +81,7 @@ struct qcaspi {
+ 	struct qcafrm_handle frm_handle;
+ 	struct sk_buff *rx_skb;
+ 
+-	unsigned int intr_req;
+-	unsigned int intr_svc;
++	unsigned long intr;
+ 	u16 reset_count;
+ 
+ #ifdef CONFIG_DEBUG_FS
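
The qca_spi change above collapses the intr_req/intr_svc counter pair into one bit that the IRQ handler sets and the SPI thread consumes with test_and_clear_bit(), so coalesced interrupts can no longer be miscounted. A userspace sketch of the same edge-coalescing handoff, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong intr;

static void irq_handler(void)			/* producer side */
{
	atomic_fetch_or(&intr, 1UL);		/* like set_bit(SPI_INTR, ...) */
}

static int work_pending(void)			/* consumer side */
{
	/* like test_and_clear_bit(): returns the old bit, leaves it cleared */
	return atomic_fetch_and(&intr, ~1UL) & 1UL;
}

int main(void)
{
	irq_handler();
	irq_handler();				/* coalesces into the same bit */
	printf("first=%d second=%d\n", work_pending(), work_pending());
	return 0;				/* -> first=1 second=0 */
}
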
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index f05bd757dfe52..5ef52ef2698fb 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ {
+ 	u32 num_snapshot, ts_status, tsync_int;
+ 	struct ptp_clock_event event;
++	u32 acr_value, channel;
+ 	unsigned long flags;
+ 	u64 ptp_time;
+ 	int i;
+@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
+ 	num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+ 		       GMAC_TIMESTAMP_ATSNS_SHIFT;
+ 
++	acr_value = readl(priv->ptpaddr + PTP_ACR);
++	channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
++
+ 	for (i = 0; i < num_snapshot; i++) {
+ 		read_lock_irqsave(&priv->ptp_lock, flags);
+ 		get_ptptime(priv->ptpaddr, &ptp_time);
+ 		read_unlock_irqrestore(&priv->ptp_lock, flags);
+ 		event.type = PTP_CLOCK_EXTTS;
+-		event.index = 0;
++		event.index = channel;
+ 		event.timestamp = ptp_time;
+ 		ptp_clock_event(priv->ptp_clock, &event);
+ 	}
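
The stmmac hunk above recovers the auxiliary-snapshot channel from PTP_ACR: the enable field is one-hot, so FIELD_GET() plus ilog2() turns the set bit into a channel index. A userspace sketch of that derivation; the PTP_ACR_MASK position here is an assumed value, not the hardware layout:

#include <stdio.h>

#define PTP_ACR_MASK 0xF0			/* assumed field position */

static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	return (reg & mask) >> __builtin_ctz(mask);	/* like FIELD_GET() */
}

int main(void)
{
	unsigned int acr = 0x40;		/* channel 2 enabled (one-hot) */
	unsigned int channel = 31 - __builtin_clz(field_get(PTP_ACR_MASK, acr));

	printf("channel=%u\n", channel);	/* -> channel=2 */
	return 0;
}
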
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index b1896379dbab5..7d240a2b54a85 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -358,24 +358,28 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 
+ 	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+ 
+-	/* Port Transmit Rate and Speed Divider */
+-	switch (div_s64(port_transmit_rate_kbps, 1000)) {
+-	case SPEED_10000:
+-	case SPEED_5000:
+-		ptr = 32;
+-		break;
+-	case SPEED_2500:
+-	case SPEED_1000:
+-		ptr = 8;
+-		break;
+-	case SPEED_100:
+-		ptr = 4;
+-		break;
+-	default:
+-		netdev_err(priv->dev,
+-			   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+-			   port_transmit_rate_kbps);
+-		return -EINVAL;
++	if (qopt->enable) {
++		/* Port Transmit Rate and Speed Divider */
++		switch (div_s64(port_transmit_rate_kbps, 1000)) {
++		case SPEED_10000:
++		case SPEED_5000:
++			ptr = 32;
++			break;
++		case SPEED_2500:
++		case SPEED_1000:
++			ptr = 8;
++			break;
++		case SPEED_100:
++			ptr = 4;
++			break;
++		default:
++			netdev_err(priv->dev,
++				   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
++				   port_transmit_rate_kbps);
++			return -EINVAL;
++		}
++	} else {
++		ptr = 0;
+ 	}
+ 
+ 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
+index 326c9770a6dcc..c706429b225a2 100644
+--- a/drivers/net/phy/dp83tg720.c
++++ b/drivers/net/phy/dp83tg720.c
+@@ -17,6 +17,11 @@
+ #define DP83TG720S_PHY_RESET			0x1f
+ #define DP83TG720S_HW_RESET			BIT(15)
+ 
++#define DP83TG720S_LPS_CFG3			0x18c
++/* Power modes are documented as bit fields but used as values */
++/* Power Mode 0 is Normal mode */
++#define DP83TG720S_LPS_CFG3_PWR_MODE_0		BIT(0)
++
+ #define DP83TG720S_RGMII_DELAY_CTRL		0x602
+ /* In RGMII mode, Enable or disable the internal delay for RXD */
+ #define DP83TG720S_RGMII_RX_CLK_SEL		BIT(1)
+@@ -31,11 +36,20 @@
+ 
+ static int dp83tg720_config_aneg(struct phy_device *phydev)
+ {
++	int ret;
++
+ 	/* Autoneg is not supported and this PHY supports only one speed.
+ 	 * We only need to care about the master/slave configuration if it
+ 	 * was changed by the user.
+ 	 */
+-	return genphy_c45_pma_baset1_setup_master_slave(phydev);
++	ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
++	if (ret)
++		return ret;
++
++	/* Re-read role configuration to make changes visible even if
++	 * the link is in administrative down state.
++	 */
++	return genphy_c45_pma_baset1_read_master_slave(phydev);
+ }
+ 
+ static int dp83tg720_read_status(struct phy_device *phydev)
+@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
+ 			return ret;
+ 
+ 		/* After HW reset we need to restore master/slave configuration.
++		 * The genphy_c45_pma_baset1_read_master_slave() call will be
++		 * made by the dp83tg720_config_aneg() function.
+ 		 */
+ 		ret = dp83tg720_config_aneg(phydev);
+ 		if (ret)
+@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
+ 	 */
+ 	usleep_range(1000, 2000);
+ 
+-	if (phy_interface_is_rgmii(phydev))
+-		return dp83tg720_config_rgmii_delay(phydev);
++	if (phy_interface_is_rgmii(phydev)) {
++		ret = dp83tg720_config_rgmii_delay(phydev);
++		if (ret)
++			return ret;
++	}
++
++	/* In case the PHY is bootstrapped in managed mode, we need to
++	 * wake it.
++	 */
++	ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
++			    DP83TG720S_LPS_CFG3_PWR_MODE_0);
++	if (ret)
++		return ret;
+ 
+-	return 0;
++	/* Make the role configuration visible to ethtool on init and after
++	 * reset.
++	 */
++	return genphy_c45_pma_baset1_read_master_slave(phydev);
+ }
+ 
+ static struct phy_driver dp83tg720_driver[] = {
+diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
+index b2d36a3a96f1e..e5f8ac4b4604b 100644
+--- a/drivers/net/phy/mxl-gpy.c
++++ b/drivers/net/phy/mxl-gpy.c
+@@ -107,6 +107,7 @@ struct gpy_priv {
+ 
+ 	u8 fw_major;
+ 	u8 fw_minor;
++	u32 wolopts;
+ 
+ 	/* It takes 3 seconds to fully switch out of loopback mode before
+ 	 * it can safely re-enter loopback mode. Record the time when
+@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
+ }
+ #endif
+ 
++static int gpy_ack_interrupt(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Clear all pending interrupts */
++	ret = phy_read(phydev, PHY_ISTAT);
++	return ret < 0 ? ret : 0;
++}
++
+ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+ {
+ 	struct gpy_priv *priv = phydev->priv;
+@@ -262,16 +272,8 @@ static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+ 
+ static int gpy_config_init(struct phy_device *phydev)
+ {
+-	int ret;
+-
+-	/* Mask all interrupts */
+-	ret = phy_write(phydev, PHY_IMASK, 0);
+-	if (ret)
+-		return ret;
+-
+-	/* Clear all pending interrupts */
+-	ret = phy_read(phydev, PHY_ISTAT);
+-	return ret < 0 ? ret : 0;
++	/* Nothing to configure; kept as a placeholder for future requirements */
++	return 0;
+ }
+ 
+ static int gpy21x_config_init(struct phy_device *phydev)
+@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
+ 
+ static int gpy_config_intr(struct phy_device *phydev)
+ {
++	struct gpy_priv *priv = phydev->priv;
+ 	u16 mask = 0;
++	int ret;
++
++	ret = gpy_ack_interrupt(phydev);
++	if (ret)
++		return ret;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ 		mask = PHY_IMASK_MASK;
+ 
++	if (priv->wolopts & WAKE_MAGIC)
++		mask |= PHY_IMASK_WOL;
++
++	if (priv->wolopts & WAKE_PHY)
++		mask |= PHY_IMASK_LSTC;
++
+ 	return phy_write(phydev, PHY_IMASK, mask);
+ }
+ 
+@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		       struct ethtool_wolinfo *wol)
+ {
+ 	struct net_device *attach_dev = phydev->attached_dev;
++	struct gpy_priv *priv = phydev->priv;
+ 	int ret;
+ 
+ 	if (wol->wolopts & WAKE_MAGIC) {
+@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		ret = phy_read(phydev, PHY_ISTAT);
+ 		if (ret < 0)
+ 			return ret;
++
++		priv->wolopts |= WAKE_MAGIC;
+ 	} else {
+ 		/* Disable magic packet matching */
+ 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 					 WOL_EN);
+ 		if (ret < 0)
+ 			return ret;
++
++		/* Disable the WOL interrupt */
++		ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
++		if (ret < 0)
++			return ret;
++
++		priv->wolopts &= ~WAKE_MAGIC;
+ 	}
+ 
+ 	if (wol->wolopts & WAKE_PHY) {
+@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
+ 		if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
+ 			phy_trigger_machine(phydev);
+ 
++		priv->wolopts |= WAKE_PHY;
+ 		return 0;
+ 	}
+ 
++	priv->wolopts &= ~WAKE_PHY;
+ 	/* Disable the link state change interrupt */
+ 	return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
+ }
+@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
+ static void gpy_get_wol(struct phy_device *phydev,
+ 			struct ethtool_wolinfo *wol)
+ {
+-	int ret;
++	struct gpy_priv *priv = phydev->priv;
+ 
+ 	wol->supported = WAKE_MAGIC | WAKE_PHY;
+-	wol->wolopts = 0;
+-
+-	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
+-	if (ret & WOL_EN)
+-		wol->wolopts |= WAKE_MAGIC;
+-
+-	ret = phy_read(phydev, PHY_IMASK);
+-	if (ret & PHY_IMASK_LSTC)
+-		wol->wolopts |= WAKE_PHY;
++	wol->wolopts = priv->wolopts;
+ }
+ 
+ static int gpy_loopback(struct phy_device *phydev, bool enable)
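
With wolopts cached in gpy_priv, gpy_config_intr() above can rebuild the interrupt mask so the WoL and link-state-change sources stay armed whenever the corresponding wake options are active. A userspace sketch of that mask composition; the bit values are illustrative, only the flag roles mirror the driver:

#include <stdio.h>

#define IMASK_ALL  0x7F		/* stand-in for PHY_IMASK_MASK */
#define IMASK_WOL  0x40		/* stand-in for PHY_IMASK_WOL */
#define IMASK_LSTC 0x01		/* stand-in for PHY_IMASK_LSTC */
#define WAKE_MAGIC 0x20
#define WAKE_PHY   0x01

static unsigned int build_imask(int irq_enabled, unsigned int wolopts)
{
	unsigned int mask = irq_enabled ? IMASK_ALL : 0;

	if (wolopts & WAKE_MAGIC)
		mask |= IMASK_WOL;	/* keep the WoL IRQ armed */
	if (wolopts & WAKE_PHY)
		mask |= IMASK_LSTC;	/* keep the link-change IRQ armed */
	return mask;
}

int main(void)
{
	printf("mask=%#x\n", build_imask(0, WAKE_MAGIC));	/* -> 0x40 */
	return 0;
}
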
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index d999d9baadb26..52b71c7e78351 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp)
+ 	sfp->phy_t_retry = msecs_to_jiffies(1000);
+ }
+ 
+-static void sfp_fixup_fs_10gt(struct sfp *sfp)
++static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
+ {
+-	sfp_fixup_10gbaset_30m(sfp);
+ 	sfp_fixup_rollball(sfp);
+ 
+-	/* The RollBall fixup is not enough for FS modules, the AQR chip inside
++	/* The RollBall fixup is not enough for FS modules, the PHY chip inside
+ 	 * them does not return 0xffff for PHY ID registers in all MMDs for
+ 	 * a while during initialization. They need a 4 second wait before
+ 	 * accessing the PHY.
+ 	 */
+ 	sfp->module_t_wait = msecs_to_jiffies(4000);
+ }
+ 
++static void sfp_fixup_fs_10gt(struct sfp *sfp)
++{
++	sfp_fixup_10gbaset_30m(sfp);
++	sfp_fixup_fs_2_5gt(sfp);
++}
++
+ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+ {
+ 	/* Ignore the TX_FAULT and LOS signals on this module.
+@@ -472,6 +477,10 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	// Rollball protocol to talk to the PHY.
+ 	SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ 
++	// Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
++	// needs 4 sec wait before probing the PHY.
++	SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
++
+ 	// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
+ 	// NRZ in their EEPROM
+ 	SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
+@@ -488,9 +497,6 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ 		  sfp_fixup_ignore_tx_fault),
+ 
+-	// FS 2.5G Base-T
+-	SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+-
+ 	// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ 	// 2500MBd NRZ in their EEPROM
+ 	SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+@@ -502,6 +508,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
+ 	SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ 
++	// OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
++	SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
++
+ 	SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ 	SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ 	SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 56ede5fa02617..94d8b647f0f57 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -174,7 +174,6 @@ struct ax88179_data {
+ 	u32 wol_supported;
+ 	u32 wolopts;
+ 	u8 disconnecting;
+-	u8 initialized;
+ };
+ 
+ struct ax88179_int_data {
+@@ -1676,12 +1675,21 @@ static int ax88179_reset(struct usbnet *dev)
+ 
+ static int ax88179_net_reset(struct usbnet *dev)
+ {
+-	struct ax88179_data *ax179_data = dev->driver_priv;
++	u16 tmp16;
+ 
+-	if (ax179_data->initialized)
++	ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
++			 2, &tmp16);
++	if (tmp16) {
++		ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++				 2, 2, &tmp16);
++		if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
++			tmp16 |= AX_MEDIUM_RECEIVE_EN;
++			ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
++					  2, 2, &tmp16);
++		}
++	} else {
+ 		ax88179_reset(dev);
+-	else
+-		ax179_data->initialized = 1;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 97afd7335d868..01a3b2417a540 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
+ 				      struct ethtool_link_ksettings *ecmd)
+ {
+ 	rtl8150_t *dev = netdev_priv(netdev);
+-	short lpa, bmcr;
++	short lpa = 0;
++	short bmcr = 0;
+ 	u32 supported;
+ 
+ 	supported = (SUPPORTED_10baseT_Half |
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 574b052a517d7..290bec2926463 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1225,6 +1225,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
+ 	if (unlikely(hdr->hdr.gso_type))
+ 		goto err_xdp;
+ 
++	/* Partially checksummed packets must be dropped. */
++	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++		goto err_xdp;
++
+ 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+ 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 
+@@ -1542,6 +1546,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
+ 	if (unlikely(hdr->hdr.gso_type))
+ 		return NULL;
+ 
++	/* Partially checksummed packets must be dropped. */
++	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
++		return NULL;
++
+ 	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
+ 	 * with headroom may add hole in truesize, which
+ 	 * make their length exceed PAGE_SIZE. So we disabled the
+@@ -1808,6 +1816,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ 	struct net_device *dev = vi->dev;
+ 	struct sk_buff *skb;
+ 	struct virtio_net_common_hdr *hdr;
++	u8 flags;
+ 
+ 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
+ 		pr_debug("%s: short packet %i\n", dev->name, len);
+@@ -1816,6 +1825,15 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ 		return;
+ 	}
+ 
++	/* 1. Save the flags early, as the XDP program might overwrite them.
++	 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
++	 * stay valid after XDP processing.
++	 * 2. XDP doesn't work with partially checksummed packets (refer to
++	 * virtnet_xdp_set()), so packets marked as
++	 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
++	 */
++	flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
++
+ 	if (vi->mergeable_rx_bufs)
+ 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ 					stats);
+@@ -1831,7 +1849,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ 	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+ 		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
+ 
+-	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
++	if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+@@ -4698,8 +4716,16 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ 		/* (!csum && gso) case will be fixed by register_netdev() */
+ 	}
+-	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+-		dev->features |= NETIF_F_RXCSUM;
++
++	/* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
++	 * need to calculate checksums for partially checksummed packets,
++	 * as they're considered valid by the upper layer.
++	 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
++	 * receives fully checksummed packets. The device may assist in
++	 * validating these packets' checksums, so the driver won't have to.
++	 */
++	dev->features |= NETIF_F_RXCSUM;
++
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
+ 		dev->features |= NETIF_F_GRO_HW;
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index f02a308a9ffc5..34654f710d8a1 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -171,8 +171,10 @@ struct ath_common {
+ 	unsigned int clockrate;
+ 
+ 	spinlock_t cc_lock;
+-	struct ath_cycle_counters cc_ani;
+-	struct ath_cycle_counters cc_survey;
++	struct_group(cc,
++		struct ath_cycle_counters cc_ani;
++		struct ath_cycle_counters cc_survey;
++	);
+ 
+ 	struct ath_regulatory regulatory;
+ 	struct ath_regulatory reg_world_copy;
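
struct_group() above wraps cc_ani and cc_survey so callers can clear both with one correctly-sized memset while still addressing each member by its old name, as the ath9k hunk later in this patch does with memset(&common->cc, ...). A userspace approximation of the macro's union trick, assuming nothing about the real ath_cycle_counters layout:

#include <stdio.h>
#include <string.h>

struct counters { long cycles, busy; };

struct common {
	union {
		struct { struct counters cc_ani, cc_survey; };	/* old names */
		struct { struct counters cc_ani, cc_survey; } cc; /* group view */
	};
};

int main(void)
{
	struct common c = { .cc_ani = { 1, 2 }, .cc_survey = { 3, 4 } };

	memset(&c.cc, 0, sizeof(c.cc));		/* clears both at once */
	printf("%ld %ld\n", c.cc_ani.cycles, c.cc_survey.busy);	/* -> 0 0 */
	return 0;
}
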
+diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
+index 391b6fb2bd426..bff4598de4035 100644
+--- a/drivers/net/wireless/ath/ath12k/core.c
++++ b/drivers/net/wireless/ath/ath12k/core.c
+@@ -1132,7 +1132,6 @@ static void ath12k_core_reset(struct work_struct *work)
+ 						ATH12K_RECOVER_START_TIMEOUT_HZ);
+ 
+ 	ath12k_hif_power_down(ab);
+-	ath12k_qmi_free_resource(ab);
+ 	ath12k_hif_power_up(ab);
+ 
+ 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index 52a5fb8b03e9a..82ef4d4da681e 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -6286,14 +6286,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
+ 				     enum nl80211_band band,
+ 				     enum nl80211_iftype type)
+ {
+-	struct ieee80211_sta_eht_cap *eht_cap;
++	struct ieee80211_sta_eht_cap *eht_cap = NULL;
+ 	enum wmi_phy_mode down_mode;
++	int n = ar->mac.sbands[band].n_iftype_data;
++	int i;
++	struct ieee80211_sband_iftype_data *data;
+ 
+ 	if (mode < MODE_11BE_EHT20)
+ 		return mode;
+ 
+-	eht_cap = &ar->mac.iftype[band][type].eht_cap;
+-	if (eht_cap->has_eht)
++	data = ar->mac.iftype[band];
++	for (i = 0; i < n; i++) {
++		if (data[i].types_mask & BIT(type)) {
++			eht_cap = &data[i].eht_cap;
++			break;
++		}
++	}
++
++	if (eht_cap && eht_cap->has_eht)
+ 		return mode;
+ 
+ 	switch (mode) {
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
+index 40b6abccbb508..e8eb996380eaf 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.c
++++ b/drivers/net/wireless/ath/ath12k/qmi.c
+@@ -2325,8 +2325,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+ 	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ 		if (!ab->qmi.target_mem[i].v.addr)
+ 			continue;
++
+ 		dma_free_coherent(ab->dev,
+-				  ab->qmi.target_mem[i].size,
++				  ab->qmi.target_mem[i].prev_size,
+ 				  ab->qmi.target_mem[i].v.addr,
+ 				  ab->qmi.target_mem[i].paddr);
+ 		ab->qmi.target_mem[i].v.addr = NULL;
+@@ -2352,6 +2353,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+ 		case M3_DUMP_REGION_TYPE:
+ 		case PAGEABLE_MEM_REGION_TYPE:
+ 		case CALDB_MEM_REGION_TYPE:
++			/* Firmware reloads in recovery/resume.
++			 * In such cases, there is no need to allocate memory for FW again.
++			 */
++			if (chunk->v.addr) {
++				if (chunk->prev_type == chunk->type &&
++				    chunk->prev_size == chunk->size)
++					goto this_chunk_done;
++
++				/* cannot reuse the existing chunk */
++				dma_free_coherent(ab->dev, chunk->prev_size,
++						  chunk->v.addr, chunk->paddr);
++				chunk->v.addr = NULL;
++			}
++
+ 			chunk->v.addr = dma_alloc_coherent(ab->dev,
+ 							   chunk->size,
+ 							   &chunk->paddr,
+@@ -2370,6 +2385,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+ 					    chunk->type, chunk->size);
+ 				return -ENOMEM;
+ 			}
++
++			chunk->prev_type = chunk->type;
++			chunk->prev_size = chunk->size;
++this_chunk_done:
+ 			break;
+ 		default:
+ 			ath12k_warn(ab, "memory type %u not supported\n",
+@@ -2666,6 +2685,19 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ 	return ret;
+ }
+ 
++static void ath12k_qmi_m3_free(struct ath12k_base *ab)
++{
++	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
++
++	if (!m3_mem->vaddr)
++		return;
++
++	dma_free_coherent(ab->dev, m3_mem->size,
++			  m3_mem->vaddr, m3_mem->paddr);
++	m3_mem->vaddr = NULL;
++	m3_mem->size = 0;
++}
++
+ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+ {
+ 	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+@@ -2675,10 +2707,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+ 	size_t m3_len;
+ 	int ret;
+ 
+-	if (m3_mem->vaddr)
+-		/* m3 firmware buffer is already available in the DMA buffer */
+-		return 0;
+-
+ 	if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+ 		/* firmware-N.bin had a m3 firmware file so use that */
+ 		m3_data = ab->fw.m3_data;
+@@ -2700,6 +2728,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+ 		m3_len = fw->size;
+ 	}
+ 
++	/* In recovery/resume cases, the M3 buffer is not freed; try to reuse it */
++	if (m3_mem->vaddr) {
++		if (m3_mem->size >= m3_len)
++			goto skip_m3_alloc;
++
++		/* Old buffer is too small, free and reallocate */
++		ath12k_qmi_m3_free(ab);
++	}
++
+ 	m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ 					   m3_len, &m3_mem->paddr,
+ 					   GFP_KERNEL);
+@@ -2710,6 +2747,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+ 		goto out;
+ 	}
+ 
++skip_m3_alloc:
+ 	memcpy(m3_mem->vaddr, m3_data, m3_len);
+ 	m3_mem->size = m3_len;
+ 
+@@ -2721,19 +2759,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+ 	return ret;
+ }
+ 
+-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+-{
+-	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+-
+-	if (!m3_mem->vaddr)
+-		return;
+-
+-	dma_free_coherent(ab->dev, m3_mem->size,
+-			  m3_mem->vaddr, m3_mem->paddr);
+-	m3_mem->vaddr = NULL;
+-	m3_mem->size = 0;
+-}
+-
+ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+ {
+ 	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
+index 6ee33c9851c6b..f34263d4bee88 100644
+--- a/drivers/net/wireless/ath/ath12k/qmi.h
++++ b/drivers/net/wireless/ath/ath12k/qmi.h
+@@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg {
+ struct target_mem_chunk {
+ 	u32 size;
+ 	u32 type;
++	u32 prev_size;
++	u32 prev_type;
+ 	dma_addr_t paddr;
+ 	union {
+ 		void __iomem *ioaddr;
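
With prev_size/prev_type recorded above, ath12k reuses a target memory chunk across firmware reloads only when both match the new request; otherwise the old buffer is freed and reallocated. A userspace sketch of that reuse test, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent():

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	void *addr;
	unsigned int size, type, prev_size, prev_type;
};

static int chunk_alloc(struct chunk *c)
{
	if (c->addr) {
		if (c->prev_type == c->type && c->prev_size == c->size)
			return 0;		/* reuse the existing buffer */
		free(c->addr);			/* cannot reuse */
		c->addr = NULL;
	}
	c->addr = malloc(c->size);
	if (!c->addr)
		return -1;
	c->prev_type = c->type;			/* record for the next reload */
	c->prev_size = c->size;
	return 0;
}

int main(void)
{
	struct chunk c = { .size = 64, .type = 1 };

	chunk_alloc(&c);			/* first allocation */
	chunk_alloc(&c);			/* same type/size: reused */
	c.size = 128;
	chunk_alloc(&c);			/* size changed: freed + realloc'd */
	printf("size=%u\n", c.prev_size);	/* -> size=128 */
	free(c.addr);
	return 0;
}
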
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a2943aaecb202..01173aac30456 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
+ 	if (power_mode != ATH9K_PM_AWAKE) {
+ 		spin_lock(&common->cc_lock);
+ 		ath_hw_cycle_counters_update(common);
+-		memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+-		memset(&common->cc_ani, 0, sizeof(common->cc_ani));
++		memset(&common->cc, 0, sizeof(common->cc));
+ 		spin_unlock(&common->cc_lock);
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 2403ac2fcdc3b..5f6b16d3fc8a3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -4643,7 +4643,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
+ 
+ 	if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
+ 		ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
+-	} else if (fw_ver == 3) {
++	} else if (fw_ver >= 3) {
+ 		ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
+ 					  ROC_ACTIVITY_HOTSPOT);
+ 	} else {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 867e14f6b93a0..73e42ef429837 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -663,6 +663,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ 	int i, ret;
+ 
+ 	dev_dbg(dev->mt76.dev, "chip reset\n");
++	set_bit(MT76_RESET, &dev->mphy.state);
+ 	dev->hw_full_reset = true;
+ 	ieee80211_stop_queues(hw);
+ 
+@@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
+ 	}
+ 
+ 	dev->hw_full_reset = false;
++	clear_bit(MT76_RESET, &dev->mphy.state);
+ 	pm->suspended = false;
+ 	ieee80211_wake_queues(hw);
+ 	ieee80211_iterate_active_interfaces(hw,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+index c866144ff0613..031ba9aaa4e2f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+@@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
+ 	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
+ 	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ 
+-	set_bit(MT76_RESET, &dev->mphy.state);
+ 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 	wake_up(&dev->mt76.mcu.wait);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
+ 
+ 	err = __mt7921_start(&dev->phy);
+ out:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 
+ 	local_bh_disable();
+ 	napi_enable(&dev->mt76.tx_napi);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+index 389eb0903807e..1f77cf71ca701 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+ 	mt76_txq_schedule_all(&dev->mphy);
+ 	mt76_worker_disable(&dev->mt76.tx_worker);
+-	set_bit(MT76_RESET, &dev->mphy.state);
+ 	set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ 	wake_up(&dev->mt76.mcu.wait);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
+ 
+ 	err = __mt7921_start(&dev->phy);
+ out:
+-	clear_bit(MT76_RESET, &dev->mphy.state);
+ 
+ 	mt76_worker_enable(&dev->mt76.tx_worker);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 3e88798df0178..a4ed00eebc483 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
+ 	dev = container_of(sdio, struct mt76_dev, sdio);
+ 
+ 	while (true) {
+-		if (test_bit(MT76_REMOVED, &dev->phy.state))
++		if (test_bit(MT76_RESET, &dev->phy.state) ||
++		    test_bit(MT76_REMOVED, &dev->phy.state))
+ 			break;
+ 
+ 		if (!dev->drv->tx_status_data(dev, &update))
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index d474b8d5df3dd..b8d419a5b9db0 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -4069,6 +4069,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve
+ 	}
+ }
+ 
++void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
++{
++	const struct dmi_system_id *match;
++	enum rtw89_quirks quirk;
++
++	if (!quirks)
++		return;
++
++	for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
++		quirk = (uintptr_t)match->driver_data;
++		if (quirk >= NUM_OF_RTW89_QUIRKS)
++			continue;
++
++		set_bit(quirk, rtwdev->quirks);
++	}
++}
++EXPORT_SYMBOL(rtw89_check_quirks);
++
+ int rtw89_core_start(struct rtw89_dev *rtwdev)
+ {
+ 	int ret;
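
rtw89_check_quirks() above walks every DMI table match, validates the quirk enum carried in driver_data, and sets it in the device's quirk bitmap for later test_bit() checks such as rtw89_pci_ber(). A userspace sketch of that validate-and-set loop, with the DMI matching reduced to a precomputed hit flag:

#include <stdio.h>
#include <stdint.h>

enum quirks { QUIRK_PCI_BER, NUM_QUIRKS };

struct match { int hit; uintptr_t driver_data; };	/* stand-in for dmi_system_id */

int main(void)
{
	struct match table[] = { { 1, QUIRK_PCI_BER }, { 0, 99 } };
	unsigned long bitmap = 0;

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		uintptr_t q = table[i].driver_data;

		if (!table[i].hit || q >= NUM_QUIRKS)
			continue;		/* skip non-matches and bad data */
		bitmap |= 1UL << q;		/* like set_bit(q, rtwdev->quirks) */
	}
	printf("PCI_BER=%lu\n", (bitmap >> QUIRK_PCI_BER) & 1);	/* -> 1 */
	return 0;
}
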
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index 2e854c9af7099..509d84a493348 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/average.h>
+ #include <linux/bitfield.h>
++#include <linux/dmi.h>
+ #include <linux/firmware.h>
+ #include <linux/iopoll.h>
+ #include <linux/workqueue.h>
+@@ -3977,6 +3978,7 @@ union rtw89_bus_info {
+ 
+ struct rtw89_driver_info {
+ 	const struct rtw89_chip_info *chip;
++	const struct dmi_system_id *quirks;
+ 	union rtw89_bus_info bus;
+ };
+ 
+@@ -4324,6 +4326,12 @@ enum rtw89_flags {
+ 	NUM_OF_RTW89_FLAGS,
+ };
+ 
++enum rtw89_quirks {
++	RTW89_QUIRK_PCI_BER,
++
++	NUM_OF_RTW89_QUIRKS,
++};
++
+ enum rtw89_pkt_drop_sel {
+ 	RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ 	RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+@@ -5084,6 +5092,7 @@ struct rtw89_dev {
+ 	DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
+ 	DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+ 	DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
++	DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS);
+ 
+ 	struct rtw89_phy_stat phystat;
+ 	struct rtw89_rfk_wait_info rfk_wait;
+@@ -6129,6 +6138,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+ void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ 			       struct ieee80211_sta *sta,
+ 			       struct cfg80211_tid_config *tid_config);
++void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
+ int rtw89_core_init(struct rtw89_dev *rtwdev);
+ void rtw89_core_deinit(struct rtw89_dev *rtwdev);
+ int rtw89_core_register(struct rtw89_dev *rtwdev);
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 0afe22e568f43..3b0d97da048dc 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -2299,6 +2299,22 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
+ 	return 0;
+ }
+ 
++static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
++{
++	u32 phy_offset;
++
++	if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
++		return;
++
++	phy_offset = R_RAC_DIRECT_OFFSET_G1;
++	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
++	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
++
++	phy_offset = R_RAC_DIRECT_OFFSET_G2;
++	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
++	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
++}
++
+ static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
+ {
+ 	if (rtwdev->chip->chip_id != RTL8852A)
+@@ -2696,6 +2712,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
+ 	const struct rtw89_pci_info *info = rtwdev->pci_info;
+ 	int ret;
+ 
++	rtw89_pci_ber(rtwdev);
+ 	rtw89_pci_rxdma_prefth(rtwdev);
+ 	rtw89_pci_l1off_pwroff(rtwdev);
+ 	rtw89_pci_deglitch_setting(rtwdev);
+@@ -4172,6 +4189,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
+ 	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
+ 
++	rtw89_check_quirks(rtwdev, info->quirks);
++
+ 	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
+ 
+ 	ret = rtw89_core_init(rtwdev);
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index a63b6b7c9bfaf..87e7081664c1f 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -26,11 +26,16 @@
+ #define RAC_REG_FLD_0			0x1D
+ #define BAC_AUTOK_N_MASK		GENMASK(3, 2)
+ #define PCIE_AUTOK_4			0x3
++#define RAC_ANA1E			0x1E
++#define RAC_ANA1E_G1_VAL		0x66EA
++#define RAC_ANA1E_G2_VAL		0x6EEA
+ #define RAC_ANA1F			0x1F
+ #define RAC_ANA24			0x24
+ #define B_AX_DEGLITCH			GENMASK(11, 8)
+ #define RAC_ANA26			0x26
+ #define B_AX_RXEN			GENMASK(15, 14)
++#define RAC_ANA2E			0x2E
++#define RAC_ANA2E_VAL			0xFFFE
+ #define RAC_CTRL_PPR_V1			0x30
+ #define B_AX_CLK_CALIB_EN		BIT(12)
+ #define B_AX_CALIB_EN			BIT(13)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+index ca1374a717272..ec3629d95fda1 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
+ 
+ static const struct rtw89_driver_info rtw89_8851be_info = {
+ 	.chip = &rtw8851b_chip_info,
++	.quirks = NULL,
+ 	.bus = {
+ 		.pci = &rtw8851b_pci_info,
+ 	},
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+index 7c6ffedb77e27..fdee5dd4ba148 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
+ 
+ static const struct rtw89_driver_info rtw89_8852ae_info = {
+ 	.chip = &rtw8852a_chip_info,
++	.quirks = NULL,
+ 	.bus = {
+ 		.pci = &rtw8852a_pci_info,
+ 	},
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+index ed71364e6437b..5f941122655c4 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
+ 
+ static const struct rtw89_driver_info rtw89_8852be_info = {
+ 	.chip = &rtw8852b_chip_info,
++	.quirks = NULL,
+ 	.bus = {
+ 		.pci = &rtw8852b_pci_info,
+ 	},
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+index 583ea673a4f54..e07c7f3ade41e 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+@@ -68,8 +68,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
+ 	.recognize_intrs	= rtw89_pci_recognize_intrs_v1,
+ };
+ 
++static const struct dmi_system_id rtw8852c_pci_quirks[] = {
++	{
++		.ident = "Dell Inc. Vostro 16 5640",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"),
++		},
++		.driver_data = (void *)RTW89_QUIRK_PCI_BER,
++	},
++	{
++		.ident = "Dell Inc. Inspiron 16 5640",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"),
++		},
++		.driver_data = (void *)RTW89_QUIRK_PCI_BER,
++	},
++	{},
++};
++
+ static const struct rtw89_driver_info rtw89_8852ce_info = {
+ 	.chip = &rtw8852c_chip_info,
++	.quirks = rtw8852c_pci_quirks,
+ 	.bus = {
+ 		.pci = &rtw8852c_pci_info,
+ 	},
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+index 4981b657bd7b0..ce8aaa9501e16 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
+ 
+ static const struct rtw89_driver_info rtw89_8922ae_info = {
+ 	.chip = &rtw8922a_chip_info,
++	.quirks = NULL,
+ 	.bus = {
+ 		.pci = &rtw8922a_pci_info,
+ 	},
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index e233734b72205..cb4611fe1b5b2 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -2394,7 +2394,8 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
+ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ 			const char * const *names, struct device ***virt_devs)
+ {
+-	struct device *virt_dev;
++	struct device *virt_dev, *gdev;
++	struct opp_table *genpd_table;
+ 	int index = 0, ret = -EINVAL;
+ 	const char * const *name = names;
+ 
+@@ -2427,6 +2428,34 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
+ 			goto err;
+ 		}
+ 
++		/*
++		 * The required_opp_tables parsing is not perfect, as the OPP
++		 * core does the parsing solely based on the DT node pointers.
++		 * The core sets the required_opp_tables entry to the first OPP
++		 * table in the "opp_tables" list, that matches with the node
++		 * pointer.
++		 *
++		 * If the target DT OPP table is used by multiple devices and
++		 * they all create separate instances of 'struct opp_table' from
++		 * it, then it is possible that the required_opp_tables entry
++		 * may be set to the incorrect sibling device.
++		 *
++		 * Cross check it again and fix if required.
++		 */
++		gdev = dev_to_genpd_dev(virt_dev);
++		if (IS_ERR(gdev))
++			return PTR_ERR(gdev);
++
++		genpd_table = _find_opp_table(gdev);
++		if (!IS_ERR(genpd_table)) {
++			if (genpd_table != opp_table->required_opp_tables[index]) {
++				dev_pm_opp_put_opp_table(opp_table->required_opp_tables[index]);
++				opp_table->required_opp_tables[index] = genpd_table;
++			} else {
++				dev_pm_opp_put_opp_table(genpd_table);
++			}
++		}
++
+ 		/*
+ 		 * Add the virtual genpd device as a user of the OPP table, so
+ 		 * we can call dev_pm_opp_set_opp() on it directly.
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 70b8c87055cb6..cbbf197df80f1 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1277,6 +1277,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ 	for (;;) {
+ 		u32 id;
+ 
++		if (pci_dev_is_disconnected(dev)) {
++			pci_dbg(dev, "disconnected; not waiting\n");
++			return -ENOTTY;
++		}
++
+ 		pci_read_config_dword(dev, PCI_COMMAND, &id);
+ 		if (!PCI_POSSIBLE_ERROR(id))
+ 			break;
+@@ -2962,6 +2967,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
+ 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
+ 		},
+ 	},
++	{
++		/*
++		 * Changing the power state of the root port a dGPU is connected to fails
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
++		 */
++		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_BOARD_NAME, "1972"),
++			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+index acc2b5b9ea255..b8919443e46c9 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+@@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ 	[QPHY_TX_TRANSCEIVER_BIAS_EN]	= QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
+ };
+ 
++static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
++	[QPHY_SW_RESET]			= QPHY_V6_N4_PCS_SW_RESET,
++	[QPHY_START_CTRL]		= QPHY_V6_N4_PCS_START_CONTROL,
++	[QPHY_PCS_STATUS]		= QPHY_V6_N4_PCS_PCS_STATUS1,
++	[QPHY_PCS_POWER_DOWN_CONTROL]	= QPHY_V6_N4_PCS_POWER_DOWN_CONTROL,
++
++	/* In PCS_USB */
++	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
++	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
++
++	[QPHY_COM_RESETSM_CNTRL]	= QSERDES_V6_COM_RESETSM_CNTRL,
++	[QPHY_COM_C_READY_STATUS]	= QSERDES_V6_COM_C_READY_STATUS,
++	[QPHY_COM_CMN_STATUS]		= QSERDES_V6_COM_CMN_STATUS,
++	[QPHY_COM_BIAS_EN_CLKBUFLR_EN]	= QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
++
++	[QPHY_DP_PHY_STATUS]		= QSERDES_V6_DP_PHY_STATUS,
++	[QPHY_DP_PHY_VCO_DIV]		= QSERDES_V6_DP_PHY_VCO_DIV,
++
++	[QPHY_TX_TX_POL_INV]		= QSERDES_V6_N4_TX_TX_POL_INV,
++	[QPHY_TX_TX_DRV_LVL]		= QSERDES_V6_N4_TX_TX_DRV_LVL,
++	[QPHY_TX_TX_EMP_POST1_LVL]	= QSERDES_V6_N4_TX_TX_EMP_POST1_LVL,
++	[QPHY_TX_HIGHZ_DRVR_EN]		= QSERDES_V6_N4_TX_HIGHZ_DRVR_EN,
++	[QPHY_TX_TRANSCEIVER_BIAS_EN]	= QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
++};
++
+ static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ 	QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+@@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+ };
+ 
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
++};
++
+ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+@@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
+ };
+ 
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
++	QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1),
++};
++
+ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+@@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ };
+ 
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
++};
++
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
++};
++
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02),
++};
++
++static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = {
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
++	QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
++};
++
+ static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ 	QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+@@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
+ };
+ 
+ static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+-	QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b),
++	QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
+ };
+ 
+ static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
+@@ -1794,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
+ 	.pcs_usb_tbl		= x1e80100_usb43dp_pcs_usb_tbl,
+ 	.pcs_usb_tbl_num	= ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
+ 
+-	.dp_serdes_tbl		= qmp_v6_dp_serdes_tbl,
+-	.dp_serdes_tbl_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+-	.dp_tx_tbl		= qmp_v6_dp_tx_tbl,
+-	.dp_tx_tbl_num		= ARRAY_SIZE(qmp_v6_dp_tx_tbl),
++	.dp_serdes_tbl		= qmp_v6_n4_dp_serdes_tbl,
++	.dp_serdes_tbl_num	= ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl),
++	.dp_tx_tbl		= qmp_v6_n4_dp_tx_tbl,
++	.dp_tx_tbl_num		= ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl),
+ 
+-	.serdes_tbl_rbr		= qmp_v6_dp_serdes_tbl_rbr,
+-	.serdes_tbl_rbr_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+-	.serdes_tbl_hbr		= qmp_v6_dp_serdes_tbl_hbr,
+-	.serdes_tbl_hbr_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+-	.serdes_tbl_hbr2	= qmp_v6_dp_serdes_tbl_hbr2,
+-	.serdes_tbl_hbr2_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+-	.serdes_tbl_hbr3	= qmp_v6_dp_serdes_tbl_hbr3,
+-	.serdes_tbl_hbr3_num	= ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
++	.serdes_tbl_rbr		= qmp_v6_n4_dp_serdes_tbl_rbr,
++	.serdes_tbl_rbr_num	= ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr),
++	.serdes_tbl_hbr		= qmp_v6_n4_dp_serdes_tbl_hbr,
++	.serdes_tbl_hbr_num	= ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr),
++	.serdes_tbl_hbr2	= qmp_v6_n4_dp_serdes_tbl_hbr2,
++	.serdes_tbl_hbr2_num	= ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2),
++	.serdes_tbl_hbr3	= qmp_v6_n4_dp_serdes_tbl_hbr3,
++	.serdes_tbl_hbr3_num	= ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3),
+ 
+-	.swing_hbr_rbr		= &qmp_dp_v5_voltage_swing_hbr_rbr,
+-	.pre_emphasis_hbr_rbr	= &qmp_dp_v5_pre_emphasis_hbr_rbr,
++	.swing_hbr_rbr		= &qmp_dp_v6_voltage_swing_hbr_rbr,
++	.pre_emphasis_hbr_rbr	= &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ 	.swing_hbr3_hbr2	= &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ 	.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+ 
+@@ -1822,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
+ 	.num_resets		= ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ 	.vreg_list		= qmp_phy_vreg_l,
+ 	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+-	.regs			= qmp_v45_usb3phy_regs_layout,
++	.regs			= qmp_v6_n4_usb3phy_regs_layout,
+ };
+ 
+ static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
+new file mode 100644
+index 0000000000000..b3024714dab4e
+--- /dev/null
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2023, Linaro Limited
++ */
++
++#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_
++#define QCOM_PHY_QMP_PCS_V6_N4_H_
++
++/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */
++#define QPHY_V6_N4_PCS_SW_RESET			0x000
++#define QPHY_V6_N4_PCS_PCS_STATUS1		0x014
++#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL	0x040
++#define QPHY_V6_N4_PCS_START_CONTROL		0x044
++#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1	0x090
++#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1	0x0c4
++#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2	0x0c8
++#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3	0x0cc
++#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6	0x0d8
++#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1	0x0dc
++#define QPHY_V6_N4_PCS_RX_SIGDET_LVL		0x188
++#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L	0x190
++#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H	0x194
++#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1		0x198
++#define QPHY_V6_N4_PCS_RX_CONFIG		0x1b0
++#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1	0x1c0
++#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2	0x1c4
++#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG		0x1d0
++#define QPHY_V6_N4_PCS_EQ_CONFIG1		0x1dc
++#define QPHY_V6_N4_PCS_EQ_CONFIG2		0x1e0
++#define QPHY_V6_N4_PCS_EQ_CONFIG5		0x1ec
++
++#endif
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+index a814ad11af071..d37cc0d4fd365 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+@@ -6,11 +6,24 @@
+ #ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+ #define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+ 
++#define QSERDES_V6_N4_TX_CLKBUF_ENABLE			0x08
++#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL		0x0c
++#define QSERDES_V6_N4_TX_TX_DRV_LVL			0x14
++#define QSERDES_V6_N4_TX_RESET_TSYNC_EN			0x1c
++#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN		0x20
+ #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX	0x30
+ #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX	0x34
++#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN		0x48
++#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN			0x4c
++#define QSERDES_V6_N4_TX_TX_POL_INV			0x50
++#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN	0x54
+ #define QSERDES_V6_N4_TX_LANE_MODE_1			0x78
+ #define QSERDES_V6_N4_TX_LANE_MODE_2			0x7c
+ #define QSERDES_V6_N4_TX_LANE_MODE_3			0x80
++#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN		0xac
++#define QSERDES_V6_N4_TX_TX_BAND			0xd8
++#define QSERDES_V6_N4_TX_INTERFACE_SELECT		0xe4
++#define QSERDES_V6_N4_TX_VMODE_CTRL1			0xb0
+ 
+ #define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2		0x8
+ #define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2		0x18
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
+index d10b8f653c4b2..d0f41e4aaa855 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
+@@ -46,6 +46,8 @@
+ 
+ #include "phy-qcom-qmp-pcs-v6.h"
+ 
++#include "phy-qcom-qmp-pcs-v6-n4.h"
++
+ #include "phy-qcom-qmp-pcs-v6_20.h"
+ 
+ #include "phy-qcom-qmp-pcs-v7.h"
+diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
+index f618757f8b321..930c2f47269f6 100644
+--- a/drivers/platform/chrome/cros_usbpd_logger.c
++++ b/drivers/platform/chrome/cros_usbpd_logger.c
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/ktime.h>
+ #include <linux/math64.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+@@ -249,6 +250,12 @@ static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
+ 			 cros_usbpd_logger_resume);
+ 
++static const struct platform_device_id cros_usbpd_logger_id[] = {
++	{ DRV_NAME, 0 },
++	{}
++};
++MODULE_DEVICE_TABLE(platform, cros_usbpd_logger_id);
++
+ static struct platform_driver cros_usbpd_logger_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
+@@ -256,10 +263,10 @@ static struct platform_driver cros_usbpd_logger_driver = {
+ 	},
+ 	.probe = cros_usbpd_logger_probe,
+ 	.remove_new = cros_usbpd_logger_remove,
++	.id_table = cros_usbpd_logger_id,
+ };
+ 
+ module_platform_driver(cros_usbpd_logger_driver);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
+-MODULE_ALIAS("platform:" DRV_NAME);
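
The pattern applied to all three cros_usbpd drivers in this patch is the same: module autoloading is driven by the MODALIAS uevent generated from a platform_device_id table via MODULE_DEVICE_TABLE(), which makes the hand-written MODULE_ALIAS() line redundant. A minimal sketch of the idiom, not part of the patch, using a made-up "demo-dev" driver name:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static const struct platform_device_id demo_id[] = {
	{ "demo-dev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(platform, demo_id);

static struct platform_driver demo_driver = {
	.driver = { .name = "demo-dev" },
	.probe = demo_probe,
	/* the id table replaces MODULE_ALIAS("platform:demo-dev") */
	.id_table = demo_id,
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");
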
+diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
+index aacad022f21df..c83f81d86483c 100644
+--- a/drivers/platform/chrome/cros_usbpd_notify.c
++++ b/drivers/platform/chrome/cros_usbpd_notify.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/acpi.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+ #include <linux/platform_data/cros_usbpd_notify.h>
+@@ -218,12 +219,19 @@ static void cros_usbpd_notify_remove_plat(struct platform_device *pdev)
+ 					   &pdnotify->nb);
+ }
+ 
++static const struct platform_device_id cros_usbpd_notify_id[] = {
++	{ DRV_NAME, 0 },
++	{}
++};
++MODULE_DEVICE_TABLE(platform, cros_usbpd_notify_id);
++
+ static struct platform_driver cros_usbpd_notify_plat_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
+ 	},
+ 	.probe = cros_usbpd_notify_probe_plat,
+ 	.remove_new = cros_usbpd_notify_remove_plat,
++	.id_table = cros_usbpd_notify_id,
+ };
+ 
+ static int __init cros_usbpd_notify_init(void)
+@@ -258,4 +266,3 @@ module_exit(cros_usbpd_notify_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
+ MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
+-MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 3d66e1d4eb1f5..1ac30034f3e59 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -56,12 +56,9 @@ static int p2sb_get_devfn(unsigned int *devfn)
+ 	return 0;
+ }
+ 
+-static bool p2sb_valid_resource(struct resource *res)
++static bool p2sb_valid_resource(const struct resource *res)
+ {
+-	if (res->flags)
+-		return true;
+-
+-	return false;
++	return res->flags & ~IORESOURCE_UNSET;
+ }
+ 
+ /* Copy resource from the first BAR of the device in question */
+@@ -220,16 +217,20 @@ EXPORT_SYMBOL_GPL(p2sb_bar);
+ 
+ static int __init p2sb_fs_init(void)
+ {
+-	p2sb_cache_resources();
+-	return 0;
++	return p2sb_cache_resources();
+ }
+ 
+ /*
+- * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
+- * not be locked in sysfs pci bus rescan path because of deadlock. To
+- * avoid the deadlock, access to P2SB devices with the lock at an early
+- * step in kernel initialization and cache required resources. This
+- * should happen after subsys_initcall which initializes PCI subsystem
+- * and before device_initcall which requires P2SB resources.
++ * pci_rescan_remove_lock() cannot be taken in the sysfs PCI bus rescan path
++ * because of a deadlock. To avoid it, access P2SB devices with the lock
++ * at an early step in kernel initialization and cache required resources.
++ *
++ * We want to run as early as possible. If the P2SB was assigned a bad BAR,
++ * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of
++ * initcall dependencies looks something like this:
++ *
++ * ...
++ * subsys_initcall (pci_subsys_init)
++ * fs_initcall     (pcibios_assign_resources)
+  */
+-fs_initcall(p2sb_fs_init);
++fs_initcall_sync(p2sb_fs_init);
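
The ordering fix above relies on initcall levels: pcibios_assign_resources() runs at fs_initcall() time, so moving p2sb_fs_init() to fs_initcall_sync() guarantees it sees BARs that have already been fixed up while still running before device_initcall() consumers. A minimal sketch of hooking that level; the function name here is illustrative:

#include <linux/init.h>
#include <linux/printk.h>

static int __init demo_cache_resources(void)
{
	/* Runs after every fs_initcall() (e.g. pcibios_assign_resources())
	 * and before the device_initcall() users of the cached data. */
	pr_info("demo: caching resources\n");
	return 0;
}
fs_initcall_sync(demo_cache_resources);
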
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 77244c9aa60d2..16e941449b144 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644);
+ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ 	"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes)");
+ 
++static int hci_hotkey_quickstart = -1;
++module_param(hci_hotkey_quickstart, int, 0644);
++MODULE_PARM_DESC(hci_hotkey_quickstart,
++		 "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes)");
++
+ #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+ 
+ /* Scan code for Fn key on TOS1900 models */
+@@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume,
+ #define HCI_ACCEL_MASK			0x7fff
+ #define HCI_ACCEL_DIRECTION_MASK	0x8000
+ #define HCI_HOTKEY_DISABLE		0x0b
++#define HCI_HOTKEY_ENABLE_QUICKSTART	0x05
+ #define HCI_HOTKEY_ENABLE		0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
+ #define HCI_LCD_BRIGHTNESS_BITS		3
+@@ -2731,10 +2737,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
+ 		return -ENODEV;
+ 
+ 	/*
++	 * Enable quickstart buttons if supported.
++	 *
+ 	 * Enable the "Special Functions" mode only if they are
+ 	 * supported and if they are activated.
+ 	 */
+-	if (dev->kbd_function_keys_supported && dev->special_functions)
++	if (hci_hotkey_quickstart)
++		result = hci_write(dev, HCI_HOTKEY_EVENT,
++				   HCI_HOTKEY_ENABLE_QUICKSTART);
++	else if (dev->kbd_function_keys_supported && dev->special_functions)
+ 		result = hci_write(dev, HCI_HOTKEY_EVENT,
+ 				   HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ 	else
+@@ -3258,7 +3269,14 @@ static const char *find_hci_method(acpi_handle handle)
+  * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+  * the configured brightness level.
+  */
+-static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
++#define QUIRK_TURN_ON_PANEL_ON_RESUME		BIT(0)
++/*
++ * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use
++ * the value HCI_HOTKEY_ENABLE_QUICKSTART.
++ */
++#define QUIRK_HCI_HOTKEY_QUICKSTART		BIT(1)
++
++static const struct dmi_system_id toshiba_dmi_quirks[] = {
+ 	{
+ 	 /* Toshiba Portégé R700 */
+ 	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3266,6 +3284,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ 		},
++	 .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ 	},
+ 	{
+ 	 /* Toshiba Satellite/Portégé R830 */
+@@ -3275,6 +3294,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ 		},
++	 .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
+ 	},
+ 	{
+ 	 /* Toshiba Satellite/Portégé Z830 */
+@@ -3282,6 +3302,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ 		},
++	 .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
+ 	},
+ };
+ 
+@@ -3290,6 +3311,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	struct toshiba_acpi_dev *dev;
+ 	const char *hci_method;
+ 	u32 dummy;
++	const struct dmi_system_id *dmi_id;
++	long quirks = 0;
+ 	int ret = 0;
+ 
+ 	if (toshiba_acpi)
+@@ -3442,8 +3465,15 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	}
+ #endif
+ 
++	dmi_id = dmi_first_match(toshiba_dmi_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
+ 	if (turn_on_panel_on_resume == -1)
+-		turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
++		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++	if (hci_hotkey_quickstart == -1)
++		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+ 
+ 	toshiba_wwan_available(dev);
+ 	if (dev->wwan_supported)
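
toshiba_acpi now keys several quirks off one DMI table: each dmi_system_id entry carries a bitmask in .driver_data, and dmi_first_match() decodes it once at probe time. A minimal sketch of the table-plus-bitmask idiom; the vendor string and quirk names are made up:

#include <linux/bits.h>
#include <linux/dmi.h>

#define DEMO_QUIRK_A	BIT(0)
#define DEMO_QUIRK_B	BIT(1)

static const struct dmi_system_id demo_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
		},
		.driver_data = (void *)(DEMO_QUIRK_A | DEMO_QUIRK_B),
	},
	{ }
};

static long demo_get_quirks(void)
{
	const struct dmi_system_id *id = dmi_first_match(demo_quirks);

	/* Decode once; 0 means "no quirks for this machine". */
	return id ? (long)id->driver_data : 0;
}
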
+diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
+index a3415f1c0b5f8..6559bb4ea7305 100644
+--- a/drivers/platform/x86/x86-android-tablets/core.c
++++ b/drivers/platform/x86/x86-android-tablets/core.c
+@@ -278,25 +278,25 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < serdev_count; i++) {
++	for (i = serdev_count - 1; i >= 0; i--) {
+ 		if (serdevs[i])
+ 			serdev_device_remove(serdevs[i]);
+ 	}
+ 
+ 	kfree(serdevs);
+ 
+-	for (i = 0; i < pdev_count; i++)
++	for (i = pdev_count - 1; i >= 0; i--)
+ 		platform_device_unregister(pdevs[i]);
+ 
+ 	kfree(pdevs);
+ 	kfree(buttons);
+ 
+-	for (i = 0; i < spi_dev_count; i++)
++	for (i = spi_dev_count - 1; i >= 0; i--)
+ 		spi_unregister_device(spi_devs[i]);
+ 
+ 	kfree(spi_devs);
+ 
+-	for (i = 0; i < i2c_client_count; i++)
++	for (i = i2c_client_count - 1; i >= 0; i--)
+ 		i2c_unregister_device(i2c_clients[i]);
+ 
+ 	kfree(i2c_clients);
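
The remove() fix above is a plain LIFO teardown: devices are unregistered in the reverse order of registration, so later devices, which may depend on earlier ones, disappear first. A minimal sketch of the loop shape with a hypothetical helper name:

#include <linux/platform_device.h>

/* Unregister in reverse creation order: entry [count - 1] was registered
 * last and may reference earlier entries, so it must go away first. */
static void demo_unregister_pdevs(struct platform_device **pdevs, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		platform_device_unregister(pdevs[i]);
}
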
+diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
+index 5d6c12494f082..141a2d25e83be 100644
+--- a/drivers/platform/x86/x86-android-tablets/dmi.c
++++ b/drivers/platform/x86/x86-android-tablets/dmi.c
+@@ -104,6 +104,24 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
+ 		},
+ 		.driver_data = (void *)&lenovo_yogabook_x91_info,
+ 	},
++	{
++		/*
++		 * Lenovo Yoga Tablet 2 Pro 1380F/L (13"). This has more or less
++		 * the same BIOS as the 830F/L or 1050F/L (8" and 10") below,
++		 * but unlike the 8" / 10" models, which share the same mainboard,
++		 * this model has a different mainboard.
++		 * This match for the 13" model MUST come before the 8" + 10"
++		 * match since that one will also match the 13" model!
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
++			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
++			/* Full match so as to NOT match the 830/1050 BIOS */
++			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
++		},
++		.driver_data = (void *)&lenovo_yoga_tab2_1380_info,
++	},
+ 	{
+ 		/*
+ 		 * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10"
+diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
+index c297391955adb..16fa04d604a09 100644
+--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
++++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
+@@ -19,6 +19,7 @@
+ #include <linux/pinctrl/machine.h>
+ #include <linux/platform_data/lp855x.h>
+ #include <linux/platform_device.h>
++#include <linux/power/bq24190_charger.h>
+ #include <linux/reboot.h>
+ #include <linux/rmi.h>
+ #include <linux/spi/spi.h>
+@@ -565,6 +566,221 @@ static void lenovo_yoga_tab2_830_1050_exit(void)
+ 	}
+ }
+ 
++/*
++ * Lenovo Yoga Tablet 2 Pro 1380F/L
++ *
++ * The Lenovo Yoga Tablet 2 Pro 1380F/L mostly has the same design as the 830F/L
++ * and the 1050F/L, so this re-uses some of the handling for those models above.
++ */
++static const char * const lc824206xa_chg_det_psy[] = { "lc824206xa-charger-detect" };
++
++static const struct property_entry lenovo_yoga_tab2_1380_bq24190_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lc824206xa_chg_det_psy),
++	PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
++	PROPERTY_ENTRY_BOOL("omit-battery-class"),
++	PROPERTY_ENTRY_BOOL("disable-reset"),
++	{ }
++};
++
++static const struct software_node lenovo_yoga_tab2_1380_bq24190_node = {
++	.properties = lenovo_yoga_tab2_1380_bq24190_props,
++};
++
++/* For enabling the bq24190 5V boost based on id-pin */
++static struct regulator_consumer_supply lc824206xa_consumer = {
++	.supply = "vbus",
++	.dev_name = "i2c-lc824206xa",
++};
++
++static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_data = {
++	.constraints = {
++		.name = "bq24190_vbus",
++		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
++	},
++	.consumer_supplies = &lc824206xa_consumer,
++	.num_consumer_supplies = 1,
++};
++
++struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
++	.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
++};
++
++static const struct property_entry lenovo_yoga_tab2_1380_lc824206xa_props[] = {
++	PROPERTY_ENTRY_BOOL("onnn,enable-miclr-for-dcp"),
++	{ }
++};
++
++static const struct software_node lenovo_yoga_tab2_1380_lc824206xa_node = {
++	.properties = lenovo_yoga_tab2_1380_lc824206xa_props,
++};
++
++static const char * const lenovo_yoga_tab2_1380_lms303d_mount_matrix[] = {
++	"0", "-1", "0",
++	"-1", "0", "0",
++	"0", "0", "1"
++};
++
++static const struct property_entry lenovo_yoga_tab2_1380_lms303d_props[] = {
++	PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", lenovo_yoga_tab2_1380_lms303d_mount_matrix),
++	{ }
++};
++
++static const struct software_node lenovo_yoga_tab2_1380_lms303d_node = {
++	.properties = lenovo_yoga_tab2_1380_lms303d_props,
++};
++
++static const struct x86_i2c_client_info lenovo_yoga_tab2_1380_i2c_clients[] __initconst = {
++	{
++		/* BQ27541 fuel-gauge */
++		.board_info = {
++			.type = "bq27541",
++			.addr = 0x55,
++			.dev_name = "bq27541",
++			.swnode = &fg_bq24190_supply_node,
++		},
++		.adapter_path = "\\_SB_.I2C1",
++	}, {
++		/* bq24292i battery charger */
++		.board_info = {
++			.type = "bq24190",
++			.addr = 0x6b,
++			.dev_name = "bq24292i",
++			.swnode = &lenovo_yoga_tab2_1380_bq24190_node,
++			.platform_data = &lenovo_yoga_tab2_1380_bq24190_pdata,
++		},
++		.adapter_path = "\\_SB_.I2C1",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FC:02",
++			.index = 2,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_HIGH,
++			.con_id = "bq24292i_irq",
++		},
++	}, {
++		/* LP8557 Backlight controller */
++		.board_info = {
++			.type = "lp8557",
++			.addr = 0x2c,
++			.dev_name = "lp8557",
++			.platform_data = &lenovo_lp8557_pwm_and_reg_pdata,
++		},
++		.adapter_path = "\\_SB_.I2C3",
++	}, {
++		/* LC824206XA Micro USB Switch */
++		.board_info = {
++			.type = "lc824206xa",
++			.addr = 0x48,
++			.dev_name = "lc824206xa",
++			.swnode = &lenovo_yoga_tab2_1380_lc824206xa_node,
++		},
++		.adapter_path = "\\_SB_.I2C3",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_GPIOINT,
++			.chip = "INT33FC:02",
++			.index = 1,
++			.trigger = ACPI_LEVEL_SENSITIVE,
++			.polarity = ACPI_ACTIVE_LOW,
++			.con_id = "lc824206xa_irq",
++		},
++	}, {
++		/* AL3320A ambient light sensor */
++		.board_info = {
++			.type = "al3320a",
++			.addr = 0x1c,
++			.dev_name = "al3320a",
++		},
++		.adapter_path = "\\_SB_.I2C5",
++	}, {
++		/* LSM303DA accelerometer + magnetometer */
++		.board_info = {
++			.type = "lsm303d",
++			.addr = 0x1d,
++			.dev_name = "lsm303d",
++			.swnode = &lenovo_yoga_tab2_1380_lms303d_node,
++		},
++		.adapter_path = "\\_SB_.I2C5",
++	}, {
++		/* Synaptics RMI touchscreen */
++		.board_info = {
++			.type = "rmi4_i2c",
++			.addr = 0x38,
++			.dev_name = "rmi4_i2c",
++			.platform_data = &lenovo_yoga_tab2_830_1050_rmi_pdata,
++		},
++		.adapter_path = "\\_SB_.I2C6",
++		.irq_data = {
++			.type = X86_ACPI_IRQ_TYPE_APIC,
++			.index = 0x45,
++			.trigger = ACPI_EDGE_SENSITIVE,
++			.polarity = ACPI_ACTIVE_HIGH,
++		},
++	}
++};
++
++static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initconst = {
++	{
++		/* For the Tablet 2 Pro 1380's custom fast charging driver */
++		.name = "lenovo-yoga-tab2-pro-1380-fastcharger",
++		.id = PLATFORM_DEVID_NONE,
++	},
++};
++
++const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
++	"bq24190_charger",            /* For the Vbus regulator for lc824206xa */
++	NULL
++};
++
++static int __init lenovo_yoga_tab2_1380_init(void)
++{
++	int ret;
++
++	/* To verify that the DMI matching works vs the 830 / 1050 models */
++	pr_info("detected Lenovo Yoga Tablet 2 Pro 1380F/L\n");
++
++	ret = lenovo_yoga_tab2_830_1050_init_codec();
++	if (ret)
++		return ret;
++
++	/* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
++	lenovo_yoga_tab2_830_1050_sys_off_handler =
++		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
++					 lenovo_yoga_tab2_830_1050_power_off, NULL);
++	if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
++		return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
++
++	return 0;
++}
++
++static struct gpiod_lookup_table lenovo_yoga_tab2_1380_fc_gpios = {
++	.dev_id = "serial0-0",
++	.table = {
++		GPIO_LOOKUP("INT33FC:00", 57, "uart3_txd", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("INT33FC:00", 61, "uart3_rxd", GPIO_ACTIVE_HIGH),
++		{ }
++	},
++};
++
++static struct gpiod_lookup_table * const lenovo_yoga_tab2_1380_gpios[] = {
++	&lenovo_yoga_tab2_830_1050_codec_gpios,
++	&lenovo_yoga_tab2_1380_fc_gpios,
++	NULL
++};
++
++const struct x86_dev_info lenovo_yoga_tab2_1380_info __initconst = {
++	.i2c_client_info = lenovo_yoga_tab2_1380_i2c_clients,
++	.i2c_client_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_i2c_clients),
++	.pdev_info = lenovo_yoga_tab2_1380_pdevs,
++	.pdev_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_pdevs),
++	.gpio_button = &lenovo_yoga_tab2_830_1050_lid,
++	.gpio_button_count = 1,
++	.gpiod_lookup_tables = lenovo_yoga_tab2_1380_gpios,
++	.bat_swnode = &generic_lipo_hv_4v35_battery_node,
++	.modules = lenovo_yoga_tab2_1380_modules,
++	.init = lenovo_yoga_tab2_1380_init,
++	.exit = lenovo_yoga_tab2_830_1050_exit,
++};
++
+ /* Lenovo Yoga Tab 3 Pro YT3-X90F */
+ 
+ /*
+diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+index 468993edfeee2..821dc094b0254 100644
+--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
++++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+@@ -112,6 +112,7 @@ extern const struct x86_dev_info czc_p10t;
+ extern const struct x86_dev_info lenovo_yogabook_x90_info;
+ extern const struct x86_dev_info lenovo_yogabook_x91_info;
+ extern const struct x86_dev_info lenovo_yoga_tab2_830_1050_info;
++extern const struct x86_dev_info lenovo_yoga_tab2_1380_info;
+ extern const struct x86_dev_info lenovo_yt3_info;
+ extern const struct x86_dev_info medion_lifetab_s10346_info;
+ extern const struct x86_dev_info nextbook_ares8_info;
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 4215ffd9b11c5..c40eda92a85a7 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -184,6 +184,16 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+ 	return pd_to_genpd(dev->pm_domain);
+ }
+ 
++struct device *dev_to_genpd_dev(struct device *dev)
++{
++	struct generic_pm_domain *genpd = dev_to_genpd(dev);
++
++	if (IS_ERR(genpd))
++		return ERR_CAST(genpd);
++
++	return &genpd->dev;
++}
++
+ static int genpd_stop_dev(const struct generic_pm_domain *genpd,
+ 			  struct device *dev)
+ {
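
dev_to_genpd_dev() above uses ERR_CAST() to forward an error pointer across pointer types without losing the encoded errno. A minimal sketch of the idiom; the types and lookup helper are hypothetical:

#include <linux/device.h>
#include <linux/err.h>

struct demo_domain {				/* hypothetical */
	struct device dev;
};

struct demo_domain *demo_find_domain(struct device *dev);	/* hypothetical */

static struct device *demo_to_domain_dev(struct device *dev)
{
	struct demo_domain *d = demo_find_domain(dev);

	if (IS_ERR(d))
		return ERR_CAST(d);	/* keep -EINVAL & co. intact */

	return &d->dev;
}
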
+diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
+index b6c96376776a9..8008e31c0c098 100644
+--- a/drivers/power/supply/cros_usbpd-charger.c
++++ b/drivers/power/supply/cros_usbpd-charger.c
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2014 - 2018 Google, Inc
+  */
+ 
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+@@ -711,16 +712,22 @@ static int cros_usbpd_charger_resume(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL,
+ 			 cros_usbpd_charger_resume);
+ 
++static const struct platform_device_id cros_usbpd_charger_id[] = {
++	{ DRV_NAME, 0 },
++	{}
++};
++MODULE_DEVICE_TABLE(platform, cros_usbpd_charger_id);
++
+ static struct platform_driver cros_usbpd_charger_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
+ 		.pm = &cros_usbpd_charger_pm_ops,
+ 	},
+-	.probe = cros_usbpd_charger_probe
++	.probe = cros_usbpd_charger_probe,
++	.id_table = cros_usbpd_charger_id,
+ };
+ 
+ module_platform_driver(cros_usbpd_charger_driver);
+ 
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("ChromeOS EC USBPD charger");
+-MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index a15460aaa03b3..6b1b8f57cd951 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev,
+ 	if (max < ptp->n_vclocks)
+ 		goto out;
+ 
+-	size = sizeof(int) * max;
+-	vclock_index = kzalloc(size, GFP_KERNEL);
++	vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
+ 	if (!vclock_index) {
+ 		err = -ENOMEM;
+ 		goto out;
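
The ptp_sysfs change swaps an open-coded size multiplication for kcalloc(), which checks the n * size product for overflow before handing back zeroed memory. A minimal sketch:

#include <linux/slab.h>

static int *demo_alloc_index(unsigned int max)
{
	/* kcalloc() rejects max * sizeof(int) overflow and zeroes the buffer;
	 * kzalloc(sizeof(int) * max, ...) would silently wrap on overflow. */
	return kcalloc(max, sizeof(int), GFP_KERNEL);
}
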
+diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
+index 26192d55a6858..79fbb45297f6b 100644
+--- a/drivers/regulator/bd71815-regulator.c
++++ b/drivers/regulator/bd71815-regulator.c
+@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
+  * 10: 2.50mV/usec	10mV 4uS
+  * 11: 1.25mV/usec	10mV 8uS
+  */
+-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
++static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
+ 
+ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
+ 					int min_uA, int max_uA)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 2c33653ffdea3..51ff3f9eafd0e 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
+ 
+ 	return map ? map : ERR_PTR(-EOPNOTSUPP);
+ }
++EXPORT_SYMBOL_GPL(regulator_get_regmap);
+ 
+ /**
+  * regulator_get_hardware_vsel_register - get the HW voltage selector register
+diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
+index 8deb2001dc2ff..37eed6a278164 100644
+--- a/drivers/scsi/qedi/qedi_debugfs.c
++++ b/drivers/scsi/qedi/qedi_debugfs.c
+@@ -120,15 +120,11 @@ static ssize_t
+ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+ 				 size_t count, loff_t *ppos)
+ {
+-	size_t cnt = 0;
+-
+-	if (*ppos)
+-		return 0;
++	char buf[64];
++	int len;
+ 
+-	cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
+-	cnt = min_t(int, count, cnt - *ppos);
+-	*ppos += cnt;
+-	return cnt;
++	len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
++	return simple_read_from_buffer(buffer, count, ppos, buf, len);
+ }
+ 
+ static int
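
The qedi debugfs fix replaces a read handler that wrote through the user-space pointer directly (and mishandled *ppos) with simple_read_from_buffer(), which does the position bookkeeping and copy_to_user() for you. A minimal sketch of the corrected shape; the names are illustrative:

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t demo_dbg_read(struct file *filp, char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	int len;

	/* Format into a kernel buffer first ... */
	len = scnprintf(buf, sizeof(buf), "flag=%d\n", 1);

	/* ... then let the helper clamp to count, copy out and advance *ppos. */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
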
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 4f7b9b5b9c5b4..a4638ea92571d 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -63,6 +63,7 @@
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_dbg.h>
+ #include <scsi/scsi_device.h>
++#include <scsi/scsi_devinfo.h>
+ #include <scsi/scsi_driver.h>
+ #include <scsi/scsi_eh.h>
+ #include <scsi/scsi_host.h>
+@@ -3125,6 +3126,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
+ 	struct scsi_mode_data data;
+ 	int res;
+ 
++	if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
++		return;
++
+ 	res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
+ 			      /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+ 			      sdkp->max_retries, &data, &sshdr);
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index aabef9fc84bdf..0d9c948e119af 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -21,7 +21,7 @@
+ #include <linux/units.h>
+ 
+ #define CS42L43_FIFO_SIZE		16
+-#define CS42L43_SPI_ROOT_HZ		(40 * HZ_PER_MHZ)
++#define CS42L43_SPI_ROOT_HZ		49152000
+ #define CS42L43_SPI_MAX_LENGTH		65532
+ 
+ enum cs42l43_spi_cmd {
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index c3e5cee18bea7..09b6c1b45f1a1 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ 		ctrl |= (spi_imx->target_burst * 8 - 1)
+ 			<< MX51_ECSPI_CTRL_BL_OFFSET;
+ 	else {
+-		if (spi_imx->usedma) {
+-			ctrl |= (spi_imx->bits_per_word - 1)
+-				<< MX51_ECSPI_CTRL_BL_OFFSET;
+-		} else {
+-			if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
+-				ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
+-						<< MX51_ECSPI_CTRL_BL_OFFSET;
+-			else
+-				ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+-						BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
+-						<< MX51_ECSPI_CTRL_BL_OFFSET;
+-		}
++		ctrl |= (spi_imx->bits_per_word - 1)
++			<< MX51_ECSPI_CTRL_BL_OFFSET;
+ 	}
+ 
+ 	/* set clock speed */
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index f1e922fd362ab..955c920c4b639 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
+ 
+ static int stm32_qspi_get_mode(u8 buswidth)
+ {
+-	if (buswidth == 4)
++	if (buswidth >= 4)
+ 		return CCR_BUSWIDTH_4;
+ 
+ 	return buswidth;
+@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ 		return -EINVAL;
+ 
+ 	mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+-	if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+-	    ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+-	    gpiod_count(qspi->dev, "cs") == -ENOENT)) {
++	if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
+ 		dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ 		dev_err(qspi->dev, "configuration not supported\n");
+ 
+@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
+ 	qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+ 
+ 	/*
+-	 * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
+-	 * are both set in spi->mode and "cs-gpios" properties is found in DT
++	 * Dual flash mode is only enabled when SPI_TX_OCTAL or SPI_RX_OCTAL
++	 * is set in spi->mode and the "cs-gpios" property is found in DT.
+ 	 */
+-	if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
++	if (mode) {
+ 		qspi->cr_reg |= CR_DFM;
+ 		dev_dbg(qspi->dev, "Dual flash mode enable");
+ 	}
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 2cea7aeb10f95..c349d6012625a 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -685,10 +685,12 @@ static int __spi_add_device(struct spi_device *spi)
+ 	 * Make sure that multiple logical CS doesn't map to the same physical CS.
+ 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
+ 	 */
+-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+-		status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+-		if (status)
+-			return status;
++	if (!spi_controller_is_target(ctlr)) {
++		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
++			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
++			if (status)
++				return status;
++		}
+ 	}
+ 
+ 	/* Set the bus ID string */
+diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
+index 9f30e0edadfe2..bdb6595ffd2d5 100644
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -341,11 +341,13 @@ static int ssb_bus_match(struct device *dev, struct device_driver *drv)
+ 
+ static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+ {
+-	const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
++	const struct ssb_device *ssb_dev;
+ 
+ 	if (!dev)
+ 		return -ENODEV;
+ 
++	ssb_dev = dev_to_ssb_dev(dev);
++
+ 	return add_uevent_var(env,
+ 			     "MODALIAS=ssb:v%04Xid%04Xrev%02X",
+ 			     ssb_dev->id.vendor, ssb_dev->id.coreid,
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+index 14e34eabc4191..4a1bfebb1b8e5 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+@@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
+ {
+ 	struct proc_thermal_pci *pci_info = devid;
+ 	struct proc_thermal_device *proc_priv;
+-	int ret = IRQ_HANDLED;
++	int ret = IRQ_NONE;
+ 	u32 status;
+ 
+ 	proc_priv = pci_info->proc_priv;
+@@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
+ 		/* Disable enable interrupt flag */
+ 		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+ 		pkg_thermal_schedule_work(&pci_info->work);
++		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
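
Starting the handler from IRQ_NONE, as the int340x fix does, is standard shared-IRQ etiquette: the interrupt is only claimed when the device's own status bit actually fired, so the kernel's spurious-IRQ detection keeps working. A minimal sketch; demo_pending() is a hypothetical status check:

#include <linux/interrupt.h>

bool demo_pending(void *devid);		/* hypothetical status read */

static irqreturn_t demo_irq_handler(int irq, void *devid)
{
	irqreturn_t ret = IRQ_NONE;

	if (demo_pending(devid)) {
		/* ... acknowledge and schedule the real work ... */
		ret = IRQ_HANDLED;
	}

	return ret;
}
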
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 4e5c213a89225..6b9422bd8795d 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -740,7 +740,11 @@ static int lvts_golden_temp_init(struct device *dev, u32 *value, int temp_offset
+ 
+ 	gt = (*value) >> 24;
+ 
+-	if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
++	/* A zero value for gt means that the device has invalid efuse data */
++	if (!gt)
++		return -ENODATA;
++
++	if (gt < LVTS_GOLDEN_TEMP_MAX)
+ 		golden_temp = gt;
+ 
+ 	golden_temp_offset = golden_temp * 500 + temp_offset;
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 258482036f1e9..28888ac43c0a1 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1633,6 +1633,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ 
+ static struct notifier_block thermal_pm_nb = {
+ 	.notifier_call = thermal_pm_notify,
++	/*
++	 * Run at the lowest priority to avoid interference between the thermal
++	 * zone resume work items spawned by thermal_pm_notify() and the other
++	 * PM notifiers.
++	 */
++	.priority = INT_MIN,
+ };
+ 
+ static int __init thermal_init(void)
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 94d680e4b5353..a58890fd53e2c 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -57,6 +57,33 @@
+ #define DW_UART_QUIRK_APMC0D08		BIT(4)
+ #define DW_UART_QUIRK_CPR_VALUE		BIT(5)
+ 
++struct dw8250_platform_data {
++	u8 usr_reg;
++	u32 cpr_value;
++	unsigned int quirks;
++};
++
++struct dw8250_data {
++	struct dw8250_port_data	data;
++	const struct dw8250_platform_data *pdata;
++
++	int			msr_mask_on;
++	int			msr_mask_off;
++	struct clk		*clk;
++	struct clk		*pclk;
++	struct notifier_block	clk_notifier;
++	struct work_struct	clk_work;
++	struct reset_control	*rst;
++
++	unsigned int		skip_autocfg:1;
++	unsigned int		uart_16550_compatible:1;
++};
++
++static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
++{
++	return container_of(data, struct dw8250_data, data);
++}
++
+ static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
+ {
+ 	return container_of(nb, struct dw8250_data, clk_notifier);
+diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
+index 794a9014cdac1..7dd2a8e7b7808 100644
+--- a/drivers/tty/serial/8250/8250_dwlib.h
++++ b/drivers/tty/serial/8250/8250_dwlib.h
+@@ -2,15 +2,10 @@
+ /* Synopsys DesignWare 8250 library header file. */
+ 
+ #include <linux/io.h>
+-#include <linux/notifier.h>
+ #include <linux/types.h>
+-#include <linux/workqueue.h>
+ 
+ #include "8250.h"
+ 
+-struct clk;
+-struct reset_control;
+-
+ struct dw8250_port_data {
+ 	/* Port properties */
+ 	int			line;
+@@ -26,36 +21,9 @@ struct dw8250_port_data {
+ 	bool			hw_rs485_support;
+ };
+ 
+-struct dw8250_platform_data {
+-	u8 usr_reg;
+-	u32 cpr_value;
+-	unsigned int quirks;
+-};
+-
+-struct dw8250_data {
+-	struct dw8250_port_data	data;
+-	const struct dw8250_platform_data *pdata;
+-
+-	int			msr_mask_on;
+-	int			msr_mask_off;
+-	struct clk		*clk;
+-	struct clk		*pclk;
+-	struct notifier_block	clk_notifier;
+-	struct work_struct	clk_work;
+-	struct reset_control	*rst;
+-
+-	unsigned int		skip_autocfg:1;
+-	unsigned int		uart_16550_compatible:1;
+-};
+-
+ void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
+ void dw8250_setup_port(struct uart_port *p);
+ 
+-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+-{
+-	return container_of(data, struct dw8250_data, data);
+-}
+-
+ static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
+ {
+ 	if (p->iotype == UPIO_MEM32BE)
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 0440df7de1ed7..4d1e07343d0bb 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -46,8 +46,50 @@
+ #define PCI_DEVICE_ID_COMMTECH_4228PCIE		0x0021
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE		0x0022
+ 
++#define PCI_VENDOR_ID_CONNECT_TECH				0x12c4
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_SP_OPTO        0x0340
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_A      0x0341
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_B      0x0342
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS           0x0350
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_A         0x0351
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_B         0x0352
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS           0x0353
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_A        0x0354
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_B        0x0355
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS_OPTO      0x0360
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_A    0x0361
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_B    0x0362
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP             0x0370
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232         0x0371
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_485         0x0372
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_SP           0x0373
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_6_2_SP           0x0374
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_6_SP           0x0375
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232_NS      0x0376
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_LEFT   0x0380
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_RIGHT  0x0381
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XP_OPTO        0x0382
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_XPRS_OPTO    0x0392
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP        0x03A0
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232    0x03A1
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_485    0x03A2
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232_NS 0x03A3
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XEG001               0x0602
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_BASE           0x1000
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_2              0x1002
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_4              0x1004
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_8              0x1008
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_12             0x100C
++#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_16             0x1010
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG00X          0x110c
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG01X          0x110d
++#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_16                 0x1110
++
+ #define PCI_DEVICE_ID_EXAR_XR17V4358		0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358		0x8358
++#define PCI_DEVICE_ID_EXAR_XR17V252		0x0252
++#define PCI_DEVICE_ID_EXAR_XR17V254		0x0254
++#define PCI_DEVICE_ID_EXAR_XR17V258		0x0258
+ 
+ #define PCI_SUBDEVICE_ID_USR_2980		0x0128
+ #define PCI_SUBDEVICE_ID_USR_2981		0x0129
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index e148132506161..09c1678ddfd49 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -26,6 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/of.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/dma-mapping.h>
+ 
+ #include <asm/irq.h>
+@@ -2010,7 +2011,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 	struct imx_port *sport = imx_uart_ports[co->index];
+ 	struct imx_port_ucrs old_ucr;
+ 	unsigned long flags;
+-	unsigned int ucr1;
++	unsigned int ucr1, usr2;
+ 	int locked = 1;
+ 
+ 	if (sport->port.sysrq)
+@@ -2041,8 +2042,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 	 *	Finally, wait for transmitter to become empty
+ 	 *	and restore UCR1/2/3
+ 	 */
+-	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
+-
++	read_poll_timeout_atomic(imx_uart_readl, usr2, usr2 & USR2_TXDC,
++				 0, USEC_PER_SEC, false, sport, USR2);
+ 	imx_uart_ucrs_restore(sport, &old_ucr);
+ 
+ 	if (locked)
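
read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, delay_before_read, args...) bounds what used to be an unbounded busy-wait on USR2_TXDC. A minimal sketch against a hypothetical status-register reader and bit:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

#define DEMO_SR_TXDC	BIT(3)			/* hypothetical bit */

u32 demo_read_sr(void *port);			/* hypothetical reader */

static int demo_wait_tx_done(void *port)
{
	u32 sr;

	/* Polls demo_read_sr(port) until TXDC is set or 1 s elapses,
	 * returning 0 or -ETIMEDOUT instead of spinning forever. */
	return read_poll_timeout_atomic(demo_read_sr, sr, sr & DEMO_SR_TXDC,
					0, USEC_PER_SEC, false, port);
}
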
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 3f68e213df1f7..d80e9d4c974b4 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -545,6 +545,12 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
+ 		goto out;
+ 	}
+ 
++	if (tty->ops->ldisc_ok) {
++		retval = tty->ops->ldisc_ok(tty, disc);
++		if (retval)
++			goto out;
++	}
++
+ 	old_ldisc = tty->ldisc;
+ 
+ 	/* Shutdown the old discipline. */
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 9b5b98dfc8b40..cd87e3d1291ed 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3576,6 +3576,15 @@ static void con_cleanup(struct tty_struct *tty)
+ 	tty_port_put(&vc->port);
+ }
+ 
++/*
++ * We can't deal with anything but the N_TTY ldisc,
++ * because we can sleep in our write() routine.
++ */
++static int con_ldisc_ok(struct tty_struct *tty, int ldisc)
++{
++	return ldisc == N_TTY ? 0 : -EINVAL;
++}
++
+ static int default_color           = 7; /* white */
+ static int default_italic_color    = 2; // green (ASCII)
+ static int default_underline_color = 3; // cyan (ASCII)
+@@ -3695,6 +3704,7 @@ static const struct tty_operations con_ops = {
+ 	.resize = vt_resize,
+ 	.shutdown = con_shutdown,
+ 	.cleanup = con_cleanup,
++	.ldisc_ok = con_ldisc_ok,
+ };
+ 
+ static struct cdev vc0_cdev;
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index ce1abd5d725ad..f7d04f7c0017d 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -8972,6 +8972,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+ 	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
+ 		/* Reset the device and controller before doing reinit */
+ 		ufshcd_device_reset(hba);
++		ufs_put_device_desc(hba);
+ 		ufshcd_hba_stop(hba);
+ 		ufshcd_vops_reinit_notify(hba);
+ 		ret = ufshcd_hba_enable(hba);
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 497deed38c0c1..9ef821ca2fc71 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -8,6 +8,7 @@
+  *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -220,6 +221,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ 
+ 		if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ 			struct gpio_desc *gpio;
++			const char *bios_ver;
+ 			int ret;
+ 
+ 			/* On BYT the FW does not always enable the refclock */
+@@ -277,8 +279,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
+ 			 * detection. These can be identified by them _not_
+ 			 * using the standard ACPI battery and ac drivers.
+ 			 */
++			bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ 			if (acpi_dev_present("INT33FD", "1", 2) &&
+-			    acpi_quirk_skip_acpi_ac_and_battery()) {
++			    acpi_quirk_skip_acpi_ac_and_battery() &&
++			    /* Lenovo Yoga Tablet 2 Pro 1380 uses LC824206XA instead */
++			    !(bios_ver &&
++			      strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))) {
+ 				dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
+ 				swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
+ 			}
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 3c8a9dd585c09..2db01e03bfbf0 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -1029,9 +1029,9 @@ static inline int hidg_get_minor(void)
+ {
+ 	int ret;
+ 
+-	ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
++	ret = ida_alloc(&hidg_ida, GFP_KERNEL);
+ 	if (ret >= HIDG_MINORS) {
+-		ida_simple_remove(&hidg_ida, ret);
++		ida_free(&hidg_ida, ret);
+ 		ret = -ENODEV;
+ 	}
+ 
+@@ -1176,7 +1176,7 @@ static const struct config_item_type hid_func_type = {
+ 
+ static inline void hidg_put_minor(int minor)
+ {
+-	ida_simple_remove(&hidg_ida, minor);
++	ida_free(&hidg_ida, minor);
+ }
+ 
+ static void hidg_free_inst(struct usb_function_instance *f)
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 076dd4c1be96c..ba7d180cc9e6d 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -1312,9 +1312,9 @@ static inline int gprinter_get_minor(void)
+ {
+ 	int ret;
+ 
+-	ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
++	ret = ida_alloc(&printer_ida, GFP_KERNEL);
+ 	if (ret >= PRINTER_MINORS) {
+-		ida_simple_remove(&printer_ida, ret);
++		ida_free(&printer_ida, ret);
+ 		ret = -ENODEV;
+ 	}
+ 
+@@ -1323,7 +1323,7 @@ static inline int gprinter_get_minor(void)
+ 
+ static inline void gprinter_put_minor(int minor)
+ {
+-	ida_simple_remove(&printer_ida, minor);
++	ida_free(&printer_ida, minor);
+ }
+ 
+ static int gprinter_setup(int);
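
All three gadget conversions here follow the same recipe: ida_alloc()/ida_alloc_max() plus ida_free() replace the deprecated ida_simple_get()/ida_simple_remove(), with the wrinkle that ida_alloc_max() takes an inclusive maximum where ida_simple_get() took an exclusive end. A minimal sketch with a 16-minor range:

#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static int demo_get_minor(void)
{
	/* Inclusive max: allocates from 0..15, like the old
	 * ida_simple_get(&demo_ida, 0, 16, GFP_KERNEL). */
	return ida_alloc_max(&demo_ida, 15, GFP_KERNEL);
}

static void demo_put_minor(int minor)
{
	ida_free(&demo_ida, minor);
}
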
+diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
+index 29bf8664bf582..12c5d9cf450c1 100644
+--- a/drivers/usb/gadget/function/rndis.c
++++ b/drivers/usb/gadget/function/rndis.c
+@@ -869,12 +869,12 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
+ 
+ static inline int rndis_get_nr(void)
+ {
+-	return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
++	return ida_alloc_max(&rndis_ida, 999, GFP_KERNEL);
+ }
+ 
+ static inline void rndis_put_nr(int nr)
+ {
+-	ida_simple_remove(&rndis_ida, nr);
++	ida_free(&rndis_ida, nr);
+ }
+ 
+ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
+index a4377df612f51..6fac696ea8463 100644
+--- a/drivers/usb/gadget/function/uvc_configfs.c
++++ b/drivers/usb/gadget/function/uvc_configfs.c
+@@ -13,6 +13,7 @@
+ #include "uvc_configfs.h"
+ 
+ #include <linux/sort.h>
++#include <linux/usb/uvc.h>
+ #include <linux/usb/video.h>
+ 
+ /* -----------------------------------------------------------------------------
+@@ -2260,6 +2261,8 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
+ 	struct f_uvc_opts *opts;
+ 	struct config_item *opts_item;
+ 	struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
++	const struct uvc_format_desc *format;
++	u8 tmpguidFormat[sizeof(ch->desc.guidFormat)];
+ 	int ret;
+ 
+ 	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+@@ -2273,7 +2276,16 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
+ 		goto end;
+ 	}
+ 
+-	memcpy(ch->desc.guidFormat, page,
++	memcpy(tmpguidFormat, page,
++	       min(sizeof(tmpguidFormat), len));
++
++	format = uvc_format_by_guid(tmpguidFormat);
++	if (!format) {
++		ret = -EINVAL;
++		goto end;
++	}
++
++	memcpy(ch->desc.guidFormat, tmpguidFormat,
+ 	       min(sizeof(ch->desc.guidFormat), len));
+ 	ret = sizeof(ch->desc.guidFormat);
+ 
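[Note: the uvc_configfs change stops copying an unvalidated GUID straight into the live descriptor. The bytes are staged in a stack buffer, checked against the known formats via uvc_format_by_guid(), and committed only on success, so a rejected write can no longer leave ch->desc.guidFormat half-updated. A small, runnable userspace model of that validate-then-commit pattern; all names here are illustrative:

#include <stdio.h>
#include <string.h>

static unsigned char live_guid[16];	/* stands in for ch->desc.guidFormat */

static int known_guid(const unsigned char *g)
{
	static const unsigned char yuy2[16] = { 'Y', 'U', 'Y', '2' };
	return memcmp(g, yuy2, sizeof(yuy2)) == 0;
}

static int store_guid(const void *buf, size_t len)
{
	unsigned char tmp[sizeof(live_guid)] = { 0 };

	/* Stage and validate first; the live copy is untouched on failure. */
	memcpy(tmp, buf, len < sizeof(tmp) ? len : sizeof(tmp));
	if (!known_guid(tmp))
		return -22;		/* -EINVAL */
	memcpy(live_guid, tmp, sizeof(live_guid));
	return 0;
}

int main(void)
{
	unsigned char bad[16] = { 'X' };

	printf("bad store -> %d, live[0] still %d\n",
	       store_guid(bad, sizeof(bad)), live_guid[0]);
	return 0;
}
]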
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index b271ec916926b..febf64723434c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -271,17 +271,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 				"QUIRK: Fresco Logic revision %u "
+ 				"has broken MSI implementation",
+ 				pdev->revision);
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ 			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+-			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+-
+ 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
+ 		xhci->quirks |= XHCI_NEC_HOST;
+ 
+@@ -308,11 +303,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+ 	}
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+-		if (pdev->device == 0x43f7)
+-			xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+-	}
++	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43f7)
++		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+ 		((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+@@ -400,7 +392,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_EJ168) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+@@ -411,7 +402,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ 	    pdev->device == 0x0014) {
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_ZERO_64B_REGS;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+@@ -441,7 +431,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ 		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
+-		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index ab9c5969e4624..8b357647728c2 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -214,8 +214,7 @@ static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+  */
+ #define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware)				\
+ 	.firmware_name = firmware,					\
+-	.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |	\
+-		  XHCI_SLOW_SUSPEND,					\
++	.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND,		\
+ 	.init_quirk = xhci_rcar_init_quirk,				\
+ 	.plat_start = xhci_rcar_start,					\
+ 	.resume_quirk = xhci_rcar_resume_quirk,
+@@ -229,8 +228,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
+ };
+ 
+ static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
+-	.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
+-		  XHCI_SLOW_SUSPEND,
++	.quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND,
+ 	.init_quirk = xhci_rzv2m_init_quirk,
+ 	.plat_start = xhci_rzv2m_start,
+ };
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 850846c206ed4..48d745e9f9730 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2431,8 +2431,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ 			break;
+ 		if (remaining) {
+ 			frame->status = short_framestatus;
+-			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+-				sum_trbs_for_length = true;
++			sum_trbs_for_length = true;
+ 			break;
+ 		}
+ 		frame->status = 0;
+@@ -2681,15 +2680,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	 * transfer type
+ 	 */
+ 	case COMP_SUCCESS:
+-		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+-			break;
+-		if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
+-		    ep_ring->last_td_was_short)
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			trb_comp_code = COMP_SHORT_PACKET;
+-		else
+-			xhci_warn_ratelimited(xhci,
+-					      "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
+-					      slot_id, ep_index);
++			xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
++				 slot_id, ep_index, ep_ring->last_td_was_short);
++		}
+ 		break;
+ 	case COMP_SHORT_PACKET:
+ 		break;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 069a187540a0c..1683d779e4bc0 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1590,7 +1590,7 @@ struct xhci_hcd {
+ #define XHCI_RESET_ON_RESUME	BIT_ULL(7)
+ #define	XHCI_SW_BW_CHECKING	BIT_ULL(8)
+ #define XHCI_AMD_0x96_HOST	BIT_ULL(9)
+-#define XHCI_TRUST_TX_LENGTH	BIT_ULL(10)
++#define XHCI_TRUST_TX_LENGTH	BIT_ULL(10) /* Deprecated */
+ #define XHCI_LPM_SUPPORT	BIT_ULL(11)
+ #define XHCI_INTEL_HOST		BIT_ULL(12)
+ #define XHCI_SPURIOUS_REBOOT	BIT_ULL(13)
+@@ -1730,8 +1730,6 @@ static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
+ 	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+ #define xhci_warn(xhci, fmt, args...) \
+ 	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+-#define xhci_warn_ratelimited(xhci, fmt, args...) \
+-	dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+ #define xhci_info(xhci, fmt, args...) \
+ 	dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+ 
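[Note: all of the XHCI_TRUST_TX_LENGTH hunks belong to one upstream cleanup. The ring code now always sums TRB lengths when a "successful" completion reports residue, so the per-host opt-in is dropped from xhci-pci and xhci-rcar, and the ratelimited warning is demoted to a debug message. Bit 10 stays #defined (marked deprecated) rather than being deleted: quirk bits are positional, and renumbering would silently change the meaning of every later flag. A tiny, runnable illustration of why the slot is kept reserved:

#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define QUIRK_AMD_0x96_HOST	BIT_ULL(9)
#define QUIRK_TRUST_TX_LENGTH	BIT_ULL(10)	/* deprecated: never set again */
#define QUIRK_LPM_SUPPORT	BIT_ULL(11)	/* keeps its historical value */

int main(void)
{
	/* Deleting bit 10 and renumbering would move LPM_SUPPORT to bit 10,
	 * silently flipping behaviour for any code still using old values. */
	printf("LPM_SUPPORT is still bit %d\n",
	       __builtin_ctzll(QUIRK_LPM_SUPPORT));
	return 0;
}
]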
+diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
+index b00d92db5dfd1..eb5a8e0d9e2d6 100644
+--- a/drivers/usb/misc/uss720.c
++++ b/drivers/usb/misc/uss720.c
+@@ -677,7 +677,7 @@ static int uss720_probe(struct usb_interface *intf,
+ 	struct parport_uss720_private *priv;
+ 	struct parport *pp;
+ 	unsigned char reg;
+-	int i;
++	int ret;
+ 
+ 	dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n",
+ 		le16_to_cpu(usbdev->descriptor.idVendor),
+@@ -688,8 +688,8 @@ static int uss720_probe(struct usb_interface *intf,
+ 		usb_put_dev(usbdev);
+ 		return -ENODEV;
+ 	}
+-	i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
+-	dev_dbg(&intf->dev, "set interface result %d\n", i);
++	ret = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
++	dev_dbg(&intf->dev, "set interface result %d\n", ret);
+ 
+ 	interface = intf->cur_altsetting;
+ 
+@@ -725,12 +725,18 @@ static int uss720_probe(struct usb_interface *intf,
+ 	set_1284_register(pp, 7, 0x00, GFP_KERNEL);
+ 	set_1284_register(pp, 6, 0x30, GFP_KERNEL);  /* PS/2 mode */
+ 	set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
+-	/* debugging */
+-	get_1284_register(pp, 0, &reg, GFP_KERNEL);
++
++	/* The Belkin F5U002 Rev 2 P80453-B USB parallel port adapter shares the
++	 * device ID 050d:0002 with some other device that works with this
++	 * driver, but it itself does not. Detect and handle the bad cable
++	 * here. */
++	ret = get_1284_register(pp, 0, &reg, GFP_KERNEL);
+ 	dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
++	if (ret < 0)
++		return ret;
+ 
+-	i = usb_find_last_int_in_endpoint(interface, &epd);
+-	if (!i) {
++	ret = usb_find_last_int_in_endpoint(interface, &epd);
++	if (!ret) {
+ 		dev_dbg(&intf->dev, "epaddr %d interval %d\n",
+ 				epd->bEndpointAddress, epd->bInterval);
+ 	}
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 12cf9940e5b67..7d87e2a521ed9 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -86,6 +86,12 @@ static int slave_alloc (struct scsi_device *sdev)
+ 	if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ 		sdev->sdev_bflags |= BLIST_FORCELUN;
+ 
++	/*
++	 * Some USB storage devices reset if the IO advice hints grouping mode
++	 * page is queried. Hence skip that mode page.
++	 */
++	sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 08953f0d4532a..1ed2bdda0cd40 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -21,6 +21,7 @@
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_eh.h>
+ #include <scsi/scsi_dbg.h>
++#include <scsi/scsi_devinfo.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
+@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
+ 	struct uas_dev_info *devinfo =
+ 		(struct uas_dev_info *)sdev->host->hostdata;
+ 
++	/*
++	 * Some USB storage devices reset if the IO advice hints grouping mode
++	 * page is queried. Hence skip that mode page.
++	 */
++	sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
++
+ 	sdev->hostdata = devinfo;
+ 
+ 	/*
+diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+index d3958c061a972..501eddb294e43 100644
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+@@ -41,7 +41,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
+ 	struct device_node *np = dev->of_node;
+ 	const struct pmic_typec_resources *res;
+ 	struct regmap *regmap;
+-	struct device *bridge_dev;
++	struct auxiliary_device *bridge_dev;
+ 	u32 base;
+ 	int ret;
+ 
+@@ -92,7 +92,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
+ 	if (!tcpm->tcpc.fwnode)
+ 		return -EINVAL;
+ 
+-	bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
++	bridge_dev = devm_drm_dp_hpd_bridge_alloc(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+ 	if (IS_ERR(bridge_dev))
+ 		return PTR_ERR(bridge_dev);
+ 
+@@ -110,8 +110,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto port_stop;
+ 
++	ret = devm_drm_dp_hpd_bridge_add(tcpm->dev, bridge_dev);
++	if (ret)
++		goto pdphy_stop;
++
+ 	return 0;
+ 
++pdphy_stop:
++	tcpm->pdphy_stop(tcpm);
+ port_stop:
+ 	tcpm->port_stop(tcpm);
+ port_unregister:
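[Note: splitting drm_dp_hpd_bridge_register() into devm_drm_dp_hpd_bridge_alloc() plus a later devm_drm_dp_hpd_bridge_add() means the bridge only goes live after the port and pdphy have started, and the new pdphy_stop label unwinds them in reverse if the add fails. A runnable toy of that allocate-early/publish-late probe shape; the function names are made up for illustration:

#include <stdio.h>

static int  port_start(void)  { puts("port started");  return 0; }
static void port_stop(void)   { puts("port stopped"); }
static int  pdphy_start(void) { puts("pdphy started"); return 0; }
static void pdphy_stop(void)  { puts("pdphy stopped"); }
static int  bridge_add(void)  { puts("bridge add failed"); return -19; }

static int probe(void)
{
	int ret;

	ret = port_start();
	if (ret)
		return ret;
	ret = pdphy_start();
	if (ret)
		goto err_port;
	ret = bridge_add();	/* publish last, once everything is ready */
	if (ret)
		goto err_pdphy;
	return 0;

err_pdphy:
	pdphy_stop();		/* unwind in reverse order of setup */
err_port:
	port_stop();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
]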
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index ce08eb33e5bec..1d0e2d87e9b31 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -176,7 +176,8 @@ static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset,
+ 	left = wait_for_completion_timeout(&ucsi->sync_ack, 5 * HZ);
+ 	if (!left) {
+ 		dev_err(ucsi->dev, "timeout waiting for UCSI sync write response\n");
+-		ret = -ETIMEDOUT;
++		/* return 0 here and let core UCSI code handle the CCI_BUSY */
++		ret = 0;
+ 	} else if (ucsi->sync_val) {
+ 		dev_err(ucsi->dev, "sync write returned: %d\n", ucsi->sync_val);
+ 	}
+@@ -243,10 +244,7 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ 		ucsi_connector_change(ucsi->ucsi, con_num);
+ 	}
+ 
+-	if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
+-		ucsi->sync_val = -EBUSY;
+-		complete(&ucsi->sync_ack);
+-	} else if (ucsi->sync_pending &&
++	if (ucsi->sync_pending &&
+ 		   (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))) {
+ 		complete(&ucsi->sync_ack);
+ 	}
+@@ -311,12 +309,14 @@ static void pmic_glink_ucsi_destroy(void *data)
+ 	mutex_unlock(&ucsi->lock);
+ }
+ 
++static unsigned long quirk_sc8180x = UCSI_NO_PARTNER_PDOS;
++
+ static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
+-	{ .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+-	{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+-	{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+-	{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+-	{ .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
++	{ .compatible = "qcom,qcm6490-pmic-glink", .data = &quirk_sc8180x, },
++	{ .compatible = "qcom,sc8180x-pmic-glink", .data = &quirk_sc8180x, },
++	{ .compatible = "qcom,sc8280xp-pmic-glink", .data = &quirk_sc8180x, },
++	{ .compatible = "qcom,sm8350-pmic-glink", .data = &quirk_sc8180x, },
++	{ .compatible = "qcom,sm8550-pmic-glink", .data = &quirk_sc8180x, },
+ 	{}
+ };
+ 
+@@ -354,7 +354,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ 
+ 	match = of_match_device(pmic_glink_ucsi_of_quirks, dev->parent);
+ 	if (match)
+-		ucsi->ucsi->quirks = (unsigned long)match->data;
++		ucsi->ucsi->quirks = *(unsigned long *)match->data;
+ 
+ 	ucsi_set_drvdata(ucsi->ucsi, ucsi);
+ 
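[Note: rather than casting the quirk mask itself into the of_device_id .data pointer and back, the table now points at a static unsigned long and the probe path dereferences it. Both encodings work, but real storage keeps the round trip type-safe, lets several compatibles share one definition, and leaves room for quirk data to grow past what fits in a pointer. A runnable miniature of the pattern, with the struct simplified:

#include <stdio.h>

struct of_id { const char *compatible; const void *data; };

static const unsigned long quirk_no_pdos = 1UL << 0;

static const struct of_id quirks[] = {
	{ "vendor,board-a", &quirk_no_pdos },
	{ "vendor,board-b", &quirk_no_pdos },
	{ NULL, NULL },
};

int main(void)
{
	for (const struct of_id *id = quirks; id->compatible; id++)
		printf("%s: quirks=0x%lx\n", id->compatible,
		       *(const unsigned long *)id->data);
	return 0;
}
]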
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index d94d61b92c1ac..d8c95cc16be81 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -778,25 +778,26 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
+ }
+ 
+ struct vfio_pci_fill_info {
+-	struct vfio_pci_dependent_device __user *devices;
+-	struct vfio_pci_dependent_device __user *devices_end;
+ 	struct vfio_device *vdev;
++	struct vfio_pci_dependent_device *devices;
++	int nr_devices;
+ 	u32 count;
+ 	u32 flags;
+ };
+ 
+ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ {
+-	struct vfio_pci_dependent_device info = {
+-		.segment = pci_domain_nr(pdev->bus),
+-		.bus = pdev->bus->number,
+-		.devfn = pdev->devfn,
+-	};
++	struct vfio_pci_dependent_device *info;
+ 	struct vfio_pci_fill_info *fill = data;
+ 
+-	fill->count++;
+-	if (fill->devices >= fill->devices_end)
+-		return 0;
++	/* The topology changed since we counted devices */
++	if (fill->count >= fill->nr_devices)
++		return -EAGAIN;
++
++	info = &fill->devices[fill->count++];
++	info->segment = pci_domain_nr(pdev->bus);
++	info->bus = pdev->bus->number;
++	info->devfn = pdev->devfn;
+ 
+ 	if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
+ 		struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
+@@ -809,19 +810,19 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ 		 */
+ 		vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
+ 		if (!vdev) {
+-			info.devid = VFIO_PCI_DEVID_NOT_OWNED;
++			info->devid = VFIO_PCI_DEVID_NOT_OWNED;
+ 		} else {
+ 			int id = vfio_iommufd_get_dev_id(vdev, iommufd);
+ 
+ 			if (id > 0)
+-				info.devid = id;
++				info->devid = id;
+ 			else if (id == -ENOENT)
+-				info.devid = VFIO_PCI_DEVID_OWNED;
++				info->devid = VFIO_PCI_DEVID_OWNED;
+ 			else
+-				info.devid = VFIO_PCI_DEVID_NOT_OWNED;
++				info->devid = VFIO_PCI_DEVID_NOT_OWNED;
+ 		}
+ 		/* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
+-		if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
++		if (info->devid == VFIO_PCI_DEVID_NOT_OWNED)
+ 			fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
+ 	} else {
+ 		struct iommu_group *iommu_group;
+@@ -830,13 +831,10 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
+ 		if (!iommu_group)
+ 			return -EPERM; /* Cannot reset non-isolated devices */
+ 
+-		info.group_id = iommu_group_id(iommu_group);
++		info->group_id = iommu_group_id(iommu_group);
+ 		iommu_group_put(iommu_group);
+ 	}
+ 
+-	if (copy_to_user(fill->devices, &info, sizeof(info)))
+-		return -EFAULT;
+-	fill->devices++;
+ 	return 0;
+ }
+ 
+@@ -1258,10 +1256,11 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ {
+ 	unsigned long minsz =
+ 		offsetofend(struct vfio_pci_hot_reset_info, count);
++	struct vfio_pci_dependent_device *devices = NULL;
+ 	struct vfio_pci_hot_reset_info hdr;
+ 	struct vfio_pci_fill_info fill = {};
+ 	bool slot = false;
+-	int ret = 0;
++	int ret, count;
+ 
+ 	if (copy_from_user(&hdr, arg, minsz))
+ 		return -EFAULT;
+@@ -1277,9 +1276,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ 	else if (pci_probe_reset_bus(vdev->pdev->bus))
+ 		return -ENODEV;
+ 
+-	fill.devices = arg->devices;
+-	fill.devices_end = arg->devices +
+-			   (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
++	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
++					    &count, slot);
++	if (ret)
++		return ret;
++
++	if (count > (hdr.argsz - sizeof(hdr)) / sizeof(*devices)) {
++		hdr.count = count;
++		ret = -ENOSPC;
++		goto header;
++	}
++
++	devices = kcalloc(count, sizeof(*devices), GFP_KERNEL);
++	if (!devices)
++		return -ENOMEM;
++
++	fill.devices = devices;
++	fill.nr_devices = count;
+ 	fill.vdev = &vdev->vdev;
+ 
+ 	if (vfio_device_cdev_opened(&vdev->vdev))
+@@ -1291,16 +1304,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ 					    &fill, slot);
+ 	mutex_unlock(&vdev->vdev.dev_set->lock);
+ 	if (ret)
+-		return ret;
++		goto out;
++
++	if (copy_to_user(arg->devices, devices,
++			 sizeof(*devices) * fill.count)) {
++		ret = -EFAULT;
++		goto out;
++	}
+ 
+ 	hdr.count = fill.count;
+ 	hdr.flags = fill.flags;
+-	if (copy_to_user(arg, &hdr, minsz))
+-		return -EFAULT;
+ 
+-	if (fill.count > fill.devices - arg->devices)
+-		return -ENOSPC;
+-	return 0;
++header:
++	if (copy_to_user(arg, &hdr, minsz))
++		ret = -EFAULT;
++out:
++	kfree(devices);
++	return ret;
+ }
+ 
+ static int
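[Note: the hot-reset-info rework replaces copy_to_user() calls made from inside the bus walk (under dev_set->lock) with a count pass, a kcalloc()'d kernel-side array filled during the walk, and a single copy_to_user() at the end; if the topology grows between counting and filling, the fill callback now reports -EAGAIN instead of quietly truncating. A runnable userspace toy of the count/allocate/fill/copy-once shape:

#include <stdio.h>
#include <stdlib.h>

struct dep_dev { int bus, devfn; };

static const int topo[] = { 1, 2, 3 };	/* pretend results of a bus walk */
#define TOPO_LEN ((int)(sizeof(topo) / sizeof(topo[0])))

static int fill(struct dep_dev *devs, int nr)
{
	for (int i = 0; i < TOPO_LEN; i++) {
		if (i >= nr)
			return -11;	/* -EAGAIN: topology grew under us */
		devs[i].bus = topo[i];
		devs[i].devfn = 0;
	}
	return 0;
}

int main(void)
{
	int count = TOPO_LEN;		/* counting pass */
	struct dep_dev *devs = calloc(count, sizeof(*devs));

	if (!devs)
		return 1;
	if (fill(devs, count) == 0)	/* fill entirely in "kernel" memory */
		printf("first bus: %d\n", devs[0].bus);	/* single copy-out */
	free(devs);
	return 0;
}
]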
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 477f350a8bd09..e3a57196b0ee0 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -741,7 +741,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ 			ret = btrfs_bio_csum(bbio);
+ 			if (ret)
+ 				goto fail_put_bio;
+-		} else if (use_append) {
++		} else if (use_append ||
++			   (btrfs_is_zoned(fs_info) && inode &&
++			    inode->flags & BTRFS_INODE_NODATASUM)) {
+ 			ret = btrfs_alloc_dummy_sum(bbio);
+ 			if (ret)
+ 				goto fail_put_bio;
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 1e09aeea69c22..1a66be33bb048 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1785,6 +1785,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
+ 	struct btrfs_block_group *bg;
+ 	struct btrfs_space_info *space_info;
++	LIST_HEAD(retry_list);
+ 
+ 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
+ 		return;
+@@ -1921,8 +1922,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 		}
+ 
+ next:
+-		if (ret)
+-			btrfs_mark_bg_to_reclaim(bg);
++		if (ret) {
++			/* Refcount held by the reclaim_bgs list after splice. */
++			btrfs_get_block_group(bg);
++			list_add_tail(&bg->bg_list, &retry_list);
++		}
+ 		btrfs_put_block_group(bg);
+ 
+ 		mutex_unlock(&fs_info->reclaim_bgs_lock);
+@@ -1942,6 +1946,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ 	mutex_unlock(&fs_info->reclaim_bgs_lock);
+ end:
++	spin_lock(&fs_info->unused_bgs_lock);
++	list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
++	spin_unlock(&fs_info->unused_bgs_lock);
+ 	btrfs_exclop_finish(fs_info);
+ 	sb_end_write(fs_info->sb);
+ }
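[Note: the reclaim worker no longer re-marks a failed block group for reclaim while it is still draining reclaim_bgs, which could re-queue the group into the very list being consumed. Failures are parked on a local retry_list, with an extra reference held for the time they sit there, and spliced back under unused_bgs_lock once the pass ends. A condensed kernel-context sketch of the shape; next_candidate() and try_reclaim() are invented stand-ins for the loop body, not real helpers:

LIST_HEAD(retry_list);
struct btrfs_block_group *bg;

while ((bg = next_candidate(fs_info))) {	/* invented helper */
	if (try_reclaim(bg)) {			/* invented helper */
		/* Reference held by retry_list until the splice below. */
		btrfs_get_block_group(bg);
		list_add_tail(&bg->bg_list, &retry_list);
	}
	btrfs_put_block_group(bg);		/* drop the iteration ref */
}

spin_lock(&fs_info->unused_bgs_lock);
list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
]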
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 714f83632e3f9..66b5a68b0254e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -831,6 +831,8 @@ static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
+ 		return 0;
+ 	if (order == MB_NUM_ORDERS(sb))
+ 		order--;
++	if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
++		order = MB_NUM_ORDERS(sb) - 1;
+ 	return order;
+ }
+ 
+@@ -1008,6 +1010,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+ 	 * goal length.
+ 	 */
+ 	order = fls(ac->ac_g_ex.fe_len) - 1;
++	if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
++		order = MB_NUM_ORDERS(ac->ac_sb);
+ 	min_order = order - sbi->s_mb_best_avail_max_trim_order;
+ 	if (min_order < 0)
+ 		min_order = 0;
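[Note: both mballoc hunks defend array indexing. The per-order fragment lists have MB_NUM_ORDERS(sb) entries, and a corrupted on-disk length could otherwise yield an order past the end; WARN_ON_ONCE leaves a one-time breadcrumb while clamping the value so allocation continues. A runnable sketch of that clamp-and-warn-once idiom:

#include <stdio.h>

#define NUM_ORDERS 14	/* stands in for MB_NUM_ORDERS(sb) */

static int clamp_order(int order)
{
	static int warned;

	if (order >= NUM_ORDERS) {
		if (!warned++)
			fprintf(stderr, "WARNING: order %d out of range\n",
				order);
		order = NUM_ORDERS - 1;	/* degrade gracefully, don't crash */
	}
	return order;
}

int main(void)
{
	int lists[NUM_ORDERS] = { 0 };

	lists[clamp_order(4096)]++;	/* bogus on-disk value, still safe */
	printf("largest-order bucket: %d\n", lists[NUM_ORDERS - 1]);
	return 0;
}
]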
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 044135796f2b6..4b368f4dbc45a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5551,19 +5551,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 	if (err)
+ 		goto failed_mount6;
+ 
+-	err = ext4_register_sysfs(sb);
+-	if (err)
+-		goto failed_mount7;
+-
+ 	err = ext4_init_orphan_info(sb);
+ 	if (err)
+-		goto failed_mount8;
++		goto failed_mount7;
+ #ifdef CONFIG_QUOTA
+ 	/* Enable quota usage during mount. */
+ 	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
+ 		err = ext4_enable_quotas(sb);
+ 		if (err)
+-			goto failed_mount9;
++			goto failed_mount8;
+ 	}
+ #endif  /* CONFIG_QUOTA */
+ 
+@@ -5589,7 +5585,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		ext4_msg(sb, KERN_INFO, "recovery complete");
+ 		err = ext4_mark_recovery_complete(sb, es);
+ 		if (err)
+-			goto failed_mount10;
++			goto failed_mount9;
+ 	}
+ 
+ 	if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
+@@ -5606,15 +5602,17 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 	atomic_set(&sbi->s_warning_count, 0);
+ 	atomic_set(&sbi->s_msg_count, 0);
+ 
++	/* Register sysfs after all initializations are complete. */
++	err = ext4_register_sysfs(sb);
++	if (err)
++		goto failed_mount9;
++
+ 	return 0;
+ 
+-failed_mount10:
++failed_mount9:
+ 	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+-failed_mount9: __maybe_unused
++failed_mount8: __maybe_unused
+ 	ext4_release_orphan_info(sb);
+-failed_mount8:
+-	ext4_unregister_sysfs(sb);
+-	kobject_put(&sbi->s_kobj);
+ failed_mount7:
+ 	ext4_unregister_li_request(sb);
+ failed_mount6:
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 6d332dff79ddc..63cbda3700ea9 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -29,6 +29,7 @@ typedef enum {
+ 	attr_trigger_test_error,
+ 	attr_first_error_time,
+ 	attr_last_error_time,
++	attr_clusters_in_group,
+ 	attr_feature,
+ 	attr_pointer_ui,
+ 	attr_pointer_ul,
+@@ -104,7 +105,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
+ 	int ret;
+ 
+ 	ret = kstrtoull(skip_spaces(buf), 0, &val);
+-	if (ret || val >= clusters)
++	if (ret || val >= clusters || (s64)val < 0)
+ 		return -EINVAL;
+ 
+ 	atomic64_set(&sbi->s_resv_clusters, val);
+@@ -207,13 +208,14 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
+ 
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+ 		 ext4_sb_info, s_inode_readahead_blks);
++EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
++		 ext4_sb_info, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+ EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
+ EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
+ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
+ EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
+-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+ EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
+ EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
+ EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
+@@ -392,6 +394,7 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+ 				(unsigned long long)
+ 			percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
+ 	case attr_inode_readahead:
++	case attr_clusters_in_group:
+ 	case attr_pointer_ui:
+ 		if (!ptr)
+ 			return 0;
+@@ -451,7 +454,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ 						s_kobj);
+ 	struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+ 	void *ptr = calc_ptr(a, sbi);
+-	unsigned long t;
++	unsigned int t;
++	unsigned long lt;
+ 	int ret;
+ 
+ 	switch (a->attr_id) {
+@@ -460,7 +464,7 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ 	case attr_pointer_ui:
+ 		if (!ptr)
+ 			return 0;
+-		ret = kstrtoul(skip_spaces(buf), 0, &t);
++		ret = kstrtouint(skip_spaces(buf), 0, &t);
+ 		if (ret)
+ 			return ret;
+ 		if (a->attr_ptr == ptr_ext4_super_block_offset)
+@@ -468,13 +472,21 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ 		else
+ 			*((unsigned int *) ptr) = t;
+ 		return len;
++	case attr_clusters_in_group:
++		ret = kstrtouint(skip_spaces(buf), 0, &t);
++		if (ret)
++			return ret;
++		if (t > sbi->s_clusters_per_group)
++			return -EINVAL;
++		*((unsigned int *) ptr) = t;
++		return len;
+ 	case attr_pointer_ul:
+ 		if (!ptr)
+ 			return 0;
+-		ret = kstrtoul(skip_spaces(buf), 0, &t);
++		ret = kstrtoul(skip_spaces(buf), 0, &lt);
+ 		if (ret)
+ 			return ret;
+-		*((unsigned long *) ptr) = t;
++		*((unsigned long *) ptr) = lt;
+ 		return len;
+ 	case attr_inode_readahead:
+ 		return inode_readahead_blks_store(sbi, buf, len);
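[Note: the sysfs store fix is about parse width. attr_pointer_ui attributes back an unsigned int, but the old code parsed into an unsigned long, so on 64-bit a value such as 2^32 parsed successfully and was silently truncated on assignment; mb_group_prealloc additionally gains its own attr_clusters_in_group id so it can be bounded by s_clusters_per_group. A runnable demonstration of the truncation the kstrtouint() switch prevents, assuming a 64-bit unsigned long:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

int main(void)
{
	const char *buf = "4294967296";		/* 2^32 */
	unsigned long t = strtoul(buf, NULL, 0);
	unsigned int field = (unsigned int)t;	/* old path: silently 0 */

	printf("parsed %lu, stored %u\n", t, field);

	/* New path: range-check against the real field width up front. */
	if (t > UINT_MAX)
		puts("kstrtouint() would have returned -ERANGE instead");
	return 0;
}
]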
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 9fdd134220736..78f06f86c3835 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1619,6 +1619,7 @@ static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
+ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 				struct ext4_xattr_search *s,
+ 				handle_t *handle, struct inode *inode,
++				struct inode *new_ea_inode,
+ 				bool is_block)
+ {
+ 	struct ext4_xattr_entry *last, *next;
+@@ -1626,7 +1627,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
+ 	int in_inode = i->in_inode;
+ 	struct inode *old_ea_inode = NULL;
+-	struct inode *new_ea_inode = NULL;
+ 	size_t old_size, new_size;
+ 	int ret;
+ 
+@@ -1711,38 +1711,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 			old_ea_inode = NULL;
+ 			goto out;
+ 		}
+-	}
+-	if (i->value && in_inode) {
+-		WARN_ON_ONCE(!i->value_len);
+-
+-		new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+-					i->value, i->value_len);
+-		if (IS_ERR(new_ea_inode)) {
+-			ret = PTR_ERR(new_ea_inode);
+-			new_ea_inode = NULL;
+-			goto out;
+-		}
+-	}
+ 
+-	if (old_ea_inode) {
+ 		/* We are ready to release ref count on the old_ea_inode. */
+ 		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
+-		if (ret) {
+-			/* Release newly required ref count on new_ea_inode. */
+-			if (new_ea_inode) {
+-				int err;
+-
+-				err = ext4_xattr_inode_dec_ref(handle,
+-							       new_ea_inode);
+-				if (err)
+-					ext4_warning_inode(new_ea_inode,
+-						  "dec ref new_ea_inode err=%d",
+-						  err);
+-				ext4_xattr_inode_free_quota(inode, new_ea_inode,
+-							    i->value_len);
+-			}
++		if (ret)
+ 			goto out;
+-		}
+ 
+ 		ext4_xattr_inode_free_quota(inode, old_ea_inode,
+ 					    le32_to_cpu(here->e_value_size));
+@@ -1866,7 +1839,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 	ret = 0;
+ out:
+ 	iput(old_ea_inode);
+-	iput(new_ea_inode);
+ 	return ret;
+ }
+ 
+@@ -1929,9 +1901,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 	size_t old_ea_inode_quota = 0;
+ 	unsigned int ea_ino;
+ 
+-
+ #define header(x) ((struct ext4_xattr_header *)(x))
+ 
++	/* If we need EA inode, prepare it before locking the buffer */
++	if (i->value && i->in_inode) {
++		WARN_ON_ONCE(!i->value_len);
++
++		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++					i->value, i->value_len);
++		if (IS_ERR(ea_inode)) {
++			error = PTR_ERR(ea_inode);
++			ea_inode = NULL;
++			goto cleanup;
++		}
++	}
++
+ 	if (s->base) {
+ 		int offset = (char *)s->here - bs->bh->b_data;
+ 
+@@ -1940,6 +1924,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 						      EXT4_JTR_NONE);
+ 		if (error)
+ 			goto cleanup;
++
+ 		lock_buffer(bs->bh);
+ 
+ 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
+@@ -1966,7 +1951,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 			}
+ 			ea_bdebug(bs->bh, "modifying in-place");
+ 			error = ext4_xattr_set_entry(i, s, handle, inode,
+-						     true /* is_block */);
++					     ea_inode, true /* is_block */);
+ 			ext4_xattr_block_csum_set(inode, bs->bh);
+ 			unlock_buffer(bs->bh);
+ 			if (error == -EFSCORRUPTED)
+@@ -2034,29 +2019,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 		s->end = s->base + sb->s_blocksize;
+ 	}
+ 
+-	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
++	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++				     true /* is_block */);
+ 	if (error == -EFSCORRUPTED)
+ 		goto bad_block;
+ 	if (error)
+ 		goto cleanup;
+ 
+-	if (i->value && s->here->e_value_inum) {
+-		/*
+-		 * A ref count on ea_inode has been taken as part of the call to
+-		 * ext4_xattr_set_entry() above. We would like to drop this
+-		 * extra ref but we have to wait until the xattr block is
+-		 * initialized and has its own ref count on the ea_inode.
+-		 */
+-		ea_ino = le32_to_cpu(s->here->e_value_inum);
+-		error = ext4_xattr_inode_iget(inode, ea_ino,
+-					      le32_to_cpu(s->here->e_hash),
+-					      &ea_inode);
+-		if (error) {
+-			ea_inode = NULL;
+-			goto cleanup;
+-		}
+-	}
+-
+ inserted:
+ 	if (!IS_LAST_ENTRY(s->first)) {
+ 		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
+@@ -2198,17 +2167,16 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 
+ cleanup:
+ 	if (ea_inode) {
+-		int error2;
+-
+-		error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+-		if (error2)
+-			ext4_warning_inode(ea_inode, "dec ref error=%d",
+-					   error2);
++		if (error) {
++			int error2;
+ 
+-		/* If there was an error, revert the quota charge. */
+-		if (error)
++			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++			if (error2)
++				ext4_warning_inode(ea_inode, "dec ref error=%d",
++						   error2);
+ 			ext4_xattr_inode_free_quota(inode, ea_inode,
+ 						    i_size_read(ea_inode));
++		}
+ 		iput(ea_inode);
+ 	}
+ 	if (ce)
+@@ -2266,14 +2234,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ {
+ 	struct ext4_xattr_ibody_header *header;
+ 	struct ext4_xattr_search *s = &is->s;
++	struct inode *ea_inode = NULL;
+ 	int error;
+ 
+ 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ 		return -ENOSPC;
+ 
+-	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
+-	if (error)
++	/* If we need EA inode, prepare it before locking the buffer */
++	if (i->value && i->in_inode) {
++		WARN_ON_ONCE(!i->value_len);
++
++		ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
++					i->value, i->value_len);
++		if (IS_ERR(ea_inode))
++			return PTR_ERR(ea_inode);
++	}
++	error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
++				     false /* is_block */);
++	if (error) {
++		if (ea_inode) {
++			int error2;
++
++			error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
++			if (error2)
++				ext4_warning_inode(ea_inode, "dec ref error=%d",
++						   error2);
++
++			ext4_xattr_inode_free_quota(inode, ea_inode,
++						    i_size_read(ea_inode));
++			iput(ea_inode);
++		}
+ 		return error;
++	}
+ 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
+ 	if (!IS_LAST_ENTRY(s->first)) {
+ 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+@@ -2282,6 +2274,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ 		header->h_magic = cpu_to_le32(0);
+ 		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+ 	}
++	iput(ea_inode);
+ 	return 0;
+ }
+ 
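[Note: the xattr restructuring has one theme: ext4_xattr_inode_lookup_create() can sleep (inode allocation, journalling), so it must not run under the xattr block's buffer lock. The callers now create the EA inode first and pass it into ext4_xattr_set_entry(), and each caller owns the failure unwind (drop the reference, return the quota). A condensed kernel-context sketch of the new calling convention; drop_ea_inode() is an invented shorthand for the dec-ref-plus-quota cleanup in the patch:

struct inode *ea_inode = NULL;
int error;

if (i->value && i->in_inode) {
	/* May sleep: do it before any buffer lock is taken. */
	ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
						  i->value, i->value_len);
	if (IS_ERR(ea_inode))
		return PTR_ERR(ea_inode);
}

error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, false);
if (error && ea_inode)
	drop_ea_inode(handle, inode, ea_inode);	/* invented shorthand */
iput(ea_inode);
return error;
]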
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 7df5ad84cb5ea..15c9a9f5750bc 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1187,7 +1187,17 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
+ 		default:
+ 			BUG();
+ 		}
+-		if (err < 0 && err != -ENOENT)
++		if (err == -ENOENT) {
++			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
++			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
++			f2fs_err_ratelimited(sbi,
++				"truncate node fail, ino:%lu, nid:%u, "
++				"offset[0]:%d, offset[1]:%d, nofs:%d",
++				inode->i_ino, dn.nid, offset[0],
++				offset[1], nofs);
++			err = 0;
++		}
++		if (err < 0)
+ 			goto fail;
+ 		if (offset[1] == 0 &&
+ 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index a4bc26dfdb1af..2f75a7dfc311d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2132,8 +2132,6 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
+ 	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ 	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
+ 
+-	sbi->sb->s_flags &= ~SB_INLINECRYPT;
+-
+ 	set_opt(sbi, INLINE_XATTR);
+ 	set_opt(sbi, INLINE_DATA);
+ 	set_opt(sbi, INLINE_DENTRY);
+@@ -4131,9 +4129,15 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ 	if (shutdown)
+ 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ 
+-	/* continue filesystem operators if errors=continue */
+-	if (continue_fs || f2fs_readonly(sb))
++	/*
++	 * Continue filesystem operations if errors=continue. Should not set
++	 * RO by shutdown, since RO bypasses thaw_super which can hang the
++	 * system.
++	 */
++	if (continue_fs || f2fs_readonly(sb) || shutdown) {
++		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
+ 		return;
++	}
+ 
+ 	f2fs_warn(sbi, "Remounting filesystem read-only");
+ 	/*
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index e4f17c53ddfcf..d31853032a931 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2069,6 +2069,7 @@ static long wb_writeback(struct bdi_writeback *wb,
+ 	struct inode *inode;
+ 	long progress;
+ 	struct blk_plug plug;
++	bool queued = false;
+ 
+ 	blk_start_plug(&plug);
+ 	for (;;) {
+@@ -2111,8 +2112,10 @@ static long wb_writeback(struct bdi_writeback *wb,
+ 			dirtied_before = jiffies;
+ 
+ 		trace_writeback_start(wb, work);
+-		if (list_empty(&wb->b_io))
++		if (list_empty(&wb->b_io)) {
+ 			queue_io(wb, work, dirtied_before);
++			queued = true;
++		}
+ 		if (work->sb)
+ 			progress = writeback_sb_inodes(work->sb, wb, work);
+ 		else
+@@ -2127,7 +2130,7 @@ static long wb_writeback(struct bdi_writeback *wb,
+ 		 * mean the overall work is done. So we keep looping as long
+ 		 * as made some progress on cleaning pages or inodes.
+ 		 */
+-		if (progress) {
++		if (progress || !queued) {
+ 			spin_unlock(&wb->list_lock);
+ 			continue;
+ 		}
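[Note: the queued flag records whether this pass actually called queue_io(). A pass that inherited a non-empty b_io and wrote nothing used to be treated as "no progress", letting wb_writeback() drop into the wait/bail path even though the dirty lists still held inodes that only queue_io() can move over; with the fix the loop goes around once more whenever it has not queued yet. A runnable decision table for the fixed condition (the writeback machinery itself is not modelled):

#include <stdio.h>

/* Only a pass that both queued new work and made no progress may stop. */
static const char *decide(int progress, int queued)
{
	return (progress || !queued) ? "keep looping" : "stop";
}

int main(void)
{
	printf("progress=1 queued=1 -> %s\n", decide(1, 1));
	printf("progress=0 queued=1 -> %s\n", decide(0, 1));
	printf("progress=0 queued=0 -> %s\n", decide(0, 0));	/* old code stopped here */
	return 0;
}
]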
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 604fea3a26ff0..86807086b2dfd 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -479,12 +479,6 @@ int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
+ 	return status;
+ }
+ 
+-
+-struct ocfs2_triggers {
+-	struct jbd2_buffer_trigger_type	ot_triggers;
+-	int				ot_offset;
+-};
+-
+ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
+ {
+ 	return container_of(triggers, struct ocfs2_triggers, ot_triggers);
+@@ -548,85 +542,76 @@ static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
+ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
+ 				struct buffer_head *bh)
+ {
++	struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
++
+ 	mlog(ML_ERROR,
+ 	     "ocfs2_abort_trigger called by JBD2.  bh = 0x%lx, "
+ 	     "bh->b_blocknr = %llu\n",
+ 	     (unsigned long)bh,
+ 	     (unsigned long long)bh->b_blocknr);
+ 
+-	ocfs2_error(bh->b_assoc_map->host->i_sb,
++	ocfs2_error(ot->sb,
+ 		    "JBD2 has aborted our journal, ocfs2 cannot continue\n");
+ }
+ 
+-static struct ocfs2_triggers di_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_dinode, i_check),
+-};
+-
+-static struct ocfs2_triggers eb_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_extent_block, h_check),
+-};
+-
+-static struct ocfs2_triggers rb_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_refcount_block, rf_check),
+-};
+-
+-static struct ocfs2_triggers gd_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_group_desc, bg_check),
+-};
+-
+-static struct ocfs2_triggers db_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_db_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-};
++static void ocfs2_setup_csum_triggers(struct super_block *sb,
++				      enum ocfs2_journal_trigger_type type,
++				      struct ocfs2_triggers *ot)
++{
++	BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);
+ 
+-static struct ocfs2_triggers xb_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_xattr_block, xb_check),
+-};
++	switch (type) {
++	case OCFS2_JTR_DI:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
++		break;
++	case OCFS2_JTR_EB:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
++		break;
++	case OCFS2_JTR_RB:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
++		break;
++	case OCFS2_JTR_GD:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
++		break;
++	case OCFS2_JTR_DB:
++		ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
++		break;
++	case OCFS2_JTR_XB:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
++		break;
++	case OCFS2_JTR_DQ:
++		ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
++		break;
++	case OCFS2_JTR_DR:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
++		break;
++	case OCFS2_JTR_DL:
++		ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
++		ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
++		break;
++	case OCFS2_JTR_NONE:
++		/* To make compiler happy... */
++		return;
++	}
+ 
+-static struct ocfs2_triggers dq_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_dq_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-};
++	ot->ot_triggers.t_abort = ocfs2_abort_trigger;
++	ot->sb = sb;
++}
+ 
+-static struct ocfs2_triggers dr_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_dx_root_block, dr_check),
+-};
++void ocfs2_initialize_journal_triggers(struct super_block *sb,
++				       struct ocfs2_triggers triggers[])
++{
++	enum ocfs2_journal_trigger_type type;
+ 
+-static struct ocfs2_triggers dl_triggers = {
+-	.ot_triggers = {
+-		.t_frozen = ocfs2_frozen_trigger,
+-		.t_abort = ocfs2_abort_trigger,
+-	},
+-	.ot_offset	= offsetof(struct ocfs2_dx_leaf, dl_check),
+-};
++	for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
++		ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
++}
+ 
+ static int __ocfs2_journal_access(handle_t *handle,
+ 				  struct ocfs2_caching_info *ci,
+@@ -708,56 +693,91 @@ static int __ocfs2_journal_access(handle_t *handle,
+ int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				      &osb->s_journal_triggers[OCFS2_JTR_DI],
++				      type);
+ }
+ 
+ int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				      &osb->s_journal_triggers[OCFS2_JTR_EB],
++				      type);
+ }
+ 
+ int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				      &osb->s_journal_triggers[OCFS2_JTR_RB],
+ 				      type);
+ }
+ 
+ int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_GD],
++				     type);
+ }
+ 
+ int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_DB],
++				     type);
+ }
+ 
+ int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_XB],
++				     type);
+ }
+ 
+ int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_DQ],
++				     type);
+ }
+ 
+ int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_DR],
++				     type);
+ }
+ 
+ int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
+ 			    struct buffer_head *bh, int type)
+ {
+-	return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
++	struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
++
++	return __ocfs2_journal_access(handle, ci, bh,
++				     &osb->s_journal_triggers[OCFS2_JTR_DL],
++				     type);
+ }
+ 
+ int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
+@@ -778,13 +798,15 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
+ 		if (!is_handle_aborted(handle)) {
+ 			journal_t *journal = handle->h_transaction->t_journal;
+ 
+-			mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
+-					"Aborting transaction and journal.\n");
++			mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
++			     "handle type %u started at line %u, credits %u/%u "
++			     "errcode %d. Aborting transaction and journal.\n",
++			     handle->h_type, handle->h_line_no,
++			     handle->h_requested_credits,
++			     jbd2_handle_buffer_credits(handle), status);
+ 			handle->h_err = status;
+ 			jbd2_journal_abort_handle(handle);
+ 			jbd2_journal_abort(journal, status);
+-			ocfs2_abort(bh->b_assoc_map->host->i_sb,
+-				    "Journal already aborted.\n");
+ 		}
+ 	}
+ }
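[Note: two problems are fixed at once here: the static trigger structs were shared by all mounts, and the abort path recovered the superblock from bh->b_assoc_map, which is not guaranteed to be set. Each ocfs2_super now carries its own s_journal_triggers[] with the sb embedded, and t_abort gets back to it via container_of on the embedded jbd2_buffer_trigger_type. The recovery trick in miniature (runnable; struct contents simplified):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct trigger_type { void (*t_abort)(struct trigger_type *t); };

struct ocfs2_triggers {
	struct trigger_type ot_triggers;
	int ot_offset;
	const char *sb;			/* stand-in for struct super_block * */
};

static void abort_trigger(struct trigger_type *t)
{
	/* JBD2 hands us only the embedded member; recover the wrapper. */
	struct ocfs2_triggers *ot =
		container_of(t, struct ocfs2_triggers, ot_triggers);

	printf("aborting journal of %s\n", ot->sb);
}

int main(void)
{
	struct ocfs2_triggers ot = {
		.ot_triggers.t_abort = abort_trigger,
		.sb = "sb-for-/dev/sda1",
	};

	ot.ot_triggers.t_abort(&ot.ot_triggers);
	return 0;
}
]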
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index a503c553bab21..8fe826143d7bf 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -284,6 +284,30 @@ enum ocfs2_mount_options
+ #define OCFS2_OSB_ERROR_FS	0x0004
+ #define OCFS2_DEFAULT_ATIME_QUANTUM	60
+ 
++struct ocfs2_triggers {
++	struct jbd2_buffer_trigger_type	ot_triggers;
++	int				ot_offset;
++	struct super_block		*sb;
++};
++
++enum ocfs2_journal_trigger_type {
++	OCFS2_JTR_DI,
++	OCFS2_JTR_EB,
++	OCFS2_JTR_RB,
++	OCFS2_JTR_GD,
++	OCFS2_JTR_DB,
++	OCFS2_JTR_XB,
++	OCFS2_JTR_DQ,
++	OCFS2_JTR_DR,
++	OCFS2_JTR_DL,
++	OCFS2_JTR_NONE  /* This must be the last entry */
++};
++
++#define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE
++
++void ocfs2_initialize_journal_triggers(struct super_block *sb,
++				       struct ocfs2_triggers triggers[]);
++
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+ struct ocfs2_recovery_map;
+@@ -351,6 +375,9 @@ struct ocfs2_super
+ 	struct ocfs2_journal *journal;
+ 	unsigned long osb_commit_interval;
+ 
++	/* Journal triggers for checksum */
++	struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT];
++
+ 	struct delayed_work		la_enable_wq;
+ 
+ 	/*
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 8aabaed2c1cb9..afee70125ae3b 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1075,9 +1075,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ 	debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
+ 			    osb, &ocfs2_osb_debug_fops);
+ 
+-	if (ocfs2_meta_ecc(osb))
++	if (ocfs2_meta_ecc(osb)) {
++		ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers);
+ 		ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
+ 							osb->osb_debug_root);
++	}
+ 
+ 	status = ocfs2_mount_volume(sb);
+ 	if (status < 0)
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 063409069f56d..5868cb2229552 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -181,6 +181,10 @@ static int ovl_check_encode_origin(struct dentry *dentry)
+ 	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+ 	bool decodable = ofs->config.nfs_export;
+ 
++	/* No upper layer? */
++	if (!ovl_upper_mnt(ofs))
++		return 1;
++
+ 	/* Lower file handle for non-upper non-decodable */
+ 	if (!ovl_dentry_upper(dentry) && !decodable)
+ 		return 1;
+@@ -209,7 +213,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
+ 	 * ovl_connect_layer() will try to make origin's layer "connected" by
+ 	 * copying up a "connectable" ancestor.
+ 	 */
+-	if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable)
++	if (d_is_dir(dentry) && decodable)
+ 		return ovl_connect_layer(dentry);
+ 
+ 	/* Lower file handle for indexed and non-upper dir/non-dir */
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 4fb21affe4e11..d8ede68260007 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -134,7 +134,7 @@ module_param(enable_oplocks, bool, 0644);
+ MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
+ 
+ module_param(enable_gcm_256, bool, 0644);
+-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
++MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
+ 
+ module_param(require_gcm_256, bool, 0644);
+ MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
+diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
+index 758163af39c26..78ecc633606fb 100644
+--- a/fs/udf/udftime.c
++++ b/fs/udf/udftime.c
+@@ -46,13 +46,18 @@ udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
+ 	dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
+ 			src.second);
+ 	dest->tv_sec -= offset * 60;
+-	dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+-			src.hundredsOfMicroseconds * 100 + src.microseconds);
++
+ 	/*
+ 	 * Sanitize nanosecond field since reportedly some filesystems are
+ 	 * recorded with bogus sub-second values.
+ 	 */
+-	dest->tv_nsec %= NSEC_PER_SEC;
++	if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
++	    src.microseconds < 100) {
++		dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
++			src.hundredsOfMicroseconds * 100 + src.microseconds);
++	} else {
++		dest->tv_nsec = 0;
++	}
+ }
+ 
+ void
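[Note: taking "% NSEC_PER_SEC" only guaranteed tv_nsec was in range, not that it meant anything: with garbage sub-second fields the old code still produced a nonzero pseudo-random value. Each field is stored in hundredths of its unit, so anything >= 100 is invalid and the whole sub-second part is now dropped. A runnable comparison of the two behaviours:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	int centisec = 255, hundreds_us = 255, us = 255;	/* garbage */

	long old_ns = (1000L * (centisec * 10000 + hundreds_us * 100 + us))
		      % NSEC_PER_SEC;

	long new_ns = 0;
	if (centisec < 100 && hundreds_us < 100 && us < 100)
		new_ns = 1000L * (centisec * 10000 + hundreds_us * 100 + us);

	printf("old tv_nsec=%ld (nonsense), new tv_nsec=%ld\n",
	       old_ns, new_ns);
	return 0;
}
]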
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 3d90716f95229..f5fbc15e56980 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -662,6 +662,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ 			    acpi_execute_reg_methods(acpi_handle device,
+ 						     acpi_adr_space_type
+ 						     space_id))
++ACPI_EXTERNAL_RETURN_STATUS(acpi_status
++			    acpi_execute_orphan_reg_method(acpi_handle device,
++							   acpi_adr_space_type
++							   space_id))
+ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ 			    acpi_remove_address_space_handler(acpi_handle
+ 							      device,
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 7cb1b75eee381..e742db470a711 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -737,6 +737,8 @@ struct bpf_verifier_env {
+ 	/* Same as scratched_regs but for stack slots */
+ 	u64 scratched_stack_slots;
+ 	u64 prev_log_pos, prev_insn_print_pos;
++	/* buffer used to temporarily hold constants as scalar registers */
++	struct bpf_reg_state fake_reg[2];
+ 	/* buffer used to generate temporary string representations,
+ 	 * e.g., in reg_type_str() to generate reg_type string
+ 	 */
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 0ce6ff0d9c9aa..de4cf0ee96f79 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -70,7 +70,6 @@ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
+ extern void cpuset_force_rebuild(void);
+ extern void cpuset_update_active_cpus(void);
+-extern void cpuset_wait_for_hotplug(void);
+ extern void inc_dl_tasks_cs(struct task_struct *task);
+ extern void dec_dl_tasks_cs(struct task_struct *task);
+ extern void cpuset_lock(void);
+@@ -185,8 +184,6 @@ static inline void cpuset_update_active_cpus(void)
+ 	partition_sched_domains(1, NULL, NULL);
+ }
+ 
+-static inline void cpuset_wait_for_hotplug(void) { }
+-
+ static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+ static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+ static inline void cpuset_lock(void) { }
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index 1068a7318d897..75a2fb8b16c32 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -21,6 +21,8 @@ enum kcov_mode {
+ 	KCOV_MODE_TRACE_PC = 2,
+ 	/* Collecting comparison operands mode. */
+ 	KCOV_MODE_TRACE_CMP = 3,
++	/* The process owns a KCOV remote reference. */
++	KCOV_MODE_REMOTE = 4,
+ };
+ 
+ #define KCOV_IN_CTXSW	(1 << 30)
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index 0ff44d6633e33..5fcbc254d1864 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -378,11 +378,15 @@ static inline void wait_for_kprobe_optimizer(void) { }
+ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 				  struct ftrace_ops *ops, struct ftrace_regs *fregs);
+ extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
++/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
++extern bool kprobe_ftrace_disabled __read_mostly;
++extern void kprobe_ftrace_kill(void);
+ #else
+ static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+ {
+ 	return -EINVAL;
+ }
++static inline void kprobe_ftrace_kill(void) {}
+ #endif /* CONFIG_KPROBES_ON_FTRACE */
+ 
+ /* Get the kprobe at this addr (if any) - called with preemption disabled */
+@@ -495,6 +499,9 @@ static inline void kprobe_flush_task(struct task_struct *tk)
+ static inline void kprobe_free_init_mem(void)
+ {
+ }
++static inline void kprobe_ftrace_kill(void)
++{
++}
+ static inline int disable_kprobe(struct kprobe *kp)
+ {
+ 	return -EOPNOTSUPP;
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 334e00efbde45..7e539f6f8c674 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -412,7 +412,7 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
+ 
+ #ifdef CONFIG_AUDIT
+ LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+-	 void **lsmrule)
++	 void **lsmrule, gfp_t gfp)
+ LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+ LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 7a9a07ea451bc..4338b1b4ac444 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -690,6 +690,8 @@ struct x86_cpu_id {
+ 	__u16 model;
+ 	__u16 steppings;
+ 	__u16 feature;	/* bit index */
++	/* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
++	__u16 flags;
+ 	kernel_ulong_t driver_data;
+ };
+ 
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 5283b6193e7dc..d31e59e2e411a 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -379,6 +379,10 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
+  */
+ static inline bool mapping_large_folio_support(struct address_space *mapping)
+ {
++	/* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
++	VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
++			"Anonymous mapping always supports large folio");
++
+ 	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ 		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ }
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 16493426a04ff..6f9c5ed5eb3ba 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2519,7 +2519,12 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+ 
+ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+ {
+-	return dev->error_state == pci_channel_io_perm_failure;
++	/*
++	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
++	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
++	 * the value (e.g. inside the loop in pci_dev_wait()).
++	 */
++	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
+ }
+ 
+ void pci_request_acs(void);
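[Note: the comment in the hunk is the whole story: error_state is updated with xchg()/cmpxchg() and polled without a shared lock, so without READ_ONCE() the compiler may legally load it once and cache the result across iterations of the wait loop in pci_dev_wait(). A userspace model of what READ_ONCE() buys; the kernel macro is more elaborate, but the volatile access is the essential part:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static int error_state;	/* imagine another thread xchg()ing this */

int main(void)
{
	error_state = 3;	/* say, pci_channel_io_perm_failure */

	/* Each evaluation forces a fresh load; a plain read in a loop whose
	 * body never writes error_state could be hoisted out entirely. */
	printf("disconnected: %d\n", READ_ONCE(error_state) == 3);
	return 0;
}
]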
+diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
+index 772d3280d35fa..f24546a3d3db3 100644
+--- a/include/linux/pm_domain.h
++++ b/include/linux/pm_domain.h
+@@ -260,6 +260,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ int pm_genpd_init(struct generic_pm_domain *genpd,
+ 		  struct dev_power_governor *gov, bool is_off);
+ int pm_genpd_remove(struct generic_pm_domain *genpd);
++struct device *dev_to_genpd_dev(struct device *dev);
+ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
+ int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
+ int dev_pm_genpd_remove_notifier(struct device *dev);
+@@ -307,6 +308,11 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
+ 	return -EOPNOTSUPP;
+ }
+ 
++static inline struct device *dev_to_genpd_dev(struct device *dev)
++{
++	return ERR_PTR(-EOPNOTSUPP);
++}
++
+ static inline int dev_pm_genpd_set_performance_state(struct device *dev,
+ 						     unsigned int state)
+ {
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 41a8f667bdfa0..5122e3ad83b19 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -2048,7 +2048,8 @@ static inline void security_key_post_create_or_update(struct key *keyring,
+ 
+ #ifdef CONFIG_AUDIT
+ #ifdef CONFIG_SECURITY
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++			     gfp_t gfp);
+ int security_audit_rule_known(struct audit_krule *krule);
+ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+ void security_audit_rule_free(void *lsmrule);
+@@ -2056,7 +2057,7 @@ void security_audit_rule_free(void *lsmrule);
+ #else
+ 
+ static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
+-					   void **lsmrule)
++					   void **lsmrule, gfp_t gfp)
+ {
+ 	return 0;
+ }
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index 7372124fbf90b..dd4b31ce6d5d4 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -154,6 +154,13 @@ struct serial_struct;
+  *
+  *	Optional. Called under the @tty->termios_rwsem. May sleep.
+  *
++ * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)``
++ *
++ *	This routine allows the @tty driver to decide if it can deal
++ *	with a particular @ldisc.
++ *
++ *	Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
++ *
+  * @set_ldisc: ``void ()(struct tty_struct *tty)``
+  *
+  *	This routine allows the @tty driver to be notified when the device's
+@@ -372,6 +379,7 @@ struct tty_operations {
+ 	void (*hangup)(struct tty_struct *tty);
+ 	int (*break_ctl)(struct tty_struct *tty, int state);
+ 	void (*flush_buffer)(struct tty_struct *tty);
++	int (*ldisc_ok)(struct tty_struct *tty, int ldisc);
+ 	void (*set_ldisc)(struct tty_struct *tty);
+ 	void (*wait_until_sent)(struct tty_struct *tty, int timeout);
+ 	void (*send_xchar)(struct tty_struct *tty, u8 ch);
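Like the other optional tty_operations hooks, ->ldisc_ok() is only invoked when the driver fills it in; a NULL hook means no veto. A sketch of how such a caller typically looks (hypo_ldisc_allowed() is illustrative, not the actual tty core code):

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* An unset optional hook is treated as "accept every ldisc". */
static int hypo_ldisc_allowed(struct tty_struct *tty, int disc)
{
        if (tty->ops->ldisc_ok)
                return tty->ops->ldisc_ok(tty, disc);
        return 0;
}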
+diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
+index 02bbdc577f8e2..a6a0bf4a247e5 100644
+--- a/include/net/netns/netfilter.h
++++ b/include/net/netns/netfilter.h
+@@ -15,6 +15,9 @@ struct netns_nf {
+ 	const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
+ #ifdef CONFIG_SYSCTL
+ 	struct ctl_table_header *nf_log_dir_header;
++#ifdef CONFIG_LWTUNNEL
++	struct ctl_table_header *nf_lwtnl_dir_header;
++#endif
+ #endif
+ 	struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+ 	struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 41ca14e81d55f..0014b9ee5e381 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -128,6 +128,7 @@ struct Qdisc {
+ 
+ 	struct rcu_head		rcu;
+ 	netdevice_tracker	dev_tracker;
++	struct lock_class_key	root_lock_key;
+ 	/* private data */
+ 	long privdata[] ____cacheline_aligned;
+ };
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 6b548dc2c4965..1d79a3b536cee 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -69,8 +69,10 @@
+ #define BLIST_RETRY_ITF		((__force blist_flags_t)(1ULL << 32))
+ /* Always retry ABORTED_COMMAND with ASC 0xc1 */
+ #define BLIST_RETRY_ASC_C1	((__force blist_flags_t)(1ULL << 33))
++/* Do not query the IO Advice Hints Grouping mode page */
++#define BLIST_SKIP_IO_HINTS	((__force blist_flags_t)(1ULL << 34))
+ 
+-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
++#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS
+ 
+ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ 			       (__force blist_flags_t) \
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 87f9aa7cf9255..956e2c715eb48 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1105,7 +1105,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
+ 			 * branch doesn't expect non PAGE_SIZE'd chunks.
+ 			 */
+ 			iter->bvec = bvec;
+-			iter->nr_segs = bvec->bv_len;
+ 			iter->count -= offset;
+ 			iter->iov_offset = offset;
+ 		} else {
+diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
+index 158ab09c605ba..b3722e5275e77 100644
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -293,6 +293,14 @@ static int io_sq_thread(void *data)
+ 		sqd->sq_cpu = raw_smp_processor_id();
+ 	}
+ 
++	/*
++	 * Force audit context to get setup, in case we do prep side async
++	 * operations that would trigger an audit call before any issue side
++	 * audit has been done.
++	 */
++	audit_uring_entry(IORING_OP_NOP);
++	audit_uring_exit(true, 0);
++
+ 	mutex_lock(&sqd->lock);
+ 	while (1) {
+ 		bool cap_entries, sqt_spin = false;
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index be8c680121e46..d6ef4f4f9cba5 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 			entry->rule.buflen += f_val;
+ 			f->lsm_str = str;
+ 			err = security_audit_rule_init(f->type, f->op, str,
+-						       (void **)&f->lsm_rule);
++						       (void **)&f->lsm_rule,
++						       GFP_KERNEL);
+ 			/* Keep currently invalid fields around in case they
+ 			 * become valid after a policy reload. */
+ 			if (err == -EINVAL) {
+@@ -799,7 +800,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
+ 
+ 	/* our own (refreshed) copy of lsm_rule */
+ 	ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
+-				       (void **)&df->lsm_rule);
++				       (void **)&df->lsm_rule, GFP_KERNEL);
+ 	/* Keep currently invalid fields around in case they
+ 	 * become valid after a policy reload. */
+ 	if (ret == -EINVAL) {
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index 050fe1ebf0f7d..d0febf07051ed 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -308,6 +308,7 @@ static long trie_update_elem(struct bpf_map *map,
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ 	struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
++	struct lpm_trie_node *free_node = NULL;
+ 	struct lpm_trie_node __rcu **slot;
+ 	struct bpf_lpm_trie_key_u8 *key = _key;
+ 	unsigned long irq_flags;
+@@ -382,7 +383,7 @@ static long trie_update_elem(struct bpf_map *map,
+ 			trie->n_entries--;
+ 
+ 		rcu_assign_pointer(*slot, new_node);
+-		kfree_rcu(node, rcu);
++		free_node = node;
+ 
+ 		goto out;
+ 	}
+@@ -429,6 +430,7 @@ static long trie_update_elem(struct bpf_map *map,
+ 	}
+ 
+ 	spin_unlock_irqrestore(&trie->lock, irq_flags);
++	kfree_rcu(free_node, rcu);
+ 
+ 	return ret;
+ }
+@@ -437,6 +439,7 @@ static long trie_update_elem(struct bpf_map *map,
+ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
++	struct lpm_trie_node *free_node = NULL, *free_parent = NULL;
+ 	struct bpf_lpm_trie_key_u8 *key = _key;
+ 	struct lpm_trie_node __rcu **trim, **trim2;
+ 	struct lpm_trie_node *node, *parent;
+@@ -506,8 +509,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ 		else
+ 			rcu_assign_pointer(
+ 				*trim2, rcu_access_pointer(parent->child[0]));
+-		kfree_rcu(parent, rcu);
+-		kfree_rcu(node, rcu);
++		free_parent = parent;
++		free_node = node;
+ 		goto out;
+ 	}
+ 
+@@ -521,10 +524,12 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
+ 		rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
+ 	else
+ 		RCU_INIT_POINTER(*trim, NULL);
+-	kfree_rcu(node, rcu);
++	free_node = node;
+ 
+ out:
+ 	spin_unlock_irqrestore(&trie->lock, irq_flags);
++	kfree_rcu(free_parent, rcu);
++	kfree_rcu(free_node, rcu);
+ 
+ 	return ret;
+ }
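The lpm_trie fix moves kfree_rcu() out of the spinlocked sections: nodes are unlinked and remembered under the lock, then queued for RCU freeing after the unlock (kfree_rcu() is a no-op on NULL, so the unconditional calls are safe). A minimal sketch of the pattern on a hypothetical singly linked list:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct hypo_node {
        struct hypo_node __rcu *next;
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(hypo_lock);

static void hypo_pop(struct hypo_node __rcu **head)
{
        struct hypo_node *victim;
        unsigned long flags;

        spin_lock_irqsave(&hypo_lock, flags);
        victim = rcu_dereference_protected(*head, lockdep_is_held(&hypo_lock));
        if (victim)
                rcu_assign_pointer(*head,
                        rcu_dereference_protected(victim->next,
                                                  lockdep_is_held(&hypo_lock)));
        spin_unlock_irqrestore(&hypo_lock, flags);

        /* Deferred until after the unlock; a NULL victim is a no-op. */
        kfree_rcu(victim, rcu);
}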
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 8a29309db4245..0ef18ae40bc5a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -14973,7 +14973,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
+ 	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
+ 	struct bpf_reg_state *eq_branch_regs;
+-	struct bpf_reg_state fake_reg = {};
+ 	u8 opcode = BPF_OP(insn->code);
+ 	bool is_jmp32;
+ 	int pred = -1;
+@@ -15039,7 +15038,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
+ 			return -EINVAL;
+ 		}
+-		src_reg = &fake_reg;
++		src_reg = &env->fake_reg[0];
++		memset(src_reg, 0, sizeof(*src_reg));
+ 		src_reg->type = SCALAR_VALUE;
+ 		__mark_reg_known(src_reg, insn->imm);
+ 	}
+@@ -15099,10 +15099,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ 				      &other_branch_regs[insn->src_reg],
+ 				      dst_reg, src_reg, opcode, is_jmp32);
+ 	} else /* BPF_SRC(insn->code) == BPF_K */ {
++		/* reg_set_min_max() can mangle the fake_reg. Make a copy
++		 * so that these are two different memory locations. The
++		 * src_reg is not used beyond here in context of K.
++		 */
++		memcpy(&env->fake_reg[1], &env->fake_reg[0],
++		       sizeof(env->fake_reg[0]));
+ 		err = reg_set_min_max(env,
+ 				      &other_branch_regs[insn->dst_reg],
+-				      src_reg /* fake one */,
+-				      dst_reg, src_reg /* same fake one */,
++				      &env->fake_reg[0],
++				      dst_reg, &env->fake_reg[1],
+ 				      opcode, is_jmp32);
+ 	}
+ 	if (err)
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index da24187c4e025..73ef0dabc3f22 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -201,6 +201,14 @@ struct cpuset {
+ 	struct list_head remote_sibling;
+ };
+ 
++/*
++ * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchronously
++ */
++struct cpuset_remove_tasks_struct {
++	struct work_struct work;
++	struct cpuset *cs;
++};
++
+ /*
+  * Exclusive CPUs distributed out to sub-partitions of top_cpuset
+  */
+@@ -449,12 +457,6 @@ static DEFINE_SPINLOCK(callback_lock);
+ 
+ static struct workqueue_struct *cpuset_migrate_mm_wq;
+ 
+-/*
+- * CPU / memory hotplug is handled asynchronously.
+- */
+-static void cpuset_hotplug_workfn(struct work_struct *work);
+-static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
+-
+ static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
+ 
+ static inline void check_insane_mems_config(nodemask_t *nodes)
+@@ -540,22 +542,10 @@ static void guarantee_online_cpus(struct task_struct *tsk,
+ 	rcu_read_lock();
+ 	cs = task_cs(tsk);
+ 
+-	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
++	while (!cpumask_intersects(cs->effective_cpus, pmask))
+ 		cs = parent_cs(cs);
+-		if (unlikely(!cs)) {
+-			/*
+-			 * The top cpuset doesn't have any online cpu as a
+-			 * consequence of a race between cpuset_hotplug_work
+-			 * and cpu hotplug notifier.  But we know the top
+-			 * cpuset's effective_cpus is on its way to be
+-			 * identical to cpu_online_mask.
+-			 */
+-			goto out_unlock;
+-		}
+-	}
+-	cpumask_and(pmask, pmask, cs->effective_cpus);
+ 
+-out_unlock:
++	cpumask_and(pmask, pmask, cs->effective_cpus);
+ 	rcu_read_unlock();
+ }
+ 
+@@ -1217,7 +1207,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/*
+ 	 * If we have raced with CPU hotplug, return early to avoid
+ 	 * passing doms with offlined cpu to partition_sched_domains().
+-	 * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
++	 * Anyways, cpuset_handle_hotplug() will rebuild sched domains.
+ 	 *
+ 	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
+ 	 * should be the same as the active CPUs, so checking only top_cpuset
+@@ -1260,12 +1250,17 @@ static void rebuild_sched_domains_locked(void)
+ }
+ #endif /* CONFIG_SMP */
+ 
+-void rebuild_sched_domains(void)
++static void rebuild_sched_domains_cpuslocked(void)
+ {
+-	cpus_read_lock();
+ 	mutex_lock(&cpuset_mutex);
+ 	rebuild_sched_domains_locked();
+ 	mutex_unlock(&cpuset_mutex);
++}
++
++void rebuild_sched_domains(void)
++{
++	cpus_read_lock();
++	rebuild_sched_domains_cpuslocked();
+ 	cpus_read_unlock();
+ }
+ 
+@@ -2079,14 +2074,11 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+ 
+ 	/*
+ 	 * For partcmd_update without newmask, it is being called from
+-	 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
+-	 * Update the load balance flag and scheduling domain if
+-	 * cpus_read_trylock() is successful.
++	 * cpuset_handle_hotplug(). Update the load balance flag and
++	 * scheduling domain accordingly.
+ 	 */
+-	if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
++	if ((cmd == partcmd_update) && !newmask)
+ 		update_partition_sd_lb(cs, old_prs);
+-		cpus_read_unlock();
+-	}
+ 
+ 	notify_partition_change(cs, old_prs);
+ 	return 0;
+@@ -3599,8 +3591,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	 * proceeding, so that we don't end up keep removing tasks added
+ 	 * after execution capability is restored.
+ 	 *
+-	 * cpuset_hotplug_work calls back into cgroup core via
+-	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
++	 * cpuset_handle_hotplug may call back into cgroup core asynchronously
++	 * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
+ 	 * operation like this one can lead to a deadlock through kernfs
+ 	 * active_ref protection.  Let's break the protection.  Losing the
+ 	 * protection is okay as we check whether @cs is online after
+@@ -3609,7 +3601,6 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
+ 	 */
+ 	css_get(&cs->css);
+ 	kernfs_break_active_protection(of->kn);
+-	flush_work(&cpuset_hotplug_work);
+ 
+ 	cpus_read_lock();
+ 	mutex_lock(&cpuset_mutex);
+@@ -4354,6 +4345,16 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+ 	}
+ }
+ 
++static void cpuset_migrate_tasks_workfn(struct work_struct *work)
++{
++	struct cpuset_remove_tasks_struct *s;
++
++	s = container_of(work, struct cpuset_remove_tasks_struct, work);
++	remove_tasks_in_empty_cpuset(s->cs);
++	css_put(&s->cs->css);
++	kfree(s);
++}
++
+ static void
+ hotplug_update_tasks_legacy(struct cpuset *cs,
+ 			    struct cpumask *new_cpus, nodemask_t *new_mems,
+@@ -4383,12 +4384,21 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ 	/*
+ 	 * Move tasks to the nearest ancestor with execution resources,
+ 	 * This is full cgroup operation which will also call back into
+-	 * cpuset. Should be done outside any lock.
++	 * cpuset. Execute it asynchronously using a workqueue.
+ 	 */
+-	if (is_empty) {
+-		mutex_unlock(&cpuset_mutex);
+-		remove_tasks_in_empty_cpuset(cs);
+-		mutex_lock(&cpuset_mutex);
++	if (is_empty && cs->css.cgroup->nr_populated_csets &&
++	    css_tryget_online(&cs->css)) {
++		struct cpuset_remove_tasks_struct *s;
++
++		s = kzalloc(sizeof(*s), GFP_KERNEL);
++		if (WARN_ON_ONCE(!s)) {
++			css_put(&cs->css);
++			return;
++		}
++
++		s->cs = cs;
++		INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
++		schedule_work(&s->work);
+ 	}
+ }
+ 
+@@ -4421,30 +4431,6 @@ void cpuset_force_rebuild(void)
+ 	force_rebuild = true;
+ }
+ 
+-/*
+- * Attempt to acquire a cpus_read_lock while a hotplug operation may be in
+- * progress.
+- * Return: true if successful, false otherwise
+- *
+- * To avoid circular lock dependency between cpuset_mutex and cpus_read_lock,
+- * cpus_read_trylock() is used here to acquire the lock.
+- */
+-static bool cpuset_hotplug_cpus_read_trylock(void)
+-{
+-	int retries = 0;
+-
+-	while (!cpus_read_trylock()) {
+-		/*
+-		 * CPU hotplug still in progress. Retry 5 times
+-		 * with a 10ms wait before bailing out.
+-		 */
+-		if (++retries > 5)
+-			return false;
+-		msleep(10);
+-	}
+-	return true;
+-}
+-
+ /**
+  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
+  * @cs: cpuset in interest
+@@ -4493,13 +4479,11 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ 		compute_partition_effective_cpumask(cs, &new_cpus);
+ 
+ 	if (remote && cpumask_empty(&new_cpus) &&
+-	    partition_is_populated(cs, NULL) &&
+-	    cpuset_hotplug_cpus_read_trylock()) {
++	    partition_is_populated(cs, NULL)) {
+ 		remote_partition_disable(cs, tmp);
+ 		compute_effective_cpumask(&new_cpus, cs, parent);
+ 		remote = false;
+ 		cpuset_force_rebuild();
+-		cpus_read_unlock();
+ 	}
+ 
+ 	/*
+@@ -4519,18 +4503,8 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ 	else if (is_partition_valid(parent) && is_partition_invalid(cs))
+ 		partcmd = partcmd_update;
+ 
+-	/*
+-	 * cpus_read_lock needs to be held before calling
+-	 * update_parent_effective_cpumask(). To avoid circular lock
+-	 * dependency between cpuset_mutex and cpus_read_lock,
+-	 * cpus_read_trylock() is used here to acquire the lock.
+-	 */
+ 	if (partcmd >= 0) {
+-		if (!cpuset_hotplug_cpus_read_trylock())
+-			goto update_tasks;
+-
+ 		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
+-		cpus_read_unlock();
+ 		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
+ 			compute_partition_effective_cpumask(cs, &new_cpus);
+ 			cpuset_force_rebuild();
+@@ -4558,8 +4532,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ }
+ 
+ /**
+- * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+- * @work: unused
++ * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
+  *
+  * This function is called after either CPU or memory configuration has
+  * changed and updates cpuset accordingly.  The top_cpuset is always
+@@ -4573,8 +4546,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+  *
+  * Note that CPU offlining during suspend is ignored.  We don't modify
+  * cpusets across suspend/resume cycles at all.
++ *
++ * CPU / memory hotplug is handled synchronously.
+  */
+-static void cpuset_hotplug_workfn(struct work_struct *work)
++static void cpuset_handle_hotplug(void)
+ {
+ 	static cpumask_t new_cpus;
+ 	static nodemask_t new_mems;
+@@ -4585,6 +4560,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
+ 		ptmp = &tmp;
+ 
++	lockdep_assert_cpus_held();
+ 	mutex_lock(&cpuset_mutex);
+ 
+ 	/* fetch the available cpus/mems and find out which changed how */
+@@ -4666,7 +4642,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 	/* rebuild sched domains if cpus_allowed has changed */
+ 	if (cpus_updated || force_rebuild) {
+ 		force_rebuild = false;
+-		rebuild_sched_domains();
++		rebuild_sched_domains_cpuslocked();
+ 	}
+ 
+ 	free_cpumasks(NULL, ptmp);
+@@ -4679,12 +4655,7 @@ void cpuset_update_active_cpus(void)
+ 	 * inside cgroup synchronization.  Bounce actual hotplug processing
+ 	 * to a work item to avoid reverse locking order.
+ 	 */
+-	schedule_work(&cpuset_hotplug_work);
+-}
+-
+-void cpuset_wait_for_hotplug(void)
+-{
+-	flush_work(&cpuset_hotplug_work);
++	cpuset_handle_hotplug();
+ }
+ 
+ /*
+@@ -4695,7 +4666,7 @@ void cpuset_wait_for_hotplug(void)
+ static int cpuset_track_online_nodes(struct notifier_block *self,
+ 				unsigned long action, void *arg)
+ {
+-	schedule_work(&cpuset_hotplug_work);
++	cpuset_handle_hotplug();
+ 	return NOTIFY_OK;
+ }
+ 
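Because the legacy-hierarchy task migration still cannot run under cpuset_mutex, the rework above bounces it to a small per-invocation work item that pins the css until the work has run. The generic shape of that deferral, as a sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct hypo_ctx;                        /* state the deferred step needs */

struct hypo_async {
        struct work_struct work;
        struct hypo_ctx *ctx;
};

static void hypo_workfn(struct work_struct *work)
{
        struct hypo_async *a = container_of(work, struct hypo_async, work);

        /* ...heavyweight step that must not run under the caller's locks,
         * operating on a->ctx... */
        kfree(a);
}

static int hypo_defer(struct hypo_ctx *ctx)
{
        struct hypo_async *a = kzalloc(sizeof(*a), GFP_KERNEL);

        if (!a)
                return -ENOMEM;
        a->ctx = ctx;   /* caller must keep ctx alive until the work runs */
        INIT_WORK(&a->work, hypo_workfn);
        schedule_work(&a->work);
        return 0;
}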
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 63447eb85dab6..563877d6c28b6 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1208,52 +1208,6 @@ void __init cpuhp_threads_init(void)
+ 	kthread_unpark(this_cpu_read(cpuhp_state.thread));
+ }
+ 
+-/*
+- *
+- * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
+- * protected region.
+- *
+- * The operation is still serialized against concurrent CPU hotplug via
+- * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
+- * serialized against other hotplug related activity like adding or
+- * removing of state callbacks and state instances, which invoke either the
+- * startup or the teardown callback of the affected state.
+- *
+- * This is required for subsystems which are unfixable vs. CPU hotplug and
+- * evade lock inversion problems by scheduling work which has to be
+- * completed _before_ cpu_up()/_cpu_down() returns.
+- *
+- * Don't even think about adding anything to this for any new code or even
+- * drivers. It's only purpose is to keep existing lock order trainwrecks
+- * working.
+- *
+- * For cpu_down() there might be valid reasons to finish cleanups which are
+- * not required to be done under cpu_hotplug_lock, but that's a different
+- * story and would be not invoked via this.
+- */
+-static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
+-{
+-	/*
+-	 * cpusets delegate hotplug operations to a worker to "solve" the
+-	 * lock order problems. Wait for the worker, but only if tasks are
+-	 * _not_ frozen (suspend, hibernate) as that would wait forever.
+-	 *
+-	 * The wait is required because otherwise the hotplug operation
+-	 * returns with inconsistent state, which could even be observed in
+-	 * user space when a new CPU is brought up. The CPU plug uevent
+-	 * would be delivered and user space reacting on it would fail to
+-	 * move tasks to the newly plugged CPU up to the point where the
+-	 * work has finished because up to that point the newly plugged CPU
+-	 * is not assignable in cpusets/cgroups. On unplug that's not
+-	 * necessarily a visible issue, but it is still inconsistent state,
+-	 * which is the real problem which needs to be "fixed". This can't
+-	 * prevent the transient state between scheduling the work and
+-	 * returning from waiting for it.
+-	 */
+-	if (!tasks_frozen)
+-		cpuset_wait_for_hotplug();
+-}
+-
+ #ifdef CONFIG_HOTPLUG_CPU
+ #ifndef arch_clear_mm_cpumask_cpu
+ #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
+@@ -1494,7 +1448,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ 	 */
+ 	lockup_detector_cleanup();
+ 	arch_smt_update();
+-	cpu_up_down_serialize_trainwrecks(tasks_frozen);
+ 	return ret;
+ }
+ 
+@@ -1728,7 +1681,6 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ out:
+ 	cpus_write_unlock();
+ 	arch_smt_update();
+-	cpu_up_down_serialize_trainwrecks(tasks_frozen);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 74a4ef1da9ad7..fd75b4a484d76 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -18,7 +18,9 @@
+ #include <linux/mm.h>
+ #include "gcov.h"
+ 
+-#if (__GNUC__ >= 10)
++#if (__GNUC__ >= 14)
++#define GCOV_COUNTERS			9
++#elif (__GNUC__ >= 10)
+ #define GCOV_COUNTERS			8
+ #elif (__GNUC__ >= 7)
+ #define GCOV_COUNTERS			9
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index f9ac2e9e460fc..9f4affae4fad4 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -631,6 +631,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
+ 			return -EINVAL;
+ 		kcov->mode = mode;
+ 		t->kcov = kcov;
++		t->kcov_mode = KCOV_MODE_REMOTE;
+ 		kcov->t = t;
+ 		kcov->remote = true;
+ 		kcov->remote_size = remote_arg->area_size;
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 65adc815fc6e6..4f917bdad1e11 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1068,6 +1068,7 @@ static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
+ 
+ static int kprobe_ipmodify_enabled;
+ static int kprobe_ftrace_enabled;
++bool kprobe_ftrace_disabled;
+ 
+ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
+ 			       int *cnt)
+@@ -1136,6 +1137,11 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
+ 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
+ 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
+ }
++
++void kprobe_ftrace_kill(void)
++{
++	kprobe_ftrace_disabled = true;
++}
+ #else	/* !CONFIG_KPROBES_ON_FTRACE */
+ static inline int arm_kprobe_ftrace(struct kprobe *p)
+ {
+diff --git a/kernel/padata.c b/kernel/padata.c
+index e3f639ff16707..53f4bc9127127 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -106,7 +106,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ {
+ 	int i;
+ 
+-	spin_lock(&padata_works_lock);
++	spin_lock_bh(&padata_works_lock);
+ 	/* Start at 1 because the current task participates in the job. */
+ 	for (i = 1; i < nworks; ++i) {
+ 		struct padata_work *pw = padata_work_alloc();
+@@ -116,7 +116,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
+ 		padata_work_init(pw, padata_mt_helper, data, 0);
+ 		list_add(&pw->pw_list, head);
+ 	}
+-	spin_unlock(&padata_works_lock);
++	spin_unlock_bh(&padata_works_lock);
+ 
+ 	return i;
+ }
+@@ -134,12 +134,12 @@ static void __init padata_works_free(struct list_head *works)
+ 	if (list_empty(works))
+ 		return;
+ 
+-	spin_lock(&padata_works_lock);
++	spin_lock_bh(&padata_works_lock);
+ 	list_for_each_entry_safe(cur, next, works, pw_list) {
+ 		list_del(&cur->pw_list);
+ 		padata_work_free(cur);
+ 	}
+-	spin_unlock(&padata_works_lock);
++	spin_unlock_bh(&padata_works_lock);
+ }
+ 
+ static void padata_parallel_worker(struct work_struct *parallel_work)
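padata switches to the _bh lock variants because padata_works_lock can now be contended from softirq context: if process context held the plain spin_lock() and a softirq on the same CPU then tried to take it, the CPU would deadlock. The rule in miniature, on a hypothetical list:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hypo_lock);
static LIST_HEAD(hypo_list);

/* Process-context side: disable bottom halves so a softirq on this
 * CPU cannot interrupt the critical section and spin on hypo_lock. */
static void hypo_add(struct list_head *item)
{
        spin_lock_bh(&hypo_lock);
        list_add(item, &hypo_list);
        spin_unlock_bh(&hypo_lock);
}

/* Softirq side may use the plain variant: softirqs do not preempt
 * each other on one CPU. */
static void hypo_del(struct list_head *item)
{
        spin_lock(&hypo_lock);
        list_del(item);
        spin_unlock(&hypo_lock);
}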
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index cae81a87cc91e..66ac067d9ae64 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -194,8 +194,6 @@ void thaw_processes(void)
+ 	__usermodehelper_set_disable_depth(UMH_FREEZING);
+ 	thaw_workqueues();
+ 
+-	cpuset_wait_for_hotplug();
+-
+ 	read_lock(&tasklist_lock);
+ 	for_each_process_thread(g, p) {
+ 		/* No other threads should have PF_SUSPEND_TASK set */
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 45d6b4c3d199c..cf2e907534a8d 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -1997,7 +1997,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
+ 	preempt_disable();
+ 	pipe_count = READ_ONCE(p->rtort_pipe_count);
+ 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
+-		/* Should not happen, but... */
++		// Should not happen in a correct RCU implementation,
++		// happens quite often for torture_type=busted.
+ 		pipe_count = RCU_TORTURE_PIPE_LEN;
+ 	}
+ 	completed = cur_ops->get_gp_seq();
+@@ -2486,8 +2487,8 @@ static int rcu_torture_stall(void *args)
+ 			preempt_disable();
+ 		pr_alert("%s start on CPU %d.\n",
+ 			  __func__, raw_smp_processor_id());
+-		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+-				    stop_at))
++		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
++		       !kthread_should_stop())
+ 			if (stall_cpu_block) {
+ #ifdef CONFIG_PREEMPTION
+ 				preempt_schedule();
+@@ -3040,11 +3041,12 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
+ }
+ 
+ /* IPI handler to get callback posted on desired CPU, if online. */
+-static void rcu_torture_barrier1cb(void *rcu_void)
++static int rcu_torture_barrier1cb(void *rcu_void)
+ {
+ 	struct rcu_head *rhp = rcu_void;
+ 
+ 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
++	return 0;
+ }
+ 
+ /* kthread function to register callbacks used to test RCU barriers. */
+@@ -3070,11 +3072,9 @@ static int rcu_torture_barrier_cbs(void *arg)
+ 		 * The above smp_load_acquire() ensures barrier_phase load
+ 		 * is ordered before the following ->call().
+ 		 */
+-		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
+-					     &rcu, 1)) {
+-			// IPI failed, so use direct call from current CPU.
++		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
+ 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+-		}
++
+ 		if (atomic_dec_and_test(&barrier_cbs_count))
+ 			wake_up(&barrier_wq);
+ 	} while (!torture_must_stop());
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index e5b260aa0e02c..4d50d53ac719f 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -20,6 +20,16 @@
+ #include "tick-internal.h"
+ #include "timekeeping_internal.h"
+ 
++static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
++{
++	u64 delta = clocksource_delta(end, start, cs->mask);
++
++	if (likely(delta < cs->max_cycles))
++		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
++
++	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
++}
++
+ /**
+  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+  * @mult:	pointer to mult variable
+@@ -222,8 +232,8 @@ enum wd_read_status {
+ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
+ {
+ 	unsigned int nretries, max_retries;
+-	u64 wd_end, wd_end2, wd_delta;
+ 	int64_t wd_delay, wd_seq_delay;
++	u64 wd_end, wd_end2;
+ 
+ 	max_retries = clocksource_get_max_watchdog_retry();
+ 	for (nretries = 0; nretries <= max_retries; nretries++) {
+@@ -234,9 +244,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ 		wd_end2 = watchdog->read(watchdog);
+ 		local_irq_enable();
+ 
+-		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
+-		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
+-					      watchdog->shift);
++		wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
+ 		if (wd_delay <= WATCHDOG_MAX_SKEW) {
+ 			if (nretries > 1 || nretries >= max_retries) {
+ 				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
+@@ -254,8 +262,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
+ 		 * report system busy, reinit the watchdog and skip the current
+ 		 * watchdog test.
+ 		 */
+-		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
+-		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
++		wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
+ 		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ 			goto skip_test;
+ 	}
+@@ -366,8 +373,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 		delta = (csnow_end - csnow_mid) & cs->mask;
+ 		if (delta < 0)
+ 			cpumask_set_cpu(cpu, &cpus_ahead);
+-		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
+-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++		cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
+ 		if (cs_nsec > cs_nsec_max)
+ 			cs_nsec_max = cs_nsec;
+ 		if (cs_nsec < cs_nsec_min)
+@@ -398,8 +404,8 @@ static inline void clocksource_reset_watchdog(void)
+ 
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+-	u64 csnow, wdnow, cslast, wdlast, delta;
+ 	int64_t wd_nsec, cs_nsec, interval;
++	u64 csnow, wdnow, cslast, wdlast;
+ 	int next_cpu, reset_pending;
+ 	struct clocksource *cs;
+ 	enum wd_read_status read_ret;
+@@ -456,12 +462,8 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 			continue;
+ 		}
+ 
+-		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+-		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+-					     watchdog->shift);
+-
+-		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
++		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
++		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
+ 		wdlast = cs->wd_last; /* save these in case we print them */
+ 		cslast = cs->cs_last;
+ 		cs->cs_last = csnow;
+@@ -832,7 +834,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+  */
+ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ {
+-	u64 now, delta, nsec = 0;
++	u64 now, nsec = 0;
+ 
+ 	if (!suspend_clocksource)
+ 		return 0;
+@@ -847,12 +849,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+ 	else
+ 		now = suspend_clocksource->read(suspend_clocksource);
+ 
+-	if (now > suspend_start) {
+-		delta = clocksource_delta(now, suspend_start,
+-					  suspend_clocksource->mask);
+-		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+-				       suspend_clocksource->shift);
+-	}
++	if (now > suspend_start)
++		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);
+ 
+ 	/*
+ 	 * Disable the suspend timer to save power if current clocksource is
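cycles_to_nsec_safe() keeps the cheap mult/shift conversion while the delta is known not to overflow, and otherwise falls back to mul_u64_u32_shr(), which computes the full 96-bit product before shifting. The idea in isolation (a standalone sketch; the real helper reads mask/mult/shift from the clocksource):

#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/types.h>

/* (delta * mult) >> shift, tolerating deltas for which the 64-bit
 * intermediate product would overflow. max_cycles is the largest
 * delta known to be safe for the fast path. */
static u64 hypo_cyc2ns_safe(u64 delta, u32 mult, u32 shift, u64 max_cycles)
{
        if (likely(delta < max_cycles))
                return (delta * mult) >> shift;

        return mul_u64_u32_shr(delta, mult, shift);
}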
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index 47345bf1d4a9f..34804c7152ddf 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -1123,7 +1123,7 @@ config PREEMPTIRQ_DELAY_TEST
+ 
+ config SYNTH_EVENT_GEN_TEST
+ 	tristate "Test module for in-kernel synthetic event generation"
+-	depends on SYNTH_EVENTS
++	depends on SYNTH_EVENTS && m
+ 	help
+           This option creates a test module to check the base
+           functionality of in-kernel synthetic event definition and
+@@ -1136,7 +1136,7 @@ config SYNTH_EVENT_GEN_TEST
+ 
+ config KPROBE_EVENT_GEN_TEST
+ 	tristate "Test module for in-kernel kprobe event generation"
+-	depends on KPROBE_EVENTS
++	depends on KPROBE_EVENTS && m
+ 	help
+           This option creates a test module to check the base
+           functionality of in-kernel kprobe event definition.
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 1e12df9bb531b..2e11236722366 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -7902,6 +7902,7 @@ void ftrace_kill(void)
+ 	ftrace_disabled = 1;
+ 	ftrace_enabled = 0;
+ 	ftrace_trace_function = ftrace_stub;
++	kprobe_ftrace_kill();
+ }
+ 
+ /**
+diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
+index 8c4ffd0761624..cb0871fbdb07f 100644
+--- a/kernel/trace/preemptirq_delay_test.c
++++ b/kernel/trace/preemptirq_delay_test.c
+@@ -215,4 +215,5 @@ static void __exit preemptirq_delay_exit(void)
+ 
+ module_init(preemptirq_delay_init)
+ module_exit(preemptirq_delay_exit)
++MODULE_DESCRIPTION("Preempt / IRQ disable delay thread to test latency tracers");
+ MODULE_LICENSE("GPL v2");
+diff --git a/lib/ubsan.h b/lib/ubsan.h
+index 0abbbac8700d1..0982578fbd98f 100644
+--- a/lib/ubsan.h
++++ b/lib/ubsan.h
+@@ -124,19 +124,32 @@ typedef s64 s_max;
+ typedef u64 u_max;
+ #endif
+ 
+-void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+-void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+-void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+-void __ubsan_handle_negate_overflow(void *_data, void *old_val);
+-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+-void __ubsan_handle_out_of_bounds(void *_data, void *index);
+-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+-void __ubsan_handle_builtin_unreachable(void *_data);
+-void __ubsan_handle_load_invalid_value(void *_data, void *val);
+-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+-					 unsigned long align,
+-					 unsigned long offset);
++/*
++ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
++ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
++ * Fix this for earlier Clang versions by forcing the calling convention
++ * to use non-register arguments.
++ */
++#if defined(CONFIG_X86_32) && \
++    defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
++# define ubsan_linkage asmlinkage
++#else
++# define ubsan_linkage
++#endif
++
++void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
++void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
++void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
++void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
++void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
++void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
++void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
++void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
++void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
++void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
++void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
++void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
++						       unsigned long align,
++						       unsigned long offset);
+ 
+ #endif
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index dd1fc105f70bd..769e8a125f0c9 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3058,30 +3058,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ 	if (new_order >= folio_order(folio))
+ 		return -EINVAL;
+ 
+-	/* Cannot split anonymous THP to order-1 */
+-	if (new_order == 1 && folio_test_anon(folio)) {
+-		VM_WARN_ONCE(1, "Cannot split to order-1 folio");
+-		return -EINVAL;
+-	}
+-
+-	if (new_order) {
+-		/* Only swapping a whole PMD-mapped folio is supported */
+-		if (folio_test_swapcache(folio))
++	if (folio_test_anon(folio)) {
++		/* order-1 is not supported for anonymous THP. */
++		if (new_order == 1) {
++			VM_WARN_ONCE(1, "Cannot split to order-1 folio");
+ 			return -EINVAL;
++		}
++	} else if (new_order) {
+ 		/* Split shmem folio to non-zero order not supported */
+ 		if (shmem_mapping(folio->mapping)) {
+ 			VM_WARN_ONCE(1,
+ 				"Cannot split shmem folio to non-0 order");
+ 			return -EINVAL;
+ 		}
+-		/* No split if the file system does not support large folio */
+-		if (!mapping_large_folio_support(folio->mapping)) {
++		/*
++		 * No split if the file system does not support large folio.
++		 * Note that we might still have THPs in such mappings due to
++		 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
++		 * does not actually support large folios properly.
++		 */
++		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
++		    !mapping_large_folio_support(folio->mapping)) {
+ 			VM_WARN_ONCE(1,
+ 				"Cannot split file folio to non-0 order");
+ 			return -EINVAL;
+ 		}
+ 	}
+ 
++	/* Only swapping a whole PMD-mapped folio is supported */
++	if (folio_test_swapcache(folio) && new_order)
++		return -EINVAL;
+ 
+ 	is_hzp = is_huge_zero_page(&folio->page);
+ 	if (is_hzp) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index fabce2b50c695..612558f306f4a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -7531,8 +7531,7 @@ void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
+  * @new: Replacement folio.
+  *
+  * Charge @new as a replacement folio for @old. @old will
+- * be uncharged upon free. This is only used by the page cache
+- * (in replace_page_cache_folio()).
++ * be uncharged upon free.
+  *
+  * Both folios must be locked, @new->mapping must be set up.
+  */
+diff --git a/mm/page_table_check.c b/mm/page_table_check.c
+index af69c3c8f7c2d..6363f93a47c69 100644
+--- a/mm/page_table_check.c
++++ b/mm/page_table_check.c
+@@ -71,6 +71,9 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
+ 
++	if (!page_ext)
++		return;
++
+ 	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+@@ -108,6 +111,9 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
+ 	page = pfn_to_page(pfn);
+ 	page_ext = page_ext_get(page);
+ 
++	if (!page_ext)
++		return;
++
+ 	BUG_ON(PageSlab(page));
+ 	anon = PageAnon(page);
+ 
+@@ -138,7 +144,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
+ 	BUG_ON(PageSlab(page));
+ 
+ 	page_ext = page_ext_get(page);
+-	BUG_ON(!page_ext);
++
++	if (!page_ext)
++		return;
++
+ 	for (i = 0; i < (1ul << order); i++) {
+ 		struct page_table_check *ptc = get_page_table_check(page_ext);
+ 
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1f84a41aeb850..b66e99e2c70f3 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1786,7 +1786,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
+ 	xa_lock_irq(&swap_mapping->i_pages);
+ 	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
+ 	if (!error) {
+-		mem_cgroup_migrate(old, new);
++		mem_cgroup_replace_folio(old, new);
+ 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
+ 		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
+ 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 71c143d4b6d05..ac74f6ead62d5 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -1266,6 +1266,8 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
+ 	/* for all origins... */
+ 	for (i = 0; i < hash->size; i++) {
+ 		head = &hash->table[i];
++		if (hlist_empty(head))
++			continue;
+ 		list_lock = &hash->list_locks[i];
+ 
+ 		spin_lock_bh(list_lock);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index b0f221d658be8..430ed18f8584c 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -74,7 +74,7 @@ struct net_dm_hw_entries {
+ };
+ 
+ struct per_cpu_dm_data {
+-	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
++	raw_spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
+ 					 * 'send_timer'
+ 					 */
+ 	union {
+@@ -168,9 +168,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ err:
+ 	mod_timer(&data->send_timer, jiffies + HZ / 10);
+ out:
+-	spin_lock_irqsave(&data->lock, flags);
++	raw_spin_lock_irqsave(&data->lock, flags);
+ 	swap(data->skb, skb);
+-	spin_unlock_irqrestore(&data->lock, flags);
++	raw_spin_unlock_irqrestore(&data->lock, flags);
+ 
+ 	if (skb) {
+ 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+@@ -225,7 +225,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ 
+ 	local_irq_save(flags);
+ 	data = this_cpu_ptr(&dm_cpu_data);
+-	spin_lock(&data->lock);
++	raw_spin_lock(&data->lock);
+ 	dskb = data->skb;
+ 
+ 	if (!dskb)
+@@ -259,7 +259,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ 	}
+ 
+ out:
+-	spin_unlock_irqrestore(&data->lock, flags);
++	raw_spin_unlock_irqrestore(&data->lock, flags);
+ }
+ 
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
+@@ -314,9 +314,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
+ 		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
+ 	}
+ 
+-	spin_lock_irqsave(&hw_data->lock, flags);
++	raw_spin_lock_irqsave(&hw_data->lock, flags);
+ 	swap(hw_data->hw_entries, hw_entries);
+-	spin_unlock_irqrestore(&hw_data->lock, flags);
++	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+ 
+ 	return hw_entries;
+ }
+@@ -448,7 +448,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ 		return;
+ 
+ 	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
+-	spin_lock_irqsave(&hw_data->lock, flags);
++	raw_spin_lock_irqsave(&hw_data->lock, flags);
+ 	hw_entries = hw_data->hw_entries;
+ 
+ 	if (!hw_entries)
+@@ -477,7 +477,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ 	}
+ 
+ out:
+-	spin_unlock_irqrestore(&hw_data->lock, flags);
++	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
+ }
+ 
+ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
+@@ -1673,7 +1673,7 @@ static struct notifier_block dropmon_net_notifier = {
+ 
+ static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
+ {
+-	spin_lock_init(&data->lock);
++	raw_spin_lock_init(&data->lock);
+ 	skb_queue_head_init(&data->drop_queue);
+ 	u64_stats_init(&data->stats.syncp);
+ }
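Converting drop_monitor's lock to raw_spinlock_t matters on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock and therefore must not be taken from the hard-IRQ and tracepoint paths this code runs in. The change is mechanical; a sketch:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hypo_lock);

/* Safe even in hard-IRQ context on PREEMPT_RT, where spinlock_t
 * would sleep. Keep such critical sections as short as possible. */
static void hypo_count(int *counter)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&hypo_lock, flags);
        (*counter)++;
        raw_spin_unlock_irqrestore(&hypo_lock, flags);
}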
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a5856a8b4498b..ce255e0a2fbd9 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1662,6 +1662,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+ static inline int __bpf_try_make_writable(struct sk_buff *skb,
+ 					  unsigned int write_len)
+ {
++#ifdef CONFIG_DEBUG_NET
++	/* Avoid a splat in pskb_may_pull_reason() */
++	if (write_len > INT_MAX)
++		return -EINVAL;
++#endif
+ 	return skb_ensure_writable(skb, write_len);
+ }
+ 
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 9d690d32da33a..b1dc84c4fda11 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -693,11 +693,16 @@ EXPORT_SYMBOL_GPL(__put_net);
+  * get_net_ns - increment the refcount of the network namespace
+  * @ns: common namespace (net)
+  *
+- * Returns the net's common namespace.
++ * Returns the net's common namespace or ERR_PTR() if ref is zero.
+  */
+ struct ns_common *get_net_ns(struct ns_common *ns)
+ {
+-	return &get_net(container_of(ns, struct net, ns))->ns;
++	struct net *net;
++
++	net = maybe_get_net(container_of(ns, struct net, ns));
++	if (net)
++		return &net->ns;
++	return ERR_PTR(-EINVAL);
+ }
+ EXPORT_SYMBOL_GPL(get_net_ns);
+ 
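get_net_ns() now goes through maybe_get_net(), which only takes a reference while the count is still non-zero, closing the window where a caller could resurrect a namespace already being torn down. The underlying primitive is refcount_inc_not_zero(); a generic sketch:

#include <linux/refcount.h>

struct hypo_obj {
        refcount_t ref;
};

/* Lookup paths racing with teardown must not blindly refcount_inc():
 * once the count hits zero the object is on its way to being freed. */
static struct hypo_obj *hypo_tryget(struct hypo_obj *obj)
{
        if (obj && refcount_inc_not_zero(&obj->ref))
                return obj;
        return NULL;
}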
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 7004b3399c2b0..8c2d5a0bc208e 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -59,22 +59,22 @@ XDP_METADATA_KFUNC_xxx
+ 	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
+ 			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
+ 	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
+-			      xsk_features, NETDEV_A_DEV_PAD)) {
+-		genlmsg_cancel(rsp, hdr);
+-		return -EINVAL;
+-	}
++			      xsk_features, NETDEV_A_DEV_PAD))
++		goto err_cancel_msg;
+ 
+ 	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
+ 		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
+-				netdev->xdp_zc_max_segs)) {
+-			genlmsg_cancel(rsp, hdr);
+-			return -EINVAL;
+-		}
++				netdev->xdp_zc_max_segs))
++			goto err_cancel_msg;
+ 	}
+ 
+ 	genlmsg_end(rsp, hdr);
+ 
+ 	return 0;
++
++err_cancel_msg:
++	genlmsg_cancel(rsp, hdr);
++	return -EMSGSIZE;
+ }
+ 
+ static void
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 543007f159f99..55bcacf67df3b 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -316,7 +316,7 @@ static int netpoll_owner_active(struct net_device *dev)
+ 	struct napi_struct *napi;
+ 
+ 	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
+-		if (napi->poll_owner == smp_processor_id())
++		if (READ_ONCE(napi->poll_owner) == smp_processor_id())
+ 			return 1;
+ 	}
+ 	return 0;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0963689a59506..09eccc9c5020a 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3743,6 +3743,9 @@ void sk_common_release(struct sock *sk)
+ 
+ 	sk->sk_prot->unhash(sk);
+ 
++	if (sk->sk_socket)
++		sk->sk_socket->sk = NULL;
++
+ 	/*
+ 	 * In this point socket cannot receive new packets, but it is possible
+ 	 * that some packets are in flight because some CPU runs receiver and
+diff --git a/net/devlink/core.c b/net/devlink/core.c
+index 7f0b093208d75..f49cd83f1955f 100644
+--- a/net/devlink/core.c
++++ b/net/devlink/core.c
+@@ -314,7 +314,7 @@ static void devlink_release(struct work_struct *work)
+ 	mutex_destroy(&devlink->lock);
+ 	lockdep_unregister_key(&devlink->lock_key);
+ 	put_device(devlink->dev);
+-	kfree(devlink);
++	kvfree(devlink);
+ }
+ 
+ void devlink_put(struct devlink *devlink)
+@@ -420,7 +420,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ 	if (!devlink_reload_actions_valid(ops))
+ 		return NULL;
+ 
+-	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
++	devlink = kvzalloc(struct_size(devlink, priv, priv_size), GFP_KERNEL);
+ 	if (!devlink)
+ 		return NULL;
+ 
+@@ -455,7 +455,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
+ 	return devlink;
+ 
+ err_xa_alloc:
+-	kfree(devlink);
++	kvfree(devlink);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(devlink_alloc_ns);
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 8b17d83e5fde4..1eb98440c01ea 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -2012,12 +2012,16 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
+ 		 * from there we can determine the new total option length */
+ 		iter = 0;
+ 		optlen_new = 0;
+-		while (iter < opt->opt.optlen)
+-			if (opt->opt.__data[iter] != IPOPT_NOP) {
++		while (iter < opt->opt.optlen) {
++			if (opt->opt.__data[iter] == IPOPT_END) {
++				break;
++			} else if (opt->opt.__data[iter] == IPOPT_NOP) {
++				iter++;
++			} else {
+ 				iter += opt->opt.__data[iter + 1];
+ 				optlen_new = iter;
+-			} else
+-				iter++;
++			}
++		}
+ 		hdr_delta = opt->opt.optlen;
+ 		opt->opt.optlen = (optlen_new + 3) & ~3;
+ 		hdr_delta -= opt->opt.optlen;
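The corrected cipso_v4_delopt() walk treats the three IPv4 option cases distinctly: IPOPT_END terminates the scan, IPOPT_NOP is a one-byte pad, and anything else carries its length in the following byte; misreading a trailing IPOPT_END as a normal option would index past the buffer. A sketch of the same walk over a raw, already-validated options buffer (hypo_opt_used_len() is hypothetical):

#include <linux/ip.h>
#include <linux/types.h>

/* Length actually occupied by options, stopping at IPOPT_END and
 * stepping over IPOPT_NOP pad bytes. Assumes option lengths were
 * validated when the header was accepted. */
static int hypo_opt_used_len(const u8 *opt, int optlen)
{
        int iter = 0, used = 0;

        while (iter < optlen) {
                if (opt[iter] == IPOPT_END)
                        break;
                if (opt[iter] == IPOPT_NOP) {
                        iter++;
                        continue;
                }
                iter += opt[iter + 1];
                used = iter;
        }
        return used;
}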
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index 37c42b63ff993..09c0fa6756b7d 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -1968,8 +1968,10 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
+ 		first = true;
+ 	}
+ 
+-	if (cmd.ao_required && tcp_ao_required_verify(sk))
+-		return -EKEYREJECTED;
++	if (cmd.ao_required && tcp_ao_required_verify(sk)) {
++		err = -EKEYREJECTED;
++		goto out;
++	}
+ 
+ 	/* For sockets in TCP_CLOSED it's possible set keys that aren't
+ 	 * matching the future peer (address/port/VRF/etc),
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a140d9f7a0a36..1054a440332d3 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6289,6 +6289,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ 		skb_rbtree_walk_from(data)
+ 			 tcp_mark_skb_lost(sk, data);
+ 		tcp_xmit_retransmit_queue(sk);
++		tp->retrans_stamp = 0;
+ 		NET_INC_STATS(sock_net(sk),
+ 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ 		return true;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 8f8c8fcfd1c21..d7a5ca012a8fc 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -638,6 +638,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
+ 	rcu_read_lock();
+ 	last_probe = READ_ONCE(fib6_nh->last_probe);
+ 	idev = __in6_dev_get(dev);
++	if (!idev)
++		goto out;
+ 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
+ 	if (neigh) {
+ 		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
+@@ -3603,7 +3605,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ 	if (!dev)
+ 		goto out;
+ 
+-	if (idev->cnf.disable_ipv6) {
++	if (!idev || idev->cnf.disable_ipv6) {
+ 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
+ 		err = -EACCES;
+ 		goto out;
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 24e2b4b494cb0..c434940131b1d 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+-			       dev_net(skb->dev), NULL, skb, NULL,
+-			       skb_dst(skb)->dev, input_action_end_dx6_finish);
++			       dev_net(skb->dev), NULL, skb, skb->dev,
++			       NULL, input_action_end_dx6_finish);
+ 
+ 	return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+-			       dev_net(skb->dev), NULL, skb, NULL,
+-			       skb_dst(skb)->dev, input_action_end_dx4_finish);
++			       dev_net(skb->dev), NULL, skb, skb->dev,
++			       NULL, input_action_end_dx4_finish);
+ 
+ 	return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index ce48173c60e56..4332d4b82b1dc 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
+ {
+ 	struct dst_entry *dst;
+ 	struct net_device *dev;
++	struct inet6_dev *idev;
+ 
+ 	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
+ 	if (IS_ERR(dst))
+ 		return -EHOSTUNREACH;
+ 
+-	dev = ip6_dst_idev(dst)->dev;
++	idev = ip6_dst_idev(dst);
++	if (!idev) {
++		dst_release(dst);
++		return -EHOSTUNREACH;
++	}
++	dev = idev->dev;
+ 	ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
+ 	dst_release(dst);
+ 	return 0;
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index dce37ba8ebe37..254d745832cbf 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -311,6 +311,18 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
+ 	might_sleep();
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+ 
++	/*
++	 * We should perhaps push emulate chanctx down and only
++	 * make it call ->config() when the chanctx is actually
++	 * assigned here (and unassigned below), but that's yet
++	 * another change to all drivers to add assign/unassign
++	 * emulation callbacks. Maybe later.
++	 */
++	if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
++	    local->emulate_chanctx &&
++	    !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
++		return 0;
++
+ 	if (!check_sdata_in_driver(sdata))
+ 		return -EIO;
+ 
+@@ -338,6 +350,11 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
+ 	might_sleep();
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+ 
++	if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
++	    local->emulate_chanctx &&
++	    !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
++		return;
++
+ 	if (!check_sdata_in_driver(sdata))
+ 		return;
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index ef6b0fc82d022..7c8a421f0901d 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -686,6 +686,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+ 			ieee80211_del_virtual_monitor(local);
+ 
+ 		ieee80211_recalc_idle(local);
++		ieee80211_recalc_offload(local);
+ 
+ 		if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
+ 			break;
+@@ -1121,9 +1122,6 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+ 	struct ieee80211_sub_if_data *sdata;
+ 	int ret;
+ 
+-	if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+-		return 0;
+-
+ 	ASSERT_RTNL();
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+ 
+@@ -1145,11 +1143,13 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+ 
+ 	ieee80211_set_default_queues(sdata);
+ 
+-	ret = drv_add_interface(local, sdata);
+-	if (WARN_ON(ret)) {
+-		/* ok .. stupid driver, it asked for this! */
+-		kfree(sdata);
+-		return ret;
++	if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
++		ret = drv_add_interface(local, sdata);
++		if (WARN_ON(ret)) {
++			/* ok .. stupid driver, it asked for this! */
++			kfree(sdata);
++			return ret;
++		}
+ 	}
+ 
+ 	set_bit(SDATA_STATE_RUNNING, &sdata->state);
+@@ -1187,9 +1187,6 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+ {
+ 	struct ieee80211_sub_if_data *sdata;
+ 
+-	if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+-		return;
+-
+ 	ASSERT_RTNL();
+ 	lockdep_assert_wiphy(local->hw.wiphy);
+ 
+@@ -1209,7 +1206,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+ 
+ 	ieee80211_link_release_channel(&sdata->deflink);
+ 
+-	drv_remove_interface(local, sdata);
++	if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
++		drv_remove_interface(local, sdata);
+ 
+ 	kfree(sdata);
+ }
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index a237cbcf7b491..0da5f6082d159 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1841,7 +1841,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ 
+ 	/* add interfaces */
+ 	sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
+-	if (sdata) {
++	if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ 		/* in HW restart it exists already */
+ 		WARN_ON(local->resuming);
+ 		res = drv_add_interface(local, sdata);
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 3126911f50425..b00fc285b3349 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -815,12 +815,21 @@ int __init netfilter_init(void)
+ 	if (ret < 0)
+ 		goto err;
+ 
++#ifdef CONFIG_LWTUNNEL
++	ret = netfilter_lwtunnel_init();
++	if (ret < 0)
++		goto err_lwtunnel_pernet;
++#endif
+ 	ret = netfilter_log_init();
+ 	if (ret < 0)
+-		goto err_pernet;
++		goto err_log_pernet;
+ 
+ 	return 0;
+-err_pernet:
++err_log_pernet:
++#ifdef CONFIG_LWTUNNEL
++	netfilter_lwtunnel_fini();
++err_lwtunnel_pernet:
++#endif
+ 	unregister_pernet_subsys(&netfilter_net_ops);
+ err:
+ 	return ret;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index c7ae4d9bf3d24..61431690cbd5f 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
+ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+ 
+ /* When the nfnl mutex or ip_set_ref_lock is held: */
+-#define ip_set_dereference(p)		\
+-	rcu_dereference_protected(p,	\
++#define ip_set_dereference(inst)	\
++	rcu_dereference_protected((inst)->ip_set_list,	\
+ 		lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+-		lockdep_is_held(&ip_set_ref_lock))
++		lockdep_is_held(&ip_set_ref_lock) || \
++		(inst)->is_deleted)
+ #define ip_set(inst, id)		\
+-	ip_set_dereference((inst)->ip_set_list)[id]
++	ip_set_dereference(inst)[id]
+ #define ip_set_ref_netlink(inst,id)	\
+ 	rcu_dereference_raw((inst)->ip_set_list)[id]
+ #define ip_set_dereference_nfnl(p)	\
+@@ -1133,7 +1134,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (!list)
+ 			goto cleanup;
+ 		/* nfnl mutex is held, both lists are valid */
+-		tmp = ip_set_dereference(inst->ip_set_list);
++		tmp = ip_set_dereference(inst);
+ 		memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+ 		rcu_assign_pointer(inst->ip_set_list, list);
+ 		/* Make sure all current packets have passed through */
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 0ee98ce5b8165..559665467b04d 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -22,9 +22,6 @@
+ #include <net/netfilter/nf_conntrack_acct.h>
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <net/netfilter/nf_conntrack_timestamp.h>
+-#ifdef CONFIG_LWTUNNEL
+-#include <net/netfilter/nf_hooks_lwtunnel.h>
+-#endif
+ #include <linux/rculist_nulls.h>
+ 
+ static bool enable_hooks __read_mostly;
+@@ -612,9 +609,6 @@ enum nf_ct_sysctl_index {
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
+ 	NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
+ #endif
+-#ifdef CONFIG_LWTUNNEL
+-	NF_SYSCTL_CT_LWTUNNEL,
+-#endif
+ 
+ 	__NF_SYSCTL_CT_LAST_SYSCTL,
+ };
+@@ -947,15 +941,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
+ 		.mode           = 0644,
+ 		.proc_handler   = proc_dointvec_jiffies,
+ 	},
+-#endif
+-#ifdef CONFIG_LWTUNNEL
+-	[NF_SYSCTL_CT_LWTUNNEL] = {
+-		.procname	= "nf_hooks_lwtunnel",
+-		.data		= NULL,
+-		.maxlen		= sizeof(int),
+-		.mode		= 0644,
+-		.proc_handler	= nf_hooks_lwtunnel_sysctl_handler,
+-	},
+ #endif
+ 	{}
+ };
+diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
+index 00e89ffd78f69..7cdb59bb4459f 100644
+--- a/net/netfilter/nf_hooks_lwtunnel.c
++++ b/net/netfilter/nf_hooks_lwtunnel.c
+@@ -3,6 +3,9 @@
+ #include <linux/sysctl.h>
+ #include <net/lwtunnel.h>
+ #include <net/netfilter/nf_hooks_lwtunnel.h>
++#include <linux/netfilter.h>
++
++#include "nf_internals.h"
+ 
+ static inline int nf_hooks_lwtunnel_get(void)
+ {
+@@ -50,4 +53,68 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
++
++static struct ctl_table nf_lwtunnel_sysctl_table[] = {
++	{
++		.procname	= "nf_hooks_lwtunnel",
++		.data		= NULL,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= nf_hooks_lwtunnel_sysctl_handler,
++	},
++};
++
++static int __net_init nf_lwtunnel_net_init(struct net *net)
++{
++	struct ctl_table_header *hdr;
++	struct ctl_table *table;
++
++	table = nf_lwtunnel_sysctl_table;
++	if (!net_eq(net, &init_net)) {
++		table = kmemdup(nf_lwtunnel_sysctl_table,
++				sizeof(nf_lwtunnel_sysctl_table),
++				GFP_KERNEL);
++		if (!table)
++			goto err_alloc;
++	}
++
++	hdr = register_net_sysctl_sz(net, "net/netfilter", table,
++				     ARRAY_SIZE(nf_lwtunnel_sysctl_table));
++	if (!hdr)
++		goto err_reg;
++
++	net->nf.nf_lwtnl_dir_header = hdr;
++
++	return 0;
++err_reg:
++	if (!net_eq(net, &init_net))
++		kfree(table);
++err_alloc:
++	return -ENOMEM;
++}
++
++static void __net_exit nf_lwtunnel_net_exit(struct net *net)
++{
++	const struct ctl_table *table;
++
++	table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
++	unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
++	if (!net_eq(net, &init_net))
++		kfree(table);
++}
++
++static struct pernet_operations nf_lwtunnel_net_ops = {
++	.init = nf_lwtunnel_net_init,
++	.exit = nf_lwtunnel_net_exit,
++};
++
++int __init netfilter_lwtunnel_init(void)
++{
++	return register_pernet_subsys(&nf_lwtunnel_net_ops);
++}
++
++void netfilter_lwtunnel_fini(void)
++{
++	unregister_pernet_subsys(&nf_lwtunnel_net_ops);
++}
+ #endif /* CONFIG_SYSCTL */
+diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
+index 832ae64179f0f..25403023060b6 100644
+--- a/net/netfilter/nf_internals.h
++++ b/net/netfilter/nf_internals.h
+@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
+ /* nf_log.c */
+ int __init netfilter_log_init(void);
+ 
++#ifdef CONFIG_LWTUNNEL
++/* nf_hooks_lwtunnel.c */
++int __init netfilter_lwtunnel_init(void);
++void netfilter_lwtunnel_fini(void);
++#endif
++
+ /* core.c */
+ void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
+ 				const struct nf_hook_ops *reg);
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 4e7c968cde2dc..5e3ca068f04e0 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+ 		   is accepted() it isn't 'dead' so doesn't get removed. */
+ 		if (sock_flag(sk, SOCK_DESTROY) ||
+ 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+-			sock_hold(sk);
++			if (sk->sk_state == TCP_LISTEN)
++				sock_hold(sk);
+ 			bh_unlock_sock(sk);
+ 			nr_destroy_socket(sk);
+ 			goto out;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 150451ddd7553..ea3ebc160e25c 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3799,28 +3799,30 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 	case PACKET_TX_RING:
+ 	{
+ 		union tpacket_req_u req_u;
+-		int len;
+ 
++		ret = -EINVAL;
+ 		lock_sock(sk);
+ 		switch (po->tp_version) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+-			len = sizeof(req_u.req);
++			if (optlen < sizeof(req_u.req))
++				break;
++			ret = copy_from_sockptr(&req_u.req, optval,
++						sizeof(req_u.req)) ?
++						-EINVAL : 0;
+ 			break;
+ 		case TPACKET_V3:
+ 		default:
+-			len = sizeof(req_u.req3);
++			if (optlen < sizeof(req_u.req3))
++				break;
++			ret = copy_from_sockptr(&req_u.req3, optval,
++						sizeof(req_u.req3)) ?
++						-EINVAL : 0;
+ 			break;
+ 		}
+-		if (optlen < len) {
+-			ret = -EINVAL;
+-		} else {
+-			if (copy_from_sockptr(&req_u.req, optval, len))
+-				ret = -EFAULT;
+-			else
+-				ret = packet_set_ring(sk, &req_u, 0,
+-						    optname == PACKET_TX_RING);
+-		}
++		if (!ret)
++			ret = packet_set_ring(sk, &req_u, 0,
++					      optname == PACKET_TX_RING);
+ 		release_sock(sk);
+ 		return ret;
+ 	}
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 9ee622fb1160f..2520708b06a12 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -830,7 +830,6 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ 	u32 max;
+ 
+ 	if (*index) {
+-again:
+ 		rcu_read_lock();
+ 		p = idr_find(&idrinfo->action_idr, *index);
+ 
+@@ -839,7 +838,7 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ 			 * index but did not assign the pointer yet.
+ 			 */
+ 			rcu_read_unlock();
+-			goto again;
++			return -EAGAIN;
+ 		}
+ 
+ 		if (!p) {
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index baac083fd8f10..2a96d9c1db65b 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -41,21 +41,26 @@ static struct workqueue_struct *act_ct_wq;
+ static struct rhashtable zones_ht;
+ static DEFINE_MUTEX(zones_mutex);
+ 
++struct zones_ht_key {
++	struct net *net;
++	u16 zone;
++};
++
+ struct tcf_ct_flow_table {
+ 	struct rhash_head node; /* In zones tables */
+ 
+ 	struct rcu_work rwork;
+ 	struct nf_flowtable nf_ft;
+ 	refcount_t ref;
+-	u16 zone;
++	struct zones_ht_key key;
+ 
+ 	bool dying;
+ };
+ 
+ static const struct rhashtable_params zones_params = {
+ 	.head_offset = offsetof(struct tcf_ct_flow_table, node),
+-	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
+-	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
++	.key_offset = offsetof(struct tcf_ct_flow_table, key),
++	.key_len = sizeof_field(struct tcf_ct_flow_table, key),
+ 	.automatic_shrinking = true,
+ };
+ 
+@@ -316,11 +321,12 @@ static struct nf_flowtable_type flowtable_ct = {
+ 
+ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ {
++	struct zones_ht_key key = { .net = net, .zone = params->zone };
+ 	struct tcf_ct_flow_table *ct_ft;
+ 	int err = -ENOMEM;
+ 
+ 	mutex_lock(&zones_mutex);
+-	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
++	ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
+ 	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
+ 		goto out_unlock;
+ 
+@@ -329,7 +335,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
+ 		goto err_alloc;
+ 	refcount_set(&ct_ft->ref, 1);
+ 
+-	ct_ft->zone = params->zone;
++	ct_ft->key = key;
+ 	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
+ 	if (err)
+ 		goto err_insert;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 60239378d43fb..6292d6d73b720 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1389,6 +1389,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+ 		ops->destroy(sch);
+ 	qdisc_put_stab(rtnl_dereference(sch->stab));
+ err_out3:
++	lockdep_unregister_key(&sch->root_lock_key);
+ 	netdev_put(dev, &sch->dev_tracker);
+ 	qdisc_free(sch);
+ err_out2:
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 10b1491d55809..fb32984d7a168 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -946,7 +946,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 	__skb_queue_head_init(&sch->gso_skb);
+ 	__skb_queue_head_init(&sch->skb_bad_txq);
+ 	gnet_stats_basic_sync_init(&sch->bstats);
++	lockdep_register_key(&sch->root_lock_key);
+ 	spin_lock_init(&sch->q.lock);
++	lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
+ 
+ 	if (ops->static_flags & TCQ_F_CPUSTATS) {
+ 		sch->cpu_bstats =
+@@ -981,6 +983,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 
+ 	return sch;
+ errout1:
++	lockdep_unregister_key(&sch->root_lock_key);
+ 	kfree(sch);
+ errout:
+ 	return ERR_PTR(err);
+@@ -1069,6 +1072,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
+ 	if (ops->destroy)
+ 		ops->destroy(qdisc);
+ 
++	lockdep_unregister_key(&qdisc->root_lock_key);
+ 	module_put(ops->owner);
+ 	netdev_put(dev, &qdisc->dev_tracker);
+ 
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 93e6fb56f3b58..ff3de37874e4b 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1039,13 +1039,6 @@ static void htb_work_func(struct work_struct *work)
+ 	rcu_read_unlock();
+ }
+ 
+-static void htb_set_lockdep_class_child(struct Qdisc *q)
+-{
+-	static struct lock_class_key child_key;
+-
+-	lockdep_set_class(qdisc_lock(q), &child_key);
+-}
+-
+ static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
+ {
+ 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
+@@ -1132,7 +1125,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+ 			return -ENOMEM;
+ 		}
+ 
+-		htb_set_lockdep_class_child(qdisc);
+ 		q->direct_qdiscs[ntx] = qdisc;
+ 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+@@ -1468,7 +1460,6 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	}
+ 
+ 	if (q->offload) {
+-		htb_set_lockdep_class_child(new);
+ 		/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+ 		qdisc_refcount_inc(new);
+ 		old_q = htb_graft_helper(dev_queue, new);
+@@ -1733,11 +1724,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ 		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ 					  cl->parent->common.classid,
+ 					  NULL);
+-		if (q->offload) {
+-			if (new_q)
+-				htb_set_lockdep_class_child(new_q);
++		if (q->offload)
+ 			htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+-		}
+ 	}
+ 
+ 	sch_tree_lock(sch);
+@@ -1947,13 +1935,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ 					  classid, NULL);
+ 		if (q->offload) {
+-			if (new_q) {
+-				htb_set_lockdep_class_child(new_q);
+-				/* One ref for cl->leaf.q, the other for
+-				 * dev_queue->qdisc.
+-				 */
++			/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
++			if (new_q)
+ 				qdisc_refcount_inc(new_q);
+-			}
+ 			old_q = htb_graft_helper(dev_queue, new_q);
+ 			/* No qdisc_put needed. */
+ 			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index c1e890a824347..500320e5ca479 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2105,6 +2105,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+ 	} else {
+ 		n = tipc_node_find_by_id(net, ehdr->id);
+ 	}
++	skb_dst_force(skb);
+ 	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ 	if (!skb)
+ 		return;
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 45beb1c5f747a..6b5181c668b5b 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -217,7 +217,7 @@ void aa_audit_rule_free(void *vrule)
+ 	}
+ }
+ 
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
+ {
+ 	struct aa_audit_rule *rule;
+ 
+@@ -230,14 +230,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ 		return -EINVAL;
+ 	}
+ 
+-	rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
++	rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
+ 
+ 	if (!rule)
+ 		return -ENOMEM;
+ 
+ 	/* Currently rules are treated as coming from the root ns */
+ 	rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
+-				     GFP_KERNEL, true, false);
++				     gfp, true, false);
+ 	if (IS_ERR(rule->label)) {
+ 		int err = PTR_ERR(rule->label);
+ 		aa_audit_rule_free(rule);
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index acbb03b9bd25c..0c8cc86b417b5 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -200,7 +200,7 @@ static inline int complain_error(int error)
+ }
+ 
+ void aa_audit_rule_free(void *vrule);
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
+ int aa_audit_rule_known(struct audit_krule *rule);
+ int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
+ 
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 11d7c03322070..0a4f274f77810 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -540,7 +540,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ #else
+ 
+ static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+-				       void **lsmrule)
++				       void **lsmrule, gfp_t gfp)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index c0556907c2e67..09da8e6392395 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -401,7 +401,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+ 	kfree(entry);
+ }
+ 
+-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
++static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
++						gfp_t gfp)
+ {
+ 	struct ima_rule_entry *nentry;
+ 	int i;
+@@ -410,7 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 	 * Immutable elements are copied over as pointers and data; only
+ 	 * lsm rules can change
+ 	 */
+-	nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
++	nentry = kmemdup(entry, sizeof(*nentry), gfp);
+ 	if (!nentry)
+ 		return NULL;
+ 
+@@ -425,7 +426,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 
+ 		ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ 				     nentry->lsm[i].args_p,
+-				     &nentry->lsm[i].rule);
++				     &nentry->lsm[i].rule,
++				     gfp);
+ 		if (!nentry->lsm[i].rule)
+ 			pr_warn("rule for LSM \'%s\' is undefined\n",
+ 				nentry->lsm[i].args_p);
+@@ -438,7 +440,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+ 	int i;
+ 	struct ima_rule_entry *nentry;
+ 
+-	nentry = ima_lsm_copy_rule(entry);
++	nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
+ 	if (!nentry)
+ 		return -ENOMEM;
+ 
+@@ -664,7 +666,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		}
+ 
+ 		if (rc == -ESTALE && !rule_reinitialized) {
+-			lsm_rule = ima_lsm_copy_rule(rule);
++			lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
+ 			if (lsm_rule) {
+ 				rule_reinitialized = true;
+ 				goto retry;
+@@ -1140,7 +1142,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ 	entry->lsm[lsm_rule].type = audit_type;
+ 	result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ 				      entry->lsm[lsm_rule].args_p,
+-				      &entry->lsm[lsm_rule].rule);
++				      &entry->lsm[lsm_rule].rule,
++				      GFP_KERNEL);
+ 	if (!entry->lsm[lsm_rule].rule) {
+ 		pr_warn("rule for LSM \'%s\' is undefined\n",
+ 			entry->lsm[lsm_rule].args_p);
+diff --git a/security/security.c b/security/security.c
+index 0a9a0ac3f2662..4fd3c839353ec 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -5331,15 +5331,17 @@ void security_key_post_create_or_update(struct key *keyring, struct key *key,
+  * @op: rule operator
+  * @rulestr: rule context
+  * @lsmrule: receive buffer for audit rule struct
++ * @gfp: GFP flag used for kmalloc
+  *
+  * Allocate and initialize an LSM audit rule structure.
+  *
+  * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
+  *         an invalid rule.
+  */
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++			     gfp_t gfp)
+ {
+-	return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule);
++	return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
+ }
+ 
+ /**
+diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
+index 52aca71210b47..29c7d4c86f6d5 100644
+--- a/security/selinux/include/audit.h
++++ b/security/selinux/include/audit.h
+@@ -21,12 +21,14 @@
+  *	@op: the operator the rule uses
+  *	@rulestr: the text "target" of the rule
+  *	@rule: pointer to the new rule structure returned via this
++ *	@gfp: GFP flag used for kmalloc
+  *
+  *	Returns 0 if successful, -errno if not.  On success, the rule structure
+  *	will be allocated internally.  The caller must free this structure with
+  *	selinux_audit_rule_free() after use.
+  */
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
++			    gfp_t gfp);
+ 
+ /**
+  *	selinux_audit_rule_free - free an selinux audit rule structure.
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index e88b1b6c4adbb..ded250e525e9c 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -3508,7 +3508,8 @@ void selinux_audit_rule_free(void *vrule)
+ 	}
+ }
+ 
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++			    gfp_t gfp)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 	struct selinux_policy *policy;
+@@ -3549,7 +3550,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+ 		return -EINVAL;
+ 	}
+ 
+-	tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
++	tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
+ 	if (!tmprule)
+ 		return -ENOMEM;
+ 	context_init(&tmprule->au_ctxt);
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 146667937811b..c6df5c00c7bab 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4692,11 +4692,13 @@ static int smack_post_notification(const struct cred *w_cred,
+  * @op: required testing operator (=, !=, >, <, ...)
+  * @rulestr: smack label to be audited
+  * @vrule: pointer to save our own audit rule representation
++ * @gfp: type of the memory for the allocation
+  *
+  * Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
+  */
+-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++				 gfp_t gfp)
+ {
+ 	struct smack_known *skp;
+ 	char **rule = (char **)vrule;
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index 171fb75267afa..d81f776a4c3dd 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -1075,6 +1075,8 @@ static const struct seq_ev_to_ump seq_ev_ump_encoders[] = {
+ 	  system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ 	{ SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING,
+ 	  system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
++	{ SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET,
++	  system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ };
+ 
+ static const struct seq_ev_to_ump *find_ump_encoder(int type)
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index d1f6cdcf1866e..e7c2ef6c6b4cb 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -16,7 +16,7 @@
+ static int dsp_driver;
+ 
+ module_param(dsp_driver, int, 0444);
+-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
++MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
+ 
+ #define FLAG_SST			BIT(0)
+ #define FLAG_SOF			BIT(1)
+diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
+index d3fa6e136744d..ec688c60c153b 100644
+--- a/sound/pci/hda/cs35l41_hda.c
++++ b/sound/pci/hda/cs35l41_hda.c
+@@ -1362,7 +1362,7 @@ static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *
+ 	if (comps[cs35l41->index].dev == dev) {
+ 		memset(&comps[cs35l41->index], 0, sizeof(*comps));
+ 		sleep_flags = lock_system_sleep();
+-		device_link_remove(&comps->codec->core.dev, cs35l41->dev);
++		device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev);
+ 		unlock_system_sleep(sleep_flags);
+ 	}
+ }
+@@ -1857,6 +1857,8 @@ void cs35l41_hda_remove(struct device *dev)
+ {
+ 	struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ 
++	component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
++
+ 	pm_runtime_get_sync(cs35l41->dev);
+ 	pm_runtime_dont_use_autosuspend(cs35l41->dev);
+ 	pm_runtime_disable(cs35l41->dev);
+@@ -1864,8 +1866,6 @@ void cs35l41_hda_remove(struct device *dev)
+ 	if (cs35l41->halo_initialized)
+ 		cs35l41_remove_dsp(cs35l41);
+ 
+-	component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
+-
+ 	acpi_dev_put(cs35l41->dacpi);
+ 
+ 	pm_runtime_put_noidle(cs35l41->dev);
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 11b0570ff56d4..6b77c38a0e155 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -1072,12 +1072,12 @@ void cs35l56_hda_remove(struct device *dev)
+ {
+ 	struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+ 
++	component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
++
+ 	pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
+ 	pm_runtime_get_sync(cs35l56->base.dev);
+ 	pm_runtime_disable(cs35l56->base.dev);
+ 
+-	component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+-
+ 	cs_dsp_remove(&cs35l56->cs_dsp);
+ 
+ 	kfree(cs35l56->system_name);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1a1ca7caaff07..3e6de1d86022f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10147,6 +10147,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10167,6 +10169,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c4d, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x103c, 0x8c4e, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8c4f, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8c50, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x103c, 0x8c51, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10179,6 +10183,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10486,7 +10494,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
+ 	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
+-	SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
++	SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ 	SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+@@ -10497,6 +10505,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
++	SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
+ 	SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
+@@ -10508,6 +10518,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
++	SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+@@ -10562,6 +10573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++	SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ 	SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
+@@ -10586,7 +10598,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 	SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ 
+ #if 0
+diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
+index 75f7674c66ee7..fdee6592c502d 100644
+--- a/sound/pci/hda/tas2781_hda_i2c.c
++++ b/sound/pci/hda/tas2781_hda_i2c.c
+@@ -777,11 +777,11 @@ static void tas2781_hda_remove(struct device *dev)
+ {
+ 	struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ 
++	component_del(tas_hda->dev, &tas2781_hda_comp_ops);
++
+ 	pm_runtime_get_sync(tas_hda->dev);
+ 	pm_runtime_disable(tas_hda->dev);
+ 
+-	component_del(tas_hda->dev, &tas2781_hda_comp_ops);
+-
+ 	pm_runtime_put_noidle(tas_hda->dev);
+ 
+ 	tasdevice_remove(tas_hda->priv);
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index a90b43162a54b..5d0125ef140c3 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -429,6 +429,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					RT711_JD2 |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0C0F")
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					RT711_JD2),
++	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+@@ -495,6 +504,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_BT_OFFLOAD_SSP(1) |
+ 					SOF_SSP_BT_OFFLOAD_PRESENT),
+ 	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Transcend Gaming Laptop"),
++		},
++		.driver_data = (void *)(RT711_JD2),
++	},
++
+ 	/* LunarLake devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
+index ccc13e9913760..cd8420e8c3ad8 100644
+--- a/tools/arch/arm64/include/asm/sysreg.h
++++ b/tools/arch/arm64/include/asm/sysreg.h
+@@ -701,18 +701,18 @@
+  * Permission Indirection Extension (PIE) permission encodings.
+  * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
+  */
+-#define PIE_NONE_O	0x0
+-#define PIE_R_O		0x1
+-#define PIE_X_O		0x2
+-#define PIE_RX_O	0x3
+-#define PIE_RW_O	0x5
+-#define PIE_RWnX_O	0x6
+-#define PIE_RWX_O	0x7
+-#define PIE_R		0x8
+-#define PIE_GCS		0x9
+-#define PIE_RX		0xa
+-#define PIE_RW		0xc
+-#define PIE_RWX		0xe
++#define PIE_NONE_O	UL(0x0)
++#define PIE_R_O		UL(0x1)
++#define PIE_X_O		UL(0x2)
++#define PIE_RX_O	UL(0x3)
++#define PIE_RW_O	UL(0x5)
++#define PIE_RWnX_O	UL(0x6)
++#define PIE_RWX_O	UL(0x7)
++#define PIE_R		UL(0x8)
++#define PIE_GCS		UL(0x9)
++#define PIE_RX		UL(0xa)
++#define PIE_RW		UL(0xc)
++#define PIE_RWX		UL(0xe)
+ 
+ #define PIRx_ELx_PERM(idx, perm)	((perm) << ((idx) * 4))
+ 
+diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
+index 5701163460ef7..955f87c1170d7 100644
+--- a/tools/testing/selftests/arm64/tags/tags_test.c
++++ b/tools/testing/selftests/arm64/tags/tags_test.c
+@@ -6,6 +6,7 @@
+ #include <stdint.h>
+ #include <sys/prctl.h>
+ #include <sys/utsname.h>
++#include "../../kselftest.h"
+ 
+ #define SHIFT_TAG(tag)		((uint64_t)(tag) << 56)
+ #define SET_TAG(ptr, tag)	(((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
+@@ -21,6 +22,9 @@ int main(void)
+ 	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
+ 		tbi_enabled = 1;
+ 	ptr = (struct utsname *)malloc(sizeof(*ptr));
++	if (!ptr)
++		ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
++
+ 	if (tbi_enabled)
+ 		tag = 0x42;
+ 	ptr = (struct utsname *)SET_TAG(ptr, tag);
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+index a8b53b8736f01..f66ceccd7029c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+@@ -25,7 +25,7 @@ static void test_lookup_update(void)
+ 	int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
+ 	int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
+ 	struct test_btf_map_in_map *skel;
+-	int err, key = 0, val, i, fd;
++	int err, key = 0, val, i;
+ 
+ 	skel = test_btf_map_in_map__open_and_load();
+ 	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
+@@ -102,30 +102,6 @@ static void test_lookup_update(void)
+ 	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+ 	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+ 
+-	test_btf_map_in_map__destroy(skel);
+-	skel = NULL;
+-
+-	/* we need to either wait for or force synchronize_rcu(), before
+-	 * checking for "still exists" condition, otherwise map could still be
+-	 * resolvable by ID, causing false positives.
+-	 *
+-	 * Older kernels (5.8 and earlier) freed map only after two
+-	 * synchronize_rcu()s, so trigger two, to be entirely sure.
+-	 */
+-	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+-	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+-
+-	fd = bpf_map_get_fd_by_id(map1_id);
+-	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+-		close(fd);
+-		goto cleanup;
+-	}
+-	fd = bpf_map_get_fd_by_id(map2_id);
+-	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+-		close(fd);
+-		goto cleanup;
+-	}
+-
+ cleanup:
+ 	test_btf_map_in_map__destroy(skel);
+ }
+diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+index baff5ffe94051..a9fc30ed4d732 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
++++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+@@ -8,6 +8,13 @@
+ #include "xdp_metadata.h"
+ #include "bpf_kfuncs.h"
+ 
++/* The compiler may be able to detect accesses to uninitialized
++   memory in the routines performing out-of-bounds memory accesses and
++   emit warnings about them.  This is the case for GCC. */
++#if !defined(__clang__)
++#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
++#endif
++
+ int arr[1];
+ int unkn_idx;
+ const volatile bool call_dead_subprog = false;
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 910044f08908a..7989ec6084545 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -72,7 +72,6 @@ cleanup() {
+ server_listen() {
+ 	ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
+ 	server_pid=$!
+-	sleep 0.2
+ }
+ 
+ client_connect() {
+@@ -93,6 +92,16 @@ verify_data() {
+ 	fi
+ }
+ 
++wait_for_port() {
++	for i in $(seq 20); do
++		if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
++			return 0
++		fi
++		sleep 0.1
++	done
++	return 1
++}
++
+ set -e
+ 
+ # no arguments: automated test, run all
+@@ -193,6 +202,7 @@ setup
+ # basic communication works
+ echo "test basic connectivity"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ client_connect
+ verify_data
+ 
+@@ -204,6 +214,7 @@ ip netns exec "${ns1}" tc filter add dev veth1 egress \
+ 	section "encap_${tuntype}_${mac}"
+ echo "test bpf encap without decap (expect failure)"
+ server_listen
++wait_for_port ${port} ${netcat_opt}
+ ! client_connect
+ 
+ if [[ "$tuntype" =~ "udp" ]]; then
+diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
+index c79e65581dc37..161db24e3c409 100644
+--- a/tools/testing/selftests/net/cmsg_sender.c
++++ b/tools/testing/selftests/net/cmsg_sender.c
+@@ -333,16 +333,17 @@ static const char *cs_ts_info2str(unsigned int info)
+ 	return "unknown";
+ }
+ 
+-static void
++static unsigned long
+ cs_read_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz)
+ {
+ 	struct sock_extended_err *see;
+ 	struct scm_timestamping *ts;
++	unsigned long ts_seen = 0;
+ 	struct cmsghdr *cmsg;
+ 	int i, err;
+ 
+ 	if (!opt.ts.ena)
+-		return;
++		return 0;
+ 	msg->msg_control = cbuf;
+ 	msg->msg_controllen = cbuf_sz;
+ 
+@@ -396,8 +397,11 @@ cs_read_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz)
+ 			printf(" %5s ts%d %lluus\n",
+ 			       cs_ts_info2str(see->ee_info),
+ 			       i, rel_time);
++			ts_seen |= 1 << see->ee_info;
+ 		}
+ 	}
++
++	return ts_seen;
+ }
+ 
+ static void ca_set_sockopts(int fd)
+@@ -509,10 +513,16 @@ int main(int argc, char *argv[])
+ 	err = ERN_SUCCESS;
+ 
+ 	if (opt.ts.ena) {
+-		/* Make sure all timestamps have time to loop back */
+-		usleep(opt.txtime.delay);
++		unsigned long seen;
++		int i;
+ 
+-		cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
++		/* Make sure all timestamps have time to loop back */
++		for (i = 0; i < 40; i++) {
++			seen = cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
++			if (seen & (1 << SCM_TSTAMP_SND))
++				break;
++			usleep(opt.txtime.delay / 20);
++		}
+ 	}
+ 
+ err_out:
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 9e2981f2d7f5c..9cb05978269d1 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -160,10 +160,12 @@ make_connection()
+ 	local is_v6=$1
+ 	local app_port=$app4_port
+ 	local connect_addr="10.0.1.1"
++	local client_addr="10.0.1.2"
+ 	local listen_addr="0.0.0.0"
+ 	if [ "$is_v6" = "v6" ]
+ 	then
+ 		connect_addr="dead:beef:1::1"
++		client_addr="dead:beef:1::2"
+ 		listen_addr="::"
+ 		app_port=$app6_port
+ 	else
+@@ -206,6 +208,7 @@ make_connection()
+ 		   [ "$server_serverside" = 1 ]
+ 	then
+ 		test_pass
++		print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
+ 	else
+ 		test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
+ 		mptcp_lib_result_print_all_tap
+@@ -297,7 +300,7 @@ test_announce()
+ 	ip netns exec "$ns2"\
+ 	   ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
+ 	   ns2eth1
+-	print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port"
++	print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port"
+ 	sleep 0.5
+ 	verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
+ 			      "$client4_port"
+@@ -306,7 +309,7 @@ test_announce()
+ 	:>"$server_evts"
+ 	ip netns exec "$ns2" ./pm_nl_ctl ann\
+ 	   dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1
+-	print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port"
++	print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port"
+ 	sleep 0.5
+ 	verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
+ 			      "$client_addr_id" "$client6_port" "v6"
+@@ -316,7 +319,7 @@ test_announce()
+ 	client_addr_id=$((client_addr_id+1))
+ 	ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ 	   $client_addr_id dev ns2eth1 port $new4_port
+-	print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port"
++	print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port"
+ 	sleep 0.5
+ 	verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
+ 			      "$client_addr_id" "$new4_port"
+@@ -327,7 +330,7 @@ test_announce()
+ 	# ADD_ADDR from the server to client machine reusing the subflow port
+ 	ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ 	   $server_addr_id dev ns1eth2
+-	print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
++	print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port"
+ 	sleep 0.5
+ 	verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ 			      "$server_addr_id" "$app4_port"
+@@ -336,7 +339,7 @@ test_announce()
+ 	:>"$client_evts"
+ 	ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ 	   $server_addr_id dev ns1eth2
+-	print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port"
++	print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port"
+ 	sleep 0.5
+ 	verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
+ 			      "$server_addr_id" "$app6_port" "v6"
+@@ -346,7 +349,7 @@ test_announce()
+ 	server_addr_id=$((server_addr_id+1))
+ 	ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ 	   $server_addr_id dev ns1eth2 port $new4_port
+-	print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port"
++	print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port"
+ 	sleep 0.5
+ 	verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ 			      "$server_addr_id" "$new4_port"
+@@ -380,7 +383,7 @@ test_remove()
+ 	local invalid_token=$(( client4_token - 1 ))
+ 	ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
+ 	   $client_addr_id > /dev/null 2>&1
+-	print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
++	print_test "RM_ADDR id:client ns2 => ns1, invalid token"
+ 	local type
+ 	type=$(mptcp_lib_evts_get_info type "$server_evts")
+ 	if [ "$type" = "" ]
+@@ -394,7 +397,7 @@ test_remove()
+ 	local invalid_id=$(( client_addr_id + 1 ))
+ 	ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ 	   $invalid_id > /dev/null 2>&1
+-	print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
++	print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id"
+ 	type=$(mptcp_lib_evts_get_info type "$server_evts")
+ 	if [ "$type" = "" ]
+ 	then
+@@ -407,7 +410,7 @@ test_remove()
+ 	:>"$server_evts"
+ 	ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ 	   $client_addr_id
+-	print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
++	print_test "RM_ADDR id:client ns2 => ns1"
+ 	sleep 0.5
+ 	verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
+ 
+@@ -416,7 +419,7 @@ test_remove()
+ 	client_addr_id=$(( client_addr_id - 1 ))
+ 	ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ 	   $client_addr_id
+-	print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
++	print_test "RM_ADDR id:client-1 ns2 => ns1"
+ 	sleep 0.5
+ 	verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
+ 
+@@ -424,7 +427,7 @@ test_remove()
+ 	:>"$server_evts"
+ 	ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
+ 	   $client_addr_id
+-	print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1"
++	print_test "RM_ADDR6 id:client-1 ns2 => ns1"
+ 	sleep 0.5
+ 	verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
+ 
+@@ -434,7 +437,7 @@ test_remove()
+ 	# RM_ADDR from the server to client machine
+ 	ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ 	   $server_addr_id
+-	print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
++	print_test "RM_ADDR id:server ns1 => ns2"
+ 	sleep 0.5
+ 	verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
+ 
+@@ -443,7 +446,7 @@ test_remove()
+ 	server_addr_id=$(( server_addr_id - 1 ))
+ 	ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ 	   $server_addr_id
+-	print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
++	print_test "RM_ADDR id:server-1 ns1 => ns2"
+ 	sleep 0.5
+ 	verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
+ 
+@@ -451,7 +454,7 @@ test_remove()
+ 	:>"$client_evts"
+ 	ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
+ 	   $server_addr_id
+-	print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2"
++	print_test "RM_ADDR6 id:server-1 ns1 => ns2"
+ 	sleep 0.5
+ 	verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
+ }
+@@ -479,8 +482,14 @@ verify_subflow_events()
+ 	local locid
+ 	local remid
+ 	local info
++	local e_dport_txt
+ 
+-	info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
++	# only display the fixed ports
++	if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then
++		e_dport_txt=":${e_dport}"
++	fi
++
++	info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})"
+ 
+ 	if [ "$e_type" = "$SUB_ESTABLISHED" ]
+ 	then
+@@ -766,7 +775,7 @@ test_subflows_v4_v6_mix()
+ 	:>"$client_evts"
+ 	ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
+ 	   $server_addr_id dev ns1eth2
+-	print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
++	print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port"
+ 	sleep 0.5
+ 	verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
+ 			      "$server_addr_id" "$app6_port"
+@@ -861,7 +870,7 @@ test_listener()
+ 	local listener_pid=$!
+ 
+ 	sleep 0.5
+-	print_test "CREATE_LISTENER 10.0.2.2:$client4_port"
++	print_test "CREATE_LISTENER 10.0.2.2 (client port)"
+ 	verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
+ 
+ 	# ADD_ADDR from client to server machine reusing the subflow port
+@@ -878,13 +887,14 @@ test_listener()
+ 	mptcp_lib_kill_wait $listener_pid
+ 
+ 	sleep 0.5
+-	print_test "CLOSE_LISTENER 10.0.2.2:$client4_port"
++	print_test "CLOSE_LISTENER 10.0.2.2 (client port)"
+ 	verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
+ }
+ 
+ print_title "Make connections"
+ make_connection
+ make_connection "v6"
++print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)"
+ 
+ test_announce
+ test_remove
+diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+index 5cae535438491..15bca07087179 100755
+--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
++++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+ #
+ # OVS kernel module self tests
+diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
+index 0f4e0cf4f158b..747fe251e445b 100644
+--- a/virt/kvm/guest_memfd.c
++++ b/virt/kvm/guest_memfd.c
+@@ -510,8 +510,10 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ 	}
+ 
+ 	if (folio_test_hwpoison(folio)) {
++		folio_unlock(folio);
++		folio_put(folio);
+ 		r = -EHWPOISON;
+-		goto out_unlock;
++		goto out_fput;
+ 	}
+ 
+ 	page = folio_file_page(folio, index);
+@@ -522,7 +524,6 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ 
+ 	r = 0;
+ 
+-out_unlock:
+ 	folio_unlock(folio);
+ out_fput:
+ 	fput(file);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ff0a20565f908..d9ce063c76f92 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4066,12 +4066,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ 	struct kvm *kvm = me->kvm;
+ 	struct kvm_vcpu *vcpu;
+-	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
++	int last_boosted_vcpu;
+ 	unsigned long i;
+ 	int yielded = 0;
+ 	int try = 3;
+ 	int pass;
+ 
++	last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
+ 	kvm_vcpu_set_in_spin_loop(me, true);
+ 	/*
+ 	 * We boost the priority of a VCPU that is runnable but not
+@@ -4109,7 +4110,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ 
+ 			yielded = kvm_vcpu_yield_to(vcpu);
+ 			if (yielded > 0) {
+-				kvm->last_boosted_vcpu = i;
++				WRITE_ONCE(kvm->last_boosted_vcpu, i);
+ 				break;
+ 			} else if (yielded < 0) {
+ 				try--;



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-03 23:05 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-03 23:05 UTC (permalink / raw
  To: gentoo-commits

commit:     25334cfc78985c9f6ee2478fcb35cd24fb98505c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  3 23:04:24 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  3 23:04:24 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=25334cfc

BMQ Scheduler (USE=experimental) v6.9-r0

Set default for BMQ. Default to n.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/0000_README b/0000_README
index 5ba3f04f..3b52f37f 100644
--- a/0000_README
+++ b/0000_README
@@ -110,3 +110,11 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.9-r0.patch
+From:   https://gitlab.com/alfredchen/projectc
+Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.
+
+Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Set defaults for BMQ. Default to n.



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-05 10:47 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-05 10:47 UTC (permalink / raw
  To: gentoo-commits

commit:     d878fcc990820961f38822ee3084643c80143397
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul  5 10:47:13 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul  5 10:47:13 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d878fcc9

BMQ v6.9-r0

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5020_BMQ-and-PDS-io-scheduler-v6.9-r0.patch | 11515 ++++++++++++++++++++++++++
 5021_BMQ-and-PDS-gentoo-defaults.patch      |    13 +
 2 files changed, 11528 insertions(+)

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.9-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.9-r0.patch
new file mode 100644
index 00000000..f5ec553c
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.9-r0.patch
@@ -0,0 +1,11515 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 7fd43947832f..a07de351a5c2 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1673,3 +1673,12 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield is
++performed by calls to sched_yield().
++
++  0 - No yield.
++  1 - Requeue task. (default)
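++
++For example (assuming the patch is applied and this sysctl is exposed on
++the running kernel):
++
++  echo 0 > /proc/sys/kernel/yield_type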
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap Queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while staying efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks that are put into that
++run queue.
++
++The run queue is a set of priority queues. Structurally, these queues are
++FIFO queues for non-rt tasks and priority queues for rt tasks; see BitMap
++Queue below for details. BMQ is optimized for non-rt tasks because most
++applications are non-rt tasks. Whether a queue is FIFO or priority, each
++queue is an ordered list of runnable tasks awaiting execution, and the data
++structures are the same. When it is time for a new task to run, the
++scheduler simply looks for the lowest numbered queue that contains a task
++and runs the first task from the head of that queue. The per-CPU idle task
++is also in the run queue, so the scheduler can always find a task to run.
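++
++As a minimal sketch of this lookup (illustrative only: apart from the
++sq_node list linkage this patch adds to task_struct, the type, field and
++constant names below are assumptions, not the actual patch code):
++
++	static struct task_struct *sketch_pick_next(struct sketch_rq *rq)
++	{
++		/* the idle task keeps one bit set, so this always finds one */
++		unsigned long idx = find_first_bit(rq->bitmap,
++						   SKETCH_QUEUE_BITS);
++
++		return list_first_entry(&rq->heads[idx],
++					struct task_struct, sq_node);
++	}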
++
++Each task is assigned the same timeslice (default 4ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires, the scheduler
++stops execution of that task, selects another task and starts over again.
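++
++A minimal sketch of that tick-side bookkeeping (time_slice is the s64
++field this patch adds to task_struct; the helper and constant names are
++assumptions):
++
++	#define SKETCH_TIMESLICE_NS	(4 * NSEC_PER_MSEC)	/* default 4ms */
++
++	static void sketch_tick(struct sketch_rq *rq, struct task_struct *p,
++				u64 delta_ns)
++	{
++		p->time_slice -= delta_ns;
++		if (p->time_slice <= 0) {
++			p->time_slice = SKETCH_TIMESLICE_NS;
++			/* back to the tail of its priority queue */
++			sketch_requeue(rq, p);
++		}
++	}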
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler, but is heavily optimized for non-rt tasks,
++i.e. NORMAL/BATCH/IDLE policy tasks. Below are the implementation details
++of each policy.
++
++DEADLINE
++	It is squashed into a priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The insert operation is O(n). BMQ is not designed for systems that run
++mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they just don't boost. To control the priority
++of NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Use a nice level -20 NORMAL policy
++task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0 to 99. For non-rt tasks, several factors
++are used to determine the effective priority of a task, the effective
++priority being what is used to determine which queue a task will be in.
++
++The first factor is simply the task's static priority, which is assigned
++from the task's nice level: within [-20, 19] from userland's point of view
++and [0, 39] internally.
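++
++As a worked example using the constants this patch defines: for BMQ,
++MIN_NORMAL_PRIO is 128 and NORMAL_PRIO_NUM is 64, so MAX_PRIO is
++128 + 64 = 192, and with MAX_PRIORITY_ADJ = 12 and the mainline
++NICE_WIDTH of 40, DEFAULT_PRIO works out to 192 - 12 - 40/2 = 160,
++the base priority of a nice-0 task before any boost is applied.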
++
++The second factor is the priority boost. This is a value bounded between
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] that is used to offset the base
++priority; it is modified in the following cases (see the sketch after this
++list):
++
++* When a thread has used up its entire timeslice, it is always deboosted by
++increasing its boost value by one.
++* When a thread gives up CPU control (voluntarily or not) to reschedule, and
++its switch-in time (the time since it was last switched in and ran) is below
++the threshold based on its priority boost, it is boosted by decreasing its
++boost value by one, capped at 0 (it won't go negative).
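++
++Expressed as a hedged sketch (boost_prio is the field this patch adds to
++task_struct under CONFIG_SCHED_BMQ; the helpers themselves are
++illustrative, not the actual code):
++
++	static void sketch_deboost(struct task_struct *p)
++	{
++		/* used up the whole timeslice: lower effective priority */
++		if (p->boost_prio < MAX_PRIORITY_ADJ)
++			p->boost_prio++;
++	}
++
++	static void sketch_boost(struct task_struct *p)
++	{
++		/* rescheduled quickly enough: raise priority, floor at 0 */
++		if (p->boost_prio > 0)
++			p->boost_prio--;
++	}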
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get
++the priority boost from unblocking, while background threads that do most of
++the processing receive the priority penalty for using their entire timeslice.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 18550c071d71..fa8cf95568b6 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -481,7 +481,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 3c2abbc587b4..1245570d1996 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -771,8 +771,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -786,6 +792,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -794,6 +801,19 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -805,6 +825,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1567,6 +1588,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
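++/*
++ * Callers such as proc_pid_schedstat(), delayacct_add_tsk() and
++ * __exit_signal() use tsk_seruntime() so the same code works under both
++ * CFS and the alternative schedulers.
++ */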
++
+ #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
+ #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
+ 
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index df3aca89d4f5..1df1f7635188 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -2,6 +2,25 @@
+ #ifndef _LINUX_SCHED_DEADLINE_H
+ #define _LINUX_SCHED_DEADLINE_H
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -23,6 +42,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..e66dfb553bc5 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,28 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(12)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++#endif
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - MAX_PRIORITY_ADJ - NICE_WIDTH / 2)
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index b2b9e6eb9683..09bd4d8758b2 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 18572c9ea724..ff205c23c9e7 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -244,7 +244,8 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index 664bedb9a71f..9a53977f5961 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -638,6 +638,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	depends on !SCHED_ALT
+ 	select KERNFS
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+@@ -803,6 +804,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -849,6 +851,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
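++# Example .config fragment (illustrative): enable the menu and pick exactly
++# one scheduler from the choice, e.g. CONFIG_SCHED_ALT=y together with
++# CONFIG_SCHED_BMQ=y (the default) or CONFIG_SCHED_PDS=y.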
+ endmenu
+ 
+ #
+@@ -914,6 +945,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1011,6 +1043,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1033,6 +1066,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config SCHED_MM_CID
+@@ -1281,6 +1315,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index 4daee6d761c8..9d27c1d39272 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -70,9 +70,15 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -84,6 +90,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -91,6 +107,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd95a821..41654679b1b2 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 4237c8748715..a67002cb9e1d 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -855,7 +855,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1254,7 +1254,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+@@ -3308,12 +3308,15 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 				goto out_unlock;
+ 		}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 		if (dl_task(task)) {
+ 			cs->nr_migrate_dl_tasks++;
+ 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ 		}
++#endif
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (!cs->nr_migrate_dl_tasks)
+ 		goto out_success;
+ 
+@@ -3334,6 +3337,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	}
+ 
+ out_success:
++#endif
+ 	/*
+ 	 * Mark attach is in progress.  This makes validate_change() fail
+ 	 * changes which zero cpus/mems_allowed.
+@@ -3357,12 +3361,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ 	if (!cs->attach_in_progress)
+ 		wake_up(&cpuset_attach_wq);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (cs->nr_migrate_dl_tasks) {
+ 		int cpu = cpumask_any(cs->effective_cpus);
+ 
+ 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ 		reset_migrate_dl_data(cs);
+ 	}
++#endif
+ 
+ 	mutex_unlock(&cpuset_mutex);
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 6f0c358e73d8..8111481ce8b1 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 41a12630cbbc..fc73453ec51a 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -176,7 +176,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -197,7 +197,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 88d08eeb8bc0..03e8793badfc 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -363,7 +363,7 @@ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ 	lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
+ 
+ 	waiter->tree.prio = __waiter_prio(task);
+-	waiter->tree.deadline = task->dl.deadline;
++	waiter->tree.deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+@@ -384,16 +384,20 @@ waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+  * Only use with rt_waiter_node_{less,equal}()
+  */
+ #define task_to_waiter_node(p)	\
+-	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ #define task_to_waiter(p)	\
+ 	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
+ 
+ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 					       struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -402,16 +406,22 @@ static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 						 struct rt_waiter_node *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -420,8 +430,10 @@ static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..23b285393326
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,8958 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/clock.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/hotplug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/nmi.h>
++#include <linux/rseq.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#include <trace/events/ipi.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++#include "smp.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
++EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.9-r0"
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/*
++ * Time slice
++ * (default: 4 ms (4 << 20 ns), units: nanoseconds)
++ */
++unsigned int sysctl_sched_base_slice __read_mostly	= (4 << 20);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++struct affinity_context {
++	const struct cpumask *new_mask;
++	struct cpumask *user_mask;
++	unsigned int flags;
++};
++
++/* Reschedule if less than this many ns are left (100 << 10 ns, ~100 us) */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - the type of sched_yield() that will be performed.
++ * 0: No yield.
++ * 1: Requeue task. (default)
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU_ALIGNED(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
++ * the domain); this allows us to quickly tell if two CPUs are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS + 1] ____cacheline_aligned_in_smp;
++
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS - 1];
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t *const sched_sg_idle_mask = &sched_preempt_mask[SCHED_QUEUE_BITS];
++#endif
++
++/* task function */
++static inline const struct cpumask *task_user_cpus(struct task_struct *p)
++{
++	if (!p->user_cpus_ptr)
++		return cpu_possible_mask; /* &init_task.cpus_mask */
++	return p->user_cpus_ptr;
++}
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_LEVELS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Initialize the idle task and put it into the queue structure of the rq.
++ * IMPORTANT: may be called multiple times for a single cpu.
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	INIT_LIST_HEAD(&q->heads[IDLE_TASK_SCHED_PRIO]);
++	list_add_tail(&idle->sq_node, &q->heads[IDLE_TASK_SCHED_PRIO]);
++	idle->on_rq = TASK_ON_RQ_QUEUED;
++}
++
++#define CLEAR_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
++	if (low < pr && pr <= high)				\
++		cpumask_clear_cpu(cpu, sched_preempt_mask + pr);
++
++#define SET_CACHED_PREEMPT_MASK(pr, low, high, cpu)		\
++	if (low < pr && pr <= high)				\
++		cpumask_set_cpu(cpu, sched_preempt_mask + pr);
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* water mark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	int prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	int last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++#ifdef CONFIG_SCHED_PDS
++	rq->prio_idx = sched_prio2idx(rq->prio, rq);
++#endif
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++#ifdef CONFIG_SCHED_SMT
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(sched_sg_idle_mask,
++					       sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
++		}
++		CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present) &&
++		    cpumask_intersects(cpu_smt_mask(cpu), sched_idle_mask))
++			cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
++	}
++	SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	const struct list_head *head = &rq->queue.heads[sched_rq_prio_idx(rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	struct list_head *next = p->sq_node.next;
++
++	if (&rq->queue.heads[0] <= next && next < &rq->queue.heads[SCHED_LEVELS]) {
++		struct list_head *head;
++		unsigned long idx = next - &rq->queue.heads[0];
++
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq *__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void __task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq *
++task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p)) && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq && rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
++		    rq_lock_irqsave(_T->lock, &_T->rf),
++		    rq_unlock_irqrestore(_T->lock, &_T->rf),
++		    struct rq_flags rf)
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does, however, cause some slight mis-attribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	delayacct_irq(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	sched_update_rq_clock(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp), RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
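++/*
++ * Example: with max == 1024 and RQ_UTIL_SHIFT == 8, rq_load_util() returns
++ * RQ_LOAD_HISTORY_TO_UTIL(history) * 4, i.e. an 8-bit sample of the recent
++ * busy/idle history scaled into the [0, max] capacity range.
++ */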
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, let's send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)					\
++	sched_info_dequeue(rq, p);							\
++											\
++	__list_del_entry(&p->sq_node);							\
++	if (p->sq_node.prev == p->sq_node.next) {					\
++		clear_bit(sched_idx2prio(p->sq_node.next - &rq->queue.heads[0], rq),	\
++			  rq->queue.bitmap);						\
++		func;									\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags, func)					\
++	sched_info_enqueue(rq, p);							\
++	{										\
++	int idx, prio;									\
++	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);						\
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);				\
++	if (list_is_first(&p->sq_node, &rq->queue.heads[idx])) {			\
++		set_bit(prio, rq->queue.bitmap);					\
++		func;									\
++	}										\
++	}
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %d\n", cpu_of(rq), p, p->prio);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++#endif
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags, update_sched_preempt_mask(rq));
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
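++/*
++ * Note: sched_rq_pending_mask tracks runqueues with more than one runnable
++ * task, hence the 2 == nr_running check on enqueue above and the
++ * 1 == nr_running check in dequeue_task().
++ */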
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq)
++{
++	struct list_head *node = &p->sq_node;
++	int deq_idx, idx, prio;
++
++	TASK_SCHED_PRIO_IDX(p, rq, idx, prio);
++#ifdef ALT_SCHED_DEBUG
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->deadline);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++#endif
++	if (list_is_last(node, &rq->queue.heads[idx]))
++		return;
++
++	__list_del_entry(node);
++	if (node->prev == node->next && (deq_idx = node->next - &rq->queue.heads[0]) != idx)
++		clear_bit(sched_idx2prio(deq_idx, rq), rq->queue.bitmap);
++
++	list_add_tail(node, &rq->queue.heads[idx]);
++	if (list_is_first(node, &rq->queue.heads[idx]))
++		set_bit(prio, rq->queue.bitmap);
++	update_sched_preempt_mask(rq);
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
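++/*
++ * Usage sketch (illustrative): fetch_or() returns the value the flags held
++ * *before* the new bits were set. For example, in set_nr_and_not_polling()
++ * below:
++ *
++ *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
++ *	was_polling = old & _TIF_POLLING_NRFLAG;
++ */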
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	do {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
++
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++static inline void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++/*
++ * In mainline this records that the CPU is going idle with the tick stopped,
++ * for use by idle load balancing. The alternative schedulers do not do idle
++ * load balancing, so this is a no-op stub here.
++ */
++void nohz_balance_enter_idle(int cpu) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu);
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	/*
++	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
++	 * part of the idle loop. This forces an exit from the idle loop
++	 * and a round trip to schedule(). Now this could be optimized
++	 * because a simple new idle loop iteration is enough to
++	 * re-evaluate the next tick. Provided some re-ordering of tick
++	 * nohz functions that would need to follow TIF_NR_POLLING
++	 * clearing:
++	 *
++	 * - On most archs, a simple fetch_or on ti::flags with a
++	 *   "0" value would be enough to know if an IPI needs to be sent.
++	 *
++	 * - x86 needs to perform a last need_resched() check between
++	 *   monitor and mwait which doesn't take timers into account.
++	 *   There a dedicated TIF_TIMER flag would be required to
++	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
++	 *   before mwait().
++	 *
++	 * However, remote timer enqueue is not such a frequent event
++	 * and testing of the above solutions didn't appear to report
++	 * much benefits.
++	 */
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void wakeup_preempt(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
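++/*
++ * Return convention: 1 if p->__state matches @state, -1 if only
++ * p->saved_state matches (see the rtlock/__refrigerator callers noted in
++ * task_state_match() below), 0 otherwise.
++ */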
++static __always_inline
++int __task_state_match(struct task_struct *p, unsigned int state)
++{
++	if (READ_ONCE(p->__state) & state)
++		return 1;
++
++	if (READ_ONCE(p->saved_state) & state)
++		return -1;
++
++	return 0;
++}
++
++static __always_inline
++int task_state_match(struct task_struct *p, unsigned int state)
++{
++	/*
++	 * Serialize against current_save_and_set_rtlock_wait_state(),
++	 * current_restore_rtlock_saved_state(), and __refrigerator().
++	 */
++	guard(raw_spinlock_irq)(&p->pi_lock);
++
++	return __task_state_match(p, state);
++}
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	int running, queued, match;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p)) {
++			if (!task_state_match(p, match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		queued = p->on_rq;
++		ncsw = 0;
++		if ((match = __task_state_match(p, match_state))) {
++			/*
++			 * When matching on p->saved_state, consider this task
++			 * still queued so it will wait.
++			 */
++			if (match < 0)
++				queued = 1;
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		}
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(queued)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/**
++	 * Alt schedule FW doesn't support sched_feat yet
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++static inline void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++static inline void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : static_prio;
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
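++
++/*
++ * Worked example (editor's sketch, assuming MAX_RT_PRIO == 100 and
++ * NICE_TO_PRIO(0) == 120 as in mainline):
++ *
++ *   SCHED_FIFO,   rt_priority 50 -> prio = 100 - 1 - 50 = 49
++ *   SCHED_NORMAL, nice 0         -> prio = static_prio  = 120
++ *
++ * Lower numbers mean higher effective priority.
++ */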
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
++	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	WRITE_ONCE(p->on_rq, 0);
++	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
++
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	trace_sched_migrate_task(p, new_cpu);
++
++	if (task_cpu(p) != new_cpu) {
++		rseq_migrate(p);
++		perf_event_task_migrate(p);
++	}
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	guard(preempt)();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (p->migration_disabled == 0)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	guard(preempt)();
++	/*
++	 * Assumption: current should be running on allowed cpu
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
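++
++/*
++ * Usage sketch: migrate_disable()/migrate_enable() sections nest, and
++ * only the outermost migrate_enable() restores p->cpus_ptr:
++ *
++ *	migrate_disable();	// pins current to this CPU
++ *	migrate_disable();	// nested: only bumps the count to 2
++ *	migrate_enable();	// count 2 -> 1, still pinned
++ *	migrate_enable();	// count 1 -> 0, cpus_ptr restored
++ */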
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non kernel threads are not allowed during either online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
++{
++	int src_cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	src_cpu = cpu_of(rq);
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++
++	sched_mm_cid_migrate_to(rq, p, src_cpu);
++
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
++	wakeup_preempt(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	}
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
++{
++	cpumask_copy(&p->cpus_mask, ctx->new_mask);
++	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
++
++	/*
++	 * Swap in a new user_cpus_ptr if SCA_USER flag set
++	 */
++	if (ctx->flags & SCA_USER)
++		swap(p->user_cpus_ptr, ctx->user_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, ctx);
++}
++
++/*
++ * Used for kthread_bind() and select_fallback_rq(), in both cases the user
++ * affinity (if any) should be destroyed too.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.user_mask = NULL,
++		.flags     = SCA_USER,	/* clear the user requested mask */
++	};
++	union cpumask_rcuhead {
++		cpumask_t cpumask;
++		struct rcu_head rcu;
++	};
++
++	__do_set_cpus_allowed(p, &ac);
++
++	/*
++	 * Because this is called with p->pi_lock held, it is not possible
++	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
++	 * kfree_rcu().
++	 */
++	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
++}
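++
++/*
++ * Editor's note on the union above: the buffer returned by
++ * alloc_user_cpus_ptr() below is sized as
++ * max(cpumask_size(), sizeof(struct rcu_head)), so once the mask is
++ * dead the very same storage can be reinterpreted as an rcu_head and
++ * handed to kfree_rcu() without a separate allocation.
++ */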
++
++static cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	/*
++	 * See do_set_cpus_allowed() above for the rcu_head usage.
++	 */
++	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
++
++	return kmalloc_node(size, GFP_KERNEL, node);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	cpumask_t *user_mask;
++	unsigned long flags;
++
++	/*
++	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
++	 * may differ by now due to racing.
++	 */
++	dst->user_cpus_ptr = NULL;
++
++	/*
++	 * This check is racy and losing the race is a valid situation.
++	 * It is not worth the extra overhead of taking the pi_lock on
++	 * every fork/clone.
++	 */
++	if (data_race(!src->user_cpus_ptr))
++		return 0;
++
++	user_mask = alloc_user_cpus_ptr(node);
++	if (!user_mask)
++		return -ENOMEM;
++
++	/*
++	 * Use pi_lock to protect content of user_cpus_ptr
++	 *
++	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++	 * do_set_cpus_allowed().
++	 */
++	raw_spin_lock_irqsave(&src->pi_lock, flags);
++	if (src->user_cpus_ptr) {
++		swap(dst->user_cpus_ptr, user_mask);
++		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	}
++	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
++	if (unlikely(user_mask))
++		kfree(user_mask);
++
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/**
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	guard(preempt)();
++	int cpu = task_cpu(p);
++
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    avoid the load balancer to place new tasks on the to be removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs,
++ * and can assume that any active CPU must be online. Conversely,
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select the CPU on the other node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
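++
++/*
++ * Summary of the fallback ladder above (editor's sketch):
++ *
++ *   cpuset:   retry within the task's cpuset (cpuset_cpus_allowed_fallback())
++ *   possible: widen affinity to task_cpu_possible_mask() and retry
++ *   fail:     nothing left to try -- BUG()
++ *
++ * e.g. a task whose last CPU went down first looks at its NUMA node,
++ * then at p->cpus_ptr, before escalating through the states above.
++ */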
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio, int ref)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_preempt_mask + ref);
++	if (prio < ref) {
++		for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits) {
++			if (prio < cpu_rq(cpu)->prio)
++				cpumask_set_cpu(cpu, mask);
++		}
++	} else {
++		for_each_cpu_andnot(cpu, mask, sched_idle_mask) {
++			if (prio >= cpu_rq(cpu)->prio)
++				cpumask_clear_cpu(cpu, mask);
++		}
++	}
++}
++
++static inline int
++preempt_mask_check(cpumask_t *preempt_mask, cpumask_t *allow_mask, int prio)
++{
++	cpumask_t *mask = sched_preempt_mask + prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != prio && SCHED_QUEUE_BITS - 1 != prio) {
++		sched_preempt_mask_flush(mask, prio, pr);
++		atomic_set(&sched_prio_record, prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&mask, &allow_mask, sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
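++
++/*
++ * Selection order above (editor's sketch): first an idle SMT sibling
++ * group (with CONFIG_SCHED_SMT), then any idle allowed CPU, then a CPU
++ * whose running priority the task can preempt; whichever mask matches
++ * first is resolved to the CPU "nearest" task_cpu(p) by best_mask_cpu(),
++ * falling back to the full allowed mask when nothing matches.
++ */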
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 struct affinity_context *ctx,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, ctx);
++
++	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  struct affinity_context *ctx)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
++	 * flags are set.
++	 */
++	if (p->user_cpus_ptr &&
++	    !(ctx->flags & SCA_USER) &&
++	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
++		ctx->new_mask = rq->scratch_mask;
++
++	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++
++	return __set_cpus_allowed_ptr(p, &ac);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
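++
++/*
++ * Usage sketch (hypothetical caller; worker_fn is a made-up thread
++ * function): pin a freshly created kthread to CPU 2.
++ *
++ *	struct task_struct *tsk = kthread_create(worker_fn, NULL, "worker");
++ *
++ *	if (!IS_ERR(tsk)) {
++ *		set_cpus_allowed_ptr(tsk, cpumask_of(2));
++ *		wake_up_process(tsk);
++ *	}
++ */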
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
++ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
++ * affinity or use cpu_online_mask instead.
++ *
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr().
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct affinity_context ac = {
++		.new_mask  = task_user_cpus(p),
++		.flags     = 0,
++	};
++	int ret;
++
++	/*
++	 * Try to restore the old affinity mask with __sched_setaffinity().
++	 * Cpuset masking will be done there too.
++	 */
++	ret = __sched_setaffinity(p, &ac);
++	WARN_ON_ONCE(ret);
++}
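++
++/*
++ * Editor's note: force_compatible_cpus_allowed_ptr() and
++ * relax_compatible_cpus_allowed_ptr() are meant to be used as a pair,
++ * e.g. around execution of a task that can only run on a subset of
++ * CPUs (such as 32-bit tasks on asymmetric arm64 systems): force on
++ * entry, relax once the restriction no longer applies.
++ */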
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       struct affinity_context *ctx)
++{
++	return set_cpus_allowed_ptr(p, ctx->new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	return NULL;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/*
++		 * Alt schedule FW ToDo: how to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable.
++ */
++static inline void ttwu_do_wakeup(struct task_struct *p)
++{
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	wakeup_preempt(rq);
++
++	ttwu_do_wakeup(p);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		if (!task_on_cpu(p)) {
++			/*
++			 * When on_rq && !on_cpu the task is preempted, see if
++			 * it should preempt the task that is current now.
++			 */
++			update_rq_clock(rq);
++			wakeup_preempt(rq);
++		}
++		ttwu_do_wakeup(p);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	/*
++	 * Must be after enqueueing at least one task such that
++	 * idle_cpu() does not observe a false-negative -- if it does,
++	 * it is possible for select_idle_siblings() to stack a number
++	 * of tasks on this CPU during that window.
++	 *
++	 * It is OK to clear ttwu_pending while another task is pending:
++	 * we will receive an IPI once local IRQs are re-enabled and then
++	 * enqueue it. Since nr_running > 0 by then, idle_cpu() will
++	 * always get the correct result.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Prepare the scene for sending an IPI for a remote smp_call
++ *
++ * Returns true if the caller can proceed with sending the IPI.
++ * Returns false otherwise.
++ */
++bool call_function_single_prep_ipi(int cpu)
++{
++	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
++		trace_sched_wake_idle_without_ipi(cpu);
++		return false;
++	}
++
++	return true;
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rq's wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee CPU is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	guard(rcu)();
++	if (is_idle_task(rcu_dereference(rq->curr))) {
++		guard(raw_spinlock_irqsave)(&rq->lock);
++		if (is_idle_task(rq->curr))
++			resched_curr(rq);
++	}
++}
++
++extern struct static_key_false sched_asym_cpucapacity;
++
++static __always_inline bool sched_asym_cpucap_active(void)
++{
++	return static_branch_unlikely(&sched_asym_cpucapacity);
++}
++
++bool cpus_equal_capacity(int this_cpu, int that_cpu)
++{
++	if (!sched_asym_cpucap_active())
++		return true;
++
++	if (this_cpu == that_cpu)
++		return true;
++
++	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *  For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
++ *  No other bits set. This allows us to distinguish all wakeup scenarios.
++ *
++ *  For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
++ *  allows us to prevent early wakeup of tasks before they can be run on
++ *  asymmetric ISA architectures (e.g. ARMv9).
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	int match;
++
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	*success = !!(match = __task_state_match(p, state));
++
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
++	 * set p::saved_state to TASK_RUNNING, but do not wake the task
++	 * because it waits for a lock wakeup or __thaw_task(). Also
++	 * indicate success because from the regular waker's point of
++	 * view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (match < 0)
++		p->saved_state = TASK_RUNNING;
++
++	return match > 0;
++}
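++
++/*
++ * Example (editor's sketch): try_to_wake_up(p, TASK_INTERRUPTIBLE, 0)
++ * succeeds only if p->__state has a TASK_INTERRUPTIBLE bit set;
++ * wake_up_process() below passes TASK_NORMAL, i.e.
++ * TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, so it matches either
++ * sleeping state.
++ */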
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However, for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++	guard(preempt)();
++	int cpu, success = 0;
++
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		ttwu_do_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
++		smp_mb__after_spinlock();
++		if (!ttwu_state_match(p, state, &success))
++			break;
++
++		trace_sched_waking(p);
++
++		/*
++		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++		 * in smp_cond_load_acquire() below.
++		 *
++		 * sched_ttwu_pending()			try_to_wake_up()
++		 *   STORE p->on_rq = 1			  LOAD p->state
++		 *   UNLOCK rq->lock
++		 *
++		 * __schedule() (switch to task 'p')
++		 *   LOCK rq->lock			  smp_rmb();
++		 *   smp_mb__after_spinlock();
++		 *   UNLOCK rq->lock
++		 *
++		 * [task p]
++		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++		 *
++		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++		 * __schedule().  See the comment for smp_mb__after_spinlock().
++		 *
++		 * A similar smp_rmb() lives in __task_needs_rq_lock().
++		 */
++		smp_rmb();
++		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++			break;
++
++#ifdef CONFIG_SMP
++		/*
++		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++		 * possible to, falsely, observe p->on_cpu == 0.
++		 *
++		 * One must be running (->on_cpu == 1) in order to remove oneself
++		 * from the runqueue.
++		 *
++		 * __schedule() (switch to task 'p')	try_to_wake_up()
++		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++		 *   UNLOCK rq->lock
++		 *
++		 * __schedule() (put 'p' to sleep)
++		 *   LOCK rq->lock			  smp_rmb();
++		 *   smp_mb__after_spinlock();
++		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++		 *
++		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++		 * __schedule().  See the comment for smp_mb__after_spinlock().
++		 *
++		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++		 * schedule()'s deactivate_task() has 'happened' and p will no longer
++		 * care about its own p->state. See the comment in __schedule().
++		 */
++		smp_acquire__after_ctrl_dep();
++
++		/*
++		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++		 * == 0), which means we need to do an enqueue, change p->state to
++		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++		 * enqueue, such as ttwu_queue_wakelist().
++		 */
++		WRITE_ONCE(p->__state, TASK_WAKING);
++
++		/*
++		 * If the owning (remote) CPU is still in the middle of schedule() with
++		 * this task as prev, consider queueing p on the remote CPU's wake_list
++		 * which potentially sends an IPI instead of spinning on p->on_cpu to
++		 * let the waker make forward progress. This is safe because IRQs are
++		 * disabled and the IPI will deliver after on_cpu is cleared.
++		 *
++		 * Ensure we load task_cpu(p) after p->on_cpu:
++		 *
++		 * set_task_cpu(p, cpu);
++		 *   STORE p->cpu = @cpu
++		 * __schedule() (switch to task 'p')
++		 *   LOCK rq->lock
++		 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++		 *   STORE p->on_cpu = 1                LOAD p->cpu
++		 *
++		 * to ensure we observe the correct CPU on which the task is currently
++		 * scheduling.
++		 */
++		if (smp_load_acquire(&p->on_cpu) &&
++		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++			break;
++
++		/*
++		 * If the owning (remote) CPU is still in the middle of schedule() with
++		 * this task as prev, wait until it's done referencing the task.
++		 *
++		 * Pairs with the smp_store_release() in finish_task().
++		 *
++		 * This ensures that tasks getting woken will be fully ordered against
++		 * their previous state and preserve Program Order.
++		 */
++		smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		sched_task_ttwu(p);
++
++		if ((wake_flags & WF_CURRENT_CPU) &&
++		    cpumask_test_cpu(smp_processor_id(), p->cpus_ptr))
++			cpu = smp_processor_id();
++		else
++			cpu = select_task_rq(p);
++
++		if (cpu != task_cpu(p)) {
++			if (p->in_iowait) {
++				delayacct_blkio_end(p);
++				atomic_dec(&task_rq(p)->nr_iowait);
++			}
++
++			wake_flags |= WF_MIGRATED;
++			set_task_cpu(p, cpu);
++		}
++#else
++		sched_task_ttwu(p);
++
++		cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++		ttwu_queue(p, cpu, wake_flags);
++	}
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
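++
++/*
++ * Usage sketch (probe_on_rq is a hypothetical helper, not part of this
++ * patch):
++ *
++ *	static int probe_on_rq(struct task_struct *p, void *arg)
++ *	{
++ *		return p->on_rq;	// read while @p is pinned
++ *	}
++ *	...
++ *	int queued = task_call_func(p, probe_on_rq, NULL);
++ */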
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++	init_sched_mm_cid(p);
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sysctl_sched_base_slice;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
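++
++/*
++ * Worked example (editor's sketch): if the parent has 4ms of its slice
++ * left at fork, parent and child each continue with 2ms. Should the
++ * halved slice drop below RESCHED_NS, the child is instead given a
++ * fresh sysctl_sched_base_slice and the parent is marked for
++ * reschedule.
++ */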
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
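++
++/*
++ * Usage sketch: schedstats can be enabled at boot with the kernel
++ * command line parameter "schedstats=enable", or at runtime (with
++ * CONFIG_PROC_SYSCTL) via:
++ *
++ *	sysctl kernel.sched_schedstats=1
++ */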
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	wakeup_preempt(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where a normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
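++
++/*
++ * A minimal usage sketch (my_balance_fn/my_cb are hypothetical; the
++ * __queue_balance_callback() helper is assumed from elsewhere in this
++ * scheduler, see its use further down): a callback is linked onto
++ * rq->balance_callback while rq->lock is held, then spliced off and run
++ * by __balance_callbacks()/balance_callbacks() once dropping the lock
++ * is safe:
++ *
++ *	static struct balance_callback my_cb = { .func = my_balance_fn };
++ *
++ *	lockdep_assert_held(&rq->lock);
++ *	__queue_balance_callback(rq, &my_cb);
++ */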
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next task (which is
++	 * an invalid locking op, but in the case of the scheduler it's an
++	 * obvious special case), so we do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock and lower preempt_count,
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 *
++	 * switch_mm_cid() needs to be updated if the barriers provided
++	 * by context_switch() are modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	/* switch_mm_cid() requires the memory barriers above. */
++	switch_mm_cid(rq, prev, next);
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
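++
++/*
++ * Example walk-through of the lazy-mm table above, for a user task U
++ * and a kernel thread K: on U -> K, K borrows U's mm as active_mm and
++ * mmgrab() pins it; on a later K -> U' switch the borrowed mm is handed
++ * to rq->prev_mm and mmdrop()ed in finish_task_switch(), which is also
++ * where the membarrier-required full barrier comes from in the
++ * 'prev->active_mm == next->mm' case.
++ */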
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible to use it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches_cpu(int cpu)
++{
++	return cpu_rq(cpu)->nr_switches;
++}
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, such as the cpuidle menu governor, are
++ * using nonsensical data: they prefer shallow idle state selection for a CPU
++ * that has IO-wait, even though the blocked task might not even end up running
++ * on that CPU when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU; only that
++ * CPU will have IO-wait accounted, while the other has regular idle. Yet if
++ * the storage were faster, both could have run at the same time, utilising
++ * both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, due to under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake on a different CPU than
++ * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
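++
++/*
++ * Worked example with illustrative numbers (neither the slice nor HZ is
++ * fixed here): with a 4ms base slice and a 1000Hz tick, every tick in
++ * which the task ran the full 1ms adds 1ms to p->sched_time and takes
++ * 1ms off p->time_slice, so after roughly four such ticks the remaining
++ * slice drops below RESCHED_NS and scheduler_task_tick() below marks
++ * the task for rescheduling.
++ */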
++
++/*
++ * Return accounted runtime for the task.
++ * If the task is currently running, also include its pending runtime that
++ * has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr = rq->curr;
++	u64 resched_latency;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		arch_scale_freq_tick();
++
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	task_tick_mm_cid(rq, rq->curr);
++
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++
++	if (curr->flags & PF_WQ_WORKER)
++		wq_worker_tick(curr);
++}
++
++static int active_balance_cpu_stop(void *data)
++{
++	struct balance_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++	cpumask_t tmp;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	arg->active = 0;
++
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, arg->cpumask) &&
++	    !is_migration_disabled(p)) {
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu_of(rq)));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++/* trigger_active_balance - kick active balancing for @rq, called with @src_rq locked */
++static inline int
++trigger_active_balance(struct rq *src_rq, struct rq *rq, struct balance_arg *arg)
++{
++	unsigned long flags;
++	struct task_struct *p;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++
++	res = (1 == rq->nr_running) &&
++	      !is_migration_disabled((p = sched_rq_first_task(rq))) &&
++	      cpumask_intersects(p->cpus_ptr, arg->cpumask) &&
++	      !arg->active;
++	if (res) {
++		arg->task = p;
++
++		arg->active = 1;
++	}
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res) {
++		preempt_disable();
++		raw_spin_unlock(&src_rq->lock);
++
++		stop_one_cpu_nowait(cpu_of(rq), active_balance_cpu_stop,
++				    arg, &rq->active_balance_work);
++
++		preempt_enable();
++		raw_spin_lock(&src_rq->lock);
++	}
++
++	return res;
++}
++
++#ifdef CONFIG_SCHED_SMT
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq)
++{
++	cpumask_t chk;
++
++	if (cpumask_andnot(&chk, cpu_active_mask, sched_idle_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i, cpu = cpu_of(rq);
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &chk)) {
++				struct rq *target_rq = cpu_rq(i);
++				if (trigger_active_balance(rq, target_rq, &target_rq->sg_balance_arg))
++					return;
++			}
++		}
++	}
++}
++
++static DEFINE_PER_CPU(struct balance_callback, sg_balance_head) = {
++	.func = sg_balance,
++};
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too many is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (tick_nohz_tick_stopped_cpu(cpu)) {
++		guard(raw_spinlock_irqsave)(&rq->lock);
++		struct task_struct *curr = rq->curr;
++
++		if (cpu_online(cpu)) {
++			update_rq_clock(rq);
++
++			if (!is_idle_task(curr)) {
++				/*
++				 * Make sure the next tick runs within a
++				 * reasonable amount of time.
++				 */
++				u64 delta = rq_clock_task(rq) - curr->last_ran;
++				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++			}
++			scheduler_task_tick(rq);
++
++			calc_load_nohz_remote(rq);
++		}
++	}
++
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is low enough to avoid overload but high enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++	int os;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	check_panic_on_warn("scheduling while atomic");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	/* Workaround to check that rq->curr is still on the rq */
++	if (!task_on_rq_queued(skip))
++		return 0;
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0, );
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			sched_mm_cid_migrate_to(dest_rq, p, cpu_of(rq));
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0, );
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	cpumask_t *topo_mask, *end_mask, chk;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu);
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++
++		if (!cpumask_and(&chk, &sched_rq_pending_mask, topo_mask))
++			continue;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				update_sched_preempt_mask(rq);
++				cpufreq_update_util(rq, 0);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sysctl_sched_base_slice;
++
++	sched_task_renew(p, rq);
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq);
++}
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++			if (static_key_count(&sched_smt_present.key) > 1 &&
++			    cpumask_test_cpu(cpu, sched_sg_idle_mask) &&
++			    rq->online)
++				__queue_balance_callback(rq, &per_cpu(sg_balance_head, cpu));
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
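++
++/*
++ * Concretely: with !PREEMPT_RT, (sched_mode & SM_MASK_PREEMPT) is
++ * (sched_mode & ~0U) == sched_mode, so the test in __schedule() reduces
++ * to a plain non-zero check. With PREEMPT_RT, only the SM_PREEMPT bit
++ * is tested, so SM_RTLOCK_WAIT (0x2 & 0x1 == 0) takes the
++ * non-preemption path, as intended for blocking on 'sleeping'
++ * spin/rwlocks.
++ */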
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* Bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr; this
++	 * barrier matches a full barrier in the proximity of the membarrier
++	 * system call exit.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev_state & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++
++		/*printk(KERN_INFO "sched: %px -> %px\n", prev, next);*/
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
++		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
++		 *   on PowerPC and on RISC-V.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 *
++		 * The barrier matches a full barrier in the proximity of
++		 * the membarrier system call entry.
++		 *
++		 * On RISC-V, this barrier pairing is also needed for the
++		 * SYNC_CORE command when switching between processes, cf.
++		 * the inline comments in membarrier_arch_switch_mm().
++		 */
++		++*switch_count;
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++
++		cpu = cpu_of(rq);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
++	unsigned int task_flags;
++
++	/*
++	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
++	 * will use a blocking primitive -- which would lead to recursion.
++	 */
++	lock_map_acquire_try(&sched_map);
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & PF_WQ_WORKER)
++		wq_worker_sleeping(tsk);
++	else if (task_flags & PF_IO_WORKER)
++		io_wq_worker_sleeping(tsk);
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++
++	lock_map_release(&sched_map);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
++		if (tsk->flags & PF_BLOCK_TS)
++			blk_plug_invalidate_ts(tsk);
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else if (tsk->flags & PF_IO_WORKER)
++			io_wq_worker_running(tsk);
++	}
++}
++
++static __always_inline void __schedule_loop(unsigned int sched_mode)
++{
++	do {
++		preempt_disable();
++		__schedule(sched_mode);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++#ifdef CONFIG_RT_MUTEXES
++	lockdep_assert(!tsk->sched_rt_mutex);
++#endif
++
++	if (!task_is_running(tsk))
++		sched_submit_work(tsk);
++	__schedule_loop(SM_NONE);
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	__schedule_loop(SM_RTLOCK_WAIT);
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with IRQs disabled. This
++ * protects us against recursive calling from IRQ context.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		requeue_task(p, rq);
++		wakeup_preempt(rq);
++	}
++}
++
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++/*
++ * Would be more useful with typeof()/auto_type but they don't mix with
++ * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
++ * name so that if someone were to implement this function we get to compare
++ * notes.
++ */
++#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
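++
++/*
++ * For illustration: fetch_and_set(current->sched_rt_mutex, 1) below
++ * evaluates to the old value (expected to be 0 on a correct call path)
++ * while setting the flag, so lockdep_assert(!fetch_and_set(...)) both
++ * records entry into the rt_mutex schedule path and flags double entry
++ * in a single expression.
++ */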
++
++void rt_mutex_pre_schedule(void)
++{
++	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
++	sched_submit_work(current);
++}
++
++void rt_mutex_schedule(void)
++{
++	lockdep_assert(current->sched_rt_mutex);
++	__schedule_loop(SM_NONE);
++}
++
++void rt_mutex_post_schedule(void)
++{
++	sched_update_worker(current);
++	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
++}
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is a load of trickiness to make this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ are active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	if (task_on_rq_queued(p))
++		__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task is
++	 * SCHED_NORMAL/SCHED_BATCH again:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * is_nice_reduction - check if nice value is an actual reduction
++ * @p: task
++ * @nice: nice value
++ *
++ * Similar to can_nice() but does not perform a capability check.
++ */
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
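++
++/*
++ * Example: nice_to_rlimit() maps nice 19 -> 1 and nice -20 -> 40, so a
++ * task with RLIMIT_NICE = 25 may lower its nice value down to -5.
++ */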
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy               return value    kernel prio     user prio/nice
++ *
++ * (BMQ) normal, batch, idle  [0 ... 53]      [100 ... 139]   0/[-20 ... 19]/[-7 ... 7]
++ * (PDS) normal, batch, idle  [0 ... 39]      100             0/[-20 ... 19]
++ * fifo, rr                   [-1 ... -100]   [99 ... 0]      [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++static struct task_struct *find_get_task(pid_t pid)
++{
++	struct task_struct *p;
++	guard(rcu)();
++
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++
++	return p;
++}
++
++DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
++	     find_get_task(pid), pid_t pid)
++
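++/*
++ * The cleanup.h class above enables the pattern used later in this file:
++ *
++ *	CLASS(find_get_task, p)(pid);  // takes a reference, or p == NULL
++ *	if (!p)
++ *		return -ESRCH;
++ *	...                            // put_task_struct(p) runs at scope exit
++ */
++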
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will have no
++	 * effect on scheduling until the task is SCHED_NORMAL/
++	 * SCHED_BATCH again.
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	guard(rcu)();
++
++	pcred = __task_cred(p);
++	return (uid_eq(cred->euid, pcred->euid) ||
++	        uid_eq(cred->euid, pcred->uid));
++}
++
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
++	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be held.
++	 * If task_access_lock() is used here:
++	 * For a task p which is not running, reading rq->stop is
++	 * racy but acceptable as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop thread is a very bad idea.
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi)
++		rt_mutex_adjust_pi(p);
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	CLASS(find_get_task, p)(pid);
++	if (!p)
++		return -ESRCH;
++
++	return sched_setscheduler(p, policy, &lparam);
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++static void get_params(struct task_struct *p, struct sched_attr *attr)
++{
++	if (task_has_rt_policy(p))
++		attr->sched_priority = p->rt_priority;
++	else
++		attr->sched_nice = task_nice(p);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	CLASS(find_get_task, p)(pid);
++	if (!p)
++		return -ESRCH;
++
++	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
++		get_params(p, &attr);
++
++	return sched_setattr(p, &attr);
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		return -ESRCH;
++
++	guard(rcu)();
++	p = find_process_by_pid(pid);
++	if (!p)
++		return -ESRCH;
++
++	retval = security_task_getscheduler(p);
++	if (!retval)
++		retval = p->policy;
++
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++
++	scoped_guard (rcu) {
++		int retval;
++
++		p = find_process_by_pid(pid);
++		if (!p)
++			return -EINVAL;
++
++		retval = security_task_getscheduler(p);
++		if (retval)
++			return retval;
++
++		if (task_has_rt_policy(p))
++			lp.sched_priority = p->rt_priority;
++	}
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * which part the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	scoped_guard (rcu) {
++		p = find_process_by_pid(pid);
++		if (!p)
++			return -ESRCH;
++
++		retval = security_task_getscheduler(p);
++		if (retval)
++			return retval;
++
++		kattr.sched_policy = p->policy;
++		if (p->sched_reset_on_fork)
++			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		get_params(p, &kattr);
++		kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++	}
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++}
++
++#ifdef CONFIG_SMP
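++/*
++ * SCHED_DEADLINE is squashed into SCHED_FIFO by __sched_setscheduler()
++ * (see dl_squash_attr above), so there is no deadline bandwidth to
++ * validate the new affinity mask against; the check trivially succeeds.
++ */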
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++	return 0;
++}
++#endif
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
++
++	ctx->new_mask = new_mask;
++	ctx->flags |= SCA_CHECK;
++
++	retval = __set_cpus_allowed_ptr(p, ctx);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed.
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++
++		/*
++		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
++		 * will restore the previous user_cpus_ptr value.
++		 *
++		 * In the unlikely event a previous user_cpus_ptr exists,
++		 * we need to further restrict the mask to what is allowed
++		 * by that old user_cpus_ptr.
++		 */
++		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
++			bool empty = !cpumask_and(new_mask, new_mask,
++						  ctx->user_mask);
++
++			if (WARN_ON_ONCE(empty))
++				cpumask_copy(new_mask, cpus_allowed);
++		}
++		__set_cpus_allowed_ptr(p, ctx);
++		retval = -EINVAL;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct affinity_context ac;
++	struct cpumask *user_mask;
++	int retval;
++
++	CLASS(find_get_task, p)(pid);
++	if (!p)
++		return -ESRCH;
++
++	if (p->flags & PF_NO_SETAFFINITY)
++		return -EINVAL;
++
++	if (!check_same_owner(p)) {
++		guard(rcu)();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
++			return -EPERM;
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		return retval;
++
++	/*
++	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
++	 * alloc_user_cpus_ptr() returns NULL.
++	 */
++	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
++	if (user_mask) {
++		cpumask_copy(user_mask, in_mask);
++	} else if (IS_ENABLED(CONFIG_SMP)) {
++		return -ENOMEM;
++	}
++
++	ac = (struct affinity_context){
++		.new_mask  = in_mask,
++		.user_mask = user_mask,
++		.flags     = SCA_USER,
++	};
++
++	retval = __sched_setaffinity(p, &ac);
++	kfree(ac.user_mask);
++
++	return retval;
++}
++
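++/*
++ * Copy a user-space bitmask into a kernel cpumask: a shorter mask is
++ * zero-padded (the cpumask is cleared first), a longer one is truncated
++ * to cpumask_size() bytes.
++ */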
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	int retval;
++
++	guard(rcu)();
++	p = find_process_by_pid(pid);
++	if (!p)
++		return -ESRCH;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		return retval;
++
++	guard(raw_spinlock_irqsave)(&p->pi_lock);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min(len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
++
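++/*
++ * sched_yield_type is Alt schedule FW's yield_type knob: 0 turns
++ * sys_sched_yield() into a no-op (the early return below), while any
++ * other value requeues the yielding task before rescheduling.
++ */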
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++	struct task_struct *p;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	p = current;
++	if (rt_task(p)) {
++		if (task_on_rq_queued(p))
++			requeue_task(p, rq);
++	} else if (rq->nr_running > 1) {
++		do_sched_yield_type_1(p, rq);
++		if (task_on_rq_queued(p))
++			requeue_task(p, rq);
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	klp_sched_try_switch();
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++static DEFINE_MUTEX(sched_dynamic_mutex);
++static bool klp_override;
++
++static void __sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	if (!klp_override)
++		preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		if (!klp_override)
++			preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		if (!klp_override)
++			preempt_dynamic_disable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		if (mode != preempt_dynamic_mode)
++			pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++void sched_dynamic_update(int mode)
++{
++	mutex_lock(&sched_dynamic_mutex);
++	__sched_dynamic_update(mode);
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
++
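++/*
++ * While a KLP transition is in progress, sched_dynamic_klp_enable()
++ * points the cond_resched static call at klp_cond_resched() below, so
++ * every cond_resched() also acts as a live-patch switching point;
++ * klp_override keeps __sched_dynamic_update() from undoing that.
++ */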
++static int klp_cond_resched(void)
++{
++	__klp_sched_try_switch();
++	return __cond_resched();
++}
++
++void sched_dynamic_klp_enable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = true;
++	static_call_update(cond_resched, klp_cond_resched);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++void sched_dynamic_klp_disable(void)
++{
++	mutex_lock(&sched_dynamic_mutex);
++
++	klp_override = false;
++	__sched_dynamic_update(preempt_dynamic_mode);
++
++	mutex_unlock(&sched_dynamic_mutex);
++}
++
++#endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
++
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
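++
++/*
++ * Example: booting with "preempt=voluntary" on the kernel command line
++ * selects the VOLUNTARY mapping from the table above without rebuilding
++ * the kernel with a different CONFIG_PREEMPT_* choice.
++ */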
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
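++/*
++ * Each accessor below expands to, e.g.:
++ *
++ *	bool preempt_model_none(void)
++ *	{
++ *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
++ *		return preempt_dynamic_mode == preempt_dynamic_none;
++ *	}
++ */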
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported; it always returns 0.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
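++
++/*
++ * Callers that block in their own way can use the pair directly; e.g.
++ * mutex_lock_io() does:
++ *
++ *	int token = io_schedule_prepare();
++ *	mutex_lock(lock);
++ *	io_schedule_finish(token);
++ */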
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
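++/*
++ * Alt schedule FW uses a single base time slice for all tasks, so the
++ * interval reported below is sysctl_sched_base_slice regardless of
++ * policy or priority.
++ */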
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	guard(rcu)();
++	p = find_process_by_pid(pid);
++	if (!p)
++		return -EINVAL;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		return retval;
++
++	*t = ns_to_timespec64(sysctl_sched_base_slice);
++	return 0;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), task_tgid_nr(p),
++		ppid, read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++#ifdef CONFIG_SMP
++	struct affinity_context ac = (struct affinity_context) {
++		.new_mask  = cpumask_of(cpu),
++		.flags     = 0,
++	};
++#endif
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, &ac);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
++ * effective when the hotplug motion is down.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	preempt_disable();
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	preempt_enable();
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online) {
++		update_rq_clock(rq);
++		rq->online = false;
++	}
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do sync before parking smpboot threads to take care of the rcu boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(mask),	\
++				  nr_cpumask_bits);
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take the chance to reset the time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sysctl_sched_base_slice;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu);
++
++		bitmap_complement(cpumask_bits(topo), cpumask_bits(cpumask_of(cpu)),
++				  nr_cpumask_bits);
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		TOPOLOGY_CPUMASK(cluster, topology_cluster_cpumask(cpu), false);
++
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
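++
++/*
++ * Illustration (assumed topology, not derived from any specific
++ * machine): on a 4-CPU, 2-way SMT box with a single LLC, the loop
++ * above leaves cpu0 with a mask chain roughly like
++ *
++ *	topo[0] = 0x3	(SMT level: cpus 0-1)
++ *	topo[1] = 0xf	(coregroup/LLC level: cpus 0-3)  <- llc_mask
++ *
++ * with sched_cpu_topo_end_mask pointing one past the last stored
++ * level, ready for the nearest-first scan in best_mask_cpu().
++ */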
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sysctl_sched_base_slice;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __ro_after_init;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++	struct balance_arg balance_arg = {.cpumask = sched_sg_idle_mask, .active = 0};
++
++	printk(KERN_INFO "sched/alt: "ALT_SCHED_NAME" CPU Scheduler "ALT_SCHED_VERSION\
++			 " by Alfred Chen.\n");
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++#ifdef CONFIG_SCHED_PDS
++		rq->prio_idx = rq->prio;
++#endif
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->sg_balance_arg = balance_arg;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++
++		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* CONFIG_SMP */
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING, make sure we enter with it;
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
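++
++/*
++ * Worked example (illustrative): a caller holding one RCU read-side
++ * critical section with preemption disabled once would pass
++ * offsets == (1 << MIGHT_RESCHED_RCU_SHIFT) | 1; the check then
++ * accepts exactly preempt_count() == 1 with rcu_preempt_depth() == 1
++ * (the RCU term is non-zero only with CONFIG_PREEMPT_RCU).
++ */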
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
++
++static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
++				  struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
++				   struct cftype *cftype, s64 cfs_quota_us)
++{
++	return 0;
++}
++
++static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
++				   struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
++				    struct cftype *cftype, u64 cfs_period_us)
++{
++	return 0;
++}
++
++static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
++				  struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
++				   struct cftype *cftype, u64 cfs_burst_us)
++{
++	return 0;
++}
++
++static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
++				struct cftype *cft, s64 val)
++{
++	return 0;
++}
++
++static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
++				    struct cftype *cftype, u64 rt_period_us)
++{
++	return 0;
++}
++
++static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
++				   struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
++				    char *buf, size_t nbytes,
++				    loff_t off)
++{
++	return nbytes;
++}
++
++static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
++				    char *buf, size_t nbytes,
++				    loff_t off)
++{
++	return nbytes;
++}
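++
++/*
++ * The cfs/rt/uclamp handlers above are no-op stubs: they keep the
++ * cgroup cpu controller's files present for userspace compatibility,
++ * while the alt scheduler ignores the written values.
++ */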
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{
++		.name = "cfs_quota_us",
++		.read_s64 = cpu_cfs_quota_read_s64,
++		.write_s64 = cpu_cfs_quota_write_s64,
++	},
++	{
++		.name = "cfs_period_us",
++		.read_u64 = cpu_cfs_period_read_u64,
++		.write_u64 = cpu_cfs_period_write_u64,
++	},
++	{
++		.name = "cfs_burst_us",
++		.read_u64 = cpu_cfs_burst_read_u64,
++		.write_u64 = cpu_cfs_burst_write_u64,
++	},
++	{
++		.name = "stat",
++		.seq_show = cpu_cfs_stat_show,
++	},
++	{
++		.name = "stat.local",
++		.seq_show = cpu_cfs_local_stat_show,
++	},
++	{
++		.name = "rt_runtime_us",
++		.read_s64 = cpu_rt_runtime_read,
++		.write_s64 = cpu_rt_runtime_write,
++	},
++	{
++		.name = "rt_period_us",
++		.read_u64 = cpu_rt_period_read_uint,
++		.write_u64 = cpu_rt_period_write_uint,
++	},
++	{
++		.name = "uclamp.min",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_min_show,
++		.write = cpu_uclamp_min_write,
++	},
++	{
++		.name = "uclamp.max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_max_show,
++		.write = cpu_uclamp_max_write,
++	},
++	{ }	/* Terminate */
++};
++
++static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cft, u64 weight)
++{
++	return 0;
++}
++
++static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
++				    struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
++				     struct cftype *cft, s64 nice)
++{
++	return 0;
++}
++
++static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	return 0;
++}
++
++static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
++				struct cftype *cft, s64 idle)
++{
++	return 0;
++}
++
++static int cpu_max_show(struct seq_file *sf, void *v)
++{
++	return 0;
++}
++
++static ssize_t cpu_max_write(struct kernfs_open_file *of,
++			     char *buf, size_t nbytes, loff_t off)
++{
++	return nbytes;
++}
++
++static struct cftype cpu_files[] = {
++	{
++		.name = "weight",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_u64 = cpu_weight_read_u64,
++		.write_u64 = cpu_weight_write_u64,
++	},
++	{
++		.name = "weight.nice",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_s64 = cpu_weight_nice_read_s64,
++		.write_s64 = cpu_weight_nice_write_s64,
++	},
++	{
++		.name = "idle",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_s64 = cpu_idle_read_s64,
++		.write_s64 = cpu_idle_write_s64,
++	},
++	{
++		.name = "max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_max_show,
++		.write = cpu_max_write,
++	},
++	{
++		.name = "max.burst",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.read_u64 = cpu_cfs_burst_read_u64,
++		.write_u64 = cpu_cfs_burst_write_u64,
++	},
++	{
++		.name = "uclamp.min",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_min_show,
++		.write = cpu_uclamp_min_write,
++	},
++	{
++		.name = "uclamp.max",
++		.flags = CFTYPE_NOT_ON_ROOT,
++		.seq_show = cpu_uclamp_max_show,
++		.write = cpu_uclamp_max_write,
++	},
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++static int cpu_local_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++	.css_local_stat_show = cpu_local_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
++
++#ifdef CONFIG_SCHED_MM_CID
++
++/*
++ * @cid_lock: Guarantee forward-progress of cid allocation.
++ *
++ * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
++ * is only used when contention is detected by the lock-free allocation so
++ * forward progress can be guaranteed.
++ */
++DEFINE_RAW_SPINLOCK(cid_lock);
++
++/*
++ * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
++ *
++ * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
++ * detected, it is set to 1 to ensure that all newly coming allocations are
++ * serialized by @cid_lock until the allocation which detected contention
++ * completes and sets @use_cid_lock back to 0. This guarantees forward progress
++ * of a cid allocation.
++ */
++int use_cid_lock;
++
++/*
++ * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
++ * concurrently with respect to the execution of the source runqueue context
++ * switch.
++ *
++ * There is one basic property we want to guarantee here:
++ *
++ * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
++ * used by a task. That would lead to concurrent allocation of the cid and
++ * userspace corruption.
++ *
++ * Provide this guarantee by introducing a Dekker memory ordering to guarantee
++ * that a pair of loads observe at least one of a pair of stores, which can be
++ * shown as:
++ *
++ *      X = Y = 0
++ *
++ *      w[X]=1          w[Y]=1
++ *      MB              MB
++ *      r[Y]=y          r[X]=x
++ *
++ * Which guarantees that x==0 && y==0 is impossible. But rather than using
++ * values 0 and 1, this algorithm cares about specific state transitions of the
++ * runqueue current task (as updated by the scheduler context switch), and the
++ * per-mm/cpu cid value.
++ *
++ * Let's introduce task (Y) which has task->mm == mm and task (N) which has
++ * task->mm != mm for the rest of the discussion. There are two scheduler state
++ * transitions on context switch we care about:
++ *
++ * (TSA) Store to rq->curr with transition from (N) to (Y)
++ *
++ * (TSB) Store to rq->curr with transition from (Y) to (N)
++ *
++ * On the remote-clear side, there is one transition we care about:
++ *
++ * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
++ *
++ * There is also a transition to UNSET state which can be performed from all
++ * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
++ * guarantees that only a single thread will succeed:
++ *
++ * (TMB) cmpxchg to *pcpu_cid to mark UNSET
++ *
++ * Just to be clear, what we do _not_ want to happen is a transition to UNSET
++ * when a thread is actively using the cid (property (1)).
++ *
++ * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
++ *
++ * Scenario A) (TSA)+(TMA) (from next task perspective)
++ *
++ * CPU0                                      CPU1
++ *
++ * Context switch CS-1                       Remote-clear
++ *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
++ *                                             (implied barrier after cmpxchg)
++ *   - switch_mm_cid()
++ *     - memory barrier (see switch_mm_cid()
++ *       comment explaining how this barrier
++ *       is combined with other scheduler
++ *       barriers)
++ *     - mm_cid_get (next)
++ *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
++ *
++ * This Dekker ensures that either task (Y) is observed by the
++ * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
++ * observed.
++ *
++ * If task (Y) store is observed by rcu_dereference(), it means that there is
++ * still an active task on the cpu. Remote-clear will therefore not transition
++ * to UNSET, which fulfills property (1).
++ *
++ * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
++ * it will move its state to UNSET, which clears the percpu cid perhaps
++ * uselessly (which is not an issue for correctness). Because task (Y) is not
++ * observed, CPU1 can move ahead to set the state to UNSET. Because moving
++ * state to UNSET is done with a cmpxchg expecting that the old state has the
++ * LAZY flag set, only one thread will successfully UNSET.
++ *
++ * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
++ * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
++ * CPU1 will observe task (Y) and do nothing more, which is fine.
++ *
++ * What we are effectively preventing with this Dekker is a scenario where
++ * neither LAZY flag nor store (Y) are observed, which would fail property (1)
++ * because this would UNSET a cid which is actively used.
++ */
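++
++/*
++ * Mapping the abstract Dekker above onto the concrete state
++ * (illustrative): w[X] is the store to rq->curr in (TSA), w[Y] is the
++ * cmpxchg setting the LAZY flag in (TMA), r[Y] is the
++ * READ_ONCE(*pcpu_cid) in mm_cid_get() and r[X] is the
++ * rcu_dereference(src_rq->curr) on the remote-clear side. "x==0 &&
++ * y==0 is impossible" therefore means: remote-clear cannot miss task
++ * (Y) while mm_cid_get() also misses the LAZY flag.
++ */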
++
++void sched_mm_cid_migrate_from(struct task_struct *t)
++{
++	t->migrate_from_cpu = task_cpu(t);
++}
++
++static
++int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
++					  struct task_struct *t,
++					  struct mm_cid *src_pcpu_cid)
++{
++	struct mm_struct *mm = t->mm;
++	struct task_struct *src_task;
++	int src_cid, last_mm_cid;
++
++	if (!mm)
++		return -1;
++
++	last_mm_cid = t->last_mm_cid;
++	/*
++	 * If the migrated task has no last cid, or if the current
++	 * task on src rq uses the cid, it means the source cid does not need
++	 * to be moved to the destination cpu.
++	 */
++	if (last_mm_cid == -1)
++		return -1;
++	src_cid = READ_ONCE(src_pcpu_cid->cid);
++	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
++		return -1;
++
++	/*
++	 * If we observe an active task using the mm on this rq, it means we
++	 * are not the last task to be migrated from this cpu for this mm, so
++	 * there is no need to move src_cid to the destination cpu.
++	 */
++	rcu_read_lock();
++	src_task = rcu_dereference(src_rq->curr);
++	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++		rcu_read_unlock();
++		t->last_mm_cid = -1;
++		return -1;
++	}
++	rcu_read_unlock();
++
++	return src_cid;
++}
++
++static
++int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
++					      struct task_struct *t,
++					      struct mm_cid *src_pcpu_cid,
++					      int src_cid)
++{
++	struct task_struct *src_task;
++	struct mm_struct *mm = t->mm;
++	int lazy_cid;
++
++	if (src_cid == -1)
++		return -1;
++
++	/*
++	 * Attempt to clear the source cpu cid to move it to the destination
++	 * cpu.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(src_cid);
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
++		return -1;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, this task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	scoped_guard (rcu) {
++		src_task = rcu_dereference(src_rq->curr);
++		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
++			/*
++			 * We observed an active task for this mm, there is therefore
++			 * no point in moving this cid to the destination cpu.
++			 */
++			t->last_mm_cid = -1;
++			return -1;
++		}
++	}
++
++	/*
++	 * The src_cid is unused, so it can be unset.
++	 */
++	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++		return -1;
++	return src_cid;
++}
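++
++/*
++ * Illustrative cid life cycle of a successful steal (values made up):
++ * src_pcpu_cid->cid == 3 first becomes mm_cid_set_lazy_put(3) via the
++ * first cmpxchg; once no active user of the mm is observed on the
++ * source rq, the second cmpxchg moves it to MM_CID_UNSET and the
++ * caller owns cid 3, free to install it on the destination cpu.
++ */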
++
++/*
++ * Migration to dst cpu. Called with dst_rq lock held.
++ * Interrupts are disabled, which keeps the window of cid ownership without the
++ * source rq lock held small.
++ */
++void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu)
++{
++	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
++	struct mm_struct *mm = t->mm;
++	int src_cid, dst_cid;
++	struct rq *src_rq;
++
++	lockdep_assert_rq_held(dst_rq);
++
++	if (!mm)
++		return;
++	if (src_cpu == -1) {
++		t->last_mm_cid = -1;
++		return;
++	}
++	/*
++	 * Move the src cid if the dst cid is unset. This keeps id
++	 * allocation closest to 0 in cases where few threads migrate around
++	 * many cpus.
++	 *
++	 * If destination cid is already set, we may have to just clear
++	 * the src cid to ensure compactness in frequent migrations
++	 * scenarios.
++	 *
++	 * It is not useful to clear the src cid when the number of threads is
++	 * greater than or equal to the number of allowed cpus, because user-space
++	 * can expect that the number of allowed cids can reach the number of
++	 * allowed cpus.
++	 */
++	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
++	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
++	if (!mm_cid_is_unset(dst_cid) &&
++	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
++		return;
++	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
++	src_rq = cpu_rq(src_cpu);
++	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
++	if (src_cid == -1)
++		return;
++	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
++							    src_cid);
++	if (src_cid == -1)
++		return;
++	if (!mm_cid_is_unset(dst_cid)) {
++		__mm_cid_put(mm, src_cid);
++		return;
++	}
++	/* Move src_cid to dst cpu. */
++	mm_cid_snapshot_time(dst_rq, mm);
++	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
++}
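++
++/*
++ * Worked example (illustrative): an mm with 2 threads allowed on 8
++ * CPUs migrates a task from cpu5 to cpu6. If cpu6's slot is unset,
++ * the cid is stolen from cpu5 and installed on cpu6, keeping the cid
++ * range compact ({0, 1} rather than creeping toward 8); if cpu6
++ * already holds a cid, the stolen source cid is released via
++ * __mm_cid_put() instead.
++ */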
++
++static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
++				      int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *t;
++	int cid, lazy_cid;
++
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid))
++		return;
++
++	/*
++	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
++	 * there happens to be other tasks left on the source cpu using this
++	 * mm, the next task using this mm will reallocate its cid on context
++	 * switch.
++	 */
++	lazy_cid = mm_cid_set_lazy_put(cid);
++	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
++		return;
++
++	/*
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm matches the scheduler barrier in context_switch()
++	 * between store to rq->curr and load of prev and next task's
++	 * per-mm/cpu cid.
++	 *
++	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
++	 * rq->curr->mm_cid_active matches the barrier in
++	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
++	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
++	 * load of per-mm/cpu cid.
++	 */
++
++	/*
++	 * If we observe an active task using the mm on this rq after setting
++	 * the lazy-put flag, that task will be responsible for transitioning
++	 * from lazy-put flag set to MM_CID_UNSET.
++	 */
++	scoped_guard (rcu) {
++		t = rcu_dereference(rq->curr);
++		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
++			return;
++	}
++
++	/*
++	 * The cid is unused, so it can be unset.
++	 * Disable interrupts to keep the window of cid ownership without rq
++	 * lock small.
++	 */
++	scoped_guard (irqsave) {
++		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
++			__mm_cid_put(mm, cid);
++	}
++}
++
++static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct mm_cid *pcpu_cid;
++	struct task_struct *curr;
++	u64 rq_clock;
++
++	/*
++	 * rq->clock load is racy on 32-bit but one spurious clear once in a
++	 * while is irrelevant.
++	 */
++	rq_clock = READ_ONCE(rq->clock);
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++
++	/*
++	 * In order to take care of infrequently scheduled tasks, bump the time
++	 * snapshot associated with this cid if an active task using the mm is
++	 * observed on this rq.
++	 */
++	scoped_guard (rcu) {
++		curr = rcu_dereference(rq->curr);
++		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
++			WRITE_ONCE(pcpu_cid->time, rq_clock);
++			return;
++		}
++	}
++
++	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
++static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
++					     int weight)
++{
++	struct mm_cid *pcpu_cid;
++	int cid;
++
++	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
++	cid = READ_ONCE(pcpu_cid->cid);
++	if (!mm_cid_is_valid(cid) || cid < weight)
++		return;
++	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
++}
++
++static void task_mm_cid_work(struct callback_head *work)
++{
++	unsigned long now = jiffies, old_scan, next_scan;
++	struct task_struct *t = current;
++	struct cpumask *cidmask;
++	struct mm_struct *mm;
++	int weight, cpu;
++
++	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
++
++	work->next = work;	/* Prevent double-add */
++	if (t->flags & PF_EXITING)
++		return;
++	mm = t->mm;
++	if (!mm)
++		return;
++	old_scan = READ_ONCE(mm->mm_cid_next_scan);
++	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	if (!old_scan) {
++		unsigned long res;
++
++		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
++		if (res != old_scan)
++			old_scan = res;
++		else
++			old_scan = next_scan;
++	}
++	if (time_before(now, old_scan))
++		return;
++	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
++		return;
++	cidmask = mm_cidmask(mm);
++	/* Clear cids that were not recently used. */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_old(mm, cpu);
++	weight = cpumask_weight(cidmask);
++	/*
++	 * Clear cids that are greater than or equal to the cidmask weight to
++	 * recompact it.
++	 */
++	for_each_possible_cpu(cpu)
++		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
++}
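++
++/*
++ * Worked example (illustrative): if the mm currently caches cids
++ * {0, 1, 5}, the cidmask weight is 3, so the second pass lazily
++ * clears cid 5 (5 >= 3); the next allocation then picks the lowest
++ * free cid (2), keeping the cid space compact in [0, nr_users).
++ */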
++
++void init_sched_mm_cid(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	int mm_users = 0;
++
++	if (mm) {
++		mm_users = atomic_read(&mm->mm_users);
++		if (mm_users == 1)
++			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
++	}
++	t->cid_work.next = &t->cid_work;	/* Protect against double add */
++	init_task_work(&t->cid_work, task_mm_cid_work);
++}
++
++void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
++{
++	struct callback_head *work = &curr->cid_work;
++	unsigned long now = jiffies;
++
++	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
++	    work->next != work)
++		return;
++	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
++		return;
++	task_work_add(curr, work, TWA_RESUME);
++}
++
++void sched_mm_cid_exit_signals(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	guard(rq_lock_irqsave)(rq);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++}
++
++void sched_mm_cid_before_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	guard(rq_lock_irqsave)(rq);
++	preempt_enable_no_resched();	/* holding spinlock */
++	WRITE_ONCE(t->mm_cid_active, 0);
++	/*
++	 * Store t->mm_cid_active before loading per-mm/cpu cid.
++	 * Matches barrier in sched_mm_cid_remote_clear_old().
++	 */
++	smp_mb();
++	mm_cid_put(mm);
++	t->last_mm_cid = t->mm_cid = -1;
++}
++
++void sched_mm_cid_after_execve(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct rq *rq;
++
++	if (!mm)
++		return;
++
++	preempt_disable();
++	rq = this_rq();
++	scoped_guard (rq_lock_irqsave, rq) {
++		preempt_enable_no_resched();	/* holding spinlock */
++		WRITE_ONCE(t->mm_cid_active, 1);
++		/*
++		 * Store t->mm_cid_active before loading per-mm/cpu cid.
++		 * Matches barrier in sched_mm_cid_remote_clear_old().
++		 */
++		smp_mb();
++		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
++	}
++	rseq_set_notify_resume(t);
++}
++
++void sched_mm_cid_fork(struct task_struct *t)
++{
++	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
++	t->mm_cid_active = 1;
++}
++#endif
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1dbd7eb6a434
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,32 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++#include "linux/sched/debug.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..3cf48365fbae
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,980 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/profile.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++extern struct task_group *sched_create_group(struct task_group *parent);
++extern void sched_online_group(struct task_group *tg,
++			       struct task_group *parent);
++extern void sched_destroy_group(struct task_group *tg);
++extern void sched_release_group(struct task_group *tg);
++#endif /* CONFIG_CGROUP_SCHED */
++
++#define MIN_SCHED_NORMAL_PRIO	(32)
++/*
++ * levels: RT(0-24), reserved(25-31), NORMAL(32-63), cpu idle task(64)
++ *
++ * -- BMQ --
++ * NORMAL: (lower boost range 12, NICE_WIDTH 40, higher boost range 12) / 2
++ * -- PDS --
++ * NORMAL: SCHED_EDGE_DELTA + ((NICE_WIDTH 40) / 2)
++ */
++#define SCHED_LEVELS		(64 + 1)
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_LEVELS - 1)
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
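++
++/*
++ * Worked example (illustrative, 64-bit, SCHED_FIXEDPOINT_SHIFT == 10):
++ * scale_load(1024) == 1048576 and scale_load_down(1048576) == 1024,
++ * while the max(2UL, ...) clamp keeps a tiny scaled weight such as 1
++ * from degenerating to 0 on the way back down.
++ */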
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large,
++ * and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/* Wake flags. The first three directly map to some SD flag value */
++#define WF_EXEC         0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
++#define WF_FORK         0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
++#define WF_TTWU         0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */
++
++#define WF_SYNC         0x10 /* Waker goes to sleep after wakeup */
++#define WF_MIGRATED     0x20 /* Internal use, task got migrated */
++#define WF_CURRENT_CPU  0x40 /* Prefer to move the wakee to the current CPU. */
++
++#ifdef CONFIG_SMP
++static_assert(WF_EXEC == SD_BALANCE_EXEC);
++static_assert(WF_FORK == SD_BALANCE_FORK);
++static_assert(WF_TTWU == SD_BALANCE_WAKE);
++#endif
++
++#define SCHED_QUEUE_BITS	(SCHED_LEVELS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_LEVELS];
++};
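++
++/*
++ * Sketch (illustrative): this is a classic O(1) priority array, so a
++ * pick is one bit scan plus a list-head lookup, roughly
++ *
++ *	int prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++ *	struct list_head *head = &rq->queue.heads[prio];
++ *
++ * where an empty bitmap yields prio == SCHED_QUEUE_BITS, i.e. the
++ * IDLE_TASK_SCHED_PRIO slot holding the idle task.
++ */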
++
++struct rq;
++struct cpuidle_state;
++
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++struct balance_arg {
++	struct task_struct	*task;
++	int			active;
++	cpumask_t		*cpumask;
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t			lock;
++
++	struct task_struct __rcu	*curr;
++	struct task_struct		*idle;
++	struct task_struct		*stop;
++	struct mm_struct		*prev_mm;
++
++	struct sched_queue		queue		____cacheline_aligned;
++
++	int				prio;
++#ifdef CONFIG_SCHED_PDS
++	int				prio_idx;
++	u64				time_edge;
++#endif
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	struct balance_arg	sg_balance_arg		____cacheline_aligned;
++#endif
++	struct cpu_stop_work	active_balance_work;
++
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	/* Ensure that all clocks are in the same cache line */
++	u64			clock ____cacheline_aligned;
++	u64			clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an RCU lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++
++	/* Scratch cpumask to be temporarily used under rq_lock */
++	cpumask_var_t		scratch_mask;
++};
++
++extern unsigned int sysctl_sched_base_slice;
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU_ALIGNED(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
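++
++/*
++ * Usage sketch (illustrative): best_mask_cpu(0, allowed) scans cpu0's
++ * level masks nearest-first (SMT siblings, then LLC/coregroup, then
++ * everything else), so an allowed mask containing cpu0's SMT sibling
++ * resolves to that sibling before any CPU in a remote core.
++ */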
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock, so keep the check as a
++	 * comment only:
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock, so keep the check as a
++	 * comment only:
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs used in other kernel code.
++ * They use a dummy rq_flags.
++ * TODO: BMQ needs to support these APIs for compatibility with the
++ * mainline scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++extern void resched_cpu(int cpu);
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
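++
++/*
++ * Note that on 64-bit kernels u64 loads cannot tear, so the
++ * __u64_stats_fetch_begin()/_retry() pair compiles away and the loop
++ * body runs exactly once; the retry only matters on 32-bit.
++ */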
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++#ifdef CONFIG_SMP
++unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
++				 unsigned long min,
++				 unsigned long max);
++#endif /* CONFIG_SMP */
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching is
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
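++/*
++ * Stubs for facilities the alternative schedulers do not implement:
++ * NOHZ idle balancing is a no-op and util clamping passes the incoming
++ * utilization value through unchanged.
++ */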
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#ifdef CONFIG_SCHED_MM_CID
++
++#define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
++#define MM_CID_SCAN_DELAY	100			/* 100ms */
++
++extern raw_spinlock_t cid_lock;
++extern int use_cid_lock;
++
++extern void sched_mm_cid_migrate_from(struct task_struct *t);
++extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu);
++extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
++extern void init_sched_mm_cid(struct task_struct *t);
++
++static inline void __mm_cid_put(struct mm_struct *mm, int cid)
++{
++	if (cid < 0)
++		return;
++	cpumask_clear_cpu(cid, mm_cidmask(mm));
++}
++
++/*
++ * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
++ * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
++ * be held to transition to other states.
++ *
++ * State transitions synchronized with cmpxchg or try_cmpxchg need to be
++ * consistent across cpus, which prevents use of this_cpu_cmpxchg.
++ */
++static inline void mm_cid_put_lazy(struct task_struct *t)
++{
++	struct mm_struct *mm = t->mm;
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (!mm_cid_is_lazy_put(cid) ||
++	    !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	int cid, res;
++
++	lockdep_assert_irqs_disabled();
++	cid = __this_cpu_read(pcpu_cid->cid);
++	for (;;) {
++		if (mm_cid_is_unset(cid))
++			return MM_CID_UNSET;
++		/*
++		 * Attempt transition from valid or lazy-put to unset.
++		 */
++		res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
++		if (res == cid)
++			break;
++		cid = res;
++	}
++	return cid;
++}
++
++static inline void mm_cid_put(struct mm_struct *mm)
++{
++	int cid;
++
++	lockdep_assert_irqs_disabled();
++	cid = mm_cid_pcpu_unset(mm);
++	if (cid == MM_CID_UNSET)
++		return;
++	__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++}
++
++static inline int __mm_cid_try_get(struct mm_struct *mm)
++{
++	struct cpumask *cpumask;
++	int cid;
++
++	cpumask = mm_cidmask(mm);
++	/*
++	 * Retry finding first zero bit if the mask is temporarily
++	 * filled. This only happens during concurrent remote-clear
++	 * which owns a cid without holding a rq lock.
++	 */
++	for (;;) {
++		cid = cpumask_first_zero(cpumask);
++		if (cid < nr_cpu_ids)
++			break;
++		cpu_relax();
++	}
++	if (cpumask_test_and_set_cpu(cid, cpumask))
++		return -1;
++	return cid;
++}
++
++/*
++ * Save a snapshot of the current runqueue time of this cpu with the
++ * per-cpu cid value, so we can estimate how recently the cid was used.
++ */
++static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
++
++	lockdep_assert_rq_held(rq);
++	WRITE_ONCE(pcpu_cid->time, rq->clock);
++}
++
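++/*
++ * Two-phase cid allocation: try the lock-free fast path first; on
++ * contention, set use_cid_lock so every subsequent allocation takes
++ * cid_lock, which guarantees the retry loop below eventually succeeds.
++ */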
++static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	int cid;
++
++	/*
++	 * All allocations (even those using the cid_lock) are lock-free. If
++	 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
++	 * guarantee forward progress.
++	 */
++	if (!READ_ONCE(use_cid_lock)) {
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto end;
++		raw_spin_lock(&cid_lock);
++	} else {
++		raw_spin_lock(&cid_lock);
++		cid = __mm_cid_try_get(mm);
++		if (cid >= 0)
++			goto unlock;
++	}
++
++	/*
++	 * The cid was concurrently allocated. Retry while forcing subsequent
++	 * allocations to use the cid_lock to ensure forward progress.
++	 */
++	WRITE_ONCE(use_cid_lock, 1);
++	/*
++	 * Set use_cid_lock before allocation. Only care about program order
++	 * because this is only required for forward progress.
++	 */
++	barrier();
++	/*
++	 * Retry until it succeeds. It is guaranteed to eventually succeed once
++	 * all newly arriving allocations observe the use_cid_lock flag set.
++	 */
++	do {
++		cid = __mm_cid_try_get(mm);
++		cpu_relax();
++	} while (cid < 0);
++	/*
++	 * Allocate before clearing use_cid_lock. Only care about
++	 * program order because this is for forward progress.
++	 */
++	barrier();
++	WRITE_ONCE(use_cid_lock, 0);
++unlock:
++	raw_spin_unlock(&cid_lock);
++end:
++	mm_cid_snapshot_time(rq, mm);
++	return cid;
++}
++
++static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm)
++{
++	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
++	struct cpumask *cpumask;
++	int cid;
++
++	lockdep_assert_rq_held(rq);
++	cpumask = mm_cidmask(mm);
++	cid = __this_cpu_read(pcpu_cid->cid);
++	if (mm_cid_is_valid(cid)) {
++		mm_cid_snapshot_time(rq, mm);
++		return cid;
++	}
++	if (mm_cid_is_lazy_put(cid)) {
++		if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
++			__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
++	}
++	cid = __mm_cid_get(rq, mm);
++	__this_cpu_write(pcpu_cid->cid, cid);
++	return cid;
++}
++
++static inline void switch_mm_cid(struct rq *rq,
++				 struct task_struct *prev,
++				 struct task_struct *next)
++{
++	/*
++	 * Provide a memory barrier between rq->curr store and load of
++	 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
++	 *
++	 * Should be adapted if context_switch() is modified.
++	 */
++	if (!next->mm) {                                // to kernel
++		/*
++		 * user -> kernel transition does not guarantee a barrier, but
++		 * we can use the fact that it performs an atomic operation in
++		 * mmgrab().
++		 */
++		if (prev->mm)                           // from user
++			smp_mb__after_mmgrab();
++		/*
++		 * kernel -> kernel transition does not change rq->curr->mm
++		 * state. It stays NULL.
++		 */
++	} else {                                        // to user
++		/*
++		 * kernel -> user transition does not provide a barrier
++		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
++		 * Provide it here.
++		 */
++		if (!prev->mm)                          // from kernel
++			smp_mb();
++		/*
++		 * user -> user transition guarantees a memory barrier through
++		 * switch_mm() when current->mm changes. If current->mm is
++		 * unchanged, no barrier is needed.
++		 */
++	}
++	if (prev->mm_cid_active) {
++		mm_cid_snapshot_time(rq, prev->mm);
++		mm_cid_put_lazy(prev);
++		prev->mm_cid = -1;
++	}
++	if (next->mm_cid_active)
++		next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);
++}
++
++#else
++static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
++static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
++static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t, int src_cpu) { }
++static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
++static inline void init_sched_mm_cid(struct task_struct *t) { }
++#endif
++
++#ifdef CONFIG_SMP
++extern struct balance_callback balance_push_callback;
++
++static inline void
++__queue_balance_callback(struct rq *rq,
++			 struct balance_callback *head)
++{
++	lockdep_assert_rq_held(rq);
++
++	/*
++	 * Don't (re)queue an already queued item; nor queue anything when
++	 * balance_push() is active, see the comment with
++	 * balance_push_callback.
++	 */
++	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
++		return;
++
++	head->next = rq->balance_callback;
++	rq->balance_callback = head;
++}
++#endif /* CONFIG_SMP */
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..bdc34ad3cc8c
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,98 @@
++#define ALT_SCHED_NAME "BMQ"
++
++/*
++ * BMQ only routines
++ */
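++
++/*
++ * A lower boost_prio means a higher effective priority (it is added to
++ * p->prio when computing the queue level). boost_task() lowers it by n,
++ * bounded at -MAX_PRIORITY_ADJ for SCHED_NORMAL and at 0 for SCHED_BATCH;
++ * deboost_task() decays it back by one step per call.
++ */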
++static inline void boost_task(struct task_struct *p, int n)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	p->boost_prio = max(limit, p->boost_prio - n);
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++/* This API is used in task_prio(); the return value is read by human users */
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MIN_NORMAL_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
++		MIN_SCHED_NORMAL_PRIO + (p->prio + p->boost_prio - MIN_NORMAL_PRIO) / 2;
++}
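++
++/*
++ * Illustration of the mapping above: an RT task (prio < MIN_NORMAL_PRIO)
++ * maps four priority levels onto one queue level via prio >> 2, while a
++ * SCHED_NORMAL task with p->prio == MIN_NORMAL_PRIO and boost_prio == 0
++ * lands exactly on MIN_SCHED_NORMAL_PRIO; each +2 of (prio + boost_prio)
++ * moves it one level further down.
++ */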
++
++#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)	\
++	prio = task_sched_prio(p);		\
++	idx = prio;
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline int sched_rq_prio_idx(struct rq *rq)
++{
++	return rq->prio;
++}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO);
++}
++
++static inline void sched_update_rq_clock(struct rq *rq) {}
++
++static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
++{
++	deboost_task(p);
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++static void sched_task_fork(struct task_struct *p, struct rq *rq) {}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
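++/* Boost a waking task one level per ~4ms (2^22 ns) it has slept. */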
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	s64 delta = this_rq()->clock_task - p->last_ran;
++
++	if (likely(delta > 0))
++		boost_task(p, delta >> 22);
++}
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	boost_task(p, 1);
++}
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab3773f..71a25540d65e 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
+-#include "deadline.c"
+ 
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 80a3df49ab47..bc17d5a6fc41 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -84,7 +84,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index eece6244f9d2..3075127f9e95 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -197,12 +197,17 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
+ 
+ static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu);
+ 
+ 	util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
+ 	util = max(util, boost);
+ 	sg_cpu->bw_min = min;
+ 	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
++#else /* CONFIG_SCHED_ALT */
++	sg_cpu->bw_min = 0;
++	sg_cpu->util = rq_load_util(cpu_rq(sg_cpu->cpu), arch_scale_cpu_capacity(sg_cpu->cpu));
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -343,8 +348,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -676,6 +683,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index af7952f12e6c..6461cbbb734d 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -126,7 +126,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -150,7 +150,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -288,7 +288,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -298,7 +298,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -630,7 +630,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 8d5d98a5834d..15b9a02717ae 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /sys/kernel/debug/sched/debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ #ifdef CONFIG_SMP
+@@ -332,6 +335,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
+@@ -341,14 +345,17 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
+ 	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
+ 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
+ 
+@@ -373,11 +380,13 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1110,6 +1119,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 6135fbe83d68..57708dc54827 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -430,6 +430,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -551,3 +552,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..f9de95e0ec0e
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,134 @@
++#define ALT_SCHED_NAME "PDS"
++
++static const u64 RT_MASK = ((1ULL << MIN_SCHED_NORMAL_PRIO) - 1);
++
++#define SCHED_NORMAL_PRIO_NUM	(32)
++#define SCHED_EDGE_DELTA	(SCHED_NORMAL_PRIO_NUM - NICE_WIDTH / 2)
++
++/* PDS assumes SCHED_NORMAL_PRIO_NUM is a power of 2 */
++#define SCHED_NORMAL_PRIO_MOD(x)	((x) & (SCHED_NORMAL_PRIO_NUM - 1))
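++/* e.g. with SCHED_NORMAL_PRIO_NUM == 32, SCHED_NORMAL_PRIO_MOD(35) == 3 */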
++
++/* default time slice 4ms -> shift 22, 2 time slice slots -> shift 23 */
++static __read_mostly int sched_timeslice_shift = 23;
++
++/*
++ * Common interfaces
++ */
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	u64 sched_dl = max(p->deadline, rq->time_edge);
++
++#ifdef ALT_SCHED_DEBUG
++	if (WARN_ONCE(sched_dl - rq->time_edge > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", sched_dl - rq->time_edge))
++		return SCHED_NORMAL_PRIO_NUM - 1;
++#endif
++
++	return sched_dl - rq->time_edge;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MIN_NORMAL_PRIO) ? (p->prio >> 2) :
++		MIN_SCHED_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++#define TASK_SCHED_PRIO_IDX(p, rq, idx, prio)							\
++	if (p->prio < MIN_NORMAL_PRIO) {							\
++		prio = p->prio >> 2;								\
++		idx = prio;									\
++	} else {										\
++		u64 sched_dl = max(p->deadline, rq->time_edge);					\
++		prio = MIN_SCHED_NORMAL_PRIO + sched_dl - rq->time_edge;			\
++		idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_dl);			\
++	}
++
++static inline int sched_prio2idx(int sched_prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == sched_prio || sched_prio < MIN_SCHED_NORMAL_PRIO) ?
++		sched_prio :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_prio + rq->time_edge);
++}
++
++static inline int sched_idx2prio(int sched_idx, struct rq *rq)
++{
++	return (sched_idx < MIN_SCHED_NORMAL_PRIO) ?
++		sched_idx :
++		MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(sched_idx - rq->time_edge);
++}
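++
++/*
++ * sched_prio2idx() and sched_idx2prio() are inverses over the normal
++ * range: both rotate by rq->time_edge modulo SCHED_NORMAL_PRIO_NUM in
++ * opposite directions (this relies on MIN_SCHED_NORMAL_PRIO being a
++ * multiple of SCHED_NORMAL_PRIO_NUM).
++ */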
++
++static inline int sched_rq_prio_idx(struct rq *rq)
++{
++	return rq->prio_idx;
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
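++/*
++ * Advance rq->time_edge to the current time-slice slot: queues that the
++ * edge has moved past are spliced, in order, onto the now-current queue,
++ * and the bitmap is rotated to match while the RT levels below
++ * MIN_SCHED_NORMAL_PRIO are preserved via RT_MASK.
++ */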
++static inline void sched_update_rq_clock(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++	DECLARE_BITMAP(normal, SCHED_QUEUE_BITS);
++
++	if (now == old)
++		return;
++
++	rq->time_edge = now;
++	delta = min_t(u64, SCHED_NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	prio = MIN_SCHED_NORMAL_PRIO;
++	for_each_set_bit_from(prio, rq->queue.bitmap, MIN_SCHED_NORMAL_PRIO + delta)
++		list_splice_tail_init(rq->queue.heads + MIN_SCHED_NORMAL_PRIO +
++				      SCHED_NORMAL_PRIO_MOD(prio + old), &head);
++
++	bitmap_shift_right(normal, rq->queue.bitmap, delta, SCHED_QUEUE_BITS);
++	if (!list_empty(&head)) {
++		u64 idx = MIN_SCHED_NORMAL_PRIO + SCHED_NORMAL_PRIO_MOD(now);
++
++		__list_splice(&head, rq->queue.heads + idx, rq->queue.heads[idx].next);
++		set_bit(MIN_SCHED_NORMAL_PRIO, normal);
++	}
++	bitmap_replace(rq->queue.bitmap, normal, rq->queue.bitmap,
++		       (const unsigned long *)&RT_MASK, SCHED_QUEUE_BITS);
++
++	if (rq->prio < MIN_SCHED_NORMAL_PRIO || IDLE_TASK_SCHED_PRIO == rq->prio)
++		return;
++
++	rq->prio = max_t(u64, MIN_SCHED_NORMAL_PRIO, rq->prio - delta);
++	rq->prio_idx = sched_prio2idx(rq->prio, rq);
++}
++
++static inline void sched_task_renew(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MIN_NORMAL_PRIO)
++		p->deadline = rq->time_edge + SCHED_EDGE_DELTA +
++			      (p->static_prio - (MAX_PRIO - NICE_WIDTH)) / 2;
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + SCHED_EDGE_DELTA + NICE_WIDTH / 2 - 1;
++
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_task_renew(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sysctl_sched_base_slice;
++	sched_task_renew(p, rq);
++}
++
++static inline void sched_task_ttwu(struct task_struct *p) {}
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 63b6cf898220..9ca10ece4d3a 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 9e1083465fbc..d35f050e8296 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index ae50f212775e..02c91585f82d 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3481,4 +3485,9 @@ static inline void init_sched_mm_cid(struct task_struct *t) { }
+ extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
+ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cb..5486c63e4790 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 38f3698f5e5b..b9d597394316 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 99ea5986038c..f42fecd40766 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include <linux/bsearch.h>
+ 
+ DEFINE_MUTEX(sched_domains_mutex);
+@@ -1445,8 +1446,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1681,6 +1684,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1717,6 +1721,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2794,3 +2799,28 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
++
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++
++int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
++{
++	return cpumask_nth(cpu, cpus);
++}
++
++const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
++{
++	return ERR_PTR(-EOPNOTSUPP);
++}
++EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 81cc974913bb..dfc2ea508dd4 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -92,6 +92,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1912,6 +1916,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 70625dff62ce..81193fdd096f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2095,8 +2095,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+-	if (rt_task(current))
++	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index e9c6f9d0e42c..43ee0a94abdd 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -867,6 +867,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -874,6 +875,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -901,8 +903,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -916,7 +920,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1152,8 +1156,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index e9c5058a8efd..8c23dc364046 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1155,10 +1155,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d2dbe099286b..5c2b99ab08c1 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1256,6 +1256,7 @@ static bool kick_pool(struct worker_pool *pool)
+ 
+ 	p = worker->task;
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 	/*
+ 	 * Idle @worker is about to execute @work and waking up provides an
+@@ -1285,6 +1286,8 @@ static bool kick_pool(struct worker_pool *pool)
+ 		}
+ 	}
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
++
+ 	wake_up_process(p);
+ 	return true;
+ }
+@@ -1413,7 +1416,11 @@ void wq_worker_running(struct task_struct *task)
+ 	 * CPU intensive auto-detection cares about how long a work item hogged
+ 	 * CPU without sleeping. Reset the starting timestamp on wakeup.
+ 	 */
++#ifdef CONFIG_SCHED_ALT
++	worker->current_at = worker->task->sched_time;
++#else
+ 	worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
+ 
+ 	WRITE_ONCE(worker->sleeping, 0);
+ }
+@@ -1498,7 +1505,11 @@ void wq_worker_tick(struct task_struct *task)
+ 	 * We probably want to make this prettier in the future.
+ 	 */
+ 	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
++#ifdef CONFIG_SCHED_ALT
++	    worker->task->sched_time - worker->current_at <
++#else
+ 	    worker->task->se.sum_exec_runtime - worker->current_at <
++#endif
+ 	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
+ 		return;
+ 
+@@ -3195,7 +3206,11 @@ __acquires(&pool->lock)
+ 	worker->current_func = work->func;
+ 	worker->current_pwq = pwq;
+ 	if (worker->task)
++#ifdef CONFIG_SCHED_ALT
++		worker->current_at = worker->task->sched_time;
++#else
+ 		worker->current_at = worker->task->se.sum_exec_runtime;
++#endif
+ 	work_data = *work_data_bits(work);
+ 	worker->current_color = get_work_color(work_data);
+ 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
new file mode 100644
index 00000000..6dc48eec
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2023-02-13 08:16:09.534315265 -0500
++++ b/init/Kconfig	2023-02-13 08:17:24.130237204 -0500
+@@ -867,8 +867,9 @@ config UCLAMP_BUCKETS_COUNT
+ 	  If in doubt, use the default value.
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enable alternative CPU scheduler"
+ 


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-05 10:48 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-05 10:48 UTC (permalink / raw
  To: gentoo-commits

commit:     5b9c7d2bab733a9f743196f1268796a1de83d7e4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul  5 10:48:23 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul  5 10:48:23 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5b9c7d2b

Linux patch 6.9.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-6.9.8.patch | 8262 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8266 insertions(+)

diff --git a/0000_README b/0000_README
index 3b52f37f..8008738f 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-6.9.7.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.7
 
+Patch:  1007_linux-6.9.8.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.8
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1007_linux-6.9.8.patch b/1007_linux-6.9.8.patch
new file mode 100644
index 00000000..3e9734e4
--- /dev/null
+++ b/1007_linux-6.9.8.patch
@@ -0,0 +1,8262 @@
+diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
+index a1f3eb7a43e23..131863142cbb3 100644
+--- a/Documentation/kbuild/modules.rst
++++ b/Documentation/kbuild/modules.rst
+@@ -128,7 +128,7 @@ executed to make module versioning work.
+ 
+ 	modules_install
+ 		Install the external module(s). The default location is
+-		/lib/modules/<kernel_release>/extra/, but a prefix may
++		/lib/modules/<kernel_release>/updates/, but a prefix may
+ 		be added with INSTALL_MOD_PATH (discussed in section 5).
+ 
+ 	clean
+@@ -417,7 +417,7 @@ directory:
+ 
+ And external modules are installed in:
+ 
+-	/lib/modules/$(KERNELRELEASE)/extra/
++	/lib/modules/$(KERNELRELEASE)/updates/
+ 
+ 5.1 INSTALL_MOD_PATH
+ --------------------
+@@ -438,10 +438,10 @@ And external modules are installed in:
+ -------------------
+ 
+ 	External modules are by default installed to a directory under
+-	/lib/modules/$(KERNELRELEASE)/extra/, but you may wish to
++	/lib/modules/$(KERNELRELEASE)/updates/, but you may wish to
+ 	locate modules for a specific functionality in a separate
+ 	directory. For this purpose, use INSTALL_MOD_DIR to specify an
+-	alternative name to "extra."::
++	alternative name to "updates."::
+ 
+ 		$ make INSTALL_MOD_DIR=gandalf -C $KDIR \
+ 		       M=$PWD modules_install
+diff --git a/Makefile b/Makefile
+index 17dc3e55323e7..060e20dba35e5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/rockchip/rk3066a.dtsi b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
+index 30139f21de64d..15cbd94d7ec05 100644
+--- a/arch/arm/boot/dts/rockchip/rk3066a.dtsi
++++ b/arch/arm/boot/dts/rockchip/rk3066a.dtsi
+@@ -128,6 +128,7 @@ hdmi: hdmi@10116000 {
+ 		pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+ 		power-domains = <&power RK3066_PD_VIO>;
+ 		rockchip,grf = <&grf>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 
+ 		ports {
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 72b5cd697f5d9..deeb8f292454b 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -2252,28 +2252,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	/* If building the body of the JITed code fails somehow,
+ 	 * we fall back to the interpretation.
+ 	 */
+-	if (build_body(&ctx) < 0) {
+-		image_ptr = NULL;
+-		bpf_jit_binary_free(header);
+-		prog = orig_prog;
+-		goto out_imms;
+-	}
++	if (build_body(&ctx) < 0)
++		goto out_free;
+ 	build_epilogue(&ctx);
+ 
+ 	/* 3.) Extra pass to validate JITed Code */
+-	if (validate_code(&ctx)) {
+-		image_ptr = NULL;
+-		bpf_jit_binary_free(header);
+-		prog = orig_prog;
+-		goto out_imms;
+-	}
++	if (validate_code(&ctx))
++		goto out_free;
+ 	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
+ 
+ 	if (bpf_jit_enable > 1)
+ 		/* there are 2 passes here */
+ 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+ 
+-	bpf_jit_binary_lock_ro(header);
++	if (bpf_jit_binary_lock_ro(header))
++		goto out_free;
+ 	prog->bpf_func = (void *)ctx.target;
+ 	prog->jited = 1;
+ 	prog->jited_len = image_size;
+@@ -2290,5 +2283,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ 					   tmp : orig_prog);
+ 	return prog;
++
++out_free:
++	image_ptr = NULL;
++	bpf_jit_binary_free(header);
++	prog = orig_prog;
++	goto out_imms;
+ }
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+index b47fe02c33fbd..079101cddd65f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
+@@ -5,6 +5,8 @@
+  */
+ 
+ /dts-v1/;
++
++#include <dt-bindings/leds/common.h>
+ #include "rk3308.dtsi"
+ 
+ / {
+@@ -24,17 +26,21 @@ chosen {
+ 	leds {
+ 		compatible = "gpio-leds";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&green_led_gio>, <&heartbeat_led_gpio>;
++		pinctrl-0 = <&green_led>, <&heartbeat_led>;
+ 
+ 		green-led {
++			color = <LED_COLOR_ID_GREEN>;
+ 			default-state = "on";
++			function = LED_FUNCTION_POWER;
+ 			gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ 			label = "rockpis:green:power";
+ 			linux,default-trigger = "default-on";
+ 		};
+ 
+ 		blue-led {
++			color = <LED_COLOR_ID_BLUE>;
+ 			default-state = "on";
++			function = LED_FUNCTION_HEARTBEAT;
+ 			gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ 			label = "rockpis:blue:user";
+ 			linux,default-trigger = "heartbeat";
+@@ -126,10 +132,12 @@ &cpu0 {
+ };
+ 
+ &emmc {
+-	bus-width = <4>;
+ 	cap-mmc-highspeed;
+-	mmc-hs200-1_8v;
++	cap-sd-highspeed;
++	no-sdio;
+ 	non-removable;
++	pinctrl-names = "default";
++	pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ 	vmmc-supply = <&vcc_io>;
+ 	status = "okay";
+ };
+@@ -214,11 +222,11 @@ &pinctrl {
+ 	pinctrl-0 = <&rtc_32k>;
+ 
+ 	leds {
+-		green_led_gio: green-led-gpio {
++		green_led: green-led {
+ 			rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 
+-		heartbeat_led_gpio: heartbeat-led-gpio {
++		heartbeat_led: heartbeat-led {
+ 			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index f09d60bbe6c4f..a608a219543e5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -241,8 +241,8 @@ &i2c1 {
+ 	rk805: pmic@18 {
+ 		compatible = "rockchip,rk805";
+ 		reg = <0x18>;
+-		interrupt-parent = <&gpio2>;
+-		interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++		interrupt-parent = <&gpio0>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		#clock-cells = <1>;
+ 		clock-output-names = "xin32k", "rk805-clkout2";
+ 		gpio-controller;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+index 62af0cb94839b..5ce82b64b6836 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -793,6 +793,7 @@ spdif: spdif@ff880000 {
+ 		dma-names = "tx";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&spdif_tx>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -804,6 +805,7 @@ i2s_2ch: i2s-2ch@ff890000 {
+ 		clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
+ 		dmas = <&dmac_bus 6>, <&dmac_bus 7>;
+ 		dma-names = "tx", "rx";
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -817,6 +819,7 @@ i2s_8ch: i2s-8ch@ff898000 {
+ 		dma-names = "tx", "rx";
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&i2s_8ch_bus>;
++		#sound-dai-cells = <0>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+index 789fd0dcc88ba..3cd63d1e8f15b 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+@@ -450,7 +450,7 @@ da7219_aad {
+ 			dlg,btn-cfg = <50>;
+ 			dlg,mic-det-thr = <500>;
+ 			dlg,jack-ins-deb = <20>;
+-			dlg,jack-det-rate = "32ms_64ms";
++			dlg,jack-det-rate = "32_64";
+ 			dlg,jack-rem-deb = <1>;
+ 
+ 			dlg,a-d-btn-thr = <0xa>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+index 1a604429fb266..e74871491ef56 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+@@ -444,6 +444,7 @@ &sdhci {
+ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	max-frequency = <150000000>;
+ 	no-sdio;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+index 22bbfbe729c11..b6628889b707e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+@@ -429,6 +429,7 @@ &sdhci {
+ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	max-frequency = <150000000>;
+ 	no-sdio;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+index 1fe8b2a0ed75e..9b7bf6cec8bd1 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+@@ -378,6 +378,7 @@ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	sd-uhs-sdr104;
+ 	vmmc-supply = <&vcc_3v3_s3>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+index 1eb2543a5fde6..64ff1c90afe2c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+@@ -324,6 +324,11 @@ module_led_pin: module-led-pin {
+ 	};
+ };
+ 
++&pwm0 {
++	pinctrl-0 = <&pwm0m1_pins>;
++	pinctrl-names = "default";
++};
++
+ &saradc {
+ 	vref-supply = <&vcc_1v8_s0>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+index e037bf9db75af..adba82c6e2e1e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+@@ -283,9 +283,9 @@ &i2c7 {
+ 	pinctrl-0 = <&i2c7m0_xfer>;
+ 	status = "okay";
+ 
+-	es8316: audio-codec@11 {
++	es8316: audio-codec@10 {
+ 		compatible = "everest,es8316";
+-		reg = <0x11>;
++		reg = <0x10>;
+ 		assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+ 		assigned-clock-rates = <12288000>;
+ 		clocks = <&cru I2S0_8CH_MCLKOUT>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+index 00afb90d4eb10..ef776aec9d4b5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+@@ -366,6 +366,7 @@ &sdmmc {
+ 	bus-width = <4>;
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
++	cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ 	disable-wp;
+ 	max-frequency = <150000000>;
+ 	no-sdio;
+@@ -393,6 +394,7 @@ pmic@0 {
+ 		pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ 			    <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ 		spi-max-frequency = <1000000>;
++		system-power-controller;
+ 
+ 		vcc1-supply = <&vcc5v0_sys>;
+ 		vcc2-supply = <&vcc5v0_sys>;
+diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
+index 7118282d1c797..9ee1ffded2638 100644
+--- a/arch/arm64/include/asm/unistd32.h
++++ b/arch/arm64/include/asm/unistd32.h
+@@ -840,7 +840,7 @@ __SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SYSCALL(__NR_io_pgetevents_time64, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
+index 5fa08e13e17e5..f374a3e5a5fe1 100644
+--- a/arch/arm64/kernel/pi/map_kernel.c
++++ b/arch/arm64/kernel/pi/map_kernel.c
+@@ -173,7 +173,7 @@ static void __init remap_idmap_for_lpa2(void)
+ 	 * Don't bother with the FDT, we no longer need it after this.
+ 	 */
+ 	memset(init_idmap_pg_dir, 0,
+-	       (u64)init_idmap_pg_dir - (u64)init_idmap_pg_end);
++	       (u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
+ 
+ 	create_init_idmap(init_idmap_pg_dir, mask);
+ 	dsb(ishst);
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index ad198262b9817..7230f6e20ab8b 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -53,17 +53,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ 	syscall_set_return_value(current, regs, 0, ret);
+ 
+ 	/*
+-	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+-	 * but not enough for arm64 stack utilization comfort. To keep
+-	 * reasonable stack head room, reduce the maximum offset to 9 bits.
++	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++	 * bits. The actual entropy will be further reduced by the compiler
++	 * when applying stack alignment constraints: the AAPCS mandates a
++	 * 16-byte aligned SP at function boundaries, which will remove the
++	 * 4 low bits from any entropy chosen here.
+ 	 *
+-	 * The actual entropy will be further reduced by the compiler when
+-	 * applying stack alignment constraints: the AAPCS mandates a
+-	 * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+-	 *
+-	 * The resulting 5 bits of entropy is seen in SP[8:4].
++	 * The resulting 6 bits of entropy is seen in SP[9:4].
+ 	 */
+-	choose_random_kstack_offset(get_random_u16() & 0x1FF);
++	choose_random_kstack_offset(get_random_u16());
+ }
+ 
+ static inline bool has_syscall_work(unsigned long flags)
+diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
+index 7ff6a2466af10..e0594b6370a65 100644
+--- a/arch/csky/include/uapi/asm/unistd.h
++++ b/arch/csky/include/uapi/asm/unistd.h
+@@ -6,6 +6,7 @@
+ #define __ARCH_WANT_SYS_CLONE3
+ #define __ARCH_WANT_SET_GET_RLIMIT
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+ #include <asm-generic/unistd.h>
+ 
+ #define __NR_set_thread_area	(__NR_arch_specific_syscall + 0)
+diff --git a/arch/hexagon/include/asm/syscalls.h b/arch/hexagon/include/asm/syscalls.h
+new file mode 100644
+index 0000000000000..40f2d08bec92c
+--- /dev/null
++++ b/arch/hexagon/include/asm/syscalls.h
+@@ -0,0 +1,6 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#include <asm-generic/syscalls.h>
++
++asmlinkage long sys_hexagon_fadvise64_64(int fd, int advice,
++	                                  u32 a2, u32 a3, u32 a4, u32 a5);
+diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
+index 432c4db1b6239..21ae22306b5dc 100644
+--- a/arch/hexagon/include/uapi/asm/unistd.h
++++ b/arch/hexagon/include/uapi/asm/unistd.h
+@@ -36,5 +36,6 @@
+ #define __ARCH_WANT_SYS_VFORK
+ #define __ARCH_WANT_SYS_FORK
+ #define __ARCH_WANT_TIME32_SYSCALLS
++#define __ARCH_WANT_SYNC_FILE_RANGE2
+ 
+ #include <asm-generic/unistd.h>
+diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
+index 0fadd582cfc77..5d98bdc494ec2 100644
+--- a/arch/hexagon/kernel/syscalltab.c
++++ b/arch/hexagon/kernel/syscalltab.c
+@@ -14,6 +14,13 @@
+ #undef __SYSCALL
+ #define __SYSCALL(nr, call) [nr] = (call),
+ 
++SYSCALL_DEFINE6(hexagon_fadvise64_64, int, fd, int, advice,
++		SC_ARG64(offset), SC_ARG64(len))
++{
++	return ksys_fadvise64_64(fd, SC_VAL64(loff_t, offset), SC_VAL64(loff_t, len), advice);
++}
++#define sys_fadvise64_64 sys_hexagon_fadvise64_64
++
+ void *sys_call_table[__NR_syscalls] = {
+ #include <asm/unistd.h>
+ };
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index e73323d759d0b..7dbefd4ba2107 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1294,16 +1294,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));
+ 
+ 	if (!prog->is_func || extra_pass) {
++		int err;
++
+ 		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
+ 			pr_err_once("multi-func JIT bug %d != %d\n",
+ 				    ctx.idx, jit_data->ctx.idx);
+-			bpf_jit_binary_free(header);
+-			prog->bpf_func = NULL;
+-			prog->jited = 0;
+-			prog->jited_len = 0;
+-			goto out_offset;
++			goto out_free;
++		}
++		err = bpf_jit_binary_lock_ro(header);
++		if (err) {
++			pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
++				    err);
++			goto out_free;
+ 		}
+-		bpf_jit_binary_lock_ro(header);
+ 	} else {
+ 		jit_data->ctx = ctx;
+ 		jit_data->image = image_ptr;
+@@ -1334,6 +1337,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	out_offset = -1;
+ 
+ 	return prog;
++
++out_free:
++	bpf_jit_binary_free(header);
++	prog->bpf_func = NULL;
++	prog->jited = 0;
++	prog->jited_len = 0;
++	goto out_offset;
+ }
+ 
+ /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
+diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
+index 83cfc9eb6b88e..c5e1cacd2ab6a 100644
+--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
+@@ -354,7 +354,7 @@
+ 412	n32	utimensat_time64		sys_utimensat
+ 413	n32	pselect6_time64			compat_sys_pselect6_time64
+ 414	n32	ppoll_time64			compat_sys_ppoll_time64
+-416	n32	io_pgetevents_time64		sys_io_pgetevents
++416	n32	io_pgetevents_time64		compat_sys_io_pgetevents_time64
+ 417	n32	recvmmsg_time64			compat_sys_recvmmsg_time64
+ 418	n32	mq_timedsend_time64		sys_mq_timedsend
+ 419	n32	mq_timedreceive_time64		sys_mq_timedreceive
+diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
+index f45c9530ea93a..0352c07c608e9 100644
+--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
+@@ -403,7 +403,7 @@
+ 412	o32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	o32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	o32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	o32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	o32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	o32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	o32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	o32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c
+index a40d926b65139..e355dfca44008 100644
+--- a/arch/mips/net/bpf_jit_comp.c
++++ b/arch/mips/net/bpf_jit_comp.c
+@@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
+ 
+ 	/* Set as read-only exec and flush instruction cache */
+-	bpf_jit_binary_lock_ro(header);
++	if (bpf_jit_binary_lock_ro(header))
++		goto out_err;
+ 	flush_icache_range((unsigned long)header,
+ 			   (unsigned long)&ctx.target[ctx.jit_index]);
+ 
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index daafeb20f9937..dc9b902de8ea9 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -16,6 +16,7 @@ config PARISC
+ 	select ARCH_HAS_UBSAN
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_NO_SG_CHAIN
++	select ARCH_SPLIT_ARG64 if !64BIT
+ 	select ARCH_SUPPORTS_HUGETLBFS if PA20
+ 	select ARCH_SUPPORTS_MEMORY_FAILURE
+ 	select ARCH_STACKWALK
+diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
+index 2a12a547b447b..826c8e51b5853 100644
+--- a/arch/parisc/kernel/sys_parisc32.c
++++ b/arch/parisc/kernel/sys_parisc32.c
+@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+     	current->comm, current->pid, r20);
+     return -ENOSYS;
+ }
+-
+-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+-	compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+-	const char  __user * pathname)
+-{
+-	return sys_fanotify_mark(fanotify_fd, flags,
+-			((__u64)mask1 << 32) | mask0,
+-			 dfd, pathname);
+-}
+diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
+index b236a84c4e127..0f631cb2cdea7 100644
+--- a/arch/parisc/kernel/syscalls/syscall.tbl
++++ b/arch/parisc/kernel/syscalls/syscall.tbl
+@@ -108,7 +108,7 @@
+ 95	common	fchown			sys_fchown
+ 96	common	getpriority		sys_getpriority
+ 97	common	setpriority		sys_setpriority
+-98	common	recv			sys_recv
++98	common	recv			sys_recv			compat_sys_recv
+ 99	common	statfs			sys_statfs			compat_sys_statfs
+ 100	common	fstatfs			sys_fstatfs			compat_sys_fstatfs
+ 101	common	stat64			sys_stat64
+@@ -135,7 +135,7 @@
+ 120	common	clone			sys_clone_wrapper
+ 121	common	setdomainname		sys_setdomainname
+ 122	common	sendfile		sys_sendfile			compat_sys_sendfile
+-123	common	recvfrom		sys_recvfrom
++123	common	recvfrom		sys_recvfrom			compat_sys_recvfrom
+ 124	32	adjtimex		sys_adjtimex_time32
+ 124	64	adjtimex		sys_adjtimex
+ 125	common	mprotect		sys_mprotect
+@@ -364,7 +364,7 @@
+ 320	common	accept4			sys_accept4
+ 321	common	prlimit64		sys_prlimit64
+ 322	common	fanotify_init		sys_fanotify_init
+-323	common	fanotify_mark		sys_fanotify_mark		sys32_fanotify_mark
++323	common	fanotify_mark		sys_fanotify_mark		compat_sys_fanotify_mark
+ 324	32	clock_adjtime		sys_clock_adjtime32
+ 324	64	clock_adjtime		sys_clock_adjtime
+ 325	common	name_to_handle_at	sys_name_to_handle_at
+diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
+index d6ee2fd455503..979f45d4d1fbe 100644
+--- a/arch/parisc/net/bpf_jit_core.c
++++ b/arch/parisc/net/bpf_jit_core.c
+@@ -167,7 +167,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+ 
+ 	if (!prog->is_func || extra_pass) {
+-		bpf_jit_binary_lock_ro(jit_data->header);
++		if (bpf_jit_binary_lock_ro(jit_data->header)) {
++			bpf_jit_binary_free(jit_data->header);
++			prog->bpf_func = NULL;
++			prog->jited = 0;
++			prog->jited_len = 0;
++			goto out_offset;
++		}
+ 		prologue_len = ctx->epilogue_offset - ctx->body_len;
+ 		for (i = 0; i < prog->len; i++)
+ 			ctx->offset[i] += prologue_len;
+diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
+index 17173b82ca21d..18c49fce49ed1 100644
+--- a/arch/powerpc/kernel/syscalls/syscall.tbl
++++ b/arch/powerpc/kernel/syscalls/syscall.tbl
+@@ -230,8 +230,10 @@
+ 178	nospu 	rt_sigsuspend			sys_rt_sigsuspend		compat_sys_rt_sigsuspend
+ 179	32	pread64				sys_ppc_pread64			compat_sys_ppc_pread64
+ 179	64	pread64				sys_pread64
++179	spu	pread64				sys_pread64
+ 180	32	pwrite64			sys_ppc_pwrite64		compat_sys_ppc_pwrite64
+ 180	64	pwrite64			sys_pwrite64
++180	spu	pwrite64			sys_pwrite64
+ 181	common	chown				sys_chown
+ 182	common	getcwd				sys_getcwd
+ 183	common	capget				sys_capget
+@@ -246,6 +248,7 @@
+ 190	common	ugetrlimit			sys_getrlimit			compat_sys_getrlimit
+ 191	32	readahead			sys_ppc_readahead		compat_sys_ppc_readahead
+ 191	64	readahead			sys_readahead
++191	spu	readahead			sys_readahead
+ 192	32	mmap2				sys_mmap2			compat_sys_mmap2
+ 193	32	truncate64			sys_ppc_truncate64		compat_sys_ppc_truncate64
+ 194	32	ftruncate64			sys_ppc_ftruncate64		compat_sys_ppc_ftruncate64
+@@ -293,6 +296,7 @@
+ 232	nospu	set_tid_address			sys_set_tid_address
+ 233	32	fadvise64			sys_ppc32_fadvise64		compat_sys_ppc32_fadvise64
+ 233	64	fadvise64			sys_fadvise64
++233	spu	fadvise64			sys_fadvise64
+ 234	nospu	exit_group			sys_exit_group
+ 235	nospu	lookup_dcookie			sys_ni_syscall
+ 236	common	epoll_create			sys_epoll_create
+@@ -502,7 +506,7 @@
+ 412	32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
+index 06e439eeef9ad..09fde95a5e8f7 100644
+--- a/arch/riscv/include/asm/insn.h
++++ b/arch/riscv/include/asm/insn.h
+@@ -145,7 +145,7 @@
+ 
+ /* parts of opcode for RVF, RVD and RVQ */
+ #define RVFDQ_FL_FS_WIDTH_OFF	12
+-#define RVFDQ_FL_FS_WIDTH_MASK	GENMASK(3, 0)
++#define RVFDQ_FL_FS_WIDTH_MASK	GENMASK(2, 0)
+ #define RVFDQ_FL_FS_WIDTH_W	2
+ #define RVFDQ_FL_FS_WIDTH_D	3
+ #define RVFDQ_LS_FS_WIDTH_Q	4
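
The GENMASK fix above matters because the FL/FS width field is a 3-bit funct3 field; a 4-bit mask would fold the neighbouring instruction bit into the decoded width. A standalone check with a synthetic encoding (the instruction word here is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's GENMASK() on 32-bit values */
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	/* synthetic word: width field (bits 14:12) = 2, stray bit 15 set */
	uint32_t insn = (1u << 15) | (2u << 12);
	uint32_t w3 = (insn >> 12) & GENMASK(2, 0); /* 3-bit field: 2, correct */
	uint32_t w4 = (insn >> 12) & GENMASK(3, 0); /* 4-bit mask: 10, wrong */

	printf("3-bit mask: %u, 4-bit mask: %u\n", w3, w4);
	return 0;
}
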
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 528ec7cc9a622..0d3f00eb0baee 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -156,7 +156,7 @@ unsigned long __get_wchan(struct task_struct *task)
+ 	return pc;
+ }
+ 
+-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
++noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ 		     struct task_struct *task, struct pt_regs *regs)
+ {
+ 	walk_stackframe(task, regs, consume_entry, cookie);
+diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
+index 7f5004065e8aa..35555c9446308 100644
+--- a/arch/s390/include/asm/entry-common.h
++++ b/arch/s390/include/asm/entry-common.h
+@@ -54,7 +54,7 @@ static __always_inline void arch_exit_to_user_mode(void)
+ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ 						  unsigned long ti_work)
+ {
+-	choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
++	choose_random_kstack_offset(get_tod_clock_fast());
+ }
+ 
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
+index 095bb86339a7d..07a942cab8e50 100644
+--- a/arch/s390/kernel/syscalls/syscall.tbl
++++ b/arch/s390/kernel/syscalls/syscall.tbl
+@@ -418,7 +418,7 @@
+ 412	32	utimensat_time64	-				sys_utimensat
+ 413	32	pselect6_time64		-				compat_sys_pselect6_time64
+ 414	32	ppoll_time64		-				compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64	-				sys_io_pgetevents
++416	32	io_pgetevents_time64	-				compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64		-				compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64	-				sys_mq_timedsend
+ 419	32	mq_timedreceive_time64	-				sys_mq_timedreceive
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 1d168a98ae21b..4be8f5cadd026 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -2112,7 +2112,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ 		print_fn_code(jit.prg_buf, jit.size_prg);
+ 	}
+ 	if (!fp->is_func || extra_pass) {
+-		bpf_jit_binary_lock_ro(header);
++		if (bpf_jit_binary_lock_ro(header)) {
++			bpf_jit_binary_free(header);
++			fp = orig_fp;
++			goto free_addrs;
++		}
+ 	} else {
+ 		jit_data->header = header;
+ 		jit_data->ctx = jit;
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index ff8f24854c646..0ef83b6ac0db7 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -410,7 +410,7 @@ static void __init cpu_enable_directed_irq(void *unused)
+ 	union zpci_sic_iib iib = {{0}};
+ 	union zpci_sic_iib ziib = {{0}};
+ 
+-	iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
++	iib.cdiib.dibv_addr = virt_to_phys(zpci_ibv[smp_processor_id()]->vector);
+ 
+ 	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ 	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
+diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
+index 9dca568509a5e..d6f4afcb0e870 100644
+--- a/arch/sh/kernel/sys_sh32.c
++++ b/arch/sh/kernel/sys_sh32.c
+@@ -59,3 +59,14 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ 				 (u64)len0 << 32 | len1, advice);
+ #endif
+ }
++
++/*
++ * swap the arguments the way that libc wants them instead of
++ * moving flags ahead of the 64-bit nbytes argument
++ */
++SYSCALL_DEFINE6(sh_sync_file_range6, int, fd, SC_ARG64(offset),
++                SC_ARG64(nbytes), unsigned int, flags)
++{
++        return ksys_sync_file_range(fd, SC_VAL64(loff_t, offset),
++                                    SC_VAL64(loff_t, nbytes), flags);
++}
+diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
+index 86fe269f02203..0495a57c1e618 100644
+--- a/arch/sh/kernel/syscalls/syscall.tbl
++++ b/arch/sh/kernel/syscalls/syscall.tbl
+@@ -321,7 +321,7 @@
+ 311	common	set_robust_list			sys_set_robust_list
+ 312	common	get_robust_list			sys_get_robust_list
+ 313	common	splice				sys_splice
+-314	common	sync_file_range			sys_sync_file_range
++314	common	sync_file_range			sys_sh_sync_file_range6
+ 315	common	tee				sys_tee
+ 316	common	vmsplice			sys_vmsplice
+ 317	common	move_pages			sys_move_pages
+@@ -395,6 +395,7 @@
+ 385	common	pkey_alloc			sys_pkey_alloc
+ 386	common	pkey_free			sys_pkey_free
+ 387	common	rseq				sys_rseq
++388	common	sync_file_range2		sys_sync_file_range2
+ # room for arch specific syscalls
+ 393	common	semget				sys_semget
+ 394	common	semctl				sys_semctl
+diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
+index a45f0f31fe51a..a3d308f2043e5 100644
+--- a/arch/sparc/kernel/sys32.S
++++ b/arch/sparc/kernel/sys32.S
+@@ -18,224 +18,3 @@ sys32_mmap2:
+ 	sethi		%hi(sys_mmap), %g1
+ 	jmpl		%g1 + %lo(sys_mmap), %g0
+ 	 sllx		%o5, 12, %o5
+-
+-	.align		32
+-	.globl		sys32_socketcall
+-sys32_socketcall:	/* %o0=call, %o1=args */
+-	cmp		%o0, 1
+-	bl,pn		%xcc, do_einval
+-	 cmp		%o0, 18
+-	bg,pn		%xcc, do_einval
+-	 sub		%o0, 1, %o0
+-	sllx		%o0, 5, %o0
+-	sethi		%hi(__socketcall_table_begin), %g2
+-	or		%g2, %lo(__socketcall_table_begin), %g2
+-	jmpl		%g2 + %o0, %g0
+-	 nop
+-do_einval:
+-	retl
+-	 mov		-EINVAL, %o0
+-
+-	.align		32
+-__socketcall_table_begin:
+-
+-	/* Each entry is exactly 32 bytes. */
+-do_sys_socket: /* sys_socket(int, int, int) */
+-1:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_socket), %g1
+-2:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_socket), %g0
+-3:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
+-4:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_bind), %g1
+-5:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_bind), %g0
+-6:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
+-7:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_connect), %g1
+-8:	ldswa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_connect), %g0
+-9:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_listen: /* sys_listen(int, int) */
+-10:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_listen), %g1
+-	jmpl		%g1 + %lo(sys_listen), %g0
+-11:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-	nop
+-do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
+-12:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_accept), %g1
+-13:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_accept), %g0
+-14:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
+-15:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getsockname), %g1
+-16:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_getsockname), %g0
+-17:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
+-18:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getpeername), %g1
+-19:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(sys_getpeername), %g0
+-20:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
+-21:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_socketpair), %g1
+-22:	ldswa		[%o1 + 0x8] %asi, %o2
+-23:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_socketpair), %g0
+-24:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
+-25:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_send), %g1
+-26:	lduwa		[%o1 + 0x8] %asi, %o2
+-27:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_send), %g0
+-28:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
+-29:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_recv), %g1
+-30:	lduwa		[%o1 + 0x8] %asi, %o2
+-31:	lduwa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_recv), %g0
+-32:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
+-33:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_sendto), %g1
+-34:	lduwa		[%o1 + 0x8] %asi, %o2
+-35:	lduwa		[%o1 + 0xc] %asi, %o3
+-36:	lduwa		[%o1 + 0x10] %asi, %o4
+-37:	ldswa		[%o1 + 0x14] %asi, %o5
+-	jmpl		%g1 + %lo(sys_sendto), %g0
+-38:	 lduwa		[%o1 + 0x4] %asi, %o1
+-do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
+-39:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_recvfrom), %g1
+-40:	lduwa		[%o1 + 0x8] %asi, %o2
+-41:	lduwa		[%o1 + 0xc] %asi, %o3
+-42:	lduwa		[%o1 + 0x10] %asi, %o4
+-43:	lduwa		[%o1 + 0x14] %asi, %o5
+-	jmpl		%g1 + %lo(sys_recvfrom), %g0
+-44:	 lduwa		[%o1 + 0x4] %asi, %o1
+-do_sys_shutdown: /* sys_shutdown(int, int) */
+-45:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_shutdown), %g1
+-	jmpl		%g1 + %lo(sys_shutdown), %g0
+-46:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-	nop
+-do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
+-47:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_setsockopt), %g1
+-48:	ldswa		[%o1 + 0x8] %asi, %o2
+-49:	lduwa		[%o1 + 0xc] %asi, %o3
+-50:	ldswa		[%o1 + 0x10] %asi, %o4
+-	jmpl		%g1 + %lo(sys_setsockopt), %g0
+-51:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
+-52:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_getsockopt), %g1
+-53:	ldswa		[%o1 + 0x8] %asi, %o2
+-54:	lduwa		[%o1 + 0xc] %asi, %o3
+-55:	lduwa		[%o1 + 0x10] %asi, %o4
+-	jmpl		%g1 + %lo(sys_getsockopt), %g0
+-56:	 ldswa		[%o1 + 0x4] %asi, %o1
+-	nop
+-do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
+-57:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(compat_sys_sendmsg), %g1
+-58:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(compat_sys_sendmsg), %g0
+-59:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
+-60:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(compat_sys_recvmsg), %g1
+-61:	lduwa		[%o1 + 0x8] %asi, %o2
+-	jmpl		%g1 + %lo(compat_sys_recvmsg), %g0
+-62:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-	nop
+-do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
+-63:	ldswa		[%o1 + 0x0] %asi, %o0
+-	sethi		%hi(sys_accept4), %g1
+-64:	lduwa		[%o1 + 0x8] %asi, %o2
+-65:	ldswa		[%o1 + 0xc] %asi, %o3
+-	jmpl		%g1 + %lo(sys_accept4), %g0
+-66:	 lduwa		[%o1 + 0x4] %asi, %o1
+-	nop
+-	nop
+-
+-	.section	__ex_table,"a"
+-	.align		4
+-	.word		1b, __retl_efault, 2b, __retl_efault
+-	.word		3b, __retl_efault, 4b, __retl_efault
+-	.word		5b, __retl_efault, 6b, __retl_efault
+-	.word		7b, __retl_efault, 8b, __retl_efault
+-	.word		9b, __retl_efault, 10b, __retl_efault
+-	.word		11b, __retl_efault, 12b, __retl_efault
+-	.word		13b, __retl_efault, 14b, __retl_efault
+-	.word		15b, __retl_efault, 16b, __retl_efault
+-	.word		17b, __retl_efault, 18b, __retl_efault
+-	.word		19b, __retl_efault, 20b, __retl_efault
+-	.word		21b, __retl_efault, 22b, __retl_efault
+-	.word		23b, __retl_efault, 24b, __retl_efault
+-	.word		25b, __retl_efault, 26b, __retl_efault
+-	.word		27b, __retl_efault, 28b, __retl_efault
+-	.word		29b, __retl_efault, 30b, __retl_efault
+-	.word		31b, __retl_efault, 32b, __retl_efault
+-	.word		33b, __retl_efault, 34b, __retl_efault
+-	.word		35b, __retl_efault, 36b, __retl_efault
+-	.word		37b, __retl_efault, 38b, __retl_efault
+-	.word		39b, __retl_efault, 40b, __retl_efault
+-	.word		41b, __retl_efault, 42b, __retl_efault
+-	.word		43b, __retl_efault, 44b, __retl_efault
+-	.word		45b, __retl_efault, 46b, __retl_efault
+-	.word		47b, __retl_efault, 48b, __retl_efault
+-	.word		49b, __retl_efault, 50b, __retl_efault
+-	.word		51b, __retl_efault, 52b, __retl_efault
+-	.word		53b, __retl_efault, 54b, __retl_efault
+-	.word		55b, __retl_efault, 56b, __retl_efault
+-	.word		57b, __retl_efault, 58b, __retl_efault
+-	.word		59b, __retl_efault, 60b, __retl_efault
+-	.word		61b, __retl_efault, 62b, __retl_efault
+-	.word		63b, __retl_efault, 64b, __retl_efault
+-	.word		65b, __retl_efault, 66b, __retl_efault
+-	.previous
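
The deleted assembly implemented sparc's compat socketcall(2) by hand: a 32-byte-per-entry jump table that loaded each argument from the user array with faulting loads. The generic compat_sys_socketcall now performs the same demultiplexing in C. Roughly, the control flow it replaces looks like this (a toy model, not the net/compat.c code; do_socket, do_bind and the table size are placeholders — the removed table covered 18 calls, socket through accept4):

#include <stdio.h>

typedef long (*sockcall_fn)(const long *args);

static long do_socket(const long *a)
{
	printf("socket(%ld, %ld, %ld)\n", a[0], a[1], a[2]);
	return 3; /* pretend fd */
}

static long do_bind(const long *a)
{
	printf("bind(fd=%ld, ...)\n", a[0]);
	return 0;
}

/* index 0 unused; dispatch on the multiplexed call number */
static const sockcall_fn table[] = { NULL, do_socket, do_bind };

static long socketcall(unsigned int call, const long *args)
{
	if (call >= sizeof(table) / sizeof(table[0]) || !table[call])
		return -22; /* -EINVAL, as in the removed do_einval path */
	return table[call](args);
}

int main(void)
{
	long args[3] = { 2, 1, 0 }; /* AF_INET, SOCK_STREAM, 0 */
	return socketcall(1, args) < 0;
}
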
+diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
+index b23d59313589a..72985b705f44b 100644
+--- a/arch/sparc/kernel/syscalls/syscall.tbl
++++ b/arch/sparc/kernel/syscalls/syscall.tbl
+@@ -117,7 +117,7 @@
+ 90	common	dup2			sys_dup2
+ 91	32	setfsuid32		sys_setfsuid
+ 92	common	fcntl			sys_fcntl			compat_sys_fcntl
+-93	common	select			sys_select
++93	common	select			sys_select			compat_sys_select
+ 94	32	setfsgid32		sys_setfsgid
+ 95	common	fsync			sys_fsync
+ 96	common	setpriority		sys_setpriority
+@@ -155,7 +155,7 @@
+ 123	32	fchown			sys_fchown16
+ 123	64	fchown			sys_fchown
+ 124	common	fchmod			sys_fchmod
+-125	common	recvfrom		sys_recvfrom
++125	common	recvfrom		sys_recvfrom			compat_sys_recvfrom
+ 126	32	setreuid		sys_setreuid16
+ 126	64	setreuid		sys_setreuid
+ 127	32	setregid		sys_setregid16
+@@ -247,7 +247,7 @@
+ 204	32	readdir			sys_old_readdir			compat_sys_old_readdir
+ 204	64	readdir			sys_nis_syscall
+ 205	common	readahead		sys_readahead			compat_sys_readahead
+-206	common	socketcall		sys_socketcall			sys32_socketcall
++206	common	socketcall		sys_socketcall			compat_sys_socketcall
+ 207	common	syslog			sys_syslog
+ 208	common	lookup_dcookie		sys_ni_syscall
+ 209	common	fadvise64		sys_fadvise64			compat_sys_fadvise64
+@@ -461,7 +461,7 @@
+ 412	32	utimensat_time64		sys_utimensat			sys_utimensat
+ 413	32	pselect6_time64			sys_pselect6			compat_sys_pselect6_time64
+ 414	32	ppoll_time64			sys_ppoll			compat_sys_ppoll_time64
+-416	32	io_pgetevents_time64		sys_io_pgetevents		sys_io_pgetevents
++416	32	io_pgetevents_time64		sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	32	recvmmsg_time64			sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	32	mq_timedsend_time64		sys_mq_timedsend		sys_mq_timedsend
+ 419	32	mq_timedreceive_time64		sys_mq_timedreceive		sys_mq_timedreceive
+diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
+index fa0759bfe498e..73bf0aea8baf1 100644
+--- a/arch/sparc/net/bpf_jit_comp_64.c
++++ b/arch/sparc/net/bpf_jit_comp_64.c
+@@ -1602,7 +1602,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	bpf_flush_icache(header, (u8 *)header + header->size);
+ 
+ 	if (!prog->is_func || extra_pass) {
+-		bpf_jit_binary_lock_ro(header);
++		if (bpf_jit_binary_lock_ro(header)) {
++			bpf_jit_binary_free(header);
++			prog = orig_prog;
++			goto out_off;
++		}
+ 	} else {
+ 		jit_data->ctx = ctx;
+ 		jit_data->image = image_ptr;
+diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
+index 5f8591ce7f25e..fccb825ca2ca3 100644
+--- a/arch/x86/entry/syscalls/syscall_32.tbl
++++ b/arch/x86/entry/syscalls/syscall_32.tbl
+@@ -420,7 +420,7 @@
+ 412	i386	utimensat_time64	sys_utimensat
+ 413	i386	pselect6_time64		sys_pselect6			compat_sys_pselect6_time64
+ 414	i386	ppoll_time64		sys_ppoll			compat_sys_ppoll_time64
+-416	i386	io_pgetevents_time64	sys_io_pgetevents
++416	i386	io_pgetevents_time64	sys_io_pgetevents		compat_sys_io_pgetevents_time64
+ 417	i386	recvmmsg_time64		sys_recvmmsg			compat_sys_recvmmsg_time64
+ 418	i386	mq_timedsend_time64	sys_mq_timedsend
+ 419	i386	mq_timedreceive_time64	sys_mq_timedreceive
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 7e523bb3d2d31..fb2809b20b0ac 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -73,19 +73,16 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ #endif
+ 
+ 	/*
+-	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+-	 * but not enough for x86 stack utilization comfort. To keep
+-	 * reasonable stack head room, reduce the maximum offset to 8 bits.
+-	 *
+-	 * The actual entropy will be further reduced by the compiler when
+-	 * applying stack alignment constraints (see cc_stack_align4/8 in
++	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
++	 * bits. The actual entropy will be further reduced by the compiler
++	 * when applying stack alignment constraints (see cc_stack_align4/8 in
+ 	 * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
+ 	 * low bits from any entropy chosen here.
+ 	 *
+-	 * Therefore, final stack offset entropy will be 5 (x86_64) or
+-	 * 6 (ia32) bits.
++	 * Therefore, final stack offset entropy will be 7 (x86_64) or
++	 * 8 (ia32) bits.
+ 	 */
+-	choose_random_kstack_offset(rdtsc() & 0xFF);
++	choose_random_kstack_offset(rdtsc());
+ }
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+ 
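
The reworked comment is worth sanity-checking: choose_random_kstack_offset() now keeps all rdtsc() bits and lets KSTACK_OFFSET_MAX() do the capping at 10 bits, of which compiler stack alignment strips the low 3 (x86_64) or 2 (ia32). A back-of-envelope check of those figures (plain arithmetic, nothing kernel-specific):

#include <stdio.h>

int main(void)
{
	int max_bits = 10;	/* KSTACK_OFFSET_MAX() caps the offset */
	int align_x86_64 = 3;	/* cc_stack_align8: low 3 bits forced to 0 */
	int align_ia32 = 2;	/* cc_stack_align4: low 2 bits forced to 0 */

	printf("x86_64: %d bits\n", max_bits - align_x86_64); /* 7 */
	printf("ia32:   %d bits\n", max_bits - align_ia32);   /* 8 */
	return 0;
}
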
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 520deb411a702..1209c7aebb211 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -145,8 +145,8 @@ void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
+ 		asm volatile(
+ 			"fnclex\n\t"
+ 			"emms\n\t"
+-			"fildl %P[addr]"	/* set F?P to defined value */
+-			: : [addr] "m" (fpstate));
++			"fildl %[addr]"	/* set F?P to defined value */
++			: : [addr] "m" (*fpstate));
+ 	}
+ 
+ 	if (use_xsave()) {
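
The fpu fix above replaces a %P operand-modifier workaround with the idiomatic form: with an "m" constraint the operand should be the dereferenced object (*fpstate), so the compiler itself emits a valid memory reference. The same idea on a trivial load (a sketch in GCC/Clang extended asm, x86 AT&T syntax only):

#include <stdio.h>

int main(void)
{
	int x = 42, y = 0;
	int *p = &x;

	/* "m"(*p) makes the pointee the memory operand; %[src] then
	 * expands to a correct addressing mode, no modifier needed. */
	__asm__ volatile("movl %[src], %[dst]"
			 : [dst] "=r" (y)
			 : [src] "m" (*p));
	printf("%d\n", y); /* 42 */
	return 0;
}
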
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index e42faa792c079..52e1f3f0b361c 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -27,25 +27,7 @@
+ 
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+-	unsigned long pc = instruction_pointer(regs);
+-
+-	if (!user_mode(regs) && in_lock_functions(pc)) {
+-#ifdef CONFIG_FRAME_POINTER
+-		return *(unsigned long *)(regs->bp + sizeof(long));
+-#else
+-		unsigned long *sp = (unsigned long *)regs->sp;
+-		/*
+-		 * Return address is either directly at stack pointer
+-		 * or above a saved flags. Eflags has bits 22-31 zero,
+-		 * kernel addresses don't.
+-		 */
+-		if (sp[0] >> 22)
+-			return sp[0];
+-		if (sp[1] >> 22)
+-			return sp[1];
+-#endif
+-	}
+-	return pc;
++	return instruction_pointer(regs);
+ }
+ EXPORT_SYMBOL(profile_pc);
+ 
+diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
+index c10083a8e68e6..de0f9e5f9f73a 100644
+--- a/arch/x86/net/bpf_jit_comp32.c
++++ b/arch/x86/net/bpf_jit_comp32.c
+@@ -2600,8 +2600,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	if (bpf_jit_enable > 1)
+ 		bpf_jit_dump(prog->len, proglen, pass + 1, image);
+ 
+-	if (image) {
+-		bpf_jit_binary_lock_ro(header);
++	if (image && !bpf_jit_binary_lock_ro(header)) {
+ 		prog->bpf_func = (void *)image;
+ 		prog->jited = 1;
+ 		prog->jited_len = proglen;
+diff --git a/crypto/ecdh.c b/crypto/ecdh.c
+index 80afee3234fbe..3049f147e0117 100644
+--- a/crypto/ecdh.c
++++ b/crypto/ecdh.c
+@@ -33,6 +33,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ 	    params.key_size > sizeof(u64) * ctx->ndigits)
+ 		return -EINVAL;
+ 
++	memset(ctx->private_key, 0, sizeof(ctx->private_key));
++
+ 	if (!params.key || !params.key_size)
+ 		return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
+ 				       ctx->private_key);
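
The added memset closes a stale-data hole: ctx->private_key is a fixed-size buffer, and a new key shorter than the previous one would otherwise leave bytes of the old key in place. The defensive shape, reduced to a sketch (key_ctx, set_key and the 66-byte bound are illustrative, not the crypto API):

#include <string.h>

#define MAX_KEY_BYTES 66	/* hypothetical fixed upper bound */

struct key_ctx {
	unsigned char key[MAX_KEY_BYTES];
};

static int set_key(struct key_ctx *ctx, const void *key, size_t len)
{
	if (len > sizeof(ctx->key))
		return -1;
	/* zero-fill first so a shorter key can't inherit stale bytes */
	memset(ctx->key, 0, sizeof(ctx->key));
	memcpy(ctx->key, key, len);
	return 0;
}

int main(void)
{
	struct key_ctx ctx;

	return set_key(&ctx, "secret", 6);
}
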
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 5eb38fbbbecdb..fc6fd583faf8a 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1975,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+ 
+ 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+-	if (!host)
+-		return -ENOMEM;
++	if (!host) {
++		rc = -ENOMEM;
++		goto err_rm_sysfs_file;
++	}
+ 	host->private_data = hpriv;
+ 
+ 	if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+@@ -2031,11 +2033,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* initialize adapter */
+ 	rc = ahci_configure_dma_masks(pdev, hpriv);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	rc = ahci_pci_reset_controller(host);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	ahci_pci_init_controller(host);
+ 	ahci_pci_print_info(host);
+@@ -2044,10 +2046,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	rc = ahci_host_activate(host, &ahci_sht);
+ 	if (rc)
+-		return rc;
++		goto err_rm_sysfs_file;
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	return 0;
++
++err_rm_sysfs_file:
++	sysfs_remove_file_from_group(&pdev->dev.kobj,
++				     &dev_attr_remapped_nvme.attr, NULL);
++	return rc;
+ }
+ 
+ static void ahci_shutdown_one(struct pci_dev *pdev)
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 28caed151e05f..d937e6e5cc7ad 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4181,8 +4181,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },
+ 
+ 	/* Crucial devices with broken LPM support */
+-	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
+-	{ "CT240BX500SSD1",		NULL,	ATA_HORKAGE_NOLPM },
++	{ "CT*0BX*00SSD1",		NULL,	ATA_HORKAGE_NOLPM },
+ 
+ 	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
+ 	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+@@ -5534,6 +5533,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ 	return ap;
+ }
+ 
++void ata_port_free(struct ata_port *ap)
++{
++	if (!ap)
++		return;
++
++	kfree(ap->pmp_link);
++	kfree(ap->slave_link);
++	kfree(ap->ncq_sense_buf);
++	kfree(ap);
++}
++EXPORT_SYMBOL_GPL(ata_port_free);
++
+ static void ata_devres_release(struct device *gendev, void *res)
+ {
+ 	struct ata_host *host = dev_get_drvdata(gendev);
+@@ -5560,12 +5571,7 @@ static void ata_host_release(struct kref *kref)
+ 	int i;
+ 
+ 	for (i = 0; i < host->n_ports; i++) {
+-		struct ata_port *ap = host->ports[i];
+-
+-		kfree(ap->pmp_link);
+-		kfree(ap->slave_link);
+-		kfree(ap->ncq_sense_buf);
+-		kfree(ap);
++		ata_port_free(host->ports[i]);
+ 		host->ports[i] = NULL;
+ 	}
+ 	kfree(host);
+@@ -5615,8 +5621,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 	if (!host)
+ 		return NULL;
+ 
+-	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+-		goto err_free;
++	if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
++		kfree(host);
++		return NULL;
++	}
+ 
+ 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+ 	if (!dr)
+@@ -5648,8 +5656,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 
+  err_out:
+ 	devres_release_group(dev, NULL);
+- err_free:
+-	kfree(host);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(ata_host_alloc);
+@@ -5948,7 +5954,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh
+ 	 * allocation time.
+ 	 */
+ 	for (i = host->n_ports; host->ports[i]; i++)
+-		kfree(host->ports[i]);
++		ata_port_free(host->ports[i]);
+ 
+ 	/* give ports names and add SCSI hosts */
+ 	for (i = 0; i < host->n_ports; i++) {
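
Factoring the per-port kfree sequence into ata_port_free() keeps the host-release path and the ata_host_register() error path from drifting apart: the latter previously freed only the port struct itself, not the buffers hanging off it. The pattern in general form (stand-in names):

#include <stdlib.h>

struct port {
	void *pmp_link;
	void *slave_link;
	void *sense_buf;
};

/* One NULL-tolerant teardown helper; every exit path calls this
 * instead of open-coding the frees. */
static void port_free(struct port *p)
{
	if (!p)
		return;
	free(p->pmp_link);
	free(p->slave_link);
	free(p->sense_buf);
	free(p);
}

int main(void)
{
	struct port *p = calloc(1, sizeof(*p));

	port_free(p);		/* fine with partially-initialized state */
	port_free(NULL);	/* and with never-allocated slots */
	return 0;
}
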
+diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
+index b0f24cf3e891d..4d3de4a35801f 100644
+--- a/drivers/counter/ti-eqep.c
++++ b/drivers/counter/ti-eqep.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/clk.h>
+ #include <linux/counter.h>
+ #include <linux/kernel.h>
+ #include <linux/mod_devicetable.h>
+@@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ 	struct counter_device *counter;
+ 	struct ti_eqep_cnt *priv;
+ 	void __iomem *base;
++	struct clk *clk;
+ 	int err;
+ 
+ 	counter = devm_counter_alloc(dev, sizeof(*priv));
+@@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(dev);
+ 	pm_runtime_get_sync(dev);
+ 
++	clk = devm_clk_get_enabled(dev, NULL);
++	if (IS_ERR(clk))
++		return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n");
++
+ 	err = counter_add(counter);
+ 	if (err < 0) {
+ 		pm_runtime_put_sync(dev);
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index dbbf299f42197..3405bf69b1c0d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -357,15 +357,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
+ 	int ret;
+ 
+ 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
+-	if (ret)
+-		return;
+-
+ 	/*
+-	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+-	 * In this case we can't use CPPC.highest_perf to enable ITMT.
+-	 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
++	 * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
++	 *
++	 * Also, on some systems with overclocking enabled, CPPC.highest_perf is
++	 * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
++	 * Fall back to MSR_HWP_CAPABILITIES then too.
+ 	 */
+-	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
++	if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
+ 		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+ 
+ 	/*
+diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
+index bc5a95665aa0a..87008505f8a9e 100644
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -27,7 +27,14 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+ int cxl_region_init(void);
+ void cxl_region_exit(void);
+ int cxl_get_poison_by_endpoint(struct cxl_port *port);
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
++
+ #else
++static inline
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
++{
++	return NULL;
++}
+ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
+ {
+ 	return 0;
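
Moving cxl_dpa_to_region() behind this header uses the usual config-stub idiom: a real prototype when CONFIG_CXL_REGION is enabled, a static inline returning the "not found" value when it is not, so callers need no #ifdefs of their own. The idiom in isolation (CONFIG_FEATURE_X and the types are placeholders):

#include <stddef.h>

struct dev;	/* opaque handles for the sketch */
struct region;

#ifdef CONFIG_FEATURE_X
struct region *lookup_region(const struct dev *d, unsigned long long addr);
#else
static inline struct region *lookup_region(const struct dev *d,
					   unsigned long long addr)
{
	(void)d;
	(void)addr;
	return NULL;	/* feature compiled out: nothing can match */
}
#endif

int main(void)
{
	return lookup_region(NULL, 0) != NULL;
}
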
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index 7d97790b893d7..e01c16fdc7575 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+ 	struct cxl_dport *dport = NULL;
+ 	int single_port_map[1];
+ 	unsigned long index;
++	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
++
++	/*
++	 * Capability checks are moot for passthrough decoders, support
++	 * any and all possibilities.
++	 */
++	cxlhdm->interleave_mask = ~0U;
++	cxlhdm->iw_cap_mask = ~0UL;
+ 
+ 	cxlsd = cxl_switch_decoder_alloc(port, 1);
+ 	if (IS_ERR(cxlsd))
+@@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
+ 		cxlhdm->interleave_mask |= GENMASK(11, 8);
+ 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
+ 		cxlhdm->interleave_mask |= GENMASK(14, 12);
++	cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
++	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
++		cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
++	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
++		cxlhdm->iw_cap_mask |= BIT(16);
+ }
+ 
+ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
+diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
+index d4e259f3a7e91..0277726afd049 100644
+--- a/drivers/cxl/core/memdev.c
++++ b/drivers/cxl/core/memdev.c
+@@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
+ }
+ EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+ 
+-struct cxl_dpa_to_region_context {
+-	struct cxl_region *cxlr;
+-	u64 dpa;
+-};
+-
+-static int __cxl_dpa_to_region(struct device *dev, void *arg)
+-{
+-	struct cxl_dpa_to_region_context *ctx = arg;
+-	struct cxl_endpoint_decoder *cxled;
+-	u64 dpa = ctx->dpa;
+-
+-	if (!is_endpoint_decoder(dev))
+-		return 0;
+-
+-	cxled = to_cxl_endpoint_decoder(dev);
+-	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+-		return 0;
+-
+-	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+-		return 0;
+-
+-	dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+-		dev_name(&cxled->cxld.region->dev));
+-
+-	ctx->cxlr = cxled->cxld.region;
+-
+-	return 1;
+-}
+-
+-static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
+-{
+-	struct cxl_dpa_to_region_context ctx;
+-	struct cxl_port *port;
+-
+-	ctx = (struct cxl_dpa_to_region_context) {
+-		.dpa = dpa,
+-	};
+-	port = cxlmd->endpoint;
+-	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
+-		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+-
+-	return ctx.cxlr;
+-}
+-
+ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
+ {
+ 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
+index e69625a8d6a1d..c00f3a933164f 100644
+--- a/drivers/cxl/core/pmem.c
++++ b/drivers/cxl/core/pmem.c
+@@ -62,10 +62,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
+ 	return is_cxl_nvdimm_bridge(dev);
+ }
+ 
+-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
++/**
++ * cxl_find_nvdimm_bridge() - find a bridge device relative to a port
++ * @port: any descendant port of an nvdimm-bridge associated
++ *        root-cxl-port
++ */
++struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
+ {
+-	struct cxl_root *cxl_root __free(put_cxl_root) =
+-		find_cxl_root(cxlmd->endpoint);
++	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ 	struct device *dev;
+ 
+ 	if (!cxl_root)
+@@ -242,18 +246,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd)
+ 
+ /**
+  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
++ * @parent_port: parent port for the (to be added) @cxlmd endpoint port
+  * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
+  *
+  * Return: 0 on success negative error code on failure.
+  */
+-int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
++int devm_cxl_add_nvdimm(struct cxl_port *parent_port,
++			struct cxl_memdev *cxlmd)
+ {
+ 	struct cxl_nvdimm_bridge *cxl_nvb;
+ 	struct cxl_nvdimm *cxl_nvd;
+ 	struct device *dev;
+ 	int rc;
+ 
+-	cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
++	cxl_nvb = cxl_find_nvdimm_bridge(parent_port);
+ 	if (!cxl_nvb)
+ 		return -ENODEV;
+ 
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 18b95149640b6..a600feb8a4ed5 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -1101,6 +1101,26 @@ static int cxl_port_attach_region(struct cxl_port *port,
+ 	}
+ 	cxld = cxl_rr->decoder;
+ 
++	/*
++	 * the number of targets should not exceed the target_count
++	 * of the decoder
++	 */
++	if (is_switch_decoder(&cxld->dev)) {
++		struct cxl_switch_decoder *cxlsd;
++
++		cxlsd = to_cxl_switch_decoder(&cxld->dev);
++		if (cxl_rr->nr_targets > cxlsd->nr_targets) {
++			dev_dbg(&cxlr->dev,
++				"%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
++				dev_name(port->uport_dev), dev_name(&port->dev),
++				dev_name(&cxld->dev), dev_name(&cxlmd->dev),
++				dev_name(&cxled->cxld.dev), pos,
++				cxlsd->nr_targets);
++			rc = -ENXIO;
++			goto out_erase;
++		}
++	}
++
+ 	rc = cxl_rr_ep_add(cxl_rr, cxled);
+ 	if (rc) {
+ 		dev_dbg(&cxlr->dev,
+@@ -1210,6 +1230,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+ 	return 0;
+ }
+ 
++static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
++{
++	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
++	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
++	unsigned int interleave_mask;
++	u8 eiw;
++	u16 eig;
++	int high_pos, low_pos;
++
++	if (!test_bit(iw, &cxlhdm->iw_cap_mask))
++		return -ENXIO;
++	/*
++	 * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
++	 * if eiw < 8:
++	 *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
++	 *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
++	 *
++	 *   when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
++	 *   interleave bits are none.
++	 *
++	 * if eiw >= 8:
++	 *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
++	 *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
++	 *
++	 *   when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
++	 *   interleave bits are none.
++	 */
++	ways_to_eiw(iw, &eiw);
++	if (eiw == 0 || eiw == 8)
++		return 0;
++
++	granularity_to_eig(ig, &eig);
++	if (eiw > 8)
++		high_pos = eiw + eig - 1;
++	else
++		high_pos = eiw + eig + 7;
++	low_pos = eig + 8;
++	interleave_mask = GENMASK(high_pos, low_pos);
++	if (interleave_mask & ~cxlhdm->interleave_mask)
++		return -ENXIO;
++
++	return 0;
++}
++
+ static int cxl_port_setup_targets(struct cxl_port *port,
+ 				  struct cxl_region *cxlr,
+ 				  struct cxl_endpoint_decoder *cxled)
+@@ -1360,6 +1424,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+ 			return -ENXIO;
+ 		}
+ 	} else {
++		rc = check_interleave_cap(cxld, iw, ig);
++		if (rc) {
++			dev_dbg(&cxlr->dev,
++				"%s:%s iw: %d ig: %d is not supported\n",
++				dev_name(port->uport_dev),
++				dev_name(&port->dev), iw, ig);
++			return rc;
++		}
++
+ 		cxld->interleave_ways = iw;
+ 		cxld->interleave_granularity = ig;
+ 		cxld->hpa_range = (struct range) {
+@@ -1796,6 +1869,15 @@ static int cxl_region_attach(struct cxl_region *cxlr,
+ 	struct cxl_dport *dport;
+ 	int rc = -ENXIO;
+ 
++	rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
++				  p->interleave_granularity);
++	if (rc) {
++		dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
++			dev_name(&cxled->cxld.dev), p->interleave_ways,
++			p->interleave_granularity);
++		return rc;
++	}
++
+ 	if (cxled->mode != cxlr->mode) {
+ 		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
+ 			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
+@@ -2679,28 +2761,78 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
+ 	return rc;
+ }
+ 
++struct cxl_dpa_to_region_context {
++	struct cxl_region *cxlr;
++	u64 dpa;
++};
++
++static int __cxl_dpa_to_region(struct device *dev, void *arg)
++{
++	struct cxl_dpa_to_region_context *ctx = arg;
++	struct cxl_endpoint_decoder *cxled;
++	struct cxl_region *cxlr;
++	u64 dpa = ctx->dpa;
++
++	if (!is_endpoint_decoder(dev))
++		return 0;
++
++	cxled = to_cxl_endpoint_decoder(dev);
++	if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
++		return 0;
++
++	if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
++		return 0;
++
++	/*
++	 * Stop the region search (return 1) when an endpoint mapping is
++	 * found. The region may not be fully constructed so offering
++	 * the cxlr in the context structure is not guaranteed.
++	 */
++	cxlr = cxled->cxld.region;
++	if (cxlr)
++		dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
++			dev_name(&cxlr->dev));
++	else
++		dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa,
++			dev_name(dev));
++
++	ctx->cxlr = cxlr;
++
++	return 1;
++}
++
++struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
++{
++	struct cxl_dpa_to_region_context ctx;
++	struct cxl_port *port;
++
++	ctx = (struct cxl_dpa_to_region_context) {
++		.dpa = dpa,
++	};
++	port = cxlmd->endpoint;
++	if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
++		device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
++
++	return ctx.cxlr;
++}
++
+ static struct lock_class_key cxl_pmem_region_key;
+ 
+-static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
++static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
+ {
+ 	struct cxl_region_params *p = &cxlr->params;
+ 	struct cxl_nvdimm_bridge *cxl_nvb;
+-	struct cxl_pmem_region *cxlr_pmem;
+ 	struct device *dev;
+ 	int i;
+ 
+-	down_read(&cxl_region_rwsem);
+-	if (p->state != CXL_CONFIG_COMMIT) {
+-		cxlr_pmem = ERR_PTR(-ENXIO);
+-		goto out;
+-	}
++	guard(rwsem_read)(&cxl_region_rwsem);
++	if (p->state != CXL_CONFIG_COMMIT)
++		return -ENXIO;
+ 
+-	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
+-			    GFP_KERNEL);
+-	if (!cxlr_pmem) {
+-		cxlr_pmem = ERR_PTR(-ENOMEM);
+-		goto out;
+-	}
++	struct cxl_pmem_region *cxlr_pmem __free(kfree) =
++		kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL);
++	if (!cxlr_pmem)
++		return -ENOMEM;
+ 
+ 	cxlr_pmem->hpa_range.start = p->res->start;
+ 	cxlr_pmem->hpa_range.end = p->res->end;
+@@ -2717,12 +2849,9 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+ 		 * bridge for one device is the same for all.
+ 		 */
+ 		if (i == 0) {
+-			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
+-			if (!cxl_nvb) {
+-				kfree(cxlr_pmem);
+-				cxlr_pmem = ERR_PTR(-ENODEV);
+-				goto out;
+-			}
++			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint);
++			if (!cxl_nvb)
++				return -ENODEV;
+ 			cxlr->cxl_nvb = cxl_nvb;
+ 		}
+ 		m->cxlmd = cxlmd;
+@@ -2733,18 +2862,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
+ 	}
+ 
+ 	dev = &cxlr_pmem->dev;
+-	cxlr_pmem->cxlr = cxlr;
+-	cxlr->cxlr_pmem = cxlr_pmem;
+ 	device_initialize(dev);
+ 	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
+ 	device_set_pm_not_required(dev);
+ 	dev->parent = &cxlr->dev;
+ 	dev->bus = &cxl_bus_type;
+ 	dev->type = &cxl_pmem_region_type;
+-out:
+-	up_read(&cxl_region_rwsem);
++	cxlr_pmem->cxlr = cxlr;
++	cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem);
+ 
+-	return cxlr_pmem;
++	return 0;
+ }
+ 
+ static void cxl_dax_region_release(struct device *dev)
+@@ -2861,9 +2988,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
+ 	struct device *dev;
+ 	int rc;
+ 
+-	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
+-	if (IS_ERR(cxlr_pmem))
+-		return PTR_ERR(cxlr_pmem);
++	rc = cxl_pmem_region_alloc(cxlr);
++	if (rc)
++		return rc;
++	cxlr_pmem = cxlr->cxlr_pmem;
+ 	cxl_nvb = cxlr->cxl_nvb;
+ 
+ 	dev = &cxlr_pmem->dev;
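
check_interleave_cap() turns the spec comment into two checks: the requested ways must be set in iw_cap_mask, and the HPA offset bits the decoder would consume (a GENMASK between eig+8 and a ways-dependent high bit) must all be routable per interleave_mask. The bit-range computation can be exercised standalone (same arithmetic as the hunk; the example decodes below are hypothetical):

#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* Which HPA offset bits select the interleave target, per CXL r3.1
 * 8.2.4.20.13, for encoded ways (eiw) and granularity (eig). */
static unsigned int hpa_interleave_bits(unsigned int eiw, unsigned int eig)
{
	int high_pos, low_pos;

	if (eiw == 0 || eiw == 8)	/* no discrete interleave bits */
		return 0;
	if (eiw > 8)			/* 3/6/12-way encodings */
		high_pos = eiw + eig - 1;
	else
		high_pos = eiw + eig + 7;
	low_pos = eig + 8;
	return GENMASK(high_pos, low_pos);
}

int main(void)
{
	/* eiw=1 (2-way), eig=0 (256B granularity): HPA bit 8 only */
	printf("%#x\n", hpa_interleave_bits(1, 0)); /* 0x100 */
	/* eiw=2 (4-way), eig=1 (512B granularity): HPA bits 10:9 */
	printf("%#x\n", hpa_interleave_bits(2, 1)); /* 0x600 */
	return 0;
}
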
+diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
+index 72fa477407689..6f9270f2faf96 100644
+--- a/drivers/cxl/cxl.h
++++ b/drivers/cxl/cxl.h
+@@ -45,6 +45,8 @@
+ #define   CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
+ #define   CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
+ #define   CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
++#define   CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11)
++#define   CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12)
+ #define CXL_HDM_DECODER_CTRL_OFFSET 0x4
+ #define   CXL_HDM_DECODER_ENABLE BIT(1)
+ #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
+@@ -848,8 +850,8 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
+ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
+ bool is_cxl_nvdimm(struct device *dev);
+ bool is_cxl_nvdimm_bridge(struct device *dev);
+-int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
+-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
++int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
++struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
+ 
+ #ifdef CONFIG_CXL_REGION
+ bool is_cxl_pmem_region(struct device *dev);
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 36cee9c30cebd..07e65a7605f3e 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -848,11 +848,21 @@ static inline void cxl_mem_active_dec(void)
+ 
+ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
+ 
++/**
++ * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities
++ * @regs: mapped registers, see devm_cxl_setup_hdm()
++ * @decoder_count: number of decoders for this port
++ * @target_count: for switch decoders, max downstream port targets
++ * @interleave_mask: interleave granularity capability, see check_interleave_cap()
++ * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap()
++ * @port: mapped cxl_port, see devm_cxl_setup_hdm()
++ */
+ struct cxl_hdm {
+ 	struct cxl_component_regs regs;
+ 	unsigned int decoder_count;
+ 	unsigned int target_count;
+ 	unsigned int interleave_mask;
++	unsigned long iw_cap_mask;
+ 	struct cxl_port *port;
+ };
+ 
+diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
+index 0c79d9ce877cc..2f1b49bfe162f 100644
+--- a/drivers/cxl/mem.c
++++ b/drivers/cxl/mem.c
+@@ -152,6 +152,15 @@ static int cxl_mem_probe(struct device *dev)
+ 		return -ENXIO;
+ 	}
+ 
++	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
++		rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
++		if (rc) {
++			if (rc == -ENODEV)
++				dev_info(dev, "PMEM disabled by platform\n");
++			return rc;
++		}
++	}
++
+ 	if (dport->rch)
+ 		endpoint_parent = parent_port->uport_dev;
+ 	else
+@@ -174,14 +183,6 @@ static int cxl_mem_probe(struct device *dev)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
+-		rc = devm_cxl_add_nvdimm(cxlmd);
+-		if (rc == -ENODEV)
+-			dev_info(dev, "PMEM disabled by platform\n");
+-		else
+-			return rc;
+-	}
+-
+ 	/*
+ 	 * The kernel may be operating out of CXL memory on this device,
+ 	 * there is no spec defined way to determine whether this device
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index bb499e3629125..1d0175d6350b7 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+ 	else
+ 		nirq = DIV_ROUND_UP(ngpio, 16);
+ 
++	if (nirq > MAX_INT_PER_BANK) {
++		dev_err(dev, "Too many IRQs!\n");
++		return -EINVAL;
++	}
++
+ 	chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
+ 	if (!chips)
+ 		return -ENOMEM;
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 9dad67ea25974..5639abce6ec57 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -89,6 +89,10 @@ struct linehandle_state {
+ 	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ 	GPIOHANDLE_REQUEST_OPEN_SOURCE)
+ 
++#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
++	(GPIOHANDLE_REQUEST_INPUT | \
++	 GPIOHANDLE_REQUEST_OUTPUT)
++
+ static int linehandle_validate_flags(u32 flags)
+ {
+ 	/* Return an error if an unknown flag is set */
+@@ -169,21 +173,21 @@ static long linehandle_set_config(struct linehandle_state *lh,
+ 	if (ret)
+ 		return ret;
+ 
++	/* Lines must be reconfigured explicitly as input or output. */
++	if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
++		return -EINVAL;
++
+ 	for (i = 0; i < lh->num_descs; i++) {
+ 		desc = lh->descs[i];
+-		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
++		linehandle_flags_to_desc_flags(lflags, &desc->flags);
+ 
+-		/*
+-		 * Lines have to be requested explicitly for input
+-		 * or output, else the line will be treated "as is".
+-		 */
+ 		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ 			int val = !!gcnf.default_values[i];
+ 
+ 			ret = gpiod_direction_output(desc, val);
+ 			if (ret)
+ 				return ret;
+-		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
++		} else {
+ 			ret = gpiod_direction_input(desc);
+ 			if (ret)
+ 				return ret;
+@@ -1530,12 +1534,14 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
+ 		line = &lr->lines[i];
+ 		desc = lr->lines[i].desc;
+ 		flags = gpio_v2_line_config_flags(&lc, i);
+-		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+-		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ 		/*
+-		 * Lines have to be requested explicitly for input
+-		 * or output, else the line will be treated "as is".
++		 * Lines not explicitly reconfigured as input or output
++		 * are left unchanged.
+ 		 */
++		if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
++			continue;
++		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
++		edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
+ 		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ 			int val = gpio_v2_line_config_output_value(&lc, i);
+ 
+@@ -1543,7 +1549,7 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
+ 			ret = gpiod_direction_output(desc, val);
+ 			if (ret)
+ 				return ret;
+-		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
++		} else {
+ 			ret = gpiod_direction_input(desc);
+ 			if (ret)
+ 				return ret;
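
Both uAPI versions get the same tightening with version-appropriate behaviour: the v1 SET_CONFIG path now rejects a request carrying neither direction flag (-EINVAL), while the v2 path skips such lines untouched instead of clobbering desc->flags first. The v1-style check in miniature (the REQ_* names are stand-ins for the GPIOHANDLE_REQUEST_* constants):

#include <errno.h>

#define REQ_INPUT		(1u << 0)
#define REQ_OUTPUT		(1u << 1)
#define REQ_DIRECTION_FLAGS	(REQ_INPUT | REQ_OUTPUT)

static int reconfigure(unsigned int flags)
{
	/* lines must be reconfigured explicitly as input or output */
	if (!(flags & REQ_DIRECTION_FLAGS))
		return -EINVAL;
	if (flags & REQ_OUTPUT)
		return 0;	/* would call gpiod_direction_output() */
	return 0;		/* would call gpiod_direction_input() */
}

int main(void)
{
	return reconfigure(0) == -EINVAL ? 0 : 1;
}
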
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+index c8c23dbb90916..12b48851820e3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+@@ -399,7 +399,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ 					mem_channel_number = vram_info->v30.channel_num;
+ 					mem_channel_width = vram_info->v30.channel_width;
+ 					if (vram_width)
+-						*vram_width = mem_channel_number * (1 << mem_channel_width);
++						*vram_width = mem_channel_number * 16;
+ 					break;
+ 				default:
+ 					return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 941d6e379b8a6..eb8af023326ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5121,11 +5121,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+ 
+ 	dev_info(adev->dev, "GPU mode1 reset\n");
+ 
++	/* Cache the state before bus master disable. The saved config space
++	 * values are used in other cases like restore after mode-2 reset.
++	 */
++	amdgpu_device_cache_pci_state(adev->pdev);
++
+ 	/* disable BM */
+ 	pci_clear_master(adev->pdev);
+ 
+-	amdgpu_device_cache_pci_state(adev->pdev);
+-
+ 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ 		dev_info(adev->dev, "GPU smu mode1 reset\n");
+ 		ret = amdgpu_dpm_mode1_reset(adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index 8baa2e0935cc6..ba6d1876ce1c8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -3,6 +3,7 @@
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_simple_kms_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_vblank.h>
+ 
+ #include "amdgpu.h"
+@@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
+ 		return 0;
+ 	}
+ 	afb = to_amdgpu_framebuffer(new_state->fb);
+-	obj = new_state->fb->obj[0];
++
++	obj = drm_gem_fb_get_obj(new_state->fb, 0);
++	if (!obj) {
++		DRM_ERROR("Failed to get obj from framebuffer\n");
++		return -EINVAL;
++	}
++
+ 	rbo = gem_to_amdgpu_bo(obj);
+ 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+ 
+@@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
+ 				   struct drm_plane_state *old_state)
+ {
+ 	struct amdgpu_bo *rbo;
++	struct drm_gem_object *obj;
+ 	int r;
+ 
+ 	if (!old_state->fb)
+ 		return;
+ 
+-	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
++	obj = drm_gem_fb_get_obj(old_state->fb, 0);
++	if (!obj) {
++		DRM_ERROR("Failed to get obj from framebuffer\n");
++		return;
++	}
++
++	rbo = gem_to_amdgpu_bo(obj);
+ 	r = amdgpu_bo_reserve(rbo, false);
+ 	if (unlikely(r)) {
+ 		DRM_ERROR("failed to reserve rbo before unpin\n");
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+index f15d1dbad6a96..b72ed3e78df05 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+@@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
+ 			dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
+ 		}
+ 
++		if (dml_pipe_idx == 0xFFFFFFFF)
++			continue;
+ 		ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]);
+ 		ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
+ 
+@@ -468,6 +470,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
+ 			dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
+ 		else
+ 			dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
++
++		if (dml_pipe_idx == 0xFFFFFFFF)
++			continue;
+ 		total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx);
+ 		if (total_det_allocated > max_det_size) {
+ 			need_recalculation = true;
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+index 289f5d1333424..f608dd3bbac32 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+@@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link)
+ 			return false;
+ 	}
+ 
+-	if (dp_is_lttpr_present(link))
++	if (dp_is_lttpr_present(link)) {
+ 		configure_lttpr_mode_transparent(link);
+ 
++		// Echo TOTAL_LTTPR_CNT back downstream
++		core_link_write_dpcd(
++				link,
++				DP_TOTAL_LTTPR_CNT,
++				&link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
++				sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
++	}
++
+ 	/* Read DP tunneling information. */
+ 	status = dpcd_get_tunneling_device_data(link);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+index 04d142f974745..2fb1d00ff9654 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+@@ -892,7 +892,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_z10 = true,
+ 	.enable_legacy_fast_update = true,
+ 	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
+-	.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
++	.dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ 	.using_dml2 = false,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+index 914f28e9f2242..aee5170f5fb23 100644
+--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
++++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+@@ -177,4 +177,9 @@ enum dpcd_psr_sink_states {
+ #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE     0x379
+ #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+ 
++/* Remove once drm_dp_helper.h is updated upstream */
++#ifndef DP_TOTAL_LTTPR_CNT
++#define DP_TOTAL_LTTPR_CNT                                  0xF000A /* 2.1 */
++#endif
++
+ #endif /* __DAL_DPCD_DEFS_H__ */
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index d612133e2cf7e..117237d3528bd 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
+ 	if (!info)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	if (!drm_leak_fbdev_smem)
++		info->flags |= FBINFO_HIDE_SMEM_START;
++
+ 	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ 	if (ret)
+ 		goto err_release;
+@@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
+ 	info = fb_helper->info;
+ 	info->var.pixclock = 0;
+ 
+-	if (!drm_leak_fbdev_smem)
+-		info->flags |= FBINFO_HIDE_SMEM_START;
+-
+ 	/* Need to drop locks to avoid recursive deadlock in
+ 	 * register_framebuffer. This is ok because the only thing left to do is
+ 	 * register the fbdev emulation instance in kernel_fb_helper_list. */
+diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
+index 6c9427bb4053b..13cd754af311d 100644
+--- a/drivers/gpu/drm/drm_fbdev_dma.c
++++ b/drivers/gpu/drm/drm_fbdev_dma.c
+@@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 		info->flags |= FBINFO_READS_FAST; /* signal caching */
+ 	info->screen_size = sizes->surface_height * fb->pitches[0];
+ 	info->screen_buffer = map.vaddr;
+-	info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
++		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
++			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++	}
+ 	info->fix.smem_len = info->screen_size;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 638ffa4444f51..714e42b051080 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -469,14 +469,12 @@ void drm_file_update_pid(struct drm_file *filp)
+ 
+ 	dev = filp->minor->dev;
+ 	mutex_lock(&dev->filelist_mutex);
++	get_pid(pid);
+ 	old = rcu_replace_pointer(filp->pid, pid, 1);
+ 	mutex_unlock(&dev->filelist_mutex);
+ 
+-	if (pid != old) {
+-		get_pid(pid);
+-		synchronize_rcu();
+-		put_pid(old);
+-	}
++	synchronize_rcu();
++	put_pid(old);
+ }
+ 
+ /**
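
The reordering above is about reader visibility: rcu_replace_pointer()
publishes the new pid to concurrent RCU readers immediately, so the
reference must be taken before publication, and the old pid must be dropped
unconditionally after a grace period rather than only when the pointer
changed. A userspace analogue of that publish-then-reclaim order, assuming
the classic liburcu flavor (build with -lurcu); the refcounted object is
illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct obj { int refs; int val; };

static struct obj *cur;

static void obj_put(struct obj *o)
{
	if (o && --o->refs == 0)
		free(o);
}

int main(void)
{
	rcu_register_thread();

	struct obj *a = calloc(1, sizeof(*a));
	a->refs = 1;
	a->val = 1;
	rcu_assign_pointer(cur, a);

	struct obj *b = calloc(1, sizeof(*b));
	b->val = 2;
	b->refs = 1;			/* take the reference *before* publishing */
	struct obj *old = rcu_xchg_pointer(&cur, b);

	synchronize_rcu();		/* wait out readers unconditionally ... */
	obj_put(old);			/* ... then drop the old reference */

	rcu_read_lock();
	printf("current val: %d\n", rcu_dereference(cur)->val);
	rcu_read_unlock();

	obj_put(rcu_xchg_pointer(&cur, NULL));
	rcu_unregister_thread();
	return 0;
}
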
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 40371b8a9bbbd..93bc1cc1ee7e6 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
+ 		return;
+ 
+ 	GEM_BUG_ON(fence->vma != vma);
++	i915_active_wait(&fence->active);
+ 	GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+ 	GEM_BUG_ON(atomic_read(&fence->pin_count));
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+index 670c9739e5e18..2033214c4b784 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+@@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+ 		struct drm_display_mode *mode;
+ 
+ 		mode = drm_mode_duplicate(encoder->dev, tv_mode);
++		if (!mode)
++			continue;
+ 
+ 		mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ 			mode->htotal / 1000 *
+@@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ 		if (modes[i].hdisplay == output_mode->hdisplay &&
+ 		    modes[i].vdisplay == output_mode->vdisplay) {
+ 			mode = drm_mode_duplicate(encoder->dev, output_mode);
++			if (!mode)
++				continue;
+ 			mode->type |= DRM_MODE_TYPE_PREFERRED;
+ 
+ 		} else {
+@@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+ 					    modes[i].vdisplay, 60, false,
+ 					    (output_mode->flags &
+ 					     DRM_MODE_FLAG_INTERLACE), false);
++			if (!mode)
++				continue;
+ 		}
+ 
+ 		/* CVT modes are sometimes unsuitable... */
+diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+index 2ffe5f68a8903..4c8c317191f3c 100644
+--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+@@ -1080,10 +1080,10 @@ static int ili9881c_prepare(struct drm_panel *panel)
+ 	msleep(5);
+ 
+ 	/* And reset it */
+-	gpiod_set_value(ctx->reset, 1);
++	gpiod_set_value_cansleep(ctx->reset, 1);
+ 	msleep(20);
+ 
+-	gpiod_set_value(ctx->reset, 0);
++	gpiod_set_value_cansleep(ctx->reset, 0);
+ 	msleep(20);
+ 
+ 	for (i = 0; i < ctx->desc->init_length; i++) {
+@@ -1138,7 +1138,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
+ 
+ 	mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ 	regulator_disable(ctx->power);
+-	gpiod_set_value(ctx->reset, 1);
++	gpiod_set_value_cansleep(ctx->reset, 1);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index e8fe5a69454d0..6aac6f2accb43 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2707,6 +2707,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = {
+ 	.vfront_porch = { 3, 5, 10 },
+ 	.vback_porch = { 2, 5, 10 },
+ 	.vsync_len = { 5, 5, 5 },
++	.flags = DISPLAY_FLAGS_DE_HIGH,
+ };
+ 
+ static const struct panel_desc koe_tx26d202vm0bwa = {
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 3e5ff17e3cafb..0999c8eaae94a 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -132,7 +132,6 @@ extern int radeon_cik_support;
+ /* RADEON_IB_POOL_SIZE must be a power of 2 */
+ #define RADEON_IB_POOL_SIZE			16
+ #define RADEON_DEBUGFS_MAX_COMPONENTS		32
+-#define RADEONFB_CONN_LIMIT			4
+ #define RADEON_BIOS_NUM_SCRATCH			8
+ 
+ /* internal ring indices */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index efd18c8d84c83..5f1d24d3120c4 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_crtc *radeon_crtc;
+ 
+-	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
++	radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL);
+ 	if (radeon_crtc == NULL)
+ 		return;
+ 
+@@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ 	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
+ 
+-#if 0
+-	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+-	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+-	radeon_crtc->mode_set.num_connectors = 0;
+-#endif
+-
+ 	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+ 		radeon_atombios_init_crtc(dev, radeon_crtc);
+ 	else
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index 68d3d623a05bf..ccec291b02ccd 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -74,17 +74,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
+ 				   size_t count, void *data, size_t datalen)
+ {
+ 	struct xe_devcoredump *coredump = data;
+-	struct xe_device *xe = coredump_to_xe(coredump);
+-	struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
++	struct xe_device *xe;
++	struct xe_devcoredump_snapshot *ss;
+ 	struct drm_printer p;
+ 	struct drm_print_iterator iter;
+ 	struct timespec64 ts;
+ 	int i;
+ 
+-	/* Our device is gone already... */
+-	if (!data || !coredump_to_xe(coredump))
++	if (!coredump)
+ 		return -ENODEV;
+ 
++	xe = coredump_to_xe(coredump);
++	ss = &coredump->snapshot;
++
+ 	/* Ensure delayed work is captured before continuing */
+ 	flush_work(&ss->work);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
+index e148934d554b0..351ab902eb600 100644
+--- a/drivers/gpu/drm/xe/xe_pat.c
++++ b/drivers/gpu/drm/xe/xe_pat.c
+@@ -457,7 +457,7 @@ void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
+ {
+ 	struct xe_device *xe = gt_to_xe(gt);
+ 
+-	if (!xe->pat.ops->dump)
++	if (!xe->pat.ops)
+ 		return;
+ 
+ 	xe->pat.ops->dump(gt, p);
+diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+index 3107d2a12426c..fb35e46d68b49 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
++++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+@@ -207,6 +207,11 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+ 	u64 stolen_size, io_size, pgsize;
+ 	int err;
+ 
++	if (!mgr) {
++		drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
++		return;
++	}
++
+ 	if (IS_SRIOV_VF(xe))
+ 		stolen_size = 0;
+ 	else if (IS_DGFX(xe))
+diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+index 115ec745e5029..0678faf832126 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
++++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+@@ -91,7 +91,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
+ 
+ 	min_page_size = mgr->default_page_size;
+ 	if (tbo->page_alignment)
+-		min_page_size = tbo->page_alignment << PAGE_SHIFT;
++		min_page_size = (u64)tbo->page_alignment << PAGE_SHIFT;
+ 
+ 	if (WARN_ON(min_page_size < mm->chunk_size)) {
+ 		err = -EINVAL;
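
The cast above matters because tbo->page_alignment is 32 bits wide: the
shift by PAGE_SHIFT is evaluated in 32-bit arithmetic and wraps before the
result ever reaches the 64-bit min_page_size, so very large alignments
silently became 0. The redboot partition-parser change further down fixes
the same class of bug for a 32-bit multiply. A standalone illustration,
assuming the usual PAGE_SHIFT of 12:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t page_alignment = 1u << 20;	/* 2^20 pages = 4 GiB alignment */
	uint64_t wrong = page_alignment << PAGE_SHIFT;	/* wraps in 32 bits */
	uint64_t right = (uint64_t)page_alignment << PAGE_SHIFT;

	printf("wrong = 0x%llx, right = 0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
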
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index a49642bbae4b7..ca43e98cae1b2 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 			queue_delayed_work(system_long_wq, &tu->worker,
+ 					   msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ 		}
+-		fallthrough;
++		break;
+ 
+ 	case I2C_SLAVE_WRITE_REQUESTED:
++		if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
++			return -EBUSY;
++
+ 		memset(tu->regs, 0, TU_NUM_REGS);
+ 		tu->reg_idx = 0;
+ 		break;
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index c2da5066e9a7b..80b57d3ee3a72 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -330,6 +330,8 @@ config DMARD10
+ config FXLS8962AF
+ 	tristate
+ 	depends on I2C || !I2C # cannot be built-in for modular I2C
++	select IIO_BUFFER
++	select IIO_KFIFO_BUF
+ 
+ config FXLS8962AF_I2C
+ 	tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 468c2656d2be7..98648c679a55c 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
+ 		ret = ad7266_read_single(st, val, chan->address);
+ 		iio_device_release_direct_mode(indio_dev);
+ 
++		if (ret < 0)
++			return ret;
+ 		*val = (*val >> 2) & 0xfff;
+ 		if (chan->scan_type.sign == 's')
+ 			*val = sign_extend32(*val,
+diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
+index f0b71a1220e02..f52abf759260f 100644
+--- a/drivers/iio/adc/xilinx-ams.c
++++ b/drivers/iio/adc/xilinx-ams.c
+@@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
+ 
+ 	/* Run calibration of PS & PL as part of the sequence */
+ 	scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
+-	for (i = 0; i < indio_dev->num_channels; i++)
+-		scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
++	for (i = 0; i < indio_dev->num_channels; i++) {
++		const struct iio_chan_spec *chan = &indio_dev->channels[i];
++
++		if (chan->scan_index < AMS_CTRL_SEQ_BASE)
++			scan_mask |= BIT_ULL(chan->scan_index);
++	}
+ 
+ 	if (ams->ps_base) {
+ 		/* put sysmon in a soft reset to change the sequence */
+diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h
+index 4edc5d21cb9fa..f959252a4fe66 100644
+--- a/drivers/iio/chemical/bme680.h
++++ b/drivers/iio/chemical/bme680.h
+@@ -54,7 +54,9 @@
+ #define   BME680_NB_CONV_MASK			GENMASK(3, 0)
+ 
+ #define BME680_REG_MEAS_STAT_0			0x1D
++#define   BME680_NEW_DATA_BIT			BIT(7)
+ #define   BME680_GAS_MEAS_BIT			BIT(6)
++#define   BME680_MEAS_BIT			BIT(5)
+ 
+ /* Calibration Parameters */
+ #define BME680_T2_LSB_REG	0x8A
+diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
+index ef5e0e46fd344..500f56834b01f 100644
+--- a/drivers/iio/chemical/bme680_core.c
++++ b/drivers/iio/chemical/bme680_core.c
+@@ -10,6 +10,7 @@
+  */
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/log2.h>
+@@ -38,7 +39,7 @@ struct bme680_calib {
+ 	s8  par_h3;
+ 	s8  par_h4;
+ 	s8  par_h5;
+-	s8  par_h6;
++	u8  par_h6;
+ 	s8  par_h7;
+ 	s8  par_gh1;
+ 	s16 par_gh2;
+@@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
+ 	if (!calib->par_t2)
+ 		bme680_read_calib(data, calib);
+ 
+-	var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
++	var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1);
+ 	var2 = (var1 * calib->par_t2) >> 11;
+ 	var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
+-	var3 = (var3 * (calib->par_t3 << 4)) >> 14;
++	var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14;
+ 	data->t_fine = var2 + var3;
+ 	calc_temp = (data->t_fine * 5 + 128) >> 8;
+ 
+@@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ 	var1 = (data->t_fine >> 1) - 64000;
+ 	var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2;
+ 	var2 = var2 + (var1 * calib->par_p5 << 1);
+-	var2 = (var2 >> 2) + (calib->par_p4 << 16);
++	var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16);
+ 	var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) *
+-			(calib->par_p3 << 5)) >> 3) +
++			((s32)calib->par_p3 << 5)) >> 3) +
+ 			((calib->par_p2 * var1) >> 1);
+ 	var1 = var1 >> 18;
+ 	var1 = ((32768 + var1) * calib->par_p1) >> 15;
+@@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data,
+ 	var3 = ((press_comp >> 8) * (press_comp >> 8) *
+ 			(press_comp >> 8) * calib->par_p10) >> 17;
+ 
+-	press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4;
++	press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4;
+ 
+ 	return press_comp;
+ }
+@@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data,
+ 		 (((temp_scaled * ((temp_scaled * calib->par_h5) / 100))
+ 		   >> 6) / 100) + (1 << 14))) >> 10;
+ 	var3 = var1 * var2;
+-	var4 = calib->par_h6 << 7;
++	var4 = (s32)calib->par_h6 << 7;
+ 	var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4;
+ 	var5 = ((var3 >> 14) * (var3 >> 14)) >> 10;
+ 	var6 = (var4 * var5) >> 1;
+@@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val)
+ 	return ilog2(val) + 1;
+ }
+ 
++/*
++ * Taken from Bosch BME680 API:
++ * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490
++ */
++static int bme680_wait_for_eoc(struct bme680_data *data)
++{
++	struct device *dev = regmap_get_device(data->regmap);
++	unsigned int check;
++	int ret;
++	/*
++	 * (Sum of oversampling ratios * time per oversampling) +
++	 * TPH measurement + gas measurement + wait transition from forced mode
++	 * + heater duration
++	 */
++	int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
++			   data->oversampling_humid) * 1936) + (477 * 4) +
++			   (477 * 5) + 1000 + (data->heater_dur * 1000);
++
++	usleep_range(wait_eoc_us, wait_eoc_us + 100);
++
++	ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
++	if (ret) {
++		dev_err(dev, "failed to read measurement status register.\n");
++		return ret;
++	}
++	if (check & BME680_MEAS_BIT) {
++		dev_err(dev, "Device measurement cycle incomplete.\n");
++		return -EBUSY;
++	}
++	if (!(check & BME680_NEW_DATA_BIT)) {
++		dev_err(dev, "No new data available from the device.\n");
++		return -ENODATA;
++	}
++
++	return 0;
++}
++
+ static int bme680_chip_config(struct bme680_data *data)
+ {
+ 	struct device *dev = regmap_get_device(data->regmap);
+@@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = bme680_wait_for_eoc(data);
++	if (ret)
++		return ret;
++
+ 	ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
+ 			       &tmp, 3);
+ 	if (ret < 0) {
+@@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data,
+ 	}
+ 
+ 	*val = bme680_compensate_press(data, adc_press);
+-	*val2 = 100;
++	*val2 = 1000;
+ 	return IIO_VAL_FRACTIONAL;
+ }
+ 
+@@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = bme680_wait_for_eoc(data);
++	if (ret)
++		return ret;
++
+ 	ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check);
+ 	if (check & BME680_GAS_MEAS_BIT) {
+ 		dev_err(dev, "gas measurement incomplete\n");
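
The delay that bme680_wait_for_eoc() derives is simple arithmetic over the
configured oversampling ratios and the heater duration, so it is easy to
sanity-check by hand. A standalone evaluation of the same formula with
illustrative settings (8x/4x/2x oversampling, 100 ms heater duration):

#include <stdio.h>

int main(void)
{
	int osrs_temp = 8, osrs_press = 4, osrs_humid = 2;
	int heater_dur_ms = 100;

	/* (sum of oversampling ratios * time per oversampling) + TPH
	 * measurement + gas measurement + forced-mode transition +
	 * heater duration, all in microseconds */
	int wait_eoc_us = ((osrs_temp + osrs_press + osrs_humid) * 1936) +
			  (477 * 4) + (477 * 5) + 1000 +
			  (heater_dur_ms * 1000);

	printf("wait_eoc_us = %d\n", wait_eoc_us);	/* 132397 here */
	return 0;
}
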
+diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
+index 1e5d0d4797b16..1e702fcc48515 100644
+--- a/drivers/iio/humidity/hdc3020.c
++++ b/drivers/iio/humidity/hdc3020.c
+@@ -18,6 +18,7 @@
+ #include <linux/i2c.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/units.h>
+@@ -63,8 +64,10 @@
+ 
+ #define HDC3020_CRC8_POLYNOMIAL		0x31
+ 
+-#define HDC3020_MIN_TEMP		-40
+-#define HDC3020_MAX_TEMP		125
++#define HDC3020_MIN_TEMP_MICRO		-39872968
++#define HDC3020_MAX_TEMP_MICRO		124875639
++#define HDC3020_MAX_TEMP_HYST_MICRO	164748607
++#define HDC3020_MAX_HUM_MICRO		99220264
+ 
+ struct hdc3020_data {
+ 	struct i2c_client *client;
+@@ -363,6 +366,105 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
+ 	return -EINVAL;
+ }
+ 
++static int hdc3020_thresh_get_temp(u16 thresh)
++{
++	int temp;
++
++	/*
++	 * Get the temperature threshold from 9 LSBs, shift them to get
++	 * the truncated temperature threshold representation and
++	 * calculate the threshold according to the formula in the
++	 * datasheet. Result is degrees Celsius scaled by 65535.
++	 */
++	temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
++	       HDC3020_THRESH_TEMP_TRUNC_SHIFT;
++
++	return -2949075 + (175 * temp);
++}
++
++static int hdc3020_thresh_get_hum(u16 thresh)
++{
++	int hum;
++
++	/*
++	 * Get the humidity threshold from 7 MSBs, shift them to get the
++	 * truncated humidity threshold representation and calculate the
++	 * threshold according to the formula in the datasheet. Result is
++	 * percent scaled by 65535.
++	 */
++	hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
++	      HDC3020_THRESH_HUM_TRUNC_SHIFT;
++
++	return hum * 100;
++}
++
++static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
++{
++	u64 temp;
++	u16 thresh;
++
++	/*
++	 * Calculate temperature threshold, shift it down to get the
++	 * truncated threshold representation in the 9 LSBs while keeping
++	 * the current humidity threshold in the 7 MSBs.
++	 */
++	temp = (u64)(s_temp + 45000000) * 65535ULL;
++	temp = div_u64(temp, 1000000 * 175) >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
++	thresh = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, temp);
++	thresh |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, curr_thresh) <<
++		  HDC3020_THRESH_HUM_TRUNC_SHIFT);
++
++	return thresh;
++}
++
++static u16 hdc3020_thresh_set_hum(int s_hum, u16 curr_thresh)
++{
++	u64 hum;
++	u16 thresh;
++
++	/*
++	 * Calculate humidity threshold, shift it down and up to get the
++	 * truncated threshold representation in the 7 MSBs while keeping
++	 * the current temperature threshold in the 9 LSBs.
++	 */
++	hum = (u64)(s_hum) * 65535ULL;
++	hum = div_u64(hum, 1000000 * 100) >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
++	thresh = FIELD_PREP(HDC3020_THRESH_HUM_MASK, hum);
++	thresh |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, curr_thresh);
++
++	return thresh;
++}
++
++static
++int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir)
++{
++	s64 s_clr;
++
++	/*
++	 * Include directions when calculating the clear value,
++	 * since hysteresis is unsigned by definition and the
++	 * clear value is an absolute value which is signed.
++	 */
++	if (dir == IIO_EV_DIR_RISING)
++		s_clr = s_thresh - s_hyst;
++	else
++		s_clr = s_thresh + s_hyst;
++
++	/* Divide by 65535 to get units of micro */
++	return div_s64(s_clr, 65535);
++}
++
++static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
++{
++	u8 buf[5];
++
++	put_unaligned_be16(reg, buf);
++	put_unaligned_be16(val, buf + 2);
++	buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
++
++	return hdc3020_write_bytes(data, buf, 5);
++}
++
+ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
+ 				const struct iio_chan_spec *chan,
+ 				enum iio_event_type type,
+@@ -371,67 +473,126 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
+ 				int val, int val2)
+ {
+ 	struct hdc3020_data *data = iio_priv(indio_dev);
+-	u8 buf[5];
+-	u64 tmp;
+-	u16 reg;
+-	int ret;
+-
+-	/* Supported temperature range is from –40 to 125 degree celsius */
+-	if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
+-		return -EINVAL;
+-
+-	/* Select threshold register */
+-	if (info == IIO_EV_INFO_VALUE) {
+-		if (dir == IIO_EV_DIR_RISING)
+-			reg = HDC3020_S_T_RH_THRESH_HIGH;
+-		else
+-			reg = HDC3020_S_T_RH_THRESH_LOW;
++	u16 reg, reg_val, reg_thresh_rd, reg_clr_rd, reg_thresh_wr, reg_clr_wr;
++	s64 s_thresh, s_hyst, s_clr;
++	int s_val, thresh, clr, ret;
++
++	/* Select threshold registers */
++	if (dir == IIO_EV_DIR_RISING) {
++		reg_thresh_rd = HDC3020_R_T_RH_THRESH_HIGH;
++		reg_thresh_wr = HDC3020_S_T_RH_THRESH_HIGH;
++		reg_clr_rd = HDC3020_R_T_RH_THRESH_HIGH_CLR;
++		reg_clr_wr = HDC3020_S_T_RH_THRESH_HIGH_CLR;
+ 	} else {
+-		if (dir == IIO_EV_DIR_RISING)
+-			reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
+-		else
+-			reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
++		reg_thresh_rd = HDC3020_R_T_RH_THRESH_LOW;
++		reg_thresh_wr = HDC3020_S_T_RH_THRESH_LOW;
++		reg_clr_rd = HDC3020_R_T_RH_THRESH_LOW_CLR;
++		reg_clr_wr = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ 	}
+ 
+ 	guard(mutex)(&data->lock);
+-	ret = hdc3020_read_be16(data, reg);
++	ret = hdc3020_read_be16(data, reg_thresh_rd);
+ 	if (ret < 0)
+ 		return ret;
+ 
++	thresh = ret;
++	ret = hdc3020_read_be16(data, reg_clr_rd);
++	if (ret < 0)
++		return ret;
++
++	clr = ret;
++	/* Scale value to include decimal part into calculations */
++	s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
+ 	switch (chan->type) {
+ 	case IIO_TEMP:
+-		/*
+-		 * Calculate temperature threshold, shift it down to get the
+-		 * truncated threshold representation in the 9LSBs while keeping
+-		 * the current humidity threshold in the 7 MSBs.
+-		 */
+-		tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
+-		tmp = div_u64(tmp, MICRO * 175);
+-		val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+-		val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
+-		val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
+-			HDC3020_THRESH_HUM_TRUNC_SHIFT);
++		switch (info) {
++		case IIO_EV_INFO_VALUE:
++			s_val = max(s_val, HDC3020_MIN_TEMP_MICRO);
++			s_val = min(s_val, HDC3020_MAX_TEMP_MICRO);
++			reg = reg_thresh_wr;
++			reg_val = hdc3020_thresh_set_temp(s_val, thresh);
++			ret = _hdc3020_write_thresh(data, reg, reg_val);
++			if (ret < 0)
++				return ret;
++
++			/* Calculate old hysteresis */
++			s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
++			s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
++			s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
++			/* Set new threshold */
++			thresh = reg_val;
++			/* Set old hysteresis */
++			s_val = s_hyst;
++			fallthrough;
++		case IIO_EV_INFO_HYSTERESIS:
++			/*
++			 * Function hdc3020_thresh_get_temp returns temperature
++			 * in degrees Celsius scaled by 65535. Scale by 1000000
++			 * to be able to subtract scaled hysteresis value.
++			 */
++			s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
++			/*
++			 * Units of s_val are in micro degrees Celsius, scale by
++			 * 65535 to get same units as s_thresh.
++			 */
++			s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
++			s_hyst = (s64)s_val * 65535;
++			s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
++			s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
++			s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
++			reg = reg_clr_wr;
++			reg_val = hdc3020_thresh_set_temp(s_clr, clr);
++			break;
++		default:
++			return -EOPNOTSUPP;
++		}
+ 		break;
+ 	case IIO_HUMIDITYRELATIVE:
+-		/*
+-		 * Calculate humidity threshold, shift it down and up to get the
+-		 * truncated threshold representation in the 7MSBs while keeping
+-		 * the current temperature threshold in the 9 LSBs.
+-		 */
+-		tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
+-		tmp = div_u64(tmp, MICRO * 100);
+-		val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+-		val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
+-		val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
++		s_val = (s_val < 0) ? 0 : min(s_val, HDC3020_MAX_HUM_MICRO);
++		switch (info) {
++		case IIO_EV_INFO_VALUE:
++			reg = reg_thresh_wr;
++			reg_val = hdc3020_thresh_set_hum(s_val, thresh);
++			ret = _hdc3020_write_thresh(data, reg, reg_val);
++			if (ret < 0)
++				return ret;
++
++			/* Calculate old hysteresis */
++			s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
++			s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
++			s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
++			/* Set new threshold */
++			thresh = reg_val;
++			/* Try to set old hysteresis */
++			s_val = min(abs(s_hyst), HDC3020_MAX_HUM_MICRO);
++			fallthrough;
++		case IIO_EV_INFO_HYSTERESIS:
++			/*
++			 * Function hdc3020_thresh_get_hum returns relative
++			 * humidity in percent scaled by 65535. Scale by 1000000
++			 * to be able to subtract scaled hysteresis value.
++			 */
++			s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
++			/*
++			 * Units of s_val are in micro percent, scale by 65535
++			 * to get same units as s_thresh.
++			 */
++			s_hyst = (s64)s_val * 65535;
++			s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
++			s_clr = max(s_clr, 0);
++			s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
++			reg = reg_clr_wr;
++			reg_val = hdc3020_thresh_set_hum(s_clr, clr);
++			break;
++		default:
++			return -EOPNOTSUPP;
++		}
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	put_unaligned_be16(reg, buf);
+-	put_unaligned_be16(val, buf + 2);
+-	buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+-	return hdc3020_write_bytes(data, buf, 5);
++	return _hdc3020_write_thresh(data, reg, reg_val);
+ }
+ 
+ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
+@@ -442,48 +603,60 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
+ 			       int *val, int *val2)
+ {
+ 	struct hdc3020_data *data = iio_priv(indio_dev);
+-	u16 reg;
+-	int ret;
++	u16 reg_thresh, reg_clr;
++	int thresh, clr, ret;
+ 
+-	/* Select threshold register */
+-	if (info == IIO_EV_INFO_VALUE) {
+-		if (dir == IIO_EV_DIR_RISING)
+-			reg = HDC3020_R_T_RH_THRESH_HIGH;
+-		else
+-			reg = HDC3020_R_T_RH_THRESH_LOW;
++	/* Select threshold registers */
++	if (dir == IIO_EV_DIR_RISING) {
++		reg_thresh = HDC3020_R_T_RH_THRESH_HIGH;
++		reg_clr = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ 	} else {
+-		if (dir == IIO_EV_DIR_RISING)
+-			reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+-		else
+-			reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
++		reg_thresh = HDC3020_R_T_RH_THRESH_LOW;
++		reg_clr = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ 	}
+ 
+ 	guard(mutex)(&data->lock);
+-	ret = hdc3020_read_be16(data, reg);
++	ret = hdc3020_read_be16(data, reg_thresh);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	switch (chan->type) {
+ 	case IIO_TEMP:
+-		/*
+-		 * Get the temperature threshold from 9 LSBs, shift them to get
+-		 * the truncated temperature threshold representation and
+-		 * calculate the threshold according to the formula in the
+-		 * datasheet.
+-		 */
+-		*val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+-		*val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+-		*val = -2949075 + (175 * (*val));
++		thresh = hdc3020_thresh_get_temp(ret);
++		switch (info) {
++		case IIO_EV_INFO_VALUE:
++			*val = thresh;
++			break;
++		case IIO_EV_INFO_HYSTERESIS:
++			ret = hdc3020_read_be16(data, reg_clr);
++			if (ret < 0)
++				return ret;
++
++			clr = hdc3020_thresh_get_temp(ret);
++			*val = abs(thresh - clr);
++			break;
++		default:
++			return -EOPNOTSUPP;
++		}
+ 		*val2 = 65535;
+ 		return IIO_VAL_FRACTIONAL;
+ 	case IIO_HUMIDITYRELATIVE:
+-		/*
+-		 * Get the humidity threshold from 7 MSBs, shift them to get the
+-		 * truncated humidity threshold representation and calculate the
+-		 * threshold according to the formula in the datasheet.
+-		 */
+-		*val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
+-		*val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
++		thresh = hdc3020_thresh_get_hum(ret);
++		switch (info) {
++		case IIO_EV_INFO_VALUE:
++			*val = thresh;
++			break;
++		case IIO_EV_INFO_HYSTERESIS:
++			ret = hdc3020_read_be16(data, reg_clr);
++			if (ret < 0)
++				return ret;
++
++			clr = hdc3020_thresh_get_hum(ret);
++			*val = abs(thresh - clr);
++			break;
++		default:
++			return -EOPNOTSUPP;
++		}
+ 		*val2 = 65535;
+ 		return IIO_VAL_FRACTIONAL;
+ 	default:
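
The conversions above all revolve around one encoding: a 16-bit register
whose 9 LSBs carry a temperature threshold truncated to its 9 most
significant raw bits (a shift of 7) and whose 7 MSBs carry a humidity
threshold truncated to its 7 most significant raw bits (a shift of 9). A
minimal round-trip sketch of the temperature side under those assumptions
(the driver's mask and shift constants are defined outside this hunk):

#include <stdio.h>
#include <stdint.h>

#define TEMP_TRUNC_SHIFT 7	/* 16 raw bits -> 9 significant bits */

/* datasheet formula: raw = (temp_uC + 45000000) * 65535 / 175000000 */
static uint16_t temp_to_raw(int64_t temp_uC)
{
	return (uint16_t)(((temp_uC + 45000000LL) * 65535LL) / 175000000LL);
}

/* inverse, in micro degrees Celsius: -45 + 175 * raw / 65535 */
static int64_t raw_to_temp_uC(uint16_t raw)
{
	return (-2949075LL + 175LL * raw) * 1000000LL / 65535LL;
}

int main(void)
{
	uint16_t raw = temp_to_raw(30000000);	/* 30 deg C */
	uint16_t trunc = (uint16_t)((raw >> TEMP_TRUNC_SHIFT) << TEMP_TRUNC_SHIFT);

	/* quantization step is 128 * 175 / 65535 ~= 0.342 deg C */
	printf("raw=%u truncated=%u -> stored threshold = %lld uC\n",
	       raw, trunc, (long long)raw_to_temp_uC(trunc));
	return 0;
}
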
+diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
+index 01a499a8b88db..438ed35881752 100644
+--- a/drivers/infiniband/core/restrack.c
++++ b/drivers/infiniband/core/restrack.c
+@@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
+ 	return 0;
+ }
+ 
+-static const char *type2str(enum rdma_restrack_type type)
+-{
+-	static const char * const names[RDMA_RESTRACK_MAX] = {
+-		[RDMA_RESTRACK_PD] = "PD",
+-		[RDMA_RESTRACK_CQ] = "CQ",
+-		[RDMA_RESTRACK_QP] = "QP",
+-		[RDMA_RESTRACK_CM_ID] = "CM_ID",
+-		[RDMA_RESTRACK_MR] = "MR",
+-		[RDMA_RESTRACK_CTX] = "CTX",
+-		[RDMA_RESTRACK_COUNTER] = "COUNTER",
+-		[RDMA_RESTRACK_SRQ] = "SRQ",
+-	};
+-
+-	return names[type];
+-};
+-
+ /**
+  * rdma_restrack_clean() - clean resource tracking
+  * @dev:  IB device
+@@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
+ void rdma_restrack_clean(struct ib_device *dev)
+ {
+ 	struct rdma_restrack_root *rt = dev->res;
+-	struct rdma_restrack_entry *e;
+-	char buf[TASK_COMM_LEN];
+-	bool found = false;
+-	const char *owner;
+ 	int i;
+ 
+ 	for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
+ 		struct xarray *xa = &dev->res[i].xa;
+ 
+-		if (!xa_empty(xa)) {
+-			unsigned long index;
+-
+-			if (!found) {
+-				pr_err("restrack: %s", CUT_HERE);
+-				dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
+-			}
+-			xa_for_each(xa, index, e) {
+-				if (rdma_is_kernel_res(e)) {
+-					owner = e->kern_name;
+-				} else {
+-					/*
+-					 * There is no need to call get_task_struct here,
+-					 * because we can be here only if there are more
+-					 * get_task_struct() calls than put_task_struct().
+-					 */
+-					get_task_comm(buf, e->task);
+-					owner = buf;
+-				}
+-
+-				pr_err("restrack: %s %s object allocated by %s is not freed\n",
+-				       rdma_is_kernel_res(e) ? "Kernel" :
+-							       "User",
+-				       type2str(e->type), owner);
+-			}
+-			found = true;
+-		}
++		WARN_ON(!xa_empty(xa));
+ 		xa_destroy(xa);
+ 	}
+-	if (found)
+-		pr_err("restrack: %s", CUT_HERE);
+-
+ 	kfree(rt);
+ }
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 31ffdc2a93f35..79bdb2b109496 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+ 	if (!error && data[0] == 2) {
+ 		error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
+ 					ILI251X_DATA_SIZE2);
+-		if (error >= 0 && error != ILI251X_DATA_SIZE2)
+-			error = -EIO;
++		if (error >= 0)
++			error = error == ILI251X_DATA_SIZE2 ? 0 : -EIO;
+ 	}
+ 
+ 	return error;
+diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
+index f482aab420f78..95a161fdbae2c 100644
+--- a/drivers/iommu/amd/amd_iommu.h
++++ b/drivers/iommu/amd/amd_iommu.h
+@@ -56,6 +56,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
+ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+ void amd_iommu_domain_update(struct protection_domain *domain);
++void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
+ void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ 				  u64 address, size_t size);
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index e740dc54c4685..21798a0fa9268 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2784,6 +2784,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
+ 	iommu_enable_command_buffer(iommu);
+ 	iommu_enable_event_buffer(iommu);
+ 	iommu_set_exclusion_range(iommu);
++	iommu_enable_gt(iommu);
+ 	iommu_enable_ga(iommu);
+ 	iommu_enable_xt(iommu);
+ 	iommu_enable_irtcachedis(iommu);
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index fb727f5b0b82d..e2b900ffbc158 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -2002,6 +2002,21 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+ 	amd_iommu_apply_erratum_63(iommu, devid);
+ }
+ 
++/* Update and flush DTE for the given device */
++void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
++{
++	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
++
++	if (set)
++		set_dte_entry(iommu, dev_data);
++	else
++		clear_dte_entry(iommu, dev_data->devid);
++
++	clone_aliases(iommu, dev_data->dev);
++	device_flush_dte(dev_data);
++	iommu_completion_wait(iommu);
++}
++
+ static int do_attach(struct iommu_dev_data *dev_data,
+ 		     struct protection_domain *domain)
+ {
+@@ -2036,10 +2051,7 @@ static int do_attach(struct iommu_dev_data *dev_data,
+ 	}
+ 
+ 	/* Update device table */
+-	set_dte_entry(iommu, dev_data);
+-	clone_aliases(iommu, dev_data->dev);
+-
+-	device_flush_dte(dev_data);
++	amd_iommu_dev_update_dte(dev_data, true);
+ 
+ 	return ret;
+ }
+@@ -2049,6 +2061,12 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ 	struct protection_domain *domain = dev_data->domain;
+ 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ 
++	/* Clear DTE and flush the entry */
++	amd_iommu_dev_update_dte(dev_data, false);
++
++	/* Flush IOTLB and wait for the flushes to finish */
++	amd_iommu_domain_flush_all(domain);
++
+ 	/* Clear GCR3 table */
+ 	if (domain->pd_mode == PD_MODE_V2) {
+ 		update_gcr3(dev_data, 0, 0, false);
+@@ -2058,14 +2076,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ 	/* Update data structures */
+ 	dev_data->domain = NULL;
+ 	list_del(&dev_data->list);
+-	clear_dte_entry(iommu, dev_data->devid);
+-	clone_aliases(iommu, dev_data->dev);
+-
+-	/* Flush the DTE entry */
+-	device_flush_dte(dev_data);
+-
+-	/* Flush IOTLB and wait for the flushes to finish */
+-	amd_iommu_domain_flush_all(domain);
+ 
+ 	/* decrease reference counters - needs to happen after the flushes */
+ 	domain->dev_iommu[iommu->index] -= 1;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+index 2cd433a9c8a0f..41b44baef15e8 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+@@ -569,6 +569,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
+ 	int ret = 0;
+ 	struct mm_struct *mm = domain->mm;
+ 
++	if (mm_get_enqcmd_pasid(mm) != id)
++		return -EINVAL;
++
+ 	mutex_lock(&sva_lock);
+ 	ret = __arm_smmu_sva_bind(dev, id, mm);
+ 	mutex_unlock(&sva_lock);
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 72c07a12f5e18..bfa1d77749f3e 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -568,7 +568,7 @@ config IRQ_LOONGARCH_CPU
+ 	bool
+ 	select GENERIC_IRQ_CHIP
+ 	select IRQ_DOMAIN
+-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
++	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+ 	select LOONGSON_HTVEC
+ 	select LOONGSON_LIOINTC
+ 	select LOONGSON_EIOINTC
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index b64cbe3052e84..f2921b26ba84b 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -15,6 +15,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/kernel.h>
+ #include <linux/syscore_ops.h>
++#include <asm/numa.h>
+ 
+ #define EIOINTC_REG_NODEMAP	0x14a0
+ #define EIOINTC_REG_IPMAP	0x14c0
+@@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ 	int node;
+ 
+ 	if (cpu_has_flatmode)
+-		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
++		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
+ 	else
+ 		node = eiointc_priv[nr_pics - 1]->node;
+ 
+@@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ 		goto out_free_handle;
+ 
+ 	if (cpu_has_flatmode)
+-		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
++		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
+ 	else
+ 		node = acpi_eiointc->node;
+ 	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
+diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
+index e4b33aed1c97b..7c4fe7ab4b830 100644
+--- a/drivers/irqchip/irq-loongson-liointc.c
++++ b/drivers/irqchip/irq-loongson-liointc.c
+@@ -28,7 +28,7 @@
+ 
+ #define LIOINTC_INTC_CHIP_START	0x20
+ 
+-#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
++#define LIOINTC_REG_INTC_STATUS(core)	(LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
+ #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
+ #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
+ #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
+@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
+ 		goto out_free_priv;
+ 
+ 	for (i = 0; i < LIOINTC_NUM_CORES; i++)
+-		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
++		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
+ 
+ 	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
+ 		priv->handler[i].parent_int_map = parent_int_map[i];
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 733d0bc4b4cc3..b43695bc51e75 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -956,7 +956,7 @@ int dvb_usercopy(struct file *file,
+ 		 int (*func)(struct file *file,
+ 			     unsigned int cmd, void *arg))
+ {
+-	char    sbuf[128];
++	char    sbuf[128] = {};
+ 	void    *mbuf = NULL;
+ 	void    *parg = NULL;
+ 	int     err  = -EINVAL;
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index 9a5f75163acae..8ede4ce932714 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -131,10 +131,12 @@ struct moxart_host {
+ 	struct dma_async_tx_descriptor	*tx_desc;
+ 	struct mmc_host			*mmc;
+ 	struct mmc_request		*mrq;
++	struct scatterlist		*cur_sg;
+ 	struct completion		dma_complete;
+ 	struct completion		pio_complete;
+ 
+-	struct sg_mapping_iter		sg_miter;
++	u32				num_sg;
++	u32				data_remain;
+ 	u32				data_len;
+ 	u32				fifo_width;
+ 	u32				timeout;
+@@ -146,6 +148,35 @@ struct moxart_host {
+ 	bool				is_removed;
+ };
+ 
++static inline void moxart_init_sg(struct moxart_host *host,
++				  struct mmc_data *data)
++{
++	host->cur_sg = data->sg;
++	host->num_sg = data->sg_len;
++	host->data_remain = host->cur_sg->length;
++
++	if (host->data_remain > host->data_len)
++		host->data_remain = host->data_len;
++}
++
++static inline int moxart_next_sg(struct moxart_host *host)
++{
++	int remain;
++	struct mmc_data *data = host->mrq->cmd->data;
++
++	host->cur_sg++;
++	host->num_sg--;
++
++	if (host->num_sg > 0) {
++		host->data_remain = host->cur_sg->length;
++		remain = host->data_len - data->bytes_xfered;
++		if (remain > 0 && remain < host->data_remain)
++			host->data_remain = remain;
++	}
++
++	return host->num_sg;
++}
++
+ static int moxart_wait_for_status(struct moxart_host *host,
+ 				  u32 mask, u32 *status)
+ {
+@@ -278,29 +309,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
+ 
+ static void moxart_transfer_pio(struct moxart_host *host)
+ {
+-	struct sg_mapping_iter *sgm = &host->sg_miter;
+ 	struct mmc_data *data = host->mrq->cmd->data;
+ 	u32 *sgp, len = 0, remain, status;
+ 
+ 	if (host->data_len == data->bytes_xfered)
+ 		return;
+ 
+-	/*
+-	 * By updating sgm->consumed this will get a proper pointer into the
+-	 * buffer at any time.
+-	 */
+-	if (!sg_miter_next(sgm)) {
+-		/* This should not happen */
+-		dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+-		data->error = -EINVAL;
+-		complete(&host->pio_complete);
+-		return;
+-	}
+-	sgp = sgm->addr;
+-	remain = sgm->length;
+-	if (remain > host->data_len)
+-		remain = host->data_len;
+-	sgm->consumed = 0;
++	sgp = sg_virt(host->cur_sg);
++	remain = host->data_remain;
+ 
+ 	if (data->flags & MMC_DATA_WRITE) {
+ 		while (remain > 0) {
+@@ -315,7 +331,6 @@ static void moxart_transfer_pio(struct moxart_host *host)
+ 				sgp++;
+ 				len += 4;
+ 			}
+-			sgm->consumed += len;
+ 			remain -= len;
+ 		}
+ 
+@@ -332,22 +347,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
+ 				sgp++;
+ 				len += 4;
+ 			}
+-			sgm->consumed += len;
+ 			remain -= len;
+ 		}
+ 	}
+ 
+-	data->bytes_xfered += sgm->consumed;
+-	if (host->data_len == data->bytes_xfered) {
++	data->bytes_xfered += host->data_remain - remain;
++	host->data_remain = remain;
++
++	if (host->data_len != data->bytes_xfered)
++		moxart_next_sg(host);
++	else
+ 		complete(&host->pio_complete);
+-		return;
+-	}
+ }
+ 
+ static void moxart_prepare_data(struct moxart_host *host)
+ {
+ 	struct mmc_data *data = host->mrq->cmd->data;
+-	unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
+ 	u32 datactrl;
+ 	int blksz_bits;
+ 
+@@ -358,19 +373,15 @@ static void moxart_prepare_data(struct moxart_host *host)
+ 	blksz_bits = ffs(data->blksz) - 1;
+ 	BUG_ON(1 << blksz_bits != data->blksz);
+ 
++	moxart_init_sg(host, data);
++
+ 	datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
+ 
+-	if (data->flags & MMC_DATA_WRITE) {
+-		flags |= SG_MITER_FROM_SG;
++	if (data->flags & MMC_DATA_WRITE)
+ 		datactrl |= DCR_DATA_WRITE;
+-	} else {
+-		flags |= SG_MITER_TO_SG;
+-	}
+ 
+ 	if (moxart_use_dma(host))
+ 		datactrl |= DCR_DMA_EN;
+-	else
+-		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+ 
+ 	writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
+ 	writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
+@@ -443,9 +454,6 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 	}
+ 
+ request_done:
+-	if (!moxart_use_dma(host))
+-		sg_miter_stop(&host->sg_miter);
+-
+ 	spin_unlock_irqrestore(&host->lock, flags);
+ 	mmc_request_done(host->mmc, mrq);
+ }
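
The conversion replaces the sg_miter (whose consumed-byte bookkeeping was
awkward to drive from IRQ context) with an explicit cursor over the
scatterlist: cur_sg/num_sg/data_remain, where the first segment is clamped
to data_len and later segments are clamped to whatever the request still
owes. A userspace model of just that bookkeeping, with simplified types
(the field names mirror the driver):

#include <stdio.h>

struct seg { unsigned int length; };

struct walk {
	const struct seg *cur_sg;
	unsigned int num_sg;
	unsigned int data_remain;
	unsigned int data_len;
	unsigned int bytes_xfered;
};

static void init_sg(struct walk *w, const struct seg *sgs, unsigned int n)
{
	w->cur_sg = sgs;
	w->num_sg = n;
	w->data_remain = w->cur_sg->length;
	if (w->data_remain > w->data_len)
		w->data_remain = w->data_len;
}

static void next_sg(struct walk *w)
{
	w->cur_sg++;
	w->num_sg--;
	if (w->num_sg > 0) {
		unsigned int remain = w->data_len - w->bytes_xfered;

		w->data_remain = w->cur_sg->length;
		if (remain > 0 && remain < w->data_remain)
			w->data_remain = remain;
	}
}

int main(void)
{
	const struct seg sgs[3] = { { 512 }, { 512 }, { 512 } };
	struct walk w = { .data_len = 1024 };	/* request shorter than list */

	init_sg(&w, sgs, 3);
	while (w.bytes_xfered < w.data_len) {
		printf("transfer chunk of %u bytes\n", w.data_remain);
		w.bytes_xfered += w.data_remain;
		if (w.bytes_xfered < w.data_len)
			next_sg(&w);
	}
	return 0;
}
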
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 9053526fa212a..150fb477b7cc9 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -24,6 +24,7 @@
+ #define BRCMSTB_MATCH_FLAGS_NO_64BIT		BIT(0)
+ #define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT	BIT(1)
+ #define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE	BIT(2)
++#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY	BIT(4)
+ 
+ #define BRCMSTB_PRIV_FLAGS_HAS_CQE		BIT(0)
+ #define BRCMSTB_PRIV_FLAGS_GATE_CLOCK		BIT(1)
+@@ -384,6 +385,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ 	if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
+ 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ 
++	if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY))
++		host->mmc_host_ops.card_busy = NULL;
++
+ 	/* Change the base clock frequency if the DT property exists */
+ 	if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ 				     &priv->base_freq_hz) != 0)
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 025b31aa712ca..9fe40f342d251 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1326,7 +1326,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+ 
+ 	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
+ 	if (ret)
+-		return ret;
++		goto fail;
+ 
+ 	/*
+ 	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
+@@ -1337,7 +1337,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
+ 	else
+ 		scratch &= ~0x47;
+ 
+-	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
++	ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
++
++fail:
++	return pcibios_err_to_errno(ret);
+ }
+ 
+ static int jmicron_probe(struct sdhci_pci_chip *chip)
+@@ -2202,7 +2205,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+ 
+ 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
+ 	if (ret)
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 
+ 	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
+ 	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+@@ -2211,7 +2214,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
+ 
+ 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
+ 	if (ret)
+-		return ret;
++		return pcibios_err_to_errno(ret);
+ 
+ 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index d4a02184784a3..058bef1c7e419 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch &= 0x7f;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 
+@@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_CLKREQ, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x20;
+ 		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+ 
+@@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		 */
+ 		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x01;
+ 		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+ 		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+@@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_INF_MOD, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x08;
+ 		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+ 
+@@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x80;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 		break;
+@@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 
+ 		scratch &= 0x7f;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+@@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 						    O2_SD_FUNC_REG0,
+ 						    &scratch_32);
+ 			if (ret)
+-				return ret;
++				goto read_fail;
+ 			scratch_32 = ((scratch_32 & 0xFF000000) >> 24);
+ 
+ 			/* Check Whether subId is 0x11 or 0x12 */
+@@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 							    O2_SD_FUNC_REG4,
+ 							    &scratch_32);
+ 				if (ret)
+-					return ret;
++					goto read_fail;
+ 
+ 				/* Enable Base Clk setting change */
+ 				scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET;
+@@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_dword(chip->pdev,
+ 					    O2_SD_CLK_SETTING, &scratch_32);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 
+ 		scratch_32 &= ~(0xFF00);
+ 		scratch_32 |= 0x07E0C800;
+@@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_dword(chip->pdev,
+ 					    O2_SD_CLKREQ, &scratch_32);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch_32 |= 0x3;
+ 		pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+ 
+ 		ret = pci_read_config_dword(chip->pdev,
+ 					    O2_SD_PLL_SETTING, &scratch_32);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 
+ 		scratch_32 &= ~(0x1F3F070E);
+ 		scratch_32 |= 0x18270106;
+@@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_dword(chip->pdev,
+ 					    O2_SD_CAP_REG2, &scratch_32);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch_32 &= ~(0xE0);
+ 		pci_write_config_dword(chip->pdev,
+ 				       O2_SD_CAP_REG2, scratch_32);
+@@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 					   O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x80;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 		break;
+@@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 				O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 
+ 		scratch &= 0x7f;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+@@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_dword(chip->pdev,
+ 					    O2_SD_PLL_SETTING, &scratch_32);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 
+ 		if ((scratch_32 & 0xff000000) == 0x01000000) {
+ 			scratch_32 &= 0x0000FFFF;
+@@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 						    O2_SD_FUNC_REG4,
+ 						    &scratch_32);
+ 			if (ret)
+-				return ret;
++				goto read_fail;
+ 			scratch_32 |= (1 << 22);
+ 			pci_write_config_dword(chip->pdev,
+ 					       O2_SD_FUNC_REG4, scratch_32);
+@@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		ret = pci_read_config_byte(chip->pdev,
+ 					   O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x80;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 		break;
+@@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		/* UnLock WP */
+ 		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch &= 0x7f;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 
+@@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		/* Lock WP */
+ 		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ 		if (ret)
+-			return ret;
++			goto read_fail;
+ 		scratch |= 0x80;
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 		break;
+ 	}
+ 
+ 	return 0;
++
++read_fail:
++	return pcibios_err_to_errno(ret);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
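
All of the goto read_fail conversions above address the same mismatch:
pci_read_config_*() reports PCIBIOS_* status codes, which are positive,
while callers of the probe path expect 0 or a negative errno, so every
failure is funneled through one label that runs pcibios_err_to_errno(). A
minimal sketch of the pattern with stand-in values (the real codes and the
full translation table live in the kernel's PCI headers):

#include <stdio.h>

#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_DEVICE_NOT_FOUND	0x86	/* stand-in subset */
#define DEMO_ENODEV			19

static int demo_pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err;		/* already 0 or a negative errno */
	return -DEMO_ENODEV;		/* full table collapsed for the sketch */
}

static int read_cfg(int fail)
{
	return fail ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int probe(int fail)
{
	int ret;

	ret = read_cfg(fail);
	if (ret)
		goto read_fail;	/* every config read funnels through here */

	return 0;

read_fail:
	return demo_pcibios_err_to_errno(ret);
}

int main(void)
{
	printf("%d %d\n", probe(0), probe(1));	/* prints: 0 -19 */
	return 0;
}
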
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 746f4cf7ab033..112584aa07723 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+ 
+ static int sdhci_check_ro(struct sdhci_host *host)
+ {
+-	unsigned long flags;
++	bool allow_invert = false;
+ 	int is_readonly;
+ 
+-	spin_lock_irqsave(&host->lock, flags);
+-
+-	if (host->flags & SDHCI_DEVICE_DEAD)
++	if (host->flags & SDHCI_DEVICE_DEAD) {
+ 		is_readonly = 0;
+-	else if (host->ops->get_ro)
++	} else if (host->ops->get_ro) {
+ 		is_readonly = host->ops->get_ro(host);
+-	else if (mmc_can_gpio_ro(host->mmc))
++	} else if (mmc_can_gpio_ro(host->mmc)) {
+ 		is_readonly = mmc_gpio_get_ro(host->mmc);
+-	else
++		/* Do not invert twice */
++		allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
++	} else {
+ 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
+ 				& SDHCI_WRITE_PROTECT);
++		allow_invert = true;
++	}
+ 
+-	spin_unlock_irqrestore(&host->lock, flags);
++	if (is_readonly >= 0 &&
++	    allow_invert &&
++	    (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT))
++		is_readonly = !is_readonly;
+ 
+-	/* This quirk needs to be replaced by a callback-function later */
+-	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
+-		!is_readonly : is_readonly;
++	return is_readonly;
+ }
+ 
+ #define SAMPLE_COUNT	5
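
The restructured sdhci_check_ro() exists to invert at most once: the GPIO
helper already accounts for MMC_CAP2_RO_ACTIVE_HIGH, so stacking
SDHCI_QUIRK_INVERTED_WRITE_PROTECT on top of it flipped the value twice,
while a vendor get_ro() callback is trusted as-is. A compact model of that
decision table (the source enum and flag names are stand-ins for the
driver's):

#include <stdio.h>
#include <stdbool.h>

enum ro_src { RO_OPS_CALLBACK, RO_GPIO, RO_REGISTER };

static int check_ro(enum ro_src src, int raw,
		    bool gpio_active_high, bool quirk_inverted)
{
	bool allow_invert =
		src == RO_REGISTER ||			/* raw bit: quirk applies */
		(src == RO_GPIO && !gpio_active_high);	/* not already inverted */

	if (raw >= 0 && allow_invert && quirk_inverted)
		raw = !raw;
	return raw;
}

int main(void)
{
	/* GPIO already handled the polarity: the quirk must not flip again */
	printf("%d\n", check_ro(RO_GPIO, 1, true, true));	/* -> 1 */
	/* plain register read-out: the quirk still applies */
	printf("%d\n", check_ro(RO_REGISTER, 1, false, true));	/* -> 0 */
	/* vendor callback: trusted as-is */
	printf("%d\n", check_ro(RO_OPS_CALLBACK, 1, false, true)); /* -> 1 */
	return 0;
}
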
+diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
+index a16b42a885816..3b55b676ca6b9 100644
+--- a/drivers/mtd/parsers/redboot.c
++++ b/drivers/mtd/parsers/redboot.c
+@@ -102,7 +102,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
+ 			offset -= master->erasesize;
+ 		}
+ 	} else {
+-		offset = directory * master->erasesize;
++		offset = (unsigned long) directory * master->erasesize;
+ 		while (mtd_block_isbad(master, offset)) {
+ 			offset += master->erasesize;
+ 			if (offset == master->size)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index bceda85f0dcf6..cb66310c8d76b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
+ 	if (real_dev) {
+ 		ret = ethtool_get_ts_info_by_layer(real_dev, info);
+ 	} else {
++		info->phc_index = -1;
++		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
++					SOF_TIMESTAMPING_SOFTWARE;
+ 		/* Check if all slaves support software tx timestamping */
+ 		rcu_read_lock();
+ 		bond_for_each_slave_rcu(bond, slave, iter) {
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index 1d9057dc44f25..bf1589aef1fc6 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 	clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ 	can_rx_offload_enable(&priv->offload);
+ 
++	priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
++					   WQ_FREEZABLE | WQ_MEM_RECLAIM,
++					   dev_name(&spi->dev));
++	if (!priv->wq) {
++		err = -ENOMEM;
++		goto out_can_rx_offload_disable;
++	}
++	INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
++
+ 	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
+ 				   IRQF_SHARED | IRQF_ONESHOT,
+ 				   dev_name(&spi->dev), priv);
+ 	if (err)
+-		goto out_can_rx_offload_disable;
++		goto out_destroy_workqueue;
+ 
+ 	err = mcp251xfd_chip_interrupts_enable(priv);
+ 	if (err)
+@@ -1634,6 +1643,8 @@ static int mcp251xfd_open(struct net_device *ndev)
+ 
+  out_free_irq:
+ 	free_irq(spi->irq, priv);
++ out_destroy_workqueue:
++	destroy_workqueue(priv->wq);
+  out_can_rx_offload_disable:
+ 	can_rx_offload_disable(&priv->offload);
+ 	set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+@@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
+ 	hrtimer_cancel(&priv->tx_irq_timer);
+ 	mcp251xfd_chip_interrupts_disable(priv);
+ 	free_irq(ndev->irq, priv);
++	destroy_workqueue(priv->wq);
+ 	can_rx_offload_disable(&priv->offload);
+ 	mcp251xfd_timestamp_stop(priv);
+ 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+index 160528d3cc26b..b1de8052a45cc 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ 	tx_obj->xfer[0].len = len;
+ }
+ 
++static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
++				      struct mcp251xfd_tx_ring *tx_ring,
++				      int err)
++{
++	struct net_device *ndev = priv->ndev;
++	struct net_device_stats *stats = &ndev->stats;
++	unsigned int frame_len = 0;
++	u8 tx_head;
++
++	tx_ring->head--;
++	stats->tx_dropped++;
++	tx_head = mcp251xfd_get_tx_head(tx_ring);
++	can_free_echo_skb(ndev, tx_head, &frame_len);
++	netdev_completed_queue(ndev, 1, frame_len);
++	netif_wake_queue(ndev);
++
++	if (net_ratelimit())
++		netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++}
++
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
++{
++	struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
++						   tx_work);
++	struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
++	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
++	int err;
++
++	err = spi_sync(priv->spi, &tx_obj->msg);
++	if (err)
++		mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++}
++
+ static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ 				  struct mcp251xfd_tx_obj *tx_obj)
+ {
+@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ 	return false;
+ }
+ 
++static bool mcp251xfd_work_busy(struct work_struct *work)
++{
++	return work_busy(work);
++}
++
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 				 struct net_device *ndev)
+ {
+@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 	if (can_dev_dropped_skb(ndev, skb))
+ 		return NETDEV_TX_OK;
+ 
+-	if (mcp251xfd_tx_busy(priv, tx_ring))
++	if (mcp251xfd_tx_busy(priv, tx_ring) ||
++	    mcp251xfd_work_busy(&priv->tx_work))
+ 		return NETDEV_TX_BUSY;
+ 
+ 	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 		netdev_sent_queue(priv->ndev, frame_len);
+ 
+ 	err = mcp251xfd_tx_obj_write(priv, tx_obj);
+-	if (err)
+-		goto out_err;
+-
+-	return NETDEV_TX_OK;
+-
+- out_err:
+-	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
++	if (err == -EBUSY) {
++		netif_stop_queue(ndev);
++		priv->tx_work_obj = tx_obj;
++		queue_work(priv->wq, &priv->tx_work);
++	} else if (err) {
++		mcp251xfd_tx_failure_drop(priv, tx_ring, err);
++	}
+ 
+ 	return NETDEV_TX_OK;
+ }
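
[The tx-path rework above follows a common defer pattern: when the fast asynchronous
submit reports -EBUSY, the netdev queue is stopped and the request is parked for an
ordered workqueue, which retries it synchronously and drops the frame only if that
also fails. A minimal sketch of the shape — submit_async(), submit_sync() and
my_tx_failure_drop() are hypothetical stand-ins for the driver's own helpers:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct net_device *ndev;
		struct workqueue_struct *wq;	/* ordered, created in ndo_open() */
		struct work_struct tx_work;
		struct sk_buff *pending;	/* request parked for the worker */
	};

	/* Hypothetical helpers standing in for the driver's own: */
	int submit_async(struct my_priv *priv, struct sk_buff *skb); /* may return -EBUSY */
	int submit_sync(struct my_priv *priv, struct sk_buff *skb);  /* blocking retry */
	void my_tx_failure_drop(struct my_priv *priv);	/* count the drop, wake queue */

	static void my_tx_retry(struct work_struct *work)
	{
		struct my_priv *priv = container_of(work, struct my_priv, tx_work);

		if (submit_sync(priv, priv->pending))
			my_tx_failure_drop(priv);
	}

	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	{
		struct my_priv *priv = netdev_priv(ndev);
		int err;

		/* Refuse new frames while a deferred retry is still pending. */
		if (work_busy(&priv->tx_work))
			return NETDEV_TX_BUSY;

		err = submit_async(priv, skb);
		if (err == -EBUSY) {
			netif_stop_queue(ndev);	/* backpressure until the worker runs */
			priv->pending = skb;
			queue_work(priv->wq, &priv->tx_work);
		}
		return NETDEV_TX_OK;	/* skb ownership was handed over */
	}
]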
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+index 24510b3b80203..b35bfebd23f29 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+@@ -633,6 +633,10 @@ struct mcp251xfd_priv {
+ 	struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ 	struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ 
++	struct workqueue_struct *wq;
++	struct work_struct tx_work;
++	struct mcp251xfd_tx_obj *tx_work_obj;
++
+ 	DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
+ 
+ 	u8 rx_ring_num;
+@@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
+ void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+ void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+ 
++void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
+ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ 				 struct net_device *ndev);
+ 
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 7f745628c84d1..dde5c65c2c366 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
+ 			   SPI_AUTO_EDGE_DETECTION, 0);
+ 
+ 	/* default configuration */
+-	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+-	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+-	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+-	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
++	ksz_write8(dev, REG_SW_LUE_CTRL_1,
++		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
+ 
+ 	/* disable interrupts */
+ 	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+@@ -1305,6 +1303,10 @@ int ksz9477_setup(struct dsa_switch *ds)
+ 	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ 
++	/* Use collision based back pressure mode. */
++	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
++		SW_BACK_PRESSURE_COLLISION);
++
+ 	/* Now we can configure default MTU value */
+ 	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
+ 				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index f3a205ee483f2..fb124be8edd30 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -247,6 +247,7 @@
+ #define REG_SW_MAC_CTRL_1		0x0331
+ 
+ #define SW_BACK_PRESSURE		BIT(5)
++#define SW_BACK_PRESSURE_COLLISION	0
+ #define FAIR_FLOW_CTRL			BIT(4)
+ #define NO_EXC_COLLISION_DROP		BIT(3)
+ #define SW_JUMBO_PACKET			BIT(2)
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 2a5861a88d0e6..e54f83a2e7d30 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2129,7 +2129,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+ 	struct ksz_device *dev = kirq->dev;
+ 	int ret;
+ 
+-	ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
++	ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
+ 	if (ret)
+ 		dev_err(dev->dev, "failed to change IRQ mask\n");
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 888509cf1f210..40e8818295951 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
+ static int update_xps(struct dpaa2_eth_priv *priv)
+ {
+ 	struct net_device *net_dev = priv->net_dev;
+-	struct cpumask xps_mask;
+-	struct dpaa2_eth_fq *fq;
+ 	int i, num_queues, netdev_queues;
++	struct dpaa2_eth_fq *fq;
++	cpumask_var_t xps_mask;
+ 	int err = 0;
+ 
++	if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
++		return -ENOMEM;
++
+ 	num_queues = dpaa2_eth_queue_count(priv);
+ 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
+ 
+@@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
+ 	for (i = 0; i < netdev_queues; i++) {
+ 		fq = &priv->fq[i % num_queues];
+ 
+-		cpumask_clear(&xps_mask);
+-		cpumask_set_cpu(fq->target_cpu, &xps_mask);
++		cpumask_clear(xps_mask);
++		cpumask_set_cpu(fq->target_cpu, xps_mask);
+ 
+-		err = netif_set_xps_queue(net_dev, &xps_mask, i);
++		err = netif_set_xps_queue(net_dev, xps_mask, i);
+ 		if (err) {
+ 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
+ 			break;
+ 		}
+ 	}
+ 
++	free_cpumask_var(xps_mask);
+ 	return err;
+ }
+ 
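
[For context on the xps change above: with CONFIG_CPUMASK_OFFSTACK=y and a large
NR_CPUS, a struct cpumask is too big to live in a stack frame, so cpumask_var_t
plus alloc_cpumask_var()/free_cpumask_var() is the usual replacement — heap-backed
in that configuration, a plain stack variable otherwise. A minimal sketch:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int set_queue_affinity(int cpu)
	{
		cpumask_var_t mask;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_clear(mask);
		cpumask_set_cpu(cpu, mask);
		/* ... hand 'mask' to a consumer such as netif_set_xps_queue() ... */
		free_cpumask_var(mask);
		return 0;
	}
]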
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 30c47b8470ade..722bb724361c2 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4057,6 +4057,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
+ 		adapter->num_active_tx_scrqs = 0;
+ 	}
+ 
++	/* Clean up any remaining outstanding SKBs;
++	 * we freed the IRQ, so we won't be hearing
++	 * from them.
++	 */
++	clean_tx_pools(adapter);
++
+ 	if (adapter->rx_scrq) {
+ 		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+ 			if (!adapter->rx_scrq[i])
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 61eef3259cbaa..88d4675cc3428 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4102,7 +4102,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
+ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+ {
+ 	struct ice_pf *pf = vsi->back;
+-	int err = 0, timeout = 50;
++	int i, err = 0, timeout = 50;
+ 
+ 	if (!new_rx && !new_tx)
+ 		return -EINVAL;
+@@ -4128,6 +4128,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
+ 
+ 	ice_vsi_close(vsi);
+ 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
++
++	ice_for_each_traffic_class(i) {
++		if (vsi->tc_cfg.ena_tc & BIT(i))
++			netdev_set_tc_queue(vsi->netdev,
++					    vsi->tc_cfg.tc_info[i].netdev_tc,
++					    vsi->tc_cfg.tc_info[i].qcount_tx,
++					    vsi->tc_cfg.tc_info[i].qoffset);
++	}
+ 	ice_pf_dcb_recfg(pf, locked);
+ 	ice_vsi_open(vsi);
+ done:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+index f42a1b1c93687..653a47dd43862 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
+@@ -1490,18 +1490,25 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
+ 	return -EBUSY;
+ }
+ 
+-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
++static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
++					  bool pci_reset_sbr_supported)
+ {
+ 	struct pci_dev *pdev = mlxsw_pci->pdev;
+ 	char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ 	int err;
+ 
++	if (!pci_reset_sbr_supported) {
++		pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
++		goto sbr;
++	}
++
+ 	mlxsw_reg_mrsr_pack(mrsr_pl,
+ 			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
+ 	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ 	if (err)
+ 		return err;
+ 
++sbr:
+ 	device_lock_assert(&pdev->dev);
+ 
+ 	pci_cfg_access_lock(pdev);
+@@ -1529,6 +1536,7 @@ static int
+ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
+ {
+ 	struct pci_dev *pdev = mlxsw_pci->pdev;
++	bool pci_reset_sbr_supported = false;
+ 	char mcam_pl[MLXSW_REG_MCAM_LEN];
+ 	bool pci_reset_supported = false;
+ 	u32 sys_status;
+@@ -1548,13 +1556,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
+ 	mlxsw_reg_mcam_pack(mcam_pl,
+ 			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ 	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
+-	if (!err)
++	if (!err) {
+ 		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ 				      &pci_reset_supported);
++		mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
++				      &pci_reset_sbr_supported);
++	}
+ 
+ 	if (pci_reset_supported) {
+ 		pci_dbg(pdev, "Starting PCI reset flow\n");
+-		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
++		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
++						     pci_reset_sbr_supported);
+ 	} else {
+ 		pci_dbg(pdev, "Starting software reset flow\n");
+ 		err = mlxsw_pci_reset_sw(mlxsw_pci);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index 8892654c685f3..010eecab5147a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -10668,6 +10668,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
+ 	MLXSW_REG_MCAM_MCIA_128B = 34,
+ 	/* If set, MRSR.command=6 is supported. */
+ 	MLXSW_REG_MCAM_PCI_RESET = 48,
++	/* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
++	MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
+ };
+ 
+ #define MLXSW_REG_BYTES_PER_DWORD 0x4
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+index c9f1c79f3f9d0..ba090262e27ef 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+@@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 			     unsigned int sb_index)
+ {
++	u16 local_port, local_port_1, first_local_port, last_local_port;
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+-	u16 local_port, local_port_1, last_local_port;
+ 	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ 	u8 masked_count, current_page = 0;
+ 	unsigned long cb_priv = 0;
+@@ -1628,6 +1628,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 	masked_count = 0;
+ 	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ 	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++	first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ 	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ 			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+ 
+@@ -1645,9 +1646,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ 		if (local_port != MLXSW_PORT_CPU_PORT) {
+ 			/* Ingress quotas are not supported for the CPU port */
+ 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+-							     local_port, 1);
++							     local_port - first_local_port,
++							     1);
+ 		}
+-		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++						    local_port - first_local_port,
++						    1);
+ 		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ 			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ 						       &bulk_list);
+@@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 			      unsigned int sb_index)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+-	u16 local_port, last_local_port;
++	u16 local_port, first_local_port, last_local_port;
+ 	LIST_HEAD(bulk_list);
+ 	unsigned int masked_count;
+ 	u8 current_page = 0;
+@@ -1702,6 +1706,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 	masked_count = 0;
+ 	mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ 	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
++	first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
+ 	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ 			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+ 
+@@ -1719,9 +1724,12 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ 		if (local_port != MLXSW_PORT_CPU_PORT) {
+ 			/* Ingress quotas are not supported for the CPU port */
+ 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
+-							     local_port, 1);
++							     local_port - first_local_port,
++							     1);
+ 		}
+-		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
++		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
++						    local_port - first_local_port,
++						    1);
+ 		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
+ 			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ 						       &bulk_list);
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index d8af5e7e15b4d..191d50ba646d8 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -2800,6 +2800,8 @@ static int add_adev(struct gdma_dev *gd)
+ 	if (ret)
+ 		goto init_fail;
+ 
++	/* madev is owned by the auxiliary device */
++	madev = NULL;
+ 	ret = auxiliary_device_add(adev);
+ 	if (ret)
+ 		goto add_fail;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+index f30eee4a5a80e..b6c01a88098dc 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+@@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
+ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
+ 			      ionic_cq_cb cb, ionic_cq_done_cb done_cb,
+ 			      void *done_arg);
+-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
++unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
++				 unsigned int work_to_do,
++				 bool in_napi);
+ 
+ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
+ 		 struct ionic_queue *q, unsigned int index, const char *name,
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 0cd819bc4ae35..1dec4ebd708f2 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -1189,7 +1189,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
+ 					   ionic_rx_service, NULL, NULL);
+ 
+ 	if (lif->hwstamp_txq)
+-		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
++		tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
+ 
+ 	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
+ 	if (work_done < budget && napi_complete_done(napi, work_done)) {
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index 2427610f4306d..9fdd7cd3ef19d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ 
+ static void ionic_tx_clean(struct ionic_queue *q,
+ 			   struct ionic_tx_desc_info *desc_info,
+-			   struct ionic_txq_comp *comp);
++			   struct ionic_txq_comp *comp,
++			   bool in_napi);
+ 
+ static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
+ {
+@@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
+ 	return nxmit;
+ }
+ 
++static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
++				  struct ionic_buf_info *buf_info,
++				  int nbufs)
++{
++	int i;
++
++	for (i = 0; i < nbufs; i++) {
++		dma_unmap_page(q->dev, buf_info->dma_addr,
++			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
++		buf_info->page = NULL;
++		buf_info++;
++	}
++}
++
+ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 			  struct net_device *netdev,
+ 			  struct bpf_prog *xdp_prog,
+@@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 	struct netdev_queue *nq;
+ 	struct xdp_frame *xdpf;
+ 	int remain_len;
++	int nbufs = 1;
+ 	int frag_len;
+ 	int err = 0;
+ 
+@@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 			if (page_is_pfmemalloc(bi->page))
+ 				xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+ 		} while (remain_len > 0);
++		nbufs += sinfo->nr_frags;
+ 	}
+ 
+ 	xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
+@@ -574,9 +591,6 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 			goto out_xdp_abort;
+ 		}
+ 
+-		dma_unmap_page(rxq->dev, buf_info->dma_addr,
+-			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+-
+ 		err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
+ 					   buf_info->page,
+ 					   buf_info->page_offset,
+@@ -586,23 +600,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ 			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ 			goto out_xdp_abort;
+ 		}
+-		buf_info->page = NULL;
++		ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ 		stats->xdp_tx++;
+ 
+ 		/* the Tx completion will free the buffers */
+ 		break;
+ 
+ 	case XDP_REDIRECT:
+-		/* unmap the pages before handing them to a different device */
+-		dma_unmap_page(rxq->dev, buf_info->dma_addr,
+-			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+-
+ 		err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
+ 		if (err) {
+ 			netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
+ 			goto out_xdp_abort;
+ 		}
+-		buf_info->page = NULL;
++		ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
+ 		rxq->xdp_flush = true;
+ 		stats->xdp_redirect++;
+ 		break;
+@@ -935,7 +945,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
+ 	u32 work_done = 0;
+ 	u32 flags = 0;
+ 
+-	work_done = ionic_tx_cq_service(cq, budget);
++	work_done = ionic_tx_cq_service(cq, budget, !!budget);
+ 
+ 	if (unlikely(!budget))
+ 		return budget;
+@@ -1019,7 +1029,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
+ 	txqcq = lif->txqcqs[qi];
+ 	txcq = &lif->txqcqs[qi]->cq;
+ 
+-	tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
++	tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
+ 
+ 	if (unlikely(!budget))
+ 		return budget;
+@@ -1152,7 +1162,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ 
+ static void ionic_tx_clean(struct ionic_queue *q,
+ 			   struct ionic_tx_desc_info *desc_info,
+-			   struct ionic_txq_comp *comp)
++			   struct ionic_txq_comp *comp,
++			   bool in_napi)
+ {
+ 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ 	struct ionic_qcq *qcq = q_to_qcq(q);
+@@ -1204,11 +1215,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
+ 	desc_info->bytes = skb->len;
+ 	stats->clean++;
+ 
+-	napi_consume_skb(skb, 1);
++	napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
+ }
+ 
+ static bool ionic_tx_service(struct ionic_cq *cq,
+-			     unsigned int *total_pkts, unsigned int *total_bytes)
++			     unsigned int *total_pkts,
++			     unsigned int *total_bytes,
++			     bool in_napi)
+ {
+ 	struct ionic_tx_desc_info *desc_info;
+ 	struct ionic_queue *q = cq->bound_q;
+@@ -1230,7 +1243,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
+ 		desc_info->bytes = 0;
+ 		index = q->tail_idx;
+ 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+-		ionic_tx_clean(q, desc_info, comp);
++		ionic_tx_clean(q, desc_info, comp, in_napi);
+ 		if (desc_info->skb) {
+ 			pkts++;
+ 			bytes += desc_info->bytes;
+@@ -1244,7 +1257,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
+ 	return true;
+ }
+ 
+-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
++unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
++				 unsigned int work_to_do,
++				 bool in_napi)
+ {
+ 	unsigned int work_done = 0;
+ 	unsigned int bytes = 0;
+@@ -1253,7 +1268,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+ 	if (work_to_do == 0)
+ 		return 0;
+ 
+-	while (ionic_tx_service(cq, &pkts, &bytes)) {
++	while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
+ 		if (cq->tail_idx == cq->num_descs - 1)
+ 			cq->done_color = !cq->done_color;
+ 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+@@ -1279,7 +1294,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
+ {
+ 	u32 work_done;
+ 
+-	work_done = ionic_tx_cq_service(cq, cq->num_descs);
++	work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
+ 	if (work_done)
+ 		ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
+ 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
+@@ -1296,7 +1311,7 @@ void ionic_tx_empty(struct ionic_queue *q)
+ 		desc_info = &q->tx_info[q->tail_idx];
+ 		desc_info->bytes = 0;
+ 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+-		ionic_tx_clean(q, desc_info, NULL);
++		ionic_tx_clean(q, desc_info, NULL, false);
+ 		if (desc_info->skb) {
+ 			pkts++;
+ 			bytes += desc_info->bytes;
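
[The thread running through the ionic hunks above is the new in_napi flag:
napi_consume_skb(skb, budget) may only batch frees through the per-cpu NAPI skb
cache when called from NAPI polling with a non-zero budget; a zero budget makes
it fall back to the any-context free. The NAPI entry points therefore pass
!!budget, while ionic_tx_flush()/ionic_tx_empty() pass false. The call-site
rule, roughly:

	#include <linux/skbuff.h>

	static void release_tx_skb(struct sk_buff *skb, bool in_napi)
	{
		/* budget 0 => dev_consume_skb_any() path (safe anywhere);
		 * budget != 0 => batched free via the NAPI skb cache. */
		napi_consume_skb(skb, in_napi ? 1 : 0);
	}
]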
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 4b22bb6393e26..4a28b654ce877 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -5094,6 +5094,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+ 	{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
++	{ PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ 	{ PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 52b71c7e78351..e4b759690c441 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -385,23 +385,18 @@ static void sfp_fixup_rollball(struct sfp *sfp)
+ 	sfp->phy_t_retry = msecs_to_jiffies(1000);
+ }
+ 
+-static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
++static void sfp_fixup_fs_10gt(struct sfp *sfp)
+ {
++	sfp_fixup_10gbaset_30m(sfp);
+ 	sfp_fixup_rollball(sfp);
+ 
+-	/* The RollBall fixup is not enough for FS modules, the PHY chip inside
++	/* The RollBall fixup is not enough for FS modules, the AQR chip inside
+ 	 * them does not return 0xffff for PHY ID registers in all MMDs while
+ 	 * initializing. They need a 4 second wait before accessing the PHY.
+ 	 */
+ 	sfp->module_t_wait = msecs_to_jiffies(4000);
+ }
+ 
+-static void sfp_fixup_fs_10gt(struct sfp *sfp)
+-{
+-	sfp_fixup_10gbaset_30m(sfp);
+-	sfp_fixup_fs_2_5gt(sfp);
+-}
+-
+ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+ {
+ 	/* Ignore the TX_FAULT and LOS signals on this module.
+@@ -477,10 +472,6 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	// Rollball protocol to talk to the PHY.
+ 	SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ 
+-	// Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
+-	// needs 4 sec wait before probing the PHY.
+-	SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+-
+ 	// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
+ 	// NRZ in their EEPROM
+ 	SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
+@@ -497,6 +488,9 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 	SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ 		  sfp_fixup_ignore_tx_fault),
+ 
++	// FS 2.5G Base-T
++	SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
++
+ 	// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ 	// 2500MBd NRZ in their EEPROM
+ 	SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 94d8b647f0f57..bb70b56c97295 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -326,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
+ 
+ 	if (netif_carrier_ok(dev->net) != link) {
+ 		usbnet_link_change(dev, link, 1);
+-		netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
++		if (!link)
++			netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ 	}
+ }
+ 
+@@ -1540,6 +1541,7 @@ static int ax88179_link_reset(struct usbnet *dev)
+ 			 GMII_PHY_PHYSR, 2, &tmp16);
+ 
+ 	if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
++		netdev_info(dev->net, "ax88179 - Link status is: 0\n");
+ 		return 0;
+ 	} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+ 		mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
+@@ -1577,6 +1579,8 @@ static int ax88179_link_reset(struct usbnet *dev)
+ 
+ 	netif_carrier_on(dev->net);
+ 
++	netdev_info(dev->net, "ax88179 - Link status is: 1\n");
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index b2d054f59f30f..49779ba3c4b7b 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2336,7 +2336,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 	struct ip_tunnel_key *pkey;
+ 	struct ip_tunnel_key key;
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+-	const struct iphdr *old_iph = ip_hdr(skb);
++	const struct iphdr *old_iph;
+ 	struct vxlan_metadata _md;
+ 	struct vxlan_metadata *md = &_md;
+ 	unsigned int pkt_len = skb->len;
+@@ -2350,8 +2350,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 	bool use_cache;
+ 	bool udp_sum = false;
+ 	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
++	bool no_eth_encap;
+ 	__be32 vni = 0;
+ 
++	no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB);
++	if (!skb_vlan_inet_prepare(skb, no_eth_encap))
++		goto drop;
++
++	old_iph = ip_hdr(skb);
++
+ 	info = skb_tunnel_info(skb);
+ 	use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ 
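
[The vxlan fix above delays taking old_iph = ip_hdr(skb) until
skb_vlan_inet_prepare() has guaranteed the inner headers sit in the skb's linear
area. The general rule is the same one enforced by the long-standing
pskb_may_pull() primitive, sketched here in its generic form rather than with
the vxlan-specific helper:

	#include <linux/ip.h>
	#include <linux/skbuff.h>

	static int inspect_ipv4(struct sk_buff *skb)
	{
		const struct iphdr *iph;

		/* Ensure the header bytes are linear before dereferencing;
		 * the pull may reallocate, invalidating earlier pointers. */
		if (!pskb_may_pull(skb, sizeof(*iph)))
			return -EINVAL;

		iph = ip_hdr(skb);	/* only now is this safe to use */
		return iph->ttl ? 0 : -EINVAL;
	}
]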
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 185cd339c0855..6c75ebbb21caa 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -1349,13 +1349,12 @@ static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
+ static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
+ {
+ 	u32 val32;
+-	u16 val16;
+ 
+ 	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
+ 	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
+ 
+-	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
+-	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
++	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
++	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
+ 
+ 	rtw89_fw_prog_cnt_dump(rtwdev);
+ }
+@@ -1394,8 +1393,9 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
+ 	return 0;
+ }
+ 
+-int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+-		      bool include_bb)
++static
++int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
++			bool include_bb)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
+@@ -1433,7 +1433,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ 	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "download firmware fail\n");
+-		return ret;
++		goto fwdl_err;
+ 	}
+ 
+ 	return ret;
+@@ -1443,6 +1443,21 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ 	return ret;
+ }
+ 
++int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
++		      bool include_bb)
++{
++	int retry;
++	int ret;
++
++	for (retry = 0; retry < 5; retry++) {
++		ret = __rtw89_fw_download(rtwdev, type, include_bb);
++		if (!ret)
++			return 0;
++	}
++
++	return ret;
++}
++
+ int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_fw_info *fw = &rtwdev->fw;
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index dfdff6aba6953..d80c3b93d6ce9 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item,
+ 				return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
+ 		}
+ 	}
+-	return sprintf(page, "reserved\n");
++	return sprintf(page, "\n");
++}
++
++static u8 nvmet_addr_tsas_rdma_store(const char *page)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
++		if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
++			return nvmet_addr_tsas_rdma[i].type;
++	}
++	return NVMF_RDMA_QPTYPE_INVALID;
++}
++
++static u8 nvmet_addr_tsas_tcp_store(const char *page)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
++		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
++			return nvmet_addr_tsas_tcp[i].type;
++	}
++	return NVMF_TCP_SECTYPE_INVALID;
+ }
+ 
+ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
+@@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
+ {
+ 	struct nvmet_port *port = to_nvmet_port(item);
+ 	u8 treq = nvmet_port_disc_addr_treq_mask(port);
+-	u8 sectype;
+-	int i;
++	u8 sectype, qptype;
+ 
+ 	if (nvmet_is_port_enabled(port, __func__))
+ 		return -EACCES;
+ 
+-	if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
+-		return -EINVAL;
+-
+-	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
+-		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
+-			sectype = nvmet_addr_tsas_tcp[i].type;
++	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
++		qptype = nvmet_addr_tsas_rdma_store(page);
++		if (qptype == port->disc_addr.tsas.rdma.qptype)
++			return count;
++	} else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
++		sectype = nvmet_addr_tsas_tcp_store(page);
++		if (sectype != NVMF_TCP_SECTYPE_INVALID)
+ 			goto found;
+-		}
+ 	}
+ 
+ 	pr_err("Invalid value '%s' for tsas\n", page);
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 337ee1cb09ae6..381b4394731f1 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue {
+ 	struct workqueue_struct		*work_q;
+ 	struct kref			ref;
+ 	/* array of fcp_iods */
+-	struct nvmet_fc_fcp_iod		fod[] __counted_by(sqsize);
++	struct nvmet_fc_fcp_iod		fod[] /* __counted_by(sqsize) */;
+ } __aligned(sizeof(unsigned long long));
+ 
+ struct nvmet_fc_hostport {
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index 682fa877478fe..d9d3f57c6f77b 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -349,7 +349,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 			       struct irq_affinity *affd)
+ {
+ 	struct irq_affinity_desc *masks = NULL;
+-	struct msi_desc *entry;
++	struct msi_desc *entry, desc;
+ 	int ret;
+ 
+ 	/* Reject multi-MSI early on irq domain enabled architectures */
+@@ -374,6 +374,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 	/* All MSIs are unmasked by default; mask them all */
+ 	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ 	pci_msi_mask(entry, msi_multi_mask(entry));
++	/*
++	 * Copy the MSI descriptor for the error path because
++	 * pci_msi_setup_msi_irqs() will free it for the hierarchical
++	 * interrupt domain case.
++	 */
++	memcpy(&desc, entry, sizeof(desc));
+ 
+ 	/* Configure MSI capability structure */
+ 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+@@ -393,7 +399,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
+ 	goto unlock;
+ 
+ err:
+-	pci_msi_unmask(entry, msi_multi_mask(entry));
++	pci_msi_unmask(&desc, msi_multi_mask(&desc));
+ 	pci_free_msi_irqs(dev);
+ fail:
+ 	dev->msi_enabled = 0;
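
[The msi_capability_init() change is a save-before-it-may-vanish pattern:
pci_msi_setup_msi_irqs() can free the descriptor in the hierarchical irqdomain
case, so the function snapshots it by value first and the error path unmasks
through the snapshot only. A generic sketch with hypothetical helper names:

	struct state { unsigned int mask; /* ... */ };

	/* Hypothetical helpers: */
	void apply_mask(struct state *s);
	int do_setup(struct state *s);	/* may free *s on failure */
	void undo_mask(struct state *s);

	static int init_with_unwind(struct state *entry)
	{
		struct state snapshot;
		int ret;

		apply_mask(entry);
		snapshot = *entry;	/* copy while 'entry' is still valid */

		ret = do_setup(entry);
		if (ret)
			undo_mask(&snapshot);	/* never touch 'entry' here */
		return ret;
	}
]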
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index cffeb869130dd..f424a57f00136 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev,
+ 		 * an -EPROBE_DEFER later, as that is the worst case.
+ 		 */
+ 		if (ret == -EPROBE_DEFER) {
+-			pinctrl_free(p, false);
+ 			mutex_unlock(&pinctrl_maps_mutex);
++			pinctrl_free(p, false);
+ 			return ERR_PTR(ret);
+ 		}
+ 	}
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 3bedf36a0019d..3f56991f5b892 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ 
+ static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
+ 	{
+-		.num = 2,
+-		.pin = 12,
+-		.reg = 0x24,
+-		.bit = 8,
+-		.mask = 0x3
+-	}, {
++		/* gpio2_b7_sel */
+ 		.num = 2,
+ 		.pin = 15,
+ 		.reg = 0x28,
+ 		.bit = 0,
+ 		.mask = 0x7
+ 	}, {
++		/* gpio2_c7_sel */
+ 		.num = 2,
+ 		.pin = 23,
+ 		.reg = 0x30,
+ 		.bit = 14,
+ 		.mask = 0x3
++	}, {
++		/* gpio3_b1_sel */
++		.num = 3,
++		.pin = 9,
++		.reg = 0x44,
++		.bit = 2,
++		.mask = 0x3
++	}, {
++		/* gpio3_b2_sel */
++		.num = 3,
++		.pin = 10,
++		.reg = 0x44,
++		.bit = 4,
++		.mask = 0x3
++	}, {
++		/* gpio3_b3_sel */
++		.num = 3,
++		.pin = 11,
++		.reg = 0x44,
++		.bit = 6,
++		.mask = 0x3
++	}, {
++		/* gpio3_b4_sel */
++		.num = 3,
++		.pin = 12,
++		.reg = 0x44,
++		.bit = 8,
++		.mask = 0x3
++	}, {
++		/* gpio3_b5_sel */
++		.num = 3,
++		.pin = 13,
++		.reg = 0x44,
++		.bit = 10,
++		.mask = 0x3
++	}, {
++		/* gpio3_b6_sel */
++		.num = 3,
++		.pin = 14,
++		.reg = 0x44,
++		.bit = 12,
++		.mask = 0x3
++	}, {
++		/* gpio3_b7_sel */
++		.num = 3,
++		.pin = 15,
++		.reg = 0x44,
++		.bit = 14,
++		.mask = 0x3
+ 	},
+ };
+ 
+@@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ 
+ 	if (ret) {
+ 		/* revert the already done pin settings */
+-		for (cnt--; cnt >= 0; cnt--)
++		for (cnt--; cnt >= 0; cnt--) {
++			bank = pin_to_bank(info, pins[cnt]);
+ 			rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
++		}
+ 
+ 		return ret;
+ 	}
+@@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
+ 	case RK3188:
+ 	case RK3288:
+ 	case RK3308:
++	case RK3328:
+ 	case RK3368:
+ 	case RK3399:
+ 	case RK3568:
+@@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = {
+ 	PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
+ 	PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
+-			     IOMUX_WIDTH_3BIT,
++			     0,
+ 			     IOMUX_WIDTH_3BIT,
+ 			     0),
+ 	PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
+@@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
+ 		.pin_banks		= rk3328_pin_banks,
+ 		.nr_banks		= ARRAY_SIZE(rk3328_pin_banks),
+ 		.label			= "RK3328-GPIO",
+-		.type			= RK3288,
++		.type			= RK3328,
+ 		.grf_mux_offset		= 0x0,
+ 		.iomux_recalced		= rk3328_mux_recalced_data,
+ 		.niomux_recalced	= ARRAY_SIZE(rk3328_mux_recalced_data),
+diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
+index 4759f336941ef..849266f8b1913 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.h
++++ b/drivers/pinctrl/pinctrl-rockchip.h
+@@ -193,6 +193,7 @@ enum rockchip_pinctrl_type {
+ 	RK3188,
+ 	RK3288,
+ 	RK3308,
++	RK3328,
+ 	RK3368,
+ 	RK3399,
+ 	RK3568,
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index f4e2c88a7c822..e61be7d054948 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -1206,7 +1206,6 @@ static const struct of_device_id pmic_gpio_of_match[] = {
+ 	{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ 	{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
+ 	{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
+-	{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ 	{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
+ 	/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
+ 	{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 248ab71b9f9da..747b22cd38aea 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
+ 		 * This has to be atomically executed to protect against a concurrent
+ 		 * interrupt.
+ 		 */
+-		raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
++		spin_lock_irqsave(&pctrl->lock, flags);
+ 		ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ 		if (!ret && !irqd_irq_disabled(data))
+ 			rzg2l_gpio_irq_enable(data);
+-		raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
++		spin_unlock_irqrestore(&pctrl->lock, flags);
+ 
+ 		if (ret)
+ 			dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index 0c028d17c0752..5baa487f350e7 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -309,29 +309,43 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ }
+ 
+ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
+-			    int duty_ns, int period_ns)
++			    u64 duty_ns, u64 period_ns)
+ {
+-	unsigned long long prd, div, dty;
+-	unsigned int prescaler = 0;
++	unsigned long long prd, dty;
++	unsigned long long prescaler;
+ 	u32 ccmr, mask, shift;
+ 
+-	/* Period and prescaler values depends on clock rate */
+-	div = (unsigned long long)clk_get_rate(priv->clk) * period_ns;
+-
+-	do_div(div, NSEC_PER_SEC);
+-	prd = div;
+-
+-	while (div > priv->max_arr) {
+-		prescaler++;
+-		div = prd;
+-		do_div(div, prescaler + 1);
+-	}
+-
+-	prd = div;
++	/*
++	 * .probe() asserted that clk_get_rate() does not exceed 1 GHz, so
++	 * the calculations here won't overflow.
++	 * First we need to find the minimal value for prescaler such that
++	 *
++	 *        period_ns * clkrate
++	 *   ------------------------------ < max_arr + 1
++	 *   NSEC_PER_SEC * (prescaler + 1)
++	 *
++	 * This equation is equivalent to
++	 *
++	 *        period_ns * clkrate
++	 *   ---------------------------- < prescaler + 1
++	 *   NSEC_PER_SEC * (max_arr + 1)
++	 *
++	 * Using integer division and knowing that the right hand side is
++	 * integer, this is further equivalent to
++	 *
++	 *   (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler
++	 */
+ 
++	prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
++					(u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1));
+ 	if (prescaler > MAX_TIM_PSC)
+ 		return -EINVAL;
+ 
++	prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk),
++				  (u64)NSEC_PER_SEC * (prescaler + 1));
++	if (!prd)
++		return -EINVAL;
++
+ 	/*
+ 	 * All channels share the same prescaler and counter so when two
+ 	 * channels are active at the same time we can't change them
+@@ -351,8 +365,8 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
+ 	regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE);
+ 
+ 	/* Calculate the duty cycles */
+-	dty = prd * duty_ns;
+-	do_div(dty, period_ns);
++	dty = mul_u64_u64_div_u64(duty_ns, clk_get_rate(priv->clk),
++				  (u64)NSEC_PER_SEC * (prescaler + 1));
+ 
+ 	regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty);
+ 
+@@ -656,6 +670,18 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ 
+ 	stm32_pwm_detect_complementary(priv);
+ 
++	ret = devm_clk_rate_exclusive_get(dev, priv->clk);
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to lock clock\n");
++
++	/*
++	 * With the clk running at no more than 1 GHz, the calculations in
++	 * .apply() won't overflow.
++	 */
++	if (clk_get_rate(priv->clk) > 1000000000)
++		return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n",
++				     clk_get_rate(priv->clk));
++
+ 	chip->ops = &stm32pwm_ops;
+ 
+ 	/* Initialize clock refcount to number of enabled PWM channels. */
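
[The derivation in the stm32_pwm_config() comment reduces to
prescaler = (period_ns * clkrate) / (NSEC_PER_SEC * (max_arr + 1)) with integer
division. A quick userspace check of the arithmetic with illustrative values
(100 MHz clock, 16-bit ARR, 1 ms period), using 128-bit math in place of
mul_u64_u64_div_u64():

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t clkrate = 100000000ULL;	/* 100 MHz */
		uint64_t period_ns = 1000000ULL;	/* 1 ms */
		uint64_t max_arr = 0xFFFF;		/* 16-bit ARR */
		uint64_t nsec_per_sec = 1000000000ULL;

		unsigned __int128 num = (unsigned __int128)period_ns * clkrate;
		uint64_t prescaler = (uint64_t)(num / (nsec_per_sec * (max_arr + 1)));
		uint64_t prd = (uint64_t)(num / (nsec_per_sec * (prescaler + 1)));

		/* prints: prescaler=1 prd=50000 fits=yes */
		printf("prescaler=%llu prd=%llu fits=%s\n",
		       (unsigned long long)prescaler, (unsigned long long)prd,
		       prd <= max_arr ? "yes" : "no");
		return 0;
	}
]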
+diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
+index 85b27c42cf65b..f426b4c391796 100644
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -68,6 +68,7 @@ config RESET_BRCMSTB_RESCAL
+ 
+ config RESET_GPIO
+ 	tristate "GPIO reset controller"
++	depends on GPIOLIB
+ 	help
+ 	  This enables a generic reset controller for resets attached via
+ 	  GPIOs.  Typically for OF platforms this driver expects "reset-gpios"
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index d7569f3955591..d6491fc84e8c5 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -698,6 +698,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ 	dma64_t *indicatorp = NULL;
+ 	int ret, i, queue_idx = 0;
+ 	struct ccw1 *ccw;
++	dma32_t indicatorp_dma = 0;
+ 
+ 	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
+ 	if (!ccw)
+@@ -725,7 +726,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ 	*/
+ 	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
+ 					   sizeof(*indicatorp),
+-					   &ccw->cda);
++					   &indicatorp_dma);
+ 	if (!indicatorp)
+ 		goto out;
+ 	*indicatorp = indicators_dma(vcdev);
+@@ -735,6 +736,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ 			/* no error, just fall back to legacy interrupts */
+ 			vcdev->is_thinint = false;
+ 	}
++	ccw->cda = indicatorp_dma;
+ 	if (!vcdev->is_thinint) {
+ 		/* Register queue indicators with host. */
+ 		*indicators(vcdev) = 0;
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 12e2653846e3f..70891a1e98a01 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -610,15 +610,15 @@ int sas_ata_init(struct domain_device *found_dev)
+ 
+ 	rc = ata_sas_tport_add(ata_host->dev, ap);
+ 	if (rc)
+-		goto destroy_port;
++		goto free_port;
+ 
+ 	found_dev->sata_dev.ata_host = ata_host;
+ 	found_dev->sata_dev.ap = ap;
+ 
+ 	return 0;
+ 
+-destroy_port:
+-	kfree(ap);
++free_port:
++	ata_port_free(ap);
+ free_host:
+ 	ata_host_put(ata_host);
+ 	return rc;
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index 8fb7c41c09624..48d975c6dbf2c 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
+ 
+ 	if (dev_is_sata(dev) && dev->sata_dev.ap) {
+ 		ata_sas_tport_delete(dev->sata_dev.ap);
+-		kfree(dev->sata_dev.ap);
++		ata_port_free(dev->sata_dev.ap);
+ 		ata_host_put(dev->sata_dev.ata_host);
+ 		dev->sata_dev.ata_host = NULL;
+ 		dev->sata_dev.ap = NULL;
+diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
+index 6a1c6b34c414a..88f774db92084 100644
+--- a/drivers/soc/ti/wkup_m3_ipc.c
++++ b/drivers/soc/ti/wkup_m3_ipc.c
+@@ -16,7 +16,6 @@
+ #include <linux/irq.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/omap-mailbox.h>
+ #include <linux/platform_device.h>
+ #include <linux/remoteproc.h>
+ #include <linux/suspend.h>
+@@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ {
+ 	struct device *dev = m3_ipc->dev;
+-	mbox_msg_t dummy_msg = 0;
+ 	int ret;
+ 
+ 	if (!m3_ipc->mbox) {
+@@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ 	 * the RX callback to avoid multiple interrupts being received
+ 	 * by the CM3.
+ 	 */
+-	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++	ret = mbox_send_message(m3_ipc->mbox, NULL);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ 			__func__, ret);
+@@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ {
+ 	struct device *dev = m3_ipc->dev;
+-	mbox_msg_t dummy_msg = 0;
+ 	int ret;
+ 
+ 	if (!m3_ipc->mbox) {
+@@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+ 		return -EIO;
+ 	}
+ 
+-	ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
++	ret = mbox_send_message(m3_ipc->mbox, NULL);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ 			__func__, ret);
+diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
+index 458bb1280ebf9..5b97e420a95f8 100644
+--- a/drivers/tty/mxser.c
++++ b/drivers/tty/mxser.c
+@@ -288,7 +288,7 @@ struct mxser_board {
+ 	enum mxser_must_hwid must_hwid;
+ 	speed_t max_baud;
+ 
+-	struct mxser_port ports[] __counted_by(nports);
++	struct mxser_port ports[] /* __counted_by(nports) */;
+ };
+ 
+ static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 66901d93089a3..ddf23ccbc9253 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -116,6 +116,10 @@
+ /* RX FIFO occupancy indicator */
+ #define UART_OMAP_RX_LVL		0x19
+ 
++/* Timeout Low and High registers */
++#define UART_OMAP_TO_L                 0x26
++#define UART_OMAP_TO_H                 0x27
++
+ /*
+  * Copy of the genpd flags for the console.
+  * Only used if console suspend is disabled
+@@ -664,13 +668,25 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ 
+ 	/*
+ 	 * On K3 SoCs, it is observed that RX TIMEOUT is signalled after
+-	 * FIFO has been drained, in which case a dummy read of RX FIFO
+-	 * is required to clear RX TIMEOUT condition.
++	 * the FIFO has been drained, or even spuriously.
++	 * So apply the workaround for Errata i2310 as described in
++	 * https://www.ti.com/lit/pdf/sprz536
+ 	 */
+ 	if (priv->habit & UART_RX_TIMEOUT_QUIRK &&
+ 	    (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT &&
+ 	    serial_port_in(port, UART_OMAP_RX_LVL) == 0) {
+-		serial_port_in(port, UART_RX);
++		unsigned char efr2, timeout_h, timeout_l;
++
++		efr2 = serial_in(up, UART_OMAP_EFR2);
++		timeout_h = serial_in(up, UART_OMAP_TO_H);
++		timeout_l = serial_in(up, UART_OMAP_TO_L);
++		serial_out(up, UART_OMAP_TO_H, 0xFF);
++		serial_out(up, UART_OMAP_TO_L, 0xFF);
++		serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE);
++		serial_in(up, UART_IIR);
++		serial_out(up, UART_OMAP_EFR2, efr2);
++		serial_out(up, UART_OMAP_TO_H, timeout_h);
++		serial_out(up, UART_OMAP_TO_L, timeout_l);
+ 	}
+ 
+ 	/* Stop processing interrupts on input overrun */
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index e2e4f99f9d347..fe0fb2b4e9db7 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1985,6 +1985,17 @@ enum {
+ 	MOXA_SUPP_RS485 = BIT(2),
+ };
+ 
++static unsigned short moxa_get_nports(unsigned short device)
++{
++	switch (device) {
++	case PCI_DEVICE_ID_MOXA_CP116E_A_A:
++	case PCI_DEVICE_ID_MOXA_CP116E_A_B:
++		return 8;
++	}
++
++	return FIELD_GET(0x00F0, device);
++}
++
+ static bool pci_moxa_is_mini_pcie(unsigned short device)
+ {
+ 	if (device == PCI_DEVICE_ID_MOXA_CP102N	||
+@@ -2038,7 +2049,7 @@ static int pci_moxa_init(struct pci_dev *dev)
+ {
+ 	unsigned short device = dev->device;
+ 	resource_size_t iobar_addr = pci_resource_start(dev, 2);
+-	unsigned int num_ports = (device & 0x00F0) >> 4, i;
++	unsigned int i, num_ports = moxa_get_nports(device);
+ 	u8 val, init_mode = MOXA_RS232;
+ 
+ 	if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
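
[moxa_get_nports() keeps the existing encoding — bits 7:4 of the PCI device ID
carry the port count — but special-cases the two CP116E_A IDs, whose 8 ports do
not follow that scheme. FIELD_GET(0x00F0, device) is equivalent to
((device) & 0x00F0) >> 4; e.g. for a hypothetical ID 0x1141:

	/* FIELD_GET(0x00F0, 0x1141) == (0x1141 & 0x00F0) >> 4 == 4 ports */
]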
+diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
+index 34801a6f300b6..b88cc28c94e33 100644
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -308,8 +308,8 @@ static void bcm_uart_do_tx(struct uart_port *port)
+ 
+ 	val = bcm_uart_readl(port, UART_MCTL_REG);
+ 	val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
+-
+-	pending = uart_port_tx_limited(port, ch, port->fifosize - val,
++	pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP,
++		port->fifosize - val,
+ 		true,
+ 		bcm_uart_writel(port, ch, UART_FIFO_REG),
+ 		({}));
+@@ -320,6 +320,9 @@ static void bcm_uart_do_tx(struct uart_port *port)
+ 	val = bcm_uart_readl(port, UART_IR_REG);
+ 	val &= ~UART_TX_INT_MASK;
+ 	bcm_uart_writel(port, val, UART_IR_REG);
++
++	if (uart_tx_stopped(port))
++		bcm_uart_stop_tx(port);
+ }
+ 
+ /*
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 09c1678ddfd49..9552228d21614 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -119,6 +119,7 @@
+ #define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
+ #define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
+ #define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
++#define UFCR_RXTL_MASK	0x3F	/* Receiver trigger 6 bits wide */
+ #define UFCR_DCEDTE	(1<<6)	/* DCE/DTE mode select */
+ #define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
+ #define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
+@@ -1941,7 +1942,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ 				 struct serial_rs485 *rs485conf)
+ {
+ 	struct imx_port *sport = (struct imx_port *)port;
+-	u32 ucr2;
++	u32 ucr2, ufcr;
+ 
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		/* Enable receiver if low-active RTS signal is requested */
+@@ -1960,8 +1961,13 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
+ 
+ 	/* Make sure Rx is enabled in case Tx is active with Rx disabled */
+ 	if (!(rs485conf->flags & SER_RS485_ENABLED) ||
+-	    rs485conf->flags & SER_RS485_RX_DURING_TX)
++	    rs485conf->flags & SER_RS485_RX_DURING_TX) {
++		/* If the receiver trigger is 0, set it to a default value */
++		ufcr = imx_uart_readl(sport, UFCR);
++		if ((ufcr & UFCR_RXTL_MASK) == 0)
++			imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ 		imx_uart_start_rx(port);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index b0604d6da0257..58858dd352c59 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = {
+ 	.verify_port	= mcf_verify_port,
+ };
+ 
+-static struct mcf_uart mcf_ports[4];
++static struct mcf_uart mcf_ports[10];
+ 
+ #define	MCF_MAXPORTS	ARRAY_SIZE(mcf_ports)
+ 
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 4ce7cba2b48aa..8f3b9a0a38e1d 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 	struct cxacru_data *instance;
+ 	struct usb_device *usb_dev = interface_to_usbdev(intf);
+ 	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
++	struct usb_endpoint_descriptor *in, *out;
+ 	int ret;
+ 
+ 	/* instance init */
+@@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
+ 		goto fail;
+ 	}
+ 
++	if (usb_endpoint_xfer_int(&cmd_ep->desc))
++		ret = usb_find_common_endpoints(intf->cur_altsetting,
++						NULL, NULL, &in, &out);
++	else
++		ret = usb_find_common_endpoints(intf->cur_altsetting,
++						&in, &out, NULL, NULL);
++
++	if (ret) {
++		usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++
+ 	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ 			== USB_ENDPOINT_XFER_INT) {
+ 		usb_fill_int_urb(instance->rcv_urb,
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 100041320e8dd..cfe754f480a07 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -879,12 +879,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
+ 
+ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+ {
++	unsigned int power_opt;
++	unsigned int hw_mode;
+ 	u32 reg;
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ 	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
++	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++	power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
+ 
+-	switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
++	switch (power_opt) {
+ 	case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ 		/**
+ 		 * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an
+@@ -917,6 +921,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+ 		break;
+ 	}
+ 
++	/*
++	 * This is a workaround for STAR#4846132, which only affects
++	 * DWC_usb31 version 2.00a operating in host mode.
++	 *
++	 * There is a problem in DWC_usb31 version 2.00a operating
++	 * in host mode that can cause a CSR read timeout when a CSR
++	 * read coincides with RAM clock gating entry. Disable clock
++	 * gating to avoid it, sacrificing some power consumption for
++	 * correct operation.
++	 */
++	if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO &&
++	    hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A))
++		reg |= DWC3_GCTL_DSBLCLKGTNG;
++
+ 	/* check if current dwc3 is on simulation board */
+ 	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
+ 		dev_info(dwc->dev, "Running with FPGA optimizations\n");
+@@ -2084,7 +2102,6 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
+ 
+ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+-	unsigned long	flags;
+ 	u32 reg;
+ 
+ 	switch (dwc->current_dr_role) {
+@@ -2122,9 +2139,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 			break;
+ 
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+-			spin_lock_irqsave(&dwc->lock, flags);
+ 			dwc3_gadget_suspend(dwc);
+-			spin_unlock_irqrestore(&dwc->lock, flags);
+ 			synchronize_irq(dwc->irq_gadget);
+ 		}
+ 
+@@ -2141,7 +2156,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ 
+ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+-	unsigned long	flags;
+ 	int		ret;
+ 	u32		reg;
+ 
+@@ -2190,9 +2204,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ 		if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
+ 			dwc3_otg_host_init(dwc);
+ 		} else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
+-			spin_lock_irqsave(&dwc->lock, flags);
+ 			dwc3_gadget_resume(dwc);
+-			spin_unlock_irqrestore(&dwc->lock, flags);
+ 		}
+ 
+ 		break;
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index ba7d180cc9e6d..44e20c6c36d32 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+ 					struct usb_endpoint_descriptor *ss)
+ {
+ 	switch (gadget->speed) {
++	case USB_SPEED_SUPER_PLUS:
+ 	case USB_SPEED_SUPER:
+ 		return ss;
+ 	case USB_SPEED_HIGH:
+@@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 	mutex_lock(&dev->lock_printer_io);
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+-	if (dev->interface < 0) {
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		mutex_unlock(&dev->lock_printer_io);
+-		return -ENODEV;
+-	}
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	/* We will use this flag later to check if a printer reset happened
+ 	 * after we turn interrupts back on.
+@@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 	dev->reset_printer = 0;
+ 
+ 	setup_rx_reqs(dev);
++	/* setup_rx_reqs() dropped the lock - need to retest */
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	bytes_copied = 0;
+ 	current_rx_req = dev->current_rx_req;
+@@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 		wait_event_interruptible(dev->rx_wait,
+ 				(likely(!list_empty(&dev->rx_buffers))));
+ 		spin_lock_irqsave(&dev->lock, flags);
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	/* We have data to return, so copy it to the caller's buffer. */
+@@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 			return -EAGAIN;
+ 		}
+ 
++		if (dev->interface < 0)
++			goto out_disabled;
++
+ 		/* If we are not returning all the data left in this RX request
+ 		 * buffer then adjust the amount of data left in the buffer.
+ 		 * Otherwise, if we are done with this RX request buffer then
+@@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 		return bytes_copied;
+ 	else
+ 		return -EAGAIN;
++
++out_disabled:
++	spin_unlock_irqrestore(&dev->lock, flags);
++	mutex_unlock(&dev->lock_printer_io);
++	return -ENODEV;
+ }
+ 
+ static ssize_t
+@@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	mutex_lock(&dev->lock_printer_io);
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+-	if (dev->interface < 0) {
+-		spin_unlock_irqrestore(&dev->lock, flags);
+-		mutex_unlock(&dev->lock_printer_io);
+-		return -ENODEV;
+-	}
++	if (dev->interface < 0)
++		goto out_disabled;
+ 
+ 	/* Check if a printer reset happens while we have interrupts on */
+ 	dev->reset_printer = 0;
+@@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 		wait_event_interruptible(dev->tx_wait,
+ 				(likely(!list_empty(&dev->tx_reqs))));
+ 		spin_lock_irqsave(&dev->lock, flags);
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	while (likely(!list_empty(&dev->tx_reqs)) && len) {
+@@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			return -EAGAIN;
+ 		}
+ 
++		if (dev->interface < 0)
++			goto out_disabled;
++
+ 		list_add(&req->list, &dev->tx_reqs_active);
+ 
+ 		/* here, we unlock, and only unlock, to avoid deadlock. */
+@@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			mutex_unlock(&dev->lock_printer_io);
+ 			return -EAGAIN;
+ 		}
++		if (dev->interface < 0)
++			goto out_disabled;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+@@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 		return bytes_copied;
+ 	else
+ 		return -EAGAIN;
++
++out_disabled:
++	spin_unlock_irqrestore(&dev->lock, flags);
++	mutex_unlock(&dev->lock_printer_io);
++	return -ENODEV;
+ }
+ 
+ static int
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 444212c0b5a98..be3b0bad554e5 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1163,8 +1163,6 @@ struct net_device *gether_connect(struct gether *link)
+ 		if (netif_running(dev->net))
+ 			eth_start(dev, GFP_ATOMIC);
+ 
+-		netif_device_attach(dev->net);
+-
+ 	/* on error, disable any endpoints  */
+ 	} else {
+ 		(void) usb_ep_disable(link->out_ep);
+@@ -1202,7 +1200,7 @@ void gether_disconnect(struct gether *link)
+ 
+ 	DBG(dev, "%s\n", __func__);
+ 
+-	netif_device_detach(dev->net);
++	netif_stop_queue(dev->net);
+ 	netif_carrier_off(dev->net);
+ 
+ 	/* disable endpoints, forcing (synchronous) completion
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index 3916c8e2ba01f..821a6ab5da56f 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -66,8 +66,8 @@
+ #define USB_UPSTREAM_EN			BIT(0)
+ 
+ /* Main config reg */
+-#define UDC_CFG_SET_ADDR(x)		((x) & 0x3f)
+-#define UDC_CFG_ADDR_MASK		(0x3f)
++#define UDC_CFG_SET_ADDR(x)		((x) & UDC_CFG_ADDR_MASK)
++#define UDC_CFG_ADDR_MASK		GENMASK(6, 0)
+ 
+ /* Interrupt ctrl & status reg */
+ #define UDC_IRQ_EP_POOL_NAK		BIT(17)
+diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
+index 8abf3a567e30a..108d9a593a80d 100644
+--- a/drivers/usb/musb/da8xx.c
++++ b/drivers/usb/musb/da8xx.c
+@@ -556,7 +556,7 @@ static int da8xx_probe(struct platform_device *pdev)
+ 	ret = of_platform_populate(pdev->dev.of_node, NULL,
+ 				   da8xx_auxdata_lookup, &pdev->dev);
+ 	if (ret)
+-		return ret;
++		goto err_unregister_phy;
+ 
+ 	pinfo = da8xx_dev_info;
+ 	pinfo.parent = &pdev->dev;
+@@ -571,9 +571,13 @@ static int da8xx_probe(struct platform_device *pdev)
+ 	ret = PTR_ERR_OR_ZERO(glue->musb);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
+-		usb_phy_generic_unregister(glue->usb_phy);
++		goto err_unregister_phy;
+ 	}
+ 
++	return 0;
++
++err_unregister_phy:
++	usb_phy_generic_unregister(glue->usb_phy);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 7801501837b69..911d774c9805a 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -49,22 +49,16 @@ static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
+ 	return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+ }
+ 
+-static int ucsi_acknowledge_command(struct ucsi *ucsi)
++static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
+ {
+ 	u64 ctrl;
+ 
+ 	ctrl = UCSI_ACK_CC_CI;
+ 	ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+-
+-	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+-}
+-
+-static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+-{
+-	u64 ctrl;
+-
+-	ctrl = UCSI_ACK_CC_CI;
+-	ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++	if (conn_ack) {
++		clear_bit(EVENT_PENDING, &ucsi->flags);
++		ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
++	}
+ 
+ 	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+ }
+@@ -77,7 +71,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ 	int ret;
+ 
+ 	/* Acknowledge the command that failed */
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -89,7 +83,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -152,28 +146,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
+ 		return -EIO;
+ 
+ 	if (cci & UCSI_CCI_NOT_SUPPORTED) {
+-		if (ucsi_acknowledge_command(ucsi) < 0)
++		if (ucsi_acknowledge(ucsi, false) < 0)
+ 			dev_err(ucsi->dev,
+ 				"ACK of unsupported command failed\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	if (cci & UCSI_CCI_ERROR) {
+-		if (cmd == UCSI_GET_ERROR_STATUS)
++		if (cmd == UCSI_GET_ERROR_STATUS) {
++			ret = ucsi_acknowledge(ucsi, false);
++			if (ret)
++				return ret;
++
+ 			return -EIO;
++		}
+ 		return ucsi_read_error(ucsi);
+ 	}
+ 
+ 	if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
+-		ret = ucsi_acknowledge_command(ucsi);
++		ret = ucsi_acknowledge(ucsi, false);
+ 		return ret ? ret : -EBUSY;
+ 	}
+ 
+ 	return UCSI_CCI_LENGTH(cci);
+ }
+ 
+-int ucsi_send_command(struct ucsi *ucsi, u64 command,
+-		      void *data, size_t size)
++static int ucsi_send_command_common(struct ucsi *ucsi, u64 command,
++				    void *data, size_t size, bool conn_ack)
+ {
+ 	u8 length;
+ 	int ret;
+@@ -192,7 +191,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ 			goto out;
+ 	}
+ 
+-	ret = ucsi_acknowledge_command(ucsi);
++	ret = ucsi_acknowledge(ucsi, conn_ack);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -201,6 +200,12 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
+ 	mutex_unlock(&ucsi->ppm_lock);
+ 	return ret;
+ }
++
++int ucsi_send_command(struct ucsi *ucsi, u64 command,
++		      void *data, size_t size)
++{
++	return ucsi_send_command_common(ucsi, command, data, size, false);
++}
+ EXPORT_SYMBOL_GPL(ucsi_send_command);
+ 
+ /* -------------------------------------------------------------------------- */
+@@ -1157,7 +1162,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	mutex_lock(&con->lock);
+ 
+ 	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+-	ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
++
++	ret = ucsi_send_command_common(ucsi, command, &con->status,
++				       sizeof(con->status), true);
+ 	if (ret < 0) {
+ 		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+ 			__func__, ret);
+@@ -1214,14 +1221,6 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
+ 		ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
+ 
+-	mutex_lock(&ucsi->ppm_lock);
+-	clear_bit(EVENT_PENDING, &con->ucsi->flags);
+-	ret = ucsi_acknowledge_connector_change(ucsi);
+-	mutex_unlock(&ucsi->ppm_lock);
+-
+-	if (ret)
+-		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+-
+ out_unlock:
+ 	mutex_unlock(&con->lock);
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 1d0e2d87e9b31..4763bd6e55a79 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -365,6 +365,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ 		ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ 		if (ret < 0) {
+ 			dev_err(dev, "missing reg property of %pOFn\n", fwnode);
++			fwnode_handle_put(fwnode);
+ 			return ret;
+ 		}
+ 
+@@ -379,9 +380,11 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
+ 		if (!desc)
+ 			continue;
+ 
+-		if (IS_ERR(desc))
++		if (IS_ERR(desc)) {
++			fwnode_handle_put(fwnode);
+ 			return dev_err_probe(dev, PTR_ERR(desc),
+ 					     "unable to acquire orientation gpio\n");
++		}
+ 		ucsi->port_orientation[port] = desc;
+ 
+ 		ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode);
+diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+index 93d7806681cf0..1d7ee833eb4fd 100644
+--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
++++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+@@ -64,6 +64,7 @@ struct ucsi_stm32g0 {
+ 	struct completion complete;
+ 	struct device *dev;
+ 	unsigned long flags;
++#define ACK_PENDING	2
+ 	const char *fw_name;
+ 	struct ucsi *ucsi;
+ 	bool suspended;
+@@ -395,9 +396,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+ 				   size_t len)
+ {
+ 	struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi);
++	bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
+ 	int ret;
+ 
+-	set_bit(COMMAND_PENDING, &g0->flags);
++	if (ack)
++		set_bit(ACK_PENDING, &g0->flags);
++	else
++		set_bit(COMMAND_PENDING, &g0->flags);
+ 
+ 	ret = ucsi_stm32g0_async_write(ucsi, offset, val, len);
+ 	if (ret)
+@@ -405,9 +410,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const
+ 
+ 	if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000)))
+ 		ret = -ETIMEDOUT;
++	else
++		return 0;
+ 
+ out_clear_bit:
+-	clear_bit(COMMAND_PENDING, &g0->flags);
++	if (ack)
++		clear_bit(ACK_PENDING, &g0->flags);
++	else
++		clear_bit(COMMAND_PENDING, &g0->flags);
+ 
+ 	return ret;
+ }
+@@ -428,8 +438,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
+ 	if (UCSI_CCI_CONNECTOR(cci))
+ 		ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci));
+ 
+-	if (test_bit(COMMAND_PENDING, &g0->flags) &&
+-	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
++	if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags))
++		complete(&g0->complete);
++	if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags))
+ 		complete(&g0->complete);
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 73c89701fc9d4..ac8b5b52e3dc4 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -8,6 +8,7 @@
+  *
+  */
+ 
++#include "linux/virtio_net.h"
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/cdev.h>
+@@ -28,6 +29,7 @@
+ #include <uapi/linux/virtio_config.h>
+ #include <uapi/linux/virtio_ids.h>
+ #include <uapi/linux/virtio_blk.h>
++#include <uapi/linux/virtio_ring.h>
+ #include <linux/mod_devicetable.h>
+ 
+ #include "iova_domain.h"
+@@ -1705,13 +1707,17 @@ static bool device_is_allowed(u32 device_id)
+ 	return false;
+ }
+ 
+-static bool features_is_valid(u64 features)
++static bool features_is_valid(struct vduse_dev_config *config)
+ {
+-	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
++	if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
+ 		return false;
+ 
+ 	/* Now we only support read-only configuration space */
+-	if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE))
++	if ((config->device_id == VIRTIO_ID_BLOCK) &&
++			(config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
++		return false;
++	else if ((config->device_id == VIRTIO_ID_NET) &&
++			(config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ 		return false;
+ 
+ 	return true;
+@@ -1738,7 +1744,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
+ 	if (!device_is_allowed(config->device_id))
+ 		return false;
+ 
+-	if (!features_is_valid(config->features))
++	if (!features_is_valid(config))
+ 		return false;
+ 
+ 	return true;
+diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
+index 91c3c1fef233d..e6bcf74b0d60a 100644
+--- a/fs/bcachefs/bcachefs.h
++++ b/fs/bcachefs/bcachefs.h
+@@ -455,6 +455,7 @@ enum bch_time_stats {
+ };
+ 
+ #include "alloc_types.h"
++#include "btree_gc_types.h"
+ #include "btree_types.h"
+ #include "btree_node_scan_types.h"
+ #include "btree_write_buffer_types.h"
+@@ -485,49 +486,6 @@ enum bch_time_stats {
+ 
+ struct btree;
+ 
+-enum gc_phase {
+-	GC_PHASE_NOT_RUNNING,
+-	GC_PHASE_START,
+-	GC_PHASE_SB,
+-
+-	GC_PHASE_BTREE_stripes,
+-	GC_PHASE_BTREE_extents,
+-	GC_PHASE_BTREE_inodes,
+-	GC_PHASE_BTREE_dirents,
+-	GC_PHASE_BTREE_xattrs,
+-	GC_PHASE_BTREE_alloc,
+-	GC_PHASE_BTREE_quotas,
+-	GC_PHASE_BTREE_reflink,
+-	GC_PHASE_BTREE_subvolumes,
+-	GC_PHASE_BTREE_snapshots,
+-	GC_PHASE_BTREE_lru,
+-	GC_PHASE_BTREE_freespace,
+-	GC_PHASE_BTREE_need_discard,
+-	GC_PHASE_BTREE_backpointers,
+-	GC_PHASE_BTREE_bucket_gens,
+-	GC_PHASE_BTREE_snapshot_trees,
+-	GC_PHASE_BTREE_deleted_inodes,
+-	GC_PHASE_BTREE_logged_ops,
+-	GC_PHASE_BTREE_rebalance_work,
+-	GC_PHASE_BTREE_subvolume_children,
+-
+-	GC_PHASE_PENDING_DELETE,
+-};
+-
+-struct gc_pos {
+-	enum gc_phase		phase;
+-	struct bpos		pos;
+-	unsigned		level;
+-};
+-
+-struct reflink_gc {
+-	u64		offset;
+-	u32		size;
+-	u32		refcount;
+-};
+-
+-typedef GENRADIX(struct reflink_gc) reflink_gc_table;
+-
+ struct io_count {
+ 	u64			sectors[2][BCH_DATA_NR];
+ };
+diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
+index 791470b0c6545..58f7c99e0ed88 100644
+--- a/fs/bcachefs/btree_gc.c
++++ b/fs/bcachefs/btree_gc.c
+@@ -1080,8 +1080,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
+ 
+ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
+ {
+-	return  (int) btree_id_to_gc_phase(l) -
+-		(int) btree_id_to_gc_phase(r);
++	return cmp_int(gc_btree_order(l), gc_btree_order(r));
+ }
+ 
+ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+@@ -1126,7 +1125,7 @@ static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+ 			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+ 
+ 		bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+-					  gc_phase(GC_PHASE_SB), flags);
++					  gc_phase(GC_PHASE_sb), flags);
+ 		b++;
+ 		start += sectors;
+ 	} while (start < end);
+@@ -1155,14 +1154,14 @@ static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+ 		b = ca->journal.buckets[i];
+ 		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
+ 					  ca->mi.bucket_size,
+-					  gc_phase(GC_PHASE_SB), flags);
++					  gc_phase(GC_PHASE_sb), flags);
+ 	}
+ }
+ 
+ static void bch2_mark_superblocks(struct bch_fs *c)
+ {
+ 	mutex_lock(&c->sb_lock);
+-	gc_pos_set(c, gc_phase(GC_PHASE_SB));
++	gc_pos_set(c, gc_phase(GC_PHASE_sb));
+ 
+ 	for_each_online_member(c, ca)
+ 		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+@@ -1773,7 +1772,7 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+ 	if (ret)
+ 		goto out;
+ again:
+-	gc_pos_set(c, gc_phase(GC_PHASE_START));
++	gc_pos_set(c, gc_phase(GC_PHASE_start));
+ 
+ 	bch2_mark_superblocks(c);
+ 
+@@ -1800,7 +1799,7 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+ 		 */
+ 		bch_info(c, "Second GC pass needed, restarting:");
+ 		clear_bit(BCH_FS_need_another_gc, &c->flags);
+-		__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
++		__gc_pos_set(c, gc_phase(GC_PHASE_not_running));
+ 
+ 		bch2_gc_stripes_reset(c, metadata_only);
+ 		bch2_gc_alloc_reset(c, metadata_only);
+@@ -1827,7 +1826,7 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+ 
+ 	percpu_down_write(&c->mark_lock);
+ 	/* Indicates that gc is no longer in progress: */
+-	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
++	__gc_pos_set(c, gc_phase(GC_PHASE_not_running));
+ 
+ 	bch2_gc_free(c);
+ 	percpu_up_write(&c->mark_lock);
+diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
+index 607575f83a002..5c97eb6a38790 100644
+--- a/fs/bcachefs/btree_gc.h
++++ b/fs/bcachefs/btree_gc.h
+@@ -3,6 +3,7 @@
+ #define _BCACHEFS_BTREE_GC_H
+ 
+ #include "bkey.h"
++#include "btree_gc_types.h"
+ #include "btree_types.h"
+ 
+ int bch2_check_topology(struct bch_fs *);
+@@ -35,38 +36,17 @@ int bch2_gc_thread_start(struct bch_fs *);
+ /* Position of (the start of) a gc phase: */
+ static inline struct gc_pos gc_phase(enum gc_phase phase)
+ {
+-	return (struct gc_pos) {
+-		.phase	= phase,
+-		.pos	= POS_MIN,
+-		.level	= 0,
+-	};
+-}
+-
+-static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+-{
+-	return  cmp_int(l.phase, r.phase) ?:
+-		bpos_cmp(l.pos, r.pos) ?:
+-		cmp_int(l.level, r.level);
+-}
+-
+-static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
+-{
+-	switch (id) {
+-#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
+-	BCH_BTREE_IDS()
+-#undef x
+-	default:
+-		BUG();
+-	}
++	return (struct gc_pos) { .phase	= phase, };
+ }
+ 
+-static inline struct gc_pos gc_pos_btree(enum btree_id id,
++static inline struct gc_pos gc_pos_btree(enum btree_id btree,
+ 					 struct bpos pos, unsigned level)
+ {
+ 	return (struct gc_pos) {
+-		.phase	= btree_id_to_gc_phase(id),
+-		.pos	= pos,
++		.phase	= GC_PHASE_btree,
++		.btree	= btree,
+ 		.level	= level,
++		.pos	= pos,
+ 	};
+ }
+ 
+@@ -91,6 +71,22 @@ static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
+ 	return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH);
+ }
+ 
++static inline int gc_btree_order(enum btree_id btree)
++{
++	if (btree == BTREE_ID_stripes)
++		return -1;
++	return btree;
++}
++
++static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
++{
++	return   cmp_int(l.phase, r.phase) ?:
++		 cmp_int(gc_btree_order(l.btree),
++			 gc_btree_order(r.btree)) ?:
++		-cmp_int(l.level, r.level) ?:
++		 bpos_cmp(l.pos, r.pos);
++}
++
+ static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
+ {
+ 	unsigned seq;
+diff --git a/fs/bcachefs/btree_gc_types.h b/fs/bcachefs/btree_gc_types.h
+new file mode 100644
+index 0000000000000..b82c24bcc0880
+--- /dev/null
++++ b/fs/bcachefs/btree_gc_types.h
+@@ -0,0 +1,29 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _BCACHEFS_BTREE_GC_TYPES_H
++#define _BCACHEFS_BTREE_GC_TYPES_H
++
++#include <linux/generic-radix-tree.h>
++
++enum gc_phase {
++	GC_PHASE_not_running,
++	GC_PHASE_start,
++	GC_PHASE_sb,
++	GC_PHASE_btree,
++};
++
++struct gc_pos {
++	enum gc_phase		phase:8;
++	enum btree_id		btree:8;
++	u16			level;
++	struct bpos		pos;
++};
++
++struct reflink_gc {
++	u64		offset;
++	u32		size;
++	u32		refcount;
++};
++
++typedef GENRADIX(struct reflink_gc) reflink_gc_table;
++
++#endif /* _BCACHEFS_BTREE_GC_TYPES_H */
+diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
+index 556a217108d32..0a49c2e9955ad 100644
+--- a/fs/bcachefs/ec.c
++++ b/fs/bcachefs/ec.c
+@@ -880,7 +880,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
+ 	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
+ 		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
+ 
+-	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
++	if (c->gc_pos.phase != GC_PHASE_not_running &&
+ 	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
+ 		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
+ 
+diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
+index a98ef940b7a32..ed6d298bc19c9 100644
+--- a/fs/bcachefs/sb-downgrade.c
++++ b/fs/bcachefs/sb-downgrade.c
+@@ -134,7 +134,8 @@ downgrade_entry_next_c(const struct bch_sb_field_downgrade_entry *e)
+ #define for_each_downgrade_entry(_d, _i)						\
+ 	for (const struct bch_sb_field_downgrade_entry *_i = (_d)->entries;		\
+ 	     (void *) _i	< vstruct_end(&(_d)->field) &&				\
+-	     (void *) &_i->errors[0] < vstruct_end(&(_d)->field);			\
++	     (void *) &_i->errors[0] <= vstruct_end(&(_d)->field) &&			\
++	     (void *) downgrade_entry_next_c(_i) <= vstruct_end(&(_d)->field);		\
+ 	     _i = downgrade_entry_next_c(_i))
+ 
+ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
+@@ -142,7 +143,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ {
+ 	struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
+ 
+-	for_each_downgrade_entry(e, i) {
++	for (const struct bch_sb_field_downgrade_entry *i = e->entries;
++	     (void *) i	< vstruct_end(&e->field);
++	     i = downgrade_entry_next_c(i)) {
++		/*
++		 * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
++		 * section sizes are 8 byte aligned - an empty entry spanning
++		 * the end of the section is allowed (and ignored):
++		 */
++		if ((void *) &i->errors[0] > vstruct_end(&e->field))
++			break;
++
+ 		if (BCH_VERSION_MAJOR(le16_to_cpu(i->version)) !=
+ 		    BCH_VERSION_MAJOR(le16_to_cpu(sb->version))) {
+ 			prt_printf(err, "downgrade entry with mismatched major version (%u != %u)",
+@@ -214,7 +225,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
+ 
+ 		dst = (void *) &darray_top(table);
+ 		dst->version = cpu_to_le16(src->version);
+-		dst->recovery_passes[0]	= cpu_to_le64(src->recovery_passes);
++		dst->recovery_passes[0]	= cpu_to_le64(bch2_recovery_passes_to_stable(src->recovery_passes));
+ 		dst->recovery_passes[1]	= 0;
+ 		dst->nr_errors		= cpu_to_le16(src->nr_errors);
+ 		for (unsigned i = 0; i < src->nr_errors; i++)
+diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
+index bfdb15e7d778e..a337246e912aa 100644
+--- a/fs/bcachefs/super-io.c
++++ b/fs/bcachefs/super-io.c
+@@ -1123,18 +1123,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
+ 	 * c->sb will be checked before we write the superblock, so update it as
+ 	 * well:
+ 	 */
+-	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
++	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
+ 		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
+-		c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
+-	}
+-	if (c->sb.version > bcachefs_metadata_version_current) {
++	if (c->sb.version > bcachefs_metadata_version_current)
+ 		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
+-		c->sb.version = bcachefs_metadata_version_current;
+-	}
+-	if (c->sb.version_min > bcachefs_metadata_version_current) {
++	if (c->sb.version_min > bcachefs_metadata_version_current)
+ 		c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
+-		c->sb.version_min = bcachefs_metadata_version_current;
+-	}
+ 	c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index c8a05d5eb9cbc..b642df5e52558 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2697,7 +2697,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	u64 offset = bytenr - block_group->start;
+ 	u64 to_free, to_unusable;
+ 	int bg_reclaim_threshold = 0;
+-	bool initial = (size == block_group->length);
++	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+ 	u64 reclaimable_unusable;
+ 
+ 	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index d4fc5fedd8ee5..9d156aa8f20d1 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -138,6 +138,25 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
+  * and once to do all the other items.
+  */
+ 
++static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
++{
++	unsigned int nofs_flag;
++	struct inode *inode;
++
++	/*
++	 * We're holding a transaction handle whether we are logging or
++	 * replaying a log tree, so we must make sure NOFS semantics apply
++	 * because btrfs_alloc_inode() may be triggered and it uses GFP_KERNEL
++	 * to allocate an inode, which can recurse back into the filesystem and
++	 * attempt a transaction commit, resulting in a deadlock.
++	 */
++	nofs_flag = memalloc_nofs_save();
++	inode = btrfs_iget(root->fs_info->sb, objectid, root);
++	memalloc_nofs_restore(nofs_flag);
++
++	return inode;
++}
++
+ /*
+  * start a sub transaction and setup the log tree
+  * this increments the log tree writer count to make the people
+@@ -600,7 +619,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
+ {
+ 	struct inode *inode;
+ 
+-	inode = btrfs_iget(root->fs_info->sb, objectid, root);
++	inode = btrfs_iget_logging(objectid, root);
+ 	if (IS_ERR(inode))
+ 		inode = NULL;
+ 	return inode;
+@@ -5434,7 +5453,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 				struct btrfs_log_ctx *ctx)
+ {
+ 	struct btrfs_root *root = start_inode->root;
+-	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct btrfs_path *path;
+ 	LIST_HEAD(dir_list);
+ 	struct btrfs_dir_list *dir_elem;
+@@ -5495,7 +5513,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 				continue;
+ 
+ 			btrfs_release_path(path);
+-			di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
++			di_inode = btrfs_iget_logging(di_key.objectid, root);
+ 			if (IS_ERR(di_inode)) {
+ 				ret = PTR_ERR(di_inode);
+ 				goto out;
+@@ -5555,7 +5573,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
+ 		btrfs_add_delayed_iput(curr_inode);
+ 		curr_inode = NULL;
+ 
+-		vfs_inode = btrfs_iget(fs_info->sb, ino, root);
++		vfs_inode = btrfs_iget_logging(ino, root);
+ 		if (IS_ERR(vfs_inode)) {
+ 			ret = PTR_ERR(vfs_inode);
+ 			break;
+@@ -5650,7 +5668,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
+ 	if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
+ 		return BTRFS_LOG_FORCE_COMMIT;
+ 
+-	inode = btrfs_iget(root->fs_info->sb, ino, root);
++	inode = btrfs_iget_logging(ino, root);
+ 	/*
+ 	 * If the other inode that had a conflicting dir entry was deleted in
+ 	 * the current transaction then we either:
+@@ -5751,7 +5769,6 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 				  struct btrfs_root *root,
+ 				  struct btrfs_log_ctx *ctx)
+ {
+-	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	int ret = 0;
+ 
+ 	/*
+@@ -5782,7 +5799,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 		list_del(&curr->list);
+ 		kfree(curr);
+ 
+-		inode = btrfs_iget(fs_info->sb, ino, root);
++		inode = btrfs_iget_logging(ino, root);
+ 		/*
+ 		 * If the other inode that had a conflicting dir entry was
+ 		 * deleted in the current transaction, we need to log its parent
+@@ -5793,7 +5810,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ 			if (ret != -ENOENT)
+ 				break;
+ 
+-			inode = btrfs_iget(fs_info->sb, parent, root);
++			inode = btrfs_iget_logging(parent, root);
+ 			if (IS_ERR(inode)) {
+ 				ret = PTR_ERR(inode);
+ 				break;
+@@ -6315,7 +6332,6 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ 				    struct btrfs_log_ctx *ctx)
+ {
+ 	const bool orig_log_new_dentries = ctx->log_new_dentries;
+-	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_delayed_item *item;
+ 	int ret = 0;
+ 
+@@ -6341,7 +6357,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
+ 		if (key.type == BTRFS_ROOT_ITEM_KEY)
+ 			continue;
+ 
+-		di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root);
++		di_inode = btrfs_iget_logging(key.objectid, inode->root);
+ 		if (IS_ERR(di_inode)) {
+ 			ret = PTR_ERR(di_inode);
+ 			break;
+@@ -6725,7 +6741,6 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 				 struct btrfs_inode *inode,
+ 				 struct btrfs_log_ctx *ctx)
+ {
+-	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	int ret;
+ 	struct btrfs_path *path;
+ 	struct btrfs_key key;
+@@ -6790,8 +6805,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 				cur_offset = item_size;
+ 			}
+ 
+-			dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
+-					       root);
++			dir_inode = btrfs_iget_logging(inode_key.objectid, root);
+ 			/*
+ 			 * If the parent inode was deleted, return an error to
+ 			 * fallback to a transaction commit. This is to prevent
+@@ -6853,7 +6867,6 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+ 
+ 	while (true) {
+-		struct btrfs_fs_info *fs_info = root->fs_info;
+ 		struct extent_buffer *leaf;
+ 		int slot;
+ 		struct btrfs_key search_key;
+@@ -6868,7 +6881,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ 		search_key.objectid = found_key.offset;
+ 		search_key.type = BTRFS_INODE_ITEM_KEY;
+ 		search_key.offset = 0;
+-		inode = btrfs_iget(fs_info->sb, ino, root);
++		inode = btrfs_iget_logging(ino, root);
+ 		if (IS_ERR(inode))
+ 			return PTR_ERR(inode);
+ 
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 8cddf955ebc0c..a6dd68b458cec 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -1108,7 +1108,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 	lops_before_commit(sdp, tr);
+ 	if (gfs2_withdrawing_or_withdrawn(sdp))
+ 		goto out_withdraw;
+-	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
++	if (sdp->sd_jdesc)
++		gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+ 	if (gfs2_withdrawing_or_withdrawn(sdp))
+ 		goto out_withdraw;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 2d780b4701a23..ee61fcb7f200d 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
+ 	sdp->sd_journals = 0;
+ 	spin_unlock(&sdp->sd_jindex_spin);
+ 
++	down_write(&sdp->sd_log_flush_lock);
+ 	sdp->sd_jdesc = NULL;
++	up_write(&sdp->sd_log_flush_lock);
++
+ 	while (!list_empty(&list)) {
+ 		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
++		BUG_ON(jd->jd_log_bio);
+ 		gfs2_free_journal_extents(jd);
+ 		list_del(&jd->jd_list);
+ 		iput(jd->jd_inode);
+diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
+index 912ad0a1df021..d2ce0849bb53d 100644
+--- a/fs/netfs/buffered_write.c
++++ b/fs/netfs/buffered_write.c
+@@ -507,6 +507,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
+ {
+ 	struct folio *folio = page_folio(vmf->page);
+ 	struct file *file = vmf->vma->vm_file;
++	struct address_space *mapping = file->f_mapping;
+ 	struct inode *inode = file_inode(file);
+ 	vm_fault_t ret = VM_FAULT_RETRY;
+ 	int err;
+@@ -520,6 +521,11 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
+ 
+ 	if (folio_lock_killable(folio) < 0)
+ 		goto out;
++	if (folio->mapping != mapping) {
++		folio_unlock(folio);
++		ret = VM_FAULT_NOPAGE;
++		goto out;
++	}
+ 
+ 	/* Can we see a streaming write here? */
+ 	if (WARN_ON(!folio_test_uptodate(folio))) {
+@@ -529,9 +535,9 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
+ 
+ 	if (netfs_folio_group(folio) != netfs_group) {
+ 		folio_unlock(folio);
+-		err = filemap_fdatawait_range(inode->i_mapping,
+-					      folio_pos(folio),
+-					      folio_pos(folio) + folio_size(folio));
++		err = filemap_fdatawrite_range(mapping,
++					       folio_pos(folio),
++					       folio_pos(folio) + folio_size(folio));
+ 		switch (err) {
+ 		case 0:
+ 			ret = VM_FAULT_RETRY;
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index bb2f583eb28bf..90079ca134dd3 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -141,8 +141,6 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	ssize_t ret;
+ 
+-	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
+-
+ 	if (iov_iter_rw(iter) == READ)
+ 		ret = nfs_file_direct_read(iocb, iter, true);
+ 	else
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 4d23bb1d08c0a..332847daa1b41 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1679,6 +1679,8 @@ static __net_init int nfsd_net_init(struct net *net)
+ 	nn->nfsd_svcstats.program = &nfsd_program;
+ 	nn->nfsd_versions = NULL;
+ 	nn->nfsd4_minorversions = NULL;
++	nn->nfsd_info.mutex = &nfsd_mutex;
++	nn->nfsd_serv = NULL;
+ 	nfsd4_init_leases_net(nn);
+ 	get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ 	seqlock_init(&nn->writeverf_lock);
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index c0d17b92b249f..f23b00cb9f631 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -673,7 +673,6 @@ int nfsd_create_serv(struct net *net)
+ 		return error;
+ 	}
+ 	spin_lock(&nfsd_notifier_lock);
+-	nn->nfsd_info.mutex = &nfsd_mutex;
+ 	nn->nfsd_serv = serv;
+ 	spin_unlock(&nfsd_notifier_lock);
+ 
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index b82185075de7d..469143110f310 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -2368,6 +2368,11 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
+ 	}
+ 
+ 	list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) {
++		ret = ocfs2_assure_trans_credits(handle, credits);
++		if (ret < 0) {
++			mlog_errno(ret);
++			break;
++		}
+ 		ret = ocfs2_mark_extent_written(inode, &et, handle,
+ 						ue->ue_cpos, 1,
+ 						ue->ue_phys,
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 86807086b2dfd..530fba34f6d31 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -445,6 +445,23 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
+ 	return status;
+ }
+ 
++/*
++ * Make sure handle has at least 'nblocks' credits available. If it does not
++ * have that many credits available, we will try to extend the handle to have
++ * enough credits. If that fails, we will restart the transaction to have enough
++ * credits. Similar notes regarding data consistency and locking implications
++ * as for ocfs2_extend_trans() apply here.
++ */
++int ocfs2_assure_trans_credits(handle_t *handle, int nblocks)
++{
++	int old_nblks = jbd2_handle_buffer_credits(handle);
++
++	trace_ocfs2_assure_trans_credits(old_nblks);
++	if (old_nblks >= nblocks)
++		return 0;
++	return ocfs2_extend_trans(handle, nblocks - old_nblks);
++}
++
+ /*
+  * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
+  * If that fails, restart the transaction & regain write access for the
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index 41c9fe7e62f9b..e3c3a35dc5e0e 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -243,6 +243,8 @@ handle_t		    *ocfs2_start_trans(struct ocfs2_super *osb,
+ int			     ocfs2_commit_trans(struct ocfs2_super *osb,
+ 						handle_t *handle);
+ int			     ocfs2_extend_trans(handle_t *handle, int nblocks);
++int			     ocfs2_assure_trans_credits(handle_t *handle,
++						int nblocks);
+ int			     ocfs2_allocate_extend_trans(handle_t *handle,
+ 						int thresh);
+ 
+diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
+index 9898c11bdfa1b..d194d202413da 100644
+--- a/fs/ocfs2/ocfs2_trace.h
++++ b/fs/ocfs2/ocfs2_trace.h
+@@ -2577,6 +2577,8 @@ DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end);
+ 
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans);
+ 
++DEFINE_OCFS2_INT_EVENT(ocfs2_assure_trans_credits);
++
+ DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart);
+ 
+ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans);
+diff --git a/fs/open.c b/fs/open.c
+index ee8460c83c779..0131ae30351ec 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -202,13 +202,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+ 	return error;
+ }
+ 
+-SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
++SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length)
+ {
+ 	return do_sys_ftruncate(fd, length, 1);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+-COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
++COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length)
+ {
+ 	return do_sys_ftruncate(fd, length, 1);
+ }
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 233f61ec8afca..56cebaff0c910 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -608,7 +608,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ 				     struct compat_statfs64 __user *buf);
+ asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
++asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
+ /* No generic prototype for truncate64, ftruncate64, fallocate */
+ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ 				  int flags, umode_t mode);
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 219ee7a768744..cf12bfa2a78cc 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -887,20 +887,22 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
+ 
+ #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+ 
+-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
++static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
+ {
+ #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ 	if (!fp->jited) {
+ 		set_vm_flush_reset_perms(fp);
+-		set_memory_ro((unsigned long)fp, fp->pages);
++		return set_memory_ro((unsigned long)fp, fp->pages);
+ 	}
+ #endif
++	return 0;
+ }
+ 
+-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
++static inline int __must_check
++bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+ {
+ 	set_vm_flush_reset_perms(hdr);
+-	set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
++	return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
+ }
+ 
+ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index ac5be38d8aaf0..4fd9735bb75e5 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -5166,7 +5166,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+ 	bool check_common_len = false;
+ 	u16 control;
+ 
+-	if (len < fixed)
++	if (!data || len < fixed)
+ 		return false;
+ 
+ 	control = le16_to_cpu(mle->control);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 324d792e7c786..186a6cbbbfbc6 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1242,6 +1242,7 @@ extern int ata_slave_link_init(struct ata_port *ap);
+ extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ 					   struct ata_port_info *, struct Scsi_Host *);
+ extern void ata_port_probe(struct ata_port *ap);
++extern void ata_port_free(struct ata_port *ap);
+ extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+ extern void ata_sas_tport_delete(struct ata_port *ap);
+ extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index c11b7cde81efa..a4f6f1fecc6f3 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -651,13 +651,12 @@ enum zone_watermarks {
+ };
+ 
+ /*
+- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+- * it should not contribute to serious fragmentation causing THP allocation
+- * failures.
++ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
++ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
++ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
+  */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define NR_PCP_THP 1
++#define NR_PCP_THP 2
+ #else
+ #define NR_PCP_THP 0
+ #endif
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index 4255732022953..c693ac344ec05 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -85,10 +85,11 @@ enum {
+ enum {
+ 	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
+ 	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
++	NVMF_RDMA_QPTYPE_INVALID	= 0xff,
+ };
+ 
+-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
+- * RDMA_QPTYPE field
++/* RDMA Provider Type codes for Discovery Log Page entry TSAS
++ * RDMA_PRTYPE field
+  */
+ enum {
+ 	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
+@@ -110,6 +111,7 @@ enum {
+ 	NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
+ 	NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ 	NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
++	NVMF_TCP_SECTYPE_INVALID = 0xff,
+ };
+ 
+ #define NVME_AQ_DEPTH		32
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 0a0f6e21d40ec..4697b8f450445 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -789,8 +789,7 @@ enum UART_TX_FLAGS {
+ 	if (pending < WAKEUP_CHARS) {					      \
+ 		uart_write_wakeup(__port);				      \
+ 									      \
+-		if (!((flags) & UART_TX_NOSTOP) && pending == 0 &&	      \
+-		    __port->ops->tx_empty(__port))			      \
++		if (!((flags) & UART_TX_NOSTOP) && pending == 0)	      \
+ 			__port->ops->stop_tx(__port);			      \
+ 	}								      \
+ 									      \
+@@ -829,6 +828,24 @@ enum UART_TX_FLAGS {
+ 			__count--);					      \
+ })
+ 
++/**
+ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
++ * @port: uart port
++ * @ch: variable to store a character to be written to the HW
++ * @flags: %UART_TX_NOSTOP or similar
++ * @count: a limit of characters to send
++ * @tx_ready: can HW accept more data function
++ * @put_char: function to write a character
++ * @tx_done: function to call after the loop is done
++ *
++ * See uart_port_tx_limited() for more details.
++ */
++#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
++	unsigned int __count = (count);							   \
++	__uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count,		   \
++			__count--);							   \
++})
++
+ /**
+  * uart_port_tx -- transmit helper for uart_port
+  * @port: uart port
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index e619ac10cd234..ac3db59a74e23 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -418,7 +418,7 @@ asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ 			      u64 __user *mnt_ids, size_t nr_mnt_ids,
+ 			      unsigned int flags);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
++asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
+ #if BITS_PER_LONG == 32
+ asmlinkage long sys_truncate64(const char __user *path, loff_t length);
+ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
+@@ -858,9 +858,15 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+ 				const struct rlimit64 __user *new_rlim,
+ 				struct rlimit64 __user *old_rlim);
+ asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
++#if defined(CONFIG_ARCH_SPLIT_ARG64)
++asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
++                                unsigned int mask_1, unsigned int mask_2,
++				int dfd, const char  __user * pathname);
++#else
+ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ 				  u64 mask, int fd,
+ 				  const char  __user *pathname);
++#endif
+ asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+ 				      struct file_handle __user *handle,
+ 				      int __user *mnt_id, int flag);
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 158784dd189ab..72031fa804147 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -92,7 +92,7 @@ enum wq_misc_consts {
+ 	WORK_BUSY_RUNNING	= 1 << 1,
+ 
+ 	/* maximum string length for set_worker_desc() */
+-	WORKER_DESC_LEN		= 24,
++	WORKER_DESC_LEN		= 32,
+ };
+ 
+ /* Convenience constants - of type 'unsigned long', not 'enum'! */
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index ccf171f7eb60d..146ece8563cae 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -266,7 +266,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ 				      struct request_sock *req,
+ 				      struct sock *child);
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ 				   unsigned long timeout);
+ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ 					 struct request_sock *req,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 3f1ed467f951f..2164fa350fa69 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -619,6 +619,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
+ 	return (void *)set->data;
+ }
+ 
++static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
++{
++	return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
++}
++
+ static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+ {
+ 	return refcount_read(&s->refs) != 1;
+diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
+index 1f4258308b967..061fd49603035 100644
+--- a/include/trace/events/qdisc.h
++++ b/include/trace/events/qdisc.h
+@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
+ 	TP_ARGS(q),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	dev,		qdisc_dev(q)->name	)
++		__string(	dev,		qdisc_dev(q) ? qdisc_dev(q)->name : "(null)"	)
+ 		__string(	kind,		q->ops->id		)
+ 		__field(	u32,		parent			)
+ 		__field(	u32,		handle			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(dev, qdisc_dev(q)->name);
++		__assign_str(dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)");
+ 		__assign_str(kind, q->ops->id);
+ 		__entry->parent = q->parent;
+ 		__entry->handle = q->handle;
+diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
+index 75f00965ab158..060f68e4195bd 100644
+--- a/include/uapi/asm-generic/unistd.h
++++ b/include/uapi/asm-generic/unistd.h
+@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+ #define __NR_ppoll_time64 414
+ __SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+ #define __NR_io_pgetevents_time64 416
+-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
++__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
+ #define __NR_recvmmsg_time64 417
+ __SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+ #define __NR_mq_timedsend_time64 418
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 8a216b1d6dc1c..d3926a27714e2 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1374,8 +1374,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
+ 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+ 		struct io_sq_data *sqd = ctx->sq_data;
+ 
+-		if (wq_has_sleeper(&sqd->wait))
+-			wake_up(&sqd->wait);
++		if (sqd->thread)
++			__set_notify_signal(sqd->thread);
+ 		return;
+ 	}
+ 
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index 343c3456c8ddf..a59ae9c582253 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -212,6 +212,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
+ struct vma_list {
+ 	struct vm_area_struct *vma;
+ 	struct list_head head;
++	atomic_t mmap_count;
+ };
+ 
+ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+@@ -221,20 +222,30 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+ 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
+ 	if (!vml)
+ 		return -ENOMEM;
++	atomic_set(&vml->mmap_count, 1);
+ 	vma->vm_private_data = vml;
+ 	vml->vma = vma;
+ 	list_add(&vml->head, &arena->vma_list);
+ 	return 0;
+ }
+ 
++static void arena_vm_open(struct vm_area_struct *vma)
++{
++	struct vma_list *vml = vma->vm_private_data;
++
++	atomic_inc(&vml->mmap_count);
++}
++
+ static void arena_vm_close(struct vm_area_struct *vma)
+ {
+ 	struct bpf_map *map = vma->vm_file->private_data;
+ 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+-	struct vma_list *vml;
++	struct vma_list *vml = vma->vm_private_data;
+ 
++	if (!atomic_dec_and_test(&vml->mmap_count))
++		return;
+ 	guard(mutex)(&arena->lock);
+-	vml = vma->vm_private_data;
++	/* update linked list under lock */
+ 	list_del(&vml->head);
+ 	vma->vm_private_data = NULL;
+ 	kfree(vml);
+@@ -287,6 +298,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
+ }
+ 
+ static const struct vm_operations_struct arena_vm_ops = {
++	.open		= arena_vm_open,
+ 	.close		= arena_vm_close,
+ 	.fault          = arena_vm_fault,
+ };
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 1ea5ce5bb5993..80bcfde927206 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -2204,6 +2204,7 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
+ 	u64 stack[stack_size / sizeof(u64)]; \
+ 	u64 regs[MAX_BPF_EXT_REG] = {}; \
+ \
++	kmsan_unpoison_memory(stack, sizeof(stack)); \
+ 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ 	ARG1 = (u64) (unsigned long) ctx; \
+ 	return ___bpf_prog_run(regs, insn); \
+@@ -2217,6 +2218,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+ 	u64 stack[stack_size / sizeof(u64)]; \
+ 	u64 regs[MAX_BPF_EXT_REG]; \
+ \
++	kmsan_unpoison_memory(stack, sizeof(stack)); \
+ 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+ 	BPF_R1 = r1; \
+ 	BPF_R2 = r2; \
+@@ -2403,7 +2405,9 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ 	}
+ 
+ finalize:
+-	bpf_prog_lock_ro(fp);
++	*err = bpf_prog_lock_ro(fp);
++	if (*err)
++		return fp;
+ 
+ 	/* The tail call compatibility check can only be done at
+ 	 * this late stage as we need to determine, if we deal
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index 0ee653a936ea0..e20b90c361316 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -51,7 +51,8 @@ struct bpf_ringbuf {
+ 	 * This prevents a user-space application from modifying the
+ 	 * position and ruining in-kernel tracking. The permissions of the
+ 	 * pages depend on who is producing samples: user-space or the
+-	 * kernel.
++	 * kernel. Note that the pending counter is placed in the same
++	 * page as the producer, so that it shares the same cache line.
+ 	 *
+ 	 * Kernel-producer
+ 	 * ---------------
+@@ -70,6 +71,7 @@ struct bpf_ringbuf {
+ 	 */
+ 	unsigned long consumer_pos __aligned(PAGE_SIZE);
+ 	unsigned long producer_pos __aligned(PAGE_SIZE);
++	unsigned long pending_pos;
+ 	char data[] __aligned(PAGE_SIZE);
+ };
+ 
+@@ -179,6 +181,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
+ 	rb->mask = data_sz - 1;
+ 	rb->consumer_pos = 0;
+ 	rb->producer_pos = 0;
++	rb->pending_pos = 0;
+ 
+ 	return rb;
+ }
+@@ -404,9 +407,9 @@ bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
+ 
+ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ {
+-	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
+-	u32 len, pg_off;
++	unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
+ 	struct bpf_ringbuf_hdr *hdr;
++	u32 len, pg_off, tmp_size, hdr_len;
+ 
+ 	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
+ 		return NULL;
+@@ -424,13 +427,29 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ 		spin_lock_irqsave(&rb->spinlock, flags);
+ 	}
+ 
++	pend_pos = rb->pending_pos;
+ 	prod_pos = rb->producer_pos;
+ 	new_prod_pos = prod_pos + len;
+ 
+-	/* check for out of ringbuf space by ensuring producer position
+-	 * doesn't advance more than (ringbuf_size - 1) ahead
++	while (pend_pos < prod_pos) {
++		hdr = (void *)rb->data + (pend_pos & rb->mask);
++		hdr_len = READ_ONCE(hdr->len);
++		if (hdr_len & BPF_RINGBUF_BUSY_BIT)
++			break;
++		tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
++		tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
++		pend_pos += tmp_size;
++	}
++	rb->pending_pos = pend_pos;
++
++	/* check for out of ringbuf space:
++	 * - by ensuring producer position doesn't advance more than
++	 *   (ringbuf_size - 1) ahead
++	 * - by ensuring the span from the oldest not yet committed record
++	 *   to the newest record is not more than (ringbuf_size - 1)
+ 	 */
+-	if (new_prod_pos - cons_pos > rb->mask) {
++	if (new_prod_pos - cons_pos > rb->mask ||
++	    new_prod_pos - pend_pos > rb->mask) {
+ 		spin_unlock_irqrestore(&rb->spinlock, flags);
+ 		return NULL;
+ 	}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 0ef18ae40bc5a..d5fca9deac5a1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6223,6 +6223,7 @@ static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
+ 	}
+ 	reg->u32_min_value = 0;
+ 	reg->u32_max_value = U32_MAX;
++	reg->var_off = tnum_subreg(tnum_unknown);
+ }
+ 
+ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
+@@ -6267,6 +6268,7 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
+ 		reg->s32_max_value = s32_max;
+ 		reg->u32_min_value = (u32)s32_min;
+ 		reg->u32_max_value = (u32)s32_max;
++		reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
+ 		return;
+ 	}
+ 
+@@ -12544,6 +12546,16 @@ static bool signed_add32_overflows(s32 a, s32 b)
+ 	return res < a;
+ }
+ 
++static bool signed_add16_overflows(s16 a, s16 b)
++{
++	/* Do the add in u16, where overflow is well-defined */
++	s16 res = (s16)((u16)a + (u16)b);
++
++	if (b < 0)
++		return res > a;
++	return res < a;
++}
++
+ static bool signed_sub_overflows(s64 a, s64 b)
+ {
+ 	/* Do the sub in u64, where overflow is well-defined */
+@@ -17296,11 +17308,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ 				goto skip_inf_loop_check;
+ 			}
+ 			if (is_may_goto_insn_at(env, insn_idx)) {
+-				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
++				if (sl->state.may_goto_depth != cur->may_goto_depth &&
++				    states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
+ 					update_loop_entry(cur, &sl->state);
+ 					goto hit;
+ 				}
+-				goto skip_inf_loop_check;
+ 			}
+ 			if (calls_callback(env, insn_idx)) {
+ 				if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
+@@ -18562,6 +18574,39 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
+ 	return new_prog;
+ }
+ 
++/*
++ * For all jmp insns in a given 'prog' that point to 'tgt_idx' insn adjust the
++ * jump offset by 'delta'.
++ */
++static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
++{
++	struct bpf_insn *insn = prog->insnsi;
++	u32 insn_cnt = prog->len, i;
++
++	for (i = 0; i < insn_cnt; i++, insn++) {
++		u8 code = insn->code;
++
++		if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
++		    BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
++			continue;
++
++		if (insn->code == (BPF_JMP32 | BPF_JA)) {
++			if (i + 1 + insn->imm != tgt_idx)
++				continue;
++			if (signed_add32_overflows(insn->imm, delta))
++				return -ERANGE;
++			insn->imm += delta;
++		} else {
++			if (i + 1 + insn->off != tgt_idx)
++				continue;
++			if (signed_add16_overflows(insn->off, delta))
++				return -ERANGE;
++			insn->off += delta;
++		}
++	}
++	return 0;
++}
++
+ static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
+ 					      u32 off, u32 cnt)
+ {
+@@ -19309,10 +19354,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
+ 	 * bpf_prog_load will add the kallsyms for the main program.
+ 	 */
+ 	for (i = 1; i < env->subprog_cnt; i++) {
+-		bpf_prog_lock_ro(func[i]);
+-		bpf_prog_kallsyms_add(func[i]);
++		err = bpf_prog_lock_ro(func[i]);
++		if (err)
++			goto out_free;
+ 	}
+ 
++	for (i = 1; i < env->subprog_cnt; i++)
++		bpf_prog_kallsyms_add(func[i]);
++
+ 	/* Last step: make now unused interpreter insns from main
+ 	 * prog consistent for later dump requests, so they can
+ 	 * later look the same as if they were interpreted only.
+@@ -19808,7 +19857,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
+ 
+ 			stack_depth_extra = 8;
+ 			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off);
+-			insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
++			if (insn->off >= 0)
++				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2);
++			else
++				insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1);
+ 			insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1);
+ 			insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off);
+ 			cnt = 4;
+@@ -20266,6 +20318,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
+ 		if (!new_prog)
+ 			return -ENOMEM;
+ 		env->prog = prog = new_prog;
++		/*
++		 * If may_goto is a first insn of a prog there could be a jmp
++		 * insn that points to it, hence adjust all such jmps to point
++		 * to insn after BPF_ST that inits may_goto count.
++		 * Adjustment will succeed because bpf_patch_insn_data() didn't fail.
++		 */
++		WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1));
+ 	}
+ 
+ 	/* Since poke tab is now finalized, publish aux to tracker. */
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 563877d6c28b6..3d2bf1d50a0c4 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1859,6 +1859,9 @@ static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return fals
+ 
+ void __init bringup_nonboot_cpus(unsigned int max_cpus)
+ {
++	if (!max_cpus)
++		return;
++
+ 	/* Try parallel bringup optimization if enabled */
+ 	if (cpuhp_bringup_cpus_parallel(max_cpus))
+ 		return;
+@@ -2446,7 +2449,7 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+  * The caller needs to hold cpus read locked while calling this function.
+  * Return:
+  *   On success:
+- *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
++ *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
+  *      0 for all other states
+  *   On failure: proper (negative) error code
+  */
+@@ -2469,7 +2472,7 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ 	ret = cpuhp_store_callbacks(state, name, startup, teardown,
+ 				    multi_instance);
+ 
+-	dynstate = state == CPUHP_AP_ONLINE_DYN;
++	dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
+ 	if (ret > 0 && dynstate) {
+ 		state = ret;
+ 		ret = 0;
+@@ -2500,8 +2503,8 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+ out:
+ 	mutex_unlock(&cpuhp_state_mutex);
+ 	/*
+-	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+-	 * dynamically allocated state in case of success.
++	 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
++	 * return the dynamically allocated state in case of success.
+ 	 */
+ 	if (!ret && dynstate)
+ 		return state;
+diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
+index faad00cce269c..858196ff9b894 100644
+--- a/kernel/sys_ni.c
++++ b/kernel/sys_ni.c
+@@ -46,8 +46,8 @@ COND_SYSCALL(io_getevents_time32);
+ COND_SYSCALL(io_getevents);
+ COND_SYSCALL(io_pgetevents_time32);
+ COND_SYSCALL(io_pgetevents);
+-COND_SYSCALL_COMPAT(io_pgetevents_time32);
+ COND_SYSCALL_COMPAT(io_pgetevents);
++COND_SYSCALL_COMPAT(io_pgetevents_time64);
+ COND_SYSCALL(io_uring_setup);
+ COND_SYSCALL(io_uring_enter);
+ COND_SYSCALL(io_uring_register);
+diff --git a/mm/kasan/common.c b/mm/kasan/common.c
+index e7c9a4dc89f82..85e7c6b4575c2 100644
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -532,7 +532,7 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
+ 		return;
+ 
+ 	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+-	unpoison_slab_object(slab->slab_cache, ptr, size, flags);
++	unpoison_slab_object(slab->slab_cache, ptr, flags, false);
+ 
+ 	/* Poison the redzone and save alloc info for kmalloc() allocations. */
+ 	if (is_kmalloc_cache(slab->slab_cache))
+diff --git a/mm/memory.c b/mm/memory.c
+index d2155ced45f8f..4bd6d68f1b174 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4583,8 +4583,9 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
+ 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
+ 		return ret;
+ 
+-	if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
++	if (folio_order(folio) != HPAGE_PMD_ORDER)
+ 		return ret;
++	page = &folio->page;
+ 
+ 	/*
+ 	 * Just backoff if any subpage of a THP is corrupted otherwise
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 14d39f34d3367..00fafda76bde9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -521,10 +521,15 @@ static void bad_page(struct page *page, const char *reason)
+ 
+ static inline unsigned int order_to_pindex(int migratetype, int order)
+ {
++	bool __maybe_unused movable;
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ 		VM_BUG_ON(order != pageblock_order);
+-		return NR_LOWORDER_PCP_LISTS;
++
++		movable = migratetype == MIGRATE_MOVABLE;
++
++		return NR_LOWORDER_PCP_LISTS + movable;
+ 	}
+ #else
+ 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+@@ -538,7 +543,7 @@ static inline int pindex_to_order(unsigned int pindex)
+ 	int order = pindex / MIGRATE_PCPTYPES;
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-	if (pindex == NR_LOWORDER_PCP_LISTS)
++	if (pindex >= NR_LOWORDER_PCP_LISTS)
+ 		order = pageblock_order;
+ #else
+ 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 109272b8ee2e9..2cd015e976102 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2474,6 +2474,7 @@ struct vmap_block {
+ 	struct list_head free_list;
+ 	struct rcu_head rcu_head;
+ 	struct list_head purge;
++	unsigned int cpu;
+ };
+ 
+ /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
+@@ -2601,8 +2602,15 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ 		free_vmap_area(va);
+ 		return ERR_PTR(err);
+ 	}
+-
+-	vbq = raw_cpu_ptr(&vmap_block_queue);
++	/*
++	 * list_add_tail_rcu could happen on a core other
++	 * than vb->cpu due to task migration, which
++	 * is safe as list_add_tail_rcu will ensure the list's
++	 * integrity together with list_for_each_rcu from read
++	 * side.
++	 */
++	vb->cpu = raw_smp_processor_id();
++	vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
+ 	spin_lock(&vbq->lock);
+ 	list_add_tail_rcu(&vb->free_list, &vbq->free);
+ 	spin_unlock(&vbq->lock);
+@@ -2630,9 +2638,10 @@ static void free_vmap_block(struct vmap_block *vb)
+ }
+ 
+ static bool purge_fragmented_block(struct vmap_block *vb,
+-		struct vmap_block_queue *vbq, struct list_head *purge_list,
+-		bool force_purge)
++		struct list_head *purge_list, bool force_purge)
+ {
++	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
++
+ 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
+ 	    vb->dirty == VMAP_BBMAP_BITS)
+ 		return false;
+@@ -2680,7 +2689,7 @@ static void purge_fragmented_blocks(int cpu)
+ 			continue;
+ 
+ 		spin_lock(&vb->lock);
+-		purge_fragmented_block(vb, vbq, &purge, true);
++		purge_fragmented_block(vb, &purge, true);
+ 		spin_unlock(&vb->lock);
+ 	}
+ 	rcu_read_unlock();
+@@ -2817,7 +2826,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
+ 			 * not purgeable, check whether there is dirty
+ 			 * space to be flushed.
+ 			 */
+-			if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
++			if (!purge_fragmented_block(vb, &purge_list, false) &&
+ 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
+ 				unsigned long va_start = vb->va->va_start;
+ 				unsigned long s, e;
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index ac74f6ead62d5..8f6dd2c6ee41d 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/etherdevice.h>
+ #include <linux/gfp.h>
++#include <linux/if_vlan.h>
+ #include <linux/jiffies.h>
+ #include <linux/kref.h>
+ #include <linux/list.h>
+@@ -131,6 +132,29 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ 	return vlan;
+ }
+ 
++/**
++ * batadv_vlan_id_valid() - check if vlan id is in valid batman-adv encoding
++ * @vid: the VLAN identifier
++ *
++ * Return: true when no vlan is set or the VLAN is in the correct range,
++ *  false otherwise
++ */
++static bool batadv_vlan_id_valid(unsigned short vid)
++{
++	unsigned short non_vlan = vid & ~(BATADV_VLAN_HAS_TAG | VLAN_VID_MASK);
++
++	if (vid == 0)
++		return true;
++
++	if (!(vid & BATADV_VLAN_HAS_TAG))
++		return false;
++
++	if (non_vlan)
++		return false;
++
++	return true;
++}
++
+ /**
+  * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
+  *  object
+@@ -149,6 +173,9 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ {
+ 	struct batadv_orig_node_vlan *vlan;
+ 
++	if (!batadv_vlan_id_valid(vid))
++		return NULL;
++
+ 	spin_lock_bh(&orig_node->vlan_list_lock);
+ 
+ 	/* first look if an object for this vid already exists */
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index a6fb89fa62785..7e8a20f2fc42b 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -30,10 +30,6 @@ MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
+ /* CAN_HDR: #bytes before can_frame data part */
+ #define J1939_CAN_HDR (offsetof(struct can_frame, data))
+ 
+-/* CAN_FTR: #bytes beyond data part */
+-#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
+-		 sizeof(((struct can_frame *)0)->data))
+-
+ /* lowest layer */
+ static void j1939_can_recv(struct sk_buff *iskb, void *data)
+ {
+@@ -342,7 +338,7 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
+ 	memset(cf, 0, J1939_CAN_HDR);
+ 
+ 	/* make it a full can frame again */
+-	skb_put(skb, J1939_CAN_FTR + (8 - dlc));
++	skb_put_zero(skb, 8 - dlc);
+ 
+ 	canid = CAN_EFF_FLAG |
+ 		(skcb->priority << 26) |
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index fe3df23a25957..4be73de5033cb 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1593,8 +1593,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ 	struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
+ 	struct j1939_session *session;
+ 	const u8 *dat;
++	int len, ret;
+ 	pgn_t pgn;
+-	int len;
+ 
+ 	netdev_dbg(priv->ndev, "%s\n", __func__);
+ 
+@@ -1653,7 +1653,22 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
+ 	session->tskey = priv->rx_tskey++;
+ 	j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
+ 
+-	WARN_ON_ONCE(j1939_session_activate(session));
++	ret = j1939_session_activate(session);
++	if (ret) {
++		/* Entering this scope indicates an issue with the J1939 bus.
++		 * Possible scenarios include:
++		 * - A time lapse occurred, and a new session was initiated
++		 *   due to another packet being sent correctly. This could
++		 *   have been caused by too long interrupt, debugger, or being
++		 *   out-scheduled by another task.
++		 * - The bus is receiving numerous erroneous packets, either
++		 *   from a malfunctioning device or during a test scenario.
++		 */
++		netdev_alert(priv->ndev, "%s: 0x%p: concurrent session with same addr (%02x %02x) is already active.\n",
++			     __func__, session, skcb.addr.sa, skcb.addr.da);
++		j1939_session_put(session);
++		return NULL;
++	}
+ 
+ 	return session;
+ }
+@@ -1681,6 +1696,8 @@ static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
+ 
+ 		j1939_session_timers_cancel(session);
+ 		j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
++		if (session->transmission)
++			j1939_session_deactivate_activate_next(session);
+ 
+ 		return -EBUSY;
+ 	}
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ce255e0a2fbd9..15d850ea7d4ad 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -87,6 +87,9 @@
+ 
+ #include "dev.h"
+ 
++/* Keep the struct bpf_fib_lookup small so that it fits into a cacheline */
++static_assert(sizeof(struct bpf_fib_lookup) == 64, "struct bpf_fib_lookup size check");
++
+ static const struct bpf_func_proto *
+ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+ 
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index 41693154e426f..022c12059cf2f 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -295,10 +295,8 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
+ 		mutex_lock(&mem_id_lock);
+ 		ret = __mem_id_init_hash_table();
+ 		mutex_unlock(&mem_id_lock);
+-		if (ret < 0) {
+-			WARN_ON(1);
++		if (ret < 0)
+ 			return ERR_PTR(ret);
+-		}
+ 	}
+ 
+ 	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 44b033fe1ef68..f94d30b171992 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -655,8 +655,11 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_v4_send_response(sk, req))
+ 		goto drop_and_free;
+ 
+-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+-	reqsk_put(req);
++	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++		reqsk_free(req);
++	else
++		reqsk_put(req);
++
+ 	return 0;
+ 
+ drop_and_free:
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index ded07e09f8135..ddbd490b3531b 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -398,8 +398,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (dccp_v6_send_response(sk, req))
+ 		goto drop_and_free;
+ 
+-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+-	reqsk_put(req);
++	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
++		reqsk_free(req);
++	else
++		reqsk_put(req);
++
+ 	return 0;
+ 
+ drop_and_free:
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 3b38610958ee4..39e9070fe3cdf 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1121,25 +1121,34 @@ static void reqsk_timer_handler(struct timer_list *t)
+ 	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
+ }
+ 
+-static void reqsk_queue_hash_req(struct request_sock *req,
++static bool reqsk_queue_hash_req(struct request_sock *req,
+ 				 unsigned long timeout)
+ {
++	bool found_dup_sk = false;
++
++	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
++		return false;
++
++	/* The timer needs to be set up after a successful insertion. */
+ 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+ 	mod_timer(&req->rsk_timer, jiffies + timeout);
+ 
+-	inet_ehash_insert(req_to_sk(req), NULL, NULL);
+ 	/* before letting lookups find us, make sure all req fields
+ 	 * are committed to memory and refcnt initialized.
+ 	 */
+ 	smp_wmb();
+ 	refcount_set(&req->rsk_refcnt, 2 + 1);
++	return true;
+ }
+ 
+-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
++bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ 				   unsigned long timeout)
+ {
+-	reqsk_queue_hash_req(req, timeout);
++	if (!reqsk_queue_hash_req(req, timeout))
++		return false;
++
+ 	inet_csk_reqsk_queue_added(sk);
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 1054a440332d3..0953c915bb4de 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2779,13 +2779,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
+ 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+ }
+ 
++/* Sometimes we deduce that packets have been dropped due to reasons other than
++ * congestion, like path MTU reductions or failed client TFO attempts. In these
++ * cases we call this function to retransmit as many packets as cwnd allows,
++ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
++ * non-zero value (and may do so in a later calling context due to TSQ), we
++ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
++ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
++ * are using the correct retrans_stamp and don't declare ETIMEDOUT
++ * prematurely).
++ */
++static void tcp_non_congestion_loss_retransmit(struct sock *sk)
++{
++	const struct inet_connection_sock *icsk = inet_csk(sk);
++	struct tcp_sock *tp = tcp_sk(sk);
++
++	if (icsk->icsk_ca_state != TCP_CA_Loss) {
++		tp->high_seq = tp->snd_nxt;
++		tp->snd_ssthresh = tcp_current_ssthresh(sk);
++		tp->prior_ssthresh = 0;
++		tp->undo_marker = 0;
++		tcp_set_ca_state(sk, TCP_CA_Loss);
++	}
++	tcp_xmit_retransmit_queue(sk);
++}
++
+ /* Do a simple retransmit without using the backoff mechanisms in
+  * tcp_timer. This is used for path mtu discovery.
+  * The socket is already locked here.
+  */
+ void tcp_simple_retransmit(struct sock *sk)
+ {
+-	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *skb;
+ 	int mss;
+@@ -2825,14 +2849,7 @@ void tcp_simple_retransmit(struct sock *sk)
+ 	 * in network, but units changed and effective
+ 	 * cwnd/ssthresh really reduced now.
+ 	 */
+-	if (icsk->icsk_ca_state != TCP_CA_Loss) {
+-		tp->high_seq = tp->snd_nxt;
+-		tp->snd_ssthresh = tcp_current_ssthresh(sk);
+-		tp->prior_ssthresh = 0;
+-		tp->undo_marker = 0;
+-		tcp_set_ca_state(sk, TCP_CA_Loss);
+-	}
+-	tcp_xmit_retransmit_queue(sk);
++	tcp_non_congestion_loss_retransmit(sk);
+ }
+ EXPORT_SYMBOL(tcp_simple_retransmit);
+ 
+@@ -6288,8 +6305,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ 			tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
+ 		skb_rbtree_walk_from(data)
+ 			 tcp_mark_skb_lost(sk, data);
+-		tcp_xmit_retransmit_queue(sk);
+-		tp->retrans_stamp = 0;
++		tcp_non_congestion_loss_retransmit(sk);
+ 		NET_INC_STATS(sock_net(sk),
+ 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+ 		return true;
+@@ -7243,7 +7259,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 		tcp_rsk(req)->tfo_listener = false;
+ 		if (!want_cookie) {
+ 			req->timeout = tcp_timeout_init((struct sock *)req);
+-			inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
++			if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
++								    req->timeout))) {
++				reqsk_free(req);
++				return 0;
++			}
++
+ 		}
+ 		af_ops->send_synack(sk, dst, &fl, req, &foc,
+ 				    !want_cookie ? TCP_SYNACK_NORMAL :
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index a4ab615ca3e3e..5e37a8ceebcb8 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -520,7 +520,7 @@ static void iucv_setmask_mp(void)
+  */
+ static void iucv_setmask_up(void)
+ {
+-	cpumask_t cpumask;
++	static cpumask_t cpumask;
+ 	int cpu;
+ 
+ 	/* Disable all cpu but the first in cpu_irq_cpumask. */
+@@ -628,23 +628,33 @@ static int iucv_cpu_online(unsigned int cpu)
+ 
+ static int iucv_cpu_down_prep(unsigned int cpu)
+ {
+-	cpumask_t cpumask;
++	cpumask_var_t cpumask;
++	int ret = 0;
+ 
+ 	if (!iucv_path_table)
+ 		return 0;
+ 
+-	cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+-	cpumask_clear_cpu(cpu, &cpumask);
+-	if (cpumask_empty(&cpumask))
++	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++		return -ENOMEM;
++
++	cpumask_copy(cpumask, &iucv_buffer_cpumask);
++	cpumask_clear_cpu(cpu, cpumask);
++	if (cpumask_empty(cpumask)) {
+ 		/* Can't offline last IUCV enabled cpu. */
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto __free_cpumask;
++	}
+ 
+ 	iucv_retrieve_cpu(NULL);
+ 	if (!cpumask_empty(&iucv_irq_cpumask))
+-		return 0;
++		goto __free_cpumask;
++
+ 	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
+ 				 iucv_allow_cpu, NULL, 1);
+-	return 0;
++
++__free_cpumask:
++	free_cpumask_var(cpumask);
++	return ret;
+ }
+ 
+ /**
+diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
+index 7cdb59bb4459f..d8ebebc9775d7 100644
+--- a/net/netfilter/nf_hooks_lwtunnel.c
++++ b/net/netfilter/nf_hooks_lwtunnel.c
+@@ -117,4 +117,7 @@ void netfilter_lwtunnel_fini(void)
+ {
+ 	unregister_pernet_subsys(&nf_lwtunnel_net_ops);
+ }
++#else
++int __init netfilter_lwtunnel_init(void) { return 0; }
++void netfilter_lwtunnel_fini(void) {}
+ #endif /* CONFIG_SYSCTL */
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 167074283ea91..faa77b031d1f3 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5740,8 +5740,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ 	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
+-			  set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+-			  set->dlen) < 0)
++			  nft_set_datatype(set), set->dlen) < 0)
+ 		goto nla_put_failure;
+ 
+ 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) &&
+@@ -11069,6 +11068,9 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 
+ 		return 0;
+ 	default:
++		if (type != NFT_DATA_VALUE)
++			return -EINVAL;
++
+ 		if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE)
+ 			return -EINVAL;
+ 		if (len == 0)
+@@ -11077,8 +11079,6 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 		    sizeof_field(struct nft_regs, data))
+ 			return -ERANGE;
+ 
+-		if (data != NULL && type != NFT_DATA_VALUE)
+-			return -EINVAL;
+ 		return 0;
+ 	}
+ }
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index b314ca728a291..f3080fa1b2263 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -132,7 +132,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
+ 			return -EINVAL;
+ 
+ 		err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG],
+-					       &priv->dreg, NULL, set->dtype,
++					       &priv->dreg, NULL,
++					       nft_set_datatype(set),
+ 					       set->dlen);
+ 		if (err < 0)
+ 			return err;
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 2928c142a2ddb..3b980bf2770bb 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -168,8 +168,13 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
+ static void ovs_ct_get_labels(const struct nf_conn *ct,
+ 			      struct ovs_key_ct_labels *labels)
+ {
+-	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
++	struct nf_conn_labels *cl = NULL;
+ 
++	if (ct) {
++		if (ct->master && !nf_ct_is_confirmed(ct))
++			ct = ct->master;
++		cl = nf_ct_labels_find(ct);
++	}
+ 	if (cl)
+ 		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
+ 	else
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 2b4b1276d4e86..d9cda1e53a017 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1557,9 +1557,11 @@ void svc_process(struct svc_rqst *rqstp)
+  */
+ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
+ {
++	struct rpc_timeout timeout = {
++		.to_increment		= 0,
++	};
+ 	struct rpc_task *task;
+ 	int proc_error;
+-	struct rpc_timeout timeout;
+ 
+ 	/* Build the svc_rqst used by the common processing routine */
+ 	rqstp->rq_xid = req->rq_xid;
+@@ -1612,6 +1614,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
+ 		timeout.to_initval = req->rq_xprt->timeout->to_initval;
+ 		timeout.to_retries = req->rq_xprt->timeout->to_retries;
+ 	}
++	timeout.to_maxval = timeout.to_initval;
+ 	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+ 	task = rpc_run_bc_task(req, &timeout);
+ 
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 68a58bc07cf23..24286ce0ef3ee 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2660,10 +2660,24 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+ 
+-	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
+-		skb_unlink(skb, &sk->sk_receive_queue);
+-		consume_skb(skb);
+-		skb = NULL;
++	if (!unix_skb_len(skb)) {
++		struct sk_buff *unlinked_skb = NULL;
++
++		spin_lock(&sk->sk_receive_queue.lock);
++
++		if (copied && (!u->oob_skb || skb == u->oob_skb)) {
++			skb = NULL;
++		} else if (flags & MSG_PEEK) {
++			skb = skb_peek_next(skb, &sk->sk_receive_queue);
++		} else {
++			unlinked_skb = skb;
++			skb = skb_peek_next(skb, &sk->sk_receive_queue);
++			__skb_unlink(unlinked_skb, &sk->sk_receive_queue);
++		}
++
++		spin_unlock(&sk->sk_receive_queue.lock);
++
++		consume_skb(unlinked_skb);
+ 	} else {
+ 		struct sk_buff *unlinked_skb = NULL;
+ 
+@@ -3140,12 +3154,23 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ 	case SIOCATMARK:
+ 		{
++			struct unix_sock *u = unix_sk(sk);
+ 			struct sk_buff *skb;
+ 			int answ = 0;
+ 
++			mutex_lock(&u->iolock);
++
+ 			skb = skb_peek(&sk->sk_receive_queue);
+-			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
+-				answ = 1;
++			if (skb) {
++				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb);
++
++				if (skb == oob_skb ||
++				    (!oob_skb && !unix_skb_len(skb)))
++					answ = 1;
++			}
++
++			mutex_unlock(&u->iolock);
++
+ 			err = put_user(answ, (int __user *)arg);
+ 		}
+ 		break;
+diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
+index 67956f6496a5c..9d920419a62cf 100644
+--- a/scripts/Makefile.dtbinst
++++ b/scripts/Makefile.dtbinst
+@@ -17,7 +17,7 @@ include $(srctree)/scripts/Kbuild.include
+ dst := $(INSTALL_DTBS_PATH)
+ 
+ quiet_cmd_dtb_install = INSTALL $@
+-      cmd_dtb_install = install -D $< $@
++      cmd_dtb_install = install -D -m 0644 $< $@
+ 
+ $(dst)/%: $(obj)/%
+ 	$(call cmd,dtb_install)
+diff --git a/scripts/Makefile.package b/scripts/Makefile.package
+index 38653f3e81088..bf016af8bf8ad 100644
+--- a/scripts/Makefile.package
++++ b/scripts/Makefile.package
+@@ -103,7 +103,7 @@ debian-orig: private version = $(shell dpkg-parsechangelog -S Version | sed 's/-
+ debian-orig: private orig-name = $(source)_$(version).orig.tar$(debian-orig-suffix)
+ debian-orig: mkdebian-opts = --need-source
+ debian-orig: linux.tar$(debian-orig-suffix) debian
+-	$(Q)if [ "$(df  --output=target .. 2>/dev/null)" = "$(df --output=target $< 2>/dev/null)" ]; then \
++	$(Q)if [ "$$(df  --output=target .. 2>/dev/null)" = "$$(df --output=target $< 2>/dev/null)" ]; then \
+ 		ln -f $< ../$(orig-name); \
+ 	else \
+ 		cp $< ../$(orig-name); \
+diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
+index e095eb1e290ec..fffc8af8deb17 100644
+--- a/scripts/package/kernel.spec
++++ b/scripts/package/kernel.spec
+@@ -57,7 +57,8 @@ patch -p1 < %{SOURCE2}
+ %install
+ mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
+ cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
+-%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
++# DEPMOD=true makes depmod a no-op. We do not package depmod-generated files.
++%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install
+ %{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
+ cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
+ cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
+@@ -70,10 +71,7 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
+ %endif
+ 
+ {
+-	for x in System.map config kernel modules.builtin \
+-			modules.builtin.modinfo modules.order vmlinuz; do
+-		echo "/lib/modules/%{KERNELRELEASE}/${x}"
+-	done
++	echo "/lib/modules/%{KERNELRELEASE}"
+ 
+ 	for x in alias alias.bin builtin.alias.bin builtin.bin dep dep.bin \
+ 					devname softdep symbols symbols.bin; do
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 81dbade5b9b3d..518b3090cdb77 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -192,7 +192,11 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ 		     iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
+ 		return iint->evm_status;
+ 
+-	if (is_unsupported_fs(dentry))
++	/*
++	 * On unsupported filesystems without EVM_INIT_X509 enabled, skip
++	 * signature verification.
++	 */
++	if (!(evm_initialized & EVM_INIT_X509) && is_unsupported_fs(dentry))
+ 		return INTEGRITY_UNKNOWN;
+ 
+ 	/* if status is not PASS, try to check again - against -ENOMEM */
+@@ -260,7 +264,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ 				evm_status = INTEGRITY_PASS_IMMUTABLE;
+ 			} else if (!IS_RDONLY(inode) &&
+ 				   !(inode->i_sb->s_readonly_remount) &&
+-				   !IS_IMMUTABLE(inode)) {
++				   !IS_IMMUTABLE(inode) &&
++				   !is_unsupported_fs(dentry)) {
+ 				evm_update_evmxattr(dentry, xattr_name,
+ 						    xattr_value,
+ 						    xattr_value_len);
+@@ -418,9 +423,6 @@ enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ 	if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
+ 		return INTEGRITY_UNKNOWN;
+ 
+-	if (is_unsupported_fs(dentry))
+-		return INTEGRITY_UNKNOWN;
+-
+ 	return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ 				 xattr_value_len);
+ }
+diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
+index d81f776a4c3dd..e90b27a135e6f 100644
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -791,7 +791,8 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 
+ /* set up the MIDI2 RPN/NRPN packet data from the parsed info */
+ static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
+-		     union snd_ump_midi2_msg *data)
++		     union snd_ump_midi2_msg *data,
++		     unsigned char channel)
+ {
+ 	if (cc->rpn_set) {
+ 		data->rpn.status = UMP_MSG_STATUS_RPN;
+@@ -808,6 +809,7 @@ static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
+ 	}
+ 	data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
+ 					     cc->cc_data_lsb);
++	data->rpn.channel = channel;
+ 	cc->cc_data_msb = cc->cc_data_lsb = 0;
+ }
+ 
+@@ -855,7 +857,7 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 		cc->cc_data_lsb = val;
+ 		if (!(cc->rpn_set || cc->nrpn_set))
+ 			return 0; // skip
+-		fill_rpn(cc, data);
++		fill_rpn(cc, data, channel);
+ 		return 1;
+ 	}
+ 
+@@ -957,7 +959,7 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 		cc->cc_data_lsb = lsb;
+ 		if (!(cc->rpn_set || cc->nrpn_set))
+ 			return 0; // skip
+-		fill_rpn(cc, data);
++		fill_rpn(cc, data, channel);
+ 		return 1;
+ 	}
+ 
+@@ -1018,7 +1020,7 @@ static int system_2p_ev_to_ump_midi2(const struct snd_seq_event *event,
+ 				     union snd_ump_midi2_msg *data,
+ 				     unsigned char status)
+ {
+-	return system_1p_ev_to_ump_midi1(event, dest_port,
++	return system_2p_ev_to_ump_midi1(event, dest_port,
+ 					 (union snd_ump_midi1_msg *)data,
+ 					 status);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3e6de1d86022f..3a56434c86bd9 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10187,6 +10187,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c7f, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c80, "HP EliteBook 645 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8c81, "HP EliteBook 665 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index 60cbc881be6e1..ef12f97ddc69e 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -588,20 +588,12 @@ static int acp_i2s_probe(struct snd_soc_dai *dai)
+ {
+ 	struct device *dev = dai->component->dev;
+ 	struct acp_dev_data *adata = dev_get_drvdata(dev);
+-	struct acp_resource *rsrc = adata->rsrc;
+-	unsigned int val;
+ 
+ 	if (!adata->acp_base) {
+ 		dev_err(dev, "I2S base is NULL\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	val = readl(adata->acp_base + rsrc->i2s_pin_cfg_offset);
+-	if (val != rsrc->i2s_mode) {
+-		dev_err(dev, "I2S Mode not supported val %x\n", val);
+-		return -EINVAL;
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
+index ad320b29e87dc..777b5a78d8a9e 100644
+--- a/sound/soc/amd/acp/acp-pci.c
++++ b/sound/soc/amd/acp/acp-pci.c
+@@ -100,6 +100,7 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ 		ret = -EINVAL;
+ 		goto release_regions;
+ 	}
++	chip->flag = flag;
+ 	dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
+ 	if (IS_ERR(dmic_dev)) {
+ 		dev_err(dev, "failed to create DMIC device\n");
+@@ -139,7 +140,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
+ 		}
+ 	}
+ 
+-	chip->flag = flag;
+ 	memset(&pdevinfo, 0, sizeof(pdevinfo));
+ 
+ 	pdevinfo.name = chip->name;
+@@ -199,10 +199,12 @@ static int __maybe_unused snd_acp_resume(struct device *dev)
+ 	ret = acp_init(chip);
+ 	if (ret)
+ 		dev_err(dev, "ACP init failed\n");
+-	child = chip->chip_pdev->dev;
+-	adata = dev_get_drvdata(&child);
+-	if (adata)
+-		acp_enable_interrupts(adata);
++	if (chip->chip_pdev) {
++		child = chip->chip_pdev->dev;
++		adata = dev_get_drvdata(&child);
++		if (adata)
++			acp_enable_interrupts(adata);
++	}
+ 	return ret;
+ }
+ 
+diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
+index 6aed1ee443b44..ba314b2799190 100644
+--- a/sound/soc/atmel/atmel-classd.c
++++ b/sound/soc/atmel/atmel-classd.c
+@@ -473,19 +473,22 @@ static int atmel_classd_asoc_card_init(struct device *dev,
+ 	if (!dai_link)
+ 		return -ENOMEM;
+ 
+-	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
++	comp = devm_kzalloc(dev, 2 * sizeof(*comp), GFP_KERNEL);
+ 	if (!comp)
+ 		return -ENOMEM;
+ 
+-	dai_link->cpus		= comp;
++	dai_link->cpus		= &comp[0];
+ 	dai_link->codecs	= &snd_soc_dummy_dlc;
++	dai_link->platforms	= &comp[1];
+ 
+ 	dai_link->num_cpus	= 1;
+ 	dai_link->num_codecs	= 1;
++	dai_link->num_platforms = 1;
+ 
+ 	dai_link->name			= "CLASSD";
+ 	dai_link->stream_name		= "CLASSD PCM";
+ 	dai_link->cpus->dai_name	= dev_name(dev);
++	dai_link->platforms->name	= dev_name(dev);
+ 
+ 	card->dai_link	= dai_link;
+ 	card->num_links	= 1;
+diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
+index 901b9dbcf5854..d9ab003e166bf 100644
+--- a/sound/soc/codecs/cs42l43-jack.c
++++ b/sound/soc/codecs/cs42l43-jack.c
+@@ -121,7 +121,7 @@ int cs42l43_set_jack(struct snd_soc_component *component,
+ 		priv->buttons[3] = 735;
+ 	}
+ 
+-	ret = cs42l43_find_index(priv, "cirrus,detect-us", 1000, &priv->detect_us,
++	ret = cs42l43_find_index(priv, "cirrus,detect-us", 50000, &priv->detect_us,
+ 				 cs42l43_accdet_us, ARRAY_SIZE(cs42l43_accdet_us));
+ 	if (ret < 0)
+ 		goto error;
+@@ -433,7 +433,7 @@ irqreturn_t cs42l43_button_press(int irq, void *data)
+ 
+ 	// Wait for 2 full cycles of comb filter to ensure good reading
+ 	queue_delayed_work(system_wq, &priv->button_press_work,
+-			   msecs_to_jiffies(10));
++			   msecs_to_jiffies(20));
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
+index bc07f26ba303f..e5ba256b3de22 100644
+--- a/sound/soc/fsl/fsl-asoc-card.c
++++ b/sound/soc/fsl/fsl-asoc-card.c
+@@ -558,6 +558,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
++	priv->pdev = pdev;
++
+ 	cpu_np = of_parse_phandle(np, "audio-cpu", 0);
+ 	/* Give a chance to old DT binding */
+ 	if (!cpu_np)
+@@ -780,7 +782,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Initialize sound card */
+-	priv->pdev = pdev;
+ 	priv->card.dev = &pdev->dev;
+ 	priv->card.owner = THIS_MODULE;
+ 	ret = snd_soc_of_parse_card_name(&priv->card, "model");
+diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+index acaf81fd6c9b5..f848e14b091a1 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+@@ -31,7 +31,7 @@ struct mt8183_da7219_max98357_priv {
+ 
+ static struct snd_soc_jack_pin mt8183_da7219_max98357_jack_pins[] = {
+ 	{
+-		.pin	= "Headphone",
++		.pin	= "Headphones",
+ 		.mask	= SND_JACK_HEADPHONE,
+ 	},
+ 	{
+@@ -626,7 +626,7 @@ static struct snd_soc_codec_conf mt6358_codec_conf[] = {
+ };
+ 
+ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
+-	SOC_DAPM_PIN_SWITCH("Headphone"),
++	SOC_DAPM_PIN_SWITCH("Headphones"),
+ 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Speakers"),
+ 	SOC_DAPM_PIN_SWITCH("Line Out"),
+@@ -634,7 +634,7 @@ static const struct snd_kcontrol_new mt8183_da7219_max98357_snd_controls[] = {
+ 
+ static const
+ struct snd_soc_dapm_widget mt8183_da7219_max98357_dapm_widgets[] = {
+-	SND_SOC_DAPM_HP("Headphone", NULL),
++	SND_SOC_DAPM_HP("Headphones", NULL),
+ 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ 	SND_SOC_DAPM_SPK("Speakers", NULL),
+ 	SND_SOC_DAPM_SPK("Line Out", NULL),
+@@ -680,7 +680,7 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = {
+ };
+ 
+ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
+-	SOC_DAPM_PIN_SWITCH("Headphone"),
++	SOC_DAPM_PIN_SWITCH("Headphones"),
+ 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Left Spk"),
+ 	SOC_DAPM_PIN_SWITCH("Right Spk"),
+@@ -689,7 +689,7 @@ static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
+ 
+ static const
+ struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = {
+-	SND_SOC_DAPM_HP("Headphone", NULL),
++	SND_SOC_DAPM_HP("Headphones", NULL),
+ 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ 	SND_SOC_DAPM_SPK("Left Spk", NULL),
+ 	SND_SOC_DAPM_SPK("Right Spk", NULL),
+diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+index 53fd8a897b9d2..c25a526c90d25 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
++++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+@@ -939,6 +939,7 @@ SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
+ 
+ SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
+ 		     DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
++		     DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ 		     DAILINK_COMP_ARRAY(COMP_EMPTY()));
+ 
+ SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index 68a38f63a2dbf..66b911b49e3f4 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -141,14 +141,17 @@ static void q6apm_lpass_dai_shutdown(struct snd_pcm_substream *substream, struct
+ 	struct q6apm_lpass_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ 	int rc;
+ 
+-	if (!dai_data->is_port_started[dai->id])
+-		return;
+-	rc = q6apm_graph_stop(dai_data->graph[dai->id]);
+-	if (rc < 0)
+-		dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
++	if (dai_data->is_port_started[dai->id]) {
++		rc = q6apm_graph_stop(dai_data->graph[dai->id]);
++		dai_data->is_port_started[dai->id] = false;
++		if (rc < 0)
++			dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
++	}
+ 
+-	q6apm_graph_close(dai_data->graph[dai->id]);
+-	dai_data->is_port_started[dai->id] = false;
++	if (dai_data->graph[dai->id]) {
++		q6apm_graph_close(dai_data->graph[dai->id]);
++		dai_data->graph[dai->id] = NULL;
++	}
+ }
+ 
+ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+@@ -163,8 +166,10 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ 		q6apm_graph_stop(dai_data->graph[dai->id]);
+ 		dai_data->is_port_started[dai->id] = false;
+ 
+-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			q6apm_graph_close(dai_data->graph[dai->id]);
++			dai_data->graph[dai->id] = NULL;
++		}
+ 	}
+ 
+ 	/**
+@@ -183,26 +188,29 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ 
+ 	cfg->direction = substream->stream;
+ 	rc = q6apm_graph_media_format_pcm(dai_data->graph[dai->id], cfg);
+-
+ 	if (rc) {
+ 		dev_err(dai->dev, "Failed to set media format %d\n", rc);
+-		return rc;
++		goto err;
+ 	}
+ 
+ 	rc = q6apm_graph_prepare(dai_data->graph[dai->id]);
+ 	if (rc) {
+ 		dev_err(dai->dev, "Failed to prepare Graph %d\n", rc);
+-		return rc;
++		goto err;
+ 	}
+ 
+ 	rc = q6apm_graph_start(dai_data->graph[dai->id]);
+ 	if (rc < 0) {
+ 		dev_err(dai->dev, "fail to start APM port %x\n", dai->id);
+-		return rc;
++		goto err;
+ 	}
+ 	dai_data->is_port_started[dai->id] = true;
+ 
+ 	return 0;
++err:
++	q6apm_graph_close(dai_data->graph[dai->id]);
++	dai_data->graph[dai->id] = NULL;
++	return rc;
+ }
+ 
+ static int q6apm_lpass_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index 9fa020ef7eab9..ee517d7b5b7bb 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -655,8 +655,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 	int err;
+ 
+ 	if (i2s_tdm->is_master_mode) {
+-		struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+-			i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
++		struct clk *mclk;
++
++		if (i2s_tdm->clk_trcm == TRCM_TX) {
++			mclk = i2s_tdm->mclk_tx;
++		} else if (i2s_tdm->clk_trcm == TRCM_RX) {
++			mclk = i2s_tdm->mclk_rx;
++		} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++			mclk = i2s_tdm->mclk_tx;
++		} else {
++			mclk = i2s_tdm->mclk_rx;
++		}
+ 
+ 		err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+ 		if (err)
+diff --git a/sound/synth/emux/soundfont.c b/sound/synth/emux/soundfont.c
+index 16f00097cb95a..eed47e4830248 100644
+--- a/sound/synth/emux/soundfont.c
++++ b/sound/synth/emux/soundfont.c
+@@ -701,7 +701,6 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 	struct snd_soundfont *sf;
+ 	struct soundfont_sample_info sample_info;
+ 	struct snd_sf_sample *sp;
+-	long off;
+ 
+ 	/* patch must be opened */
+ 	sf = sflist->currsf;
+@@ -711,12 +710,16 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 	if (is_special_type(sf->type))
+ 		return -EINVAL;
+ 
++	if (count < (long)sizeof(sample_info)) {
++		return -EINVAL;
++	}
+ 	if (copy_from_user(&sample_info, data, sizeof(sample_info)))
+ 		return -EFAULT;
++	data += sizeof(sample_info);
++	count -= sizeof(sample_info);
+ 
+-	off = sizeof(sample_info);
+-
+-	if (sample_info.size != (count-off)/2)
++	// SoundFont uses S16LE samples.
++	if (sample_info.size * 2 != count)
+ 		return -EINVAL;
+ 
+ 	/* Check for dup */
+@@ -744,7 +747,7 @@ load_data(struct snd_sf_list *sflist, const void __user *data, long count)
+ 		int  rc;
+ 		rc = sflist->callback.sample_new
+ 			(sflist->callback.private_data, sp, sflist->memhdr,
+-			 data + off, count - off);
++			 data, count);
+ 		if (rc < 0) {
+ 			sf_sample_delete(sflist, sf, sp);
+ 			return rc;
+@@ -957,10 +960,12 @@ load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+ 	}
+ 	if (copy_from_user(&patch, data, sizeof(patch)))
+ 		return -EFAULT;
+-	
+ 	count -= sizeof(patch);
+ 	data += sizeof(patch);
+ 
++	if ((patch.len << (patch.mode & WAVE_16_BITS ? 1 : 0)) != count)
++		return -EINVAL;
++
+ 	sf = newsf(sflist, SNDRV_SFNT_PAT_TYPE_GUS|SNDRV_SFNT_PAT_SHARED, NULL);
+ 	if (sf == NULL)
+ 		return -ENOMEM;
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 98256468e2480..8071a3ef2a2e8 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -7851,7 +7851,7 @@ void cmdline(int argc, char **argv)
+ 	 * Parse some options early, because they may make other options invalid,
+ 	 * like adding the MSR counter with --add and at the same time using --no-msr.
+ 	 */
+-	while ((opt = getopt_long_only(argc, argv, "MP", long_options, &option_index)) != -1) {
++	while ((opt = getopt_long_only(argc, argv, "MPn:", long_options, &option_index)) != -1) {
+ 		switch (opt) {
+ 		case 'M':
+ 			no_msr = 1;
+diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
+index 3482248aa3442..90d5afd52dd06 100644
+--- a/tools/testing/cxl/test/cxl.c
++++ b/tools/testing/cxl/test/cxl.c
+@@ -630,11 +630,15 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
+ 					  struct cxl_endpoint_dvsec_info *info)
+ {
+ 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
++	struct device *dev = &port->dev;
+ 
+ 	if (!cxlhdm)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	cxlhdm->port = port;
++	cxlhdm->interleave_mask = ~0U;
++	cxlhdm->iw_cap_mask = ~0UL;
++	dev_set_drvdata(dev, cxlhdm);
+ 	return cxlhdm;
+ }
+ 



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-11 11:47 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-11 11:47 UTC (permalink / raw
  To: gentoo-commits

commit:     3507dfb3bd809fee9977bd10ae9a03a35dff682c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 11 11:47:45 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 11 11:47:45 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3507dfb3

Linux patch 6.9.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-6.9.9.patch | 7414 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7418 insertions(+)

diff --git a/0000_README b/0000_README
index 8008738f..7f674d82 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.9.8.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.8
 
+Patch:  1008_linux-6.9.9.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.9
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1008_linux-6.9.9.patch b/1008_linux-6.9.9.patch
new file mode 100644
index 00000000..aabc3078
--- /dev/null
+++ b/1008_linux-6.9.9.patch
@@ -0,0 +1,7414 @@
+diff --git a/Makefile b/Makefile
+index 060e20dba35e5..cbe3a580ff480 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index 2d92713be2a09..6195937aa6dc5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -289,7 +289,7 @@ vdd_gpu: DCDC_REG2 {
+ 				regulator-name = "vdd_gpu";
+ 				regulator-always-on;
+ 				regulator-boot-on;
+-				regulator-min-microvolt = <900000>;
++				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <1350000>;
+ 				regulator-ramp-delay = <6001>;
+ 
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 7b610864b3645..2d6c886b40f44 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -336,6 +336,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+ 	if (IS_ENABLED(CONFIG_KASAN))
+ 		return;
+ 
++	/*
++	 * Likewise, do not use it in real mode if percpu first chunk is not
++	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled, the
++	 * percpu allocation can come from the vmalloc area.
++	 */
++	if (percpu_first_chunk_is_paged)
++		return;
++
+ 	/* Otherwise, it should be safe to call it */
+ 	nmi_enter();
+ }
+@@ -351,6 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+ 		// no nmi_exit for a pseries hash guest taking a real mode exception
+ 	} else if (IS_ENABLED(CONFIG_KASAN)) {
+ 		// no nmi_exit for KASAN in real mode
++	} else if (percpu_first_chunk_is_paged) {
++		// no nmi_exit if percpu first chunk is not embedded
+ 	} else {
+ 		nmi_exit();
+ 	}
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index ba2e13bb879dc..048e3705af20b 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -37,7 +37,7 @@ extern struct pci_dev *isa_bridge_pcidev;
+  * define properly based on the platform
+  */
+ #ifndef CONFIG_PCI
+-#define _IO_BASE	0
++#define _IO_BASE	POISON_POINTER_DELTA
+ #define _ISA_MEM_BASE	0
+ #define PCI_DRAM_OFFSET 0
+ #elif defined(CONFIG_PPC32)
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index 8e5b7d0b851c6..634970ce13c6b 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -15,6 +15,16 @@
+ #endif /* CONFIG_SMP */
+ #endif /* __powerpc64__ */
+ 
++#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
++#include <linux/jump_label.h>
++DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
++
++#define percpu_first_chunk_is_paged	\
++		(static_key_enabled(&__percpu_first_chunk_is_paged.key))
++#else
++#define percpu_first_chunk_is_paged	false
++#endif /* CONFIG_PPC64 && CONFIG_SMP */
++
+ #include <asm-generic/percpu.h>
+ 
+ #include <asm/paca.h>
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 4690c219bfa4d..63432a33ec49a 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -647,8 +647,9 @@ __after_prom_start:
+  * Note: This process overwrites the OF exception vectors.
+  */
+ 	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
+-	mr.	r4,r26			/* In some cases the loader may  */
+-	beq	9f			/* have already put us at zero */
++	mr	r4,r26			/* Load the virtual source address into r4 */
++	cmpld	r3,r4			/* Check if source == dest */
++	beq	9f			/* If so skip the copy  */
+ 	li	r6,0x100		/* Start offset, the first 0x100 */
+ 					/* bytes were copied earlier.	 */
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 2f19d5e944852..ae36a129789ff 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
+ 
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
++DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+ 
+ void __init setup_per_cpu_areas(void)
+ {
+@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
+ 	if (rc < 0)
+ 		panic("cannot initialize percpu area (err=%d)", rc);
+ 
++	static_key_enable(&__percpu_first_chunk_is_paged.key);
+ 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ 	for_each_possible_cpu(cpu) {
+                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index 762e4d09aacfa..27254624f6389 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -26,6 +26,7 @@
+ #include <asm/paca.h>
+ #include <asm/mmu.h>
+ #include <asm/sections.h>	/* _end */
++#include <asm/setup.h>
+ #include <asm/smp.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+@@ -315,6 +316,16 @@ void default_machine_kexec(struct kimage *image)
+ 	if (!kdump_in_progress())
+ 		kexec_prepare_cpus();
+ 
++#ifdef CONFIG_PPC_PSERIES
++	/*
++	 * This must be done after other CPUs have shut down, otherwise they
++	 * could execute the 'scv' instruction, which is not supported with
++	 * reloc disabled (see configure_exceptions()).
++	 */
++	if (firmware_has_feature(FW_FEATURE_SET_MODE))
++		pseries_disable_reloc_on_exc();
++#endif
++
+ 	printk("kexec: Starting switchover sequence.\n");
+ 
+ 	/* switch to a staticly allocated stack.  Based on irq stack code.
+diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
+index 096d09ed89f67..431be156ca9bb 100644
+--- a/arch/powerpc/platforms/pseries/kexec.c
++++ b/arch/powerpc/platforms/pseries/kexec.c
+@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+ 	} else
+ 		xics_kexec_teardown_cpu(secondary);
+ }
+-
+-void pseries_machine_kexec(struct kimage *image)
+-{
+-	if (firmware_has_feature(FW_FEATURE_SET_MODE))
+-		pseries_disable_reloc_on_exc();
+-
+-	default_machine_kexec(image);
+-}
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index bba4ad192b0fe..3968a6970fa81 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
+ #endif
+ 
+ extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+-void pseries_machine_kexec(struct kimage *image);
+ 
+ extern void pSeries_final_fixup(void);
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 284a6fa04b0c2..b44de0f0822f0 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -1159,7 +1159,6 @@ define_machine(pseries) {
+ 	.machine_check_exception = pSeries_machine_check_exception,
+ 	.machine_check_log_err	= pSeries_machine_check_log_err,
+ #ifdef CONFIG_KEXEC_CORE
+-	.machine_kexec          = pseries_machine_kexec,
+ 	.kexec_cpu_down         = pseries_kexec_cpu_down,
+ #endif
+ #ifdef CONFIG_MEMORY_HOTPLUG
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index d79d6633f3336..bd4813bad317e 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1350,7 +1350,7 @@ static int cpu_cmd(void)
+ 	}
+ 	termch = cpu;
+ 
+-	if (!scanhex(&cpu)) {
++	if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
+ 		/* print cpus waiting or in xmon */
+ 		printf("cpus stopped:");
+ 		last_cpu = first_cpu = NR_CPUS;
+@@ -2772,7 +2772,7 @@ static void dump_pacas(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_paca(num);
+ 	else
+ 		dump_one_paca(xmon_owner);
+@@ -2845,7 +2845,7 @@ static void dump_xives(void)
+ 
+ 	termch = c;	/* Put c back, it wasn't 'a' */
+ 
+-	if (scanhex(&num))
++	if (scanhex(&num) && num < num_possible_cpus())
+ 		dump_one_xive(num);
+ 	else
+ 		dump_one_xive(xmon_owner);
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
+index efd851e1b4832..7c8a71a526a30 100644
+--- a/arch/riscv/include/asm/errata_list.h
++++ b/arch/riscv/include/asm/errata_list.h
+@@ -43,11 +43,21 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault),			\
+ 	    CONFIG_ERRATA_SIFIVE_CIP_453)
+ #else /* !__ASSEMBLY__ */
+ 
+-#define ALT_FLUSH_TLB_PAGE(x)						\
++#define ALT_SFENCE_VMA_ASID(asid)					\
++asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
++		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
++		: : "r" (asid) : "memory")
++
++#define ALT_SFENCE_VMA_ADDR(addr)					\
+ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
+ 		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
+ 		: : "r" (addr) : "memory")
+ 
++#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid)				\
++asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID,	\
++		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
++		: : "r" (addr), "r" (asid) : "memory")
++
+ /*
+  * _val is marked as "will be overwritten", so need to set it to 0
+  * in the default case.
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 4112cc8d1d69f..79dada53d7eb5 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,10 +22,27 @@ static inline void local_flush_tlb_all(void)
+ 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
+ }
+ 
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++	if (asid != FLUSH_TLB_NO_ASID)
++		ALT_SFENCE_VMA_ASID(asid);
++	else
++		local_flush_tlb_all();
++}
++
+ /* Flush one page from local TLB */
+ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+-	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
++	ALT_SFENCE_VMA_ADDR(addr);
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++					     unsigned long asid)
++{
++	if (asid != FLUSH_TLB_NO_ASID)
++		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
++	else
++		local_flush_tlb_page(addr);
+ }
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all()			do { } while (0)
+diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
+index ed9cad20c039d..3c830a6f7ef46 100644
+--- a/arch/riscv/kernel/machine_kexec.c
++++ b/arch/riscv/kernel/machine_kexec.c
+@@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
+ 
+ 	for_each_irq_desc(i, desc) {
+ 		struct irq_chip *chip;
+-		int ret;
+ 
+ 		chip = irq_desc_get_chip(desc);
+ 		if (!chip)
+ 			continue;
+ 
+-		/*
+-		 * First try to remove the active state. If this
+-		 * fails, try to EOI the interrupt.
+-		 */
+-		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+-
+-		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+-		    chip->irq_eoi)
++		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ 			chip->irq_eoi(&desc->irq_data);
+ 
+ 		if (chip->irq_mask)
+diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
+index 86391a5061dda..cee1b9ca4ec48 100644
+--- a/arch/riscv/kvm/vcpu_pmu.c
++++ b/arch/riscv/kvm/vcpu_pmu.c
+@@ -39,7 +39,7 @@ static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
+ 	u64 sample_period;
+ 
+ 	if (!pmc->counter_val)
+-		sample_period = counter_val_mask + 1;
++		sample_period = counter_val_mask;
+ 	else
+ 		sample_period = (-pmc->counter_val) & counter_val_mask;
+ 
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 07d743f87b3f6..a6f788774856b 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -7,29 +7,6 @@
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+ 
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+-	if (asid != FLUSH_TLB_NO_ASID)
+-		__asm__ __volatile__ ("sfence.vma x0, %0"
+-				:
+-				: "r" (asid)
+-				: "memory");
+-	else
+-		local_flush_tlb_all();
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+-		unsigned long asid)
+-{
+-	if (asid != FLUSH_TLB_NO_ASID)
+-		__asm__ __volatile__ ("sfence.vma %0, %1"
+-				:
+-				: "r" (addr), "r" (asid)
+-				: "memory");
+-	else
+-		local_flush_tlb_page(addr);
+-}
+-
+ /*
+  * Flush entire TLB if number of entries to be flushed is greater
+  * than the threshold below.
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 95990461888fc..9281063636a73 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
+ 	u64 instruction_io_other;
+ 	u64 instruction_lpsw;
+ 	u64 instruction_lpswe;
++	u64 instruction_lpswey;
+ 	u64 instruction_pfmf;
+ 	u64 instruction_ptff;
+ 	u64 instruction_sck;
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index bbbdc5abe2b2c..a589547ee0200 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -305,8 +305,8 @@ static inline void __load_psw(psw_t psw)
+  */
+ static __always_inline void __load_psw_mask(unsigned long mask)
+ {
++	psw_t psw __uninitialized;
+ 	unsigned long addr;
+-	psw_t psw;
+ 
+ 	psw.mask = mask;
+ 
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 82e9631cd9efb..54b5b2565df8d 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
++	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
+ 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ 	STATS_DESC_COUNTER(VCPU, instruction_sck),
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 111eb5c747840..bf8534218af3d 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
+ 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+ }
+ 
++static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
++{
++	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
++	s64 disp1;
++
++	/* The displacement is a 20-bit _SIGNED_ value */
++	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
++			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
++
++	if (ar)
++		*ar = base1;
++
++	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
++}
++
+ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+ 					      u64 *address1, u64 *address2,
+ 					      u8 *ar_b1, u8 *ar_b2)
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 1be19cc9d73c1..1a49b89706f86 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static int handle_lpswey(struct kvm_vcpu *vcpu)
++{
++	psw_t new_psw;
++	u64 addr;
++	int rc;
++	u8 ar;
++
++	vcpu->stat.instruction_lpswey++;
++
++	if (!test_kvm_facility(vcpu->kvm, 193))
++		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
++
++	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
++		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
++
++	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
++	if (addr & 7)
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
++	if (rc)
++		return kvm_s390_inject_prog_cond(vcpu, rc);
++
++	vcpu->arch.sie_block->gpsw = new_psw;
++	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
++		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++	return 0;
++}
++
+ static int handle_stidp(struct kvm_vcpu *vcpu)
+ {
+ 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
+@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
+ 	case 0x61:
+ 	case 0x62:
+ 		return handle_ri(vcpu);
++	case 0x71:
++		return handle_lpswey(vcpu);
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
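The kvm_s390_get_base_disp_siy() hunk above decodes a 20-bit signed SIY-format displacement from the instruction parameter block. A minimal stand-alone sketch of the same decode, assuming only the bit layout shown in the hunk (this mirrors its formula; it is not the kernel code itself):

	#include <stdint.h>
	#include <stdio.h>

	static int64_t siy_disp(uint32_t ipb)
	{
		/* DL is IPB bits 16..27; DH is IPB bits 8..15, placed above DL */
		uint32_t raw = ((ipb & 0x0fff0000) >> 16) +
			       ((ipb & 0xff00) << 4);	/* 20-bit field */
		return ((int64_t)raw << 44) >> 44;	/* sign-extend from bit 19 */
	}

	int main(void)
	{
		printf("%lld\n", (long long)siy_disp(0x0ffff000)); /* -61441 */
		return 0;
	}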
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 15319b217bf3f..4bd7cbab4c241 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -104,6 +104,7 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
+ static int blk_validate_limits(struct queue_limits *lim)
+ {
+ 	unsigned int max_hw_sectors;
++	unsigned int logical_block_sectors;
+ 
+ 	/*
+ 	 * Unless otherwise specified, default to 512 byte logical blocks and a
+@@ -134,8 +135,11 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ 	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
+ 		return -EINVAL;
++	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
++	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
++		return -EINVAL;
+ 	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
+-			lim->logical_block_size >> SECTOR_SHIFT);
++			logical_block_sectors);
+ 
+ 	/*
+ 	 * The actual max_sectors value is a complex beast and also takes the
+@@ -153,7 +157,7 @@ static int blk_validate_limits(struct queue_limits *lim)
+ 		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
+ 	}
+ 	lim->max_sectors = round_down(lim->max_sectors,
+-			lim->logical_block_size >> SECTOR_SHIFT);
++			logical_block_sectors);
+ 
+ 	/*
+ 	 * Random default for the maximum number of segments.  Driver should not
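The new logical_block_sectors check above guards the round_down() calls that follow it: without it, a max_hw_sectors value smaller than one logical block would silently round down to zero. A quick illustration with hypothetical values, not taken from any real driver:

	#include <stdio.h>

	#define SECTOR_SHIFT 9
	/* same arithmetic as the kernel's round_down() for these values */
	#define round_down(x, y) ((x) / (y) * (y))

	int main(void)
	{
		unsigned int logical_block_size = 4096;	/* 4 KiB blocks */
		unsigned int max_hw_sectors = 7;	/* bogus limit */
		unsigned int lbs = logical_block_size >> SECTOR_SHIFT; /* 8 */

		/* old: silently rounds to 0; new: WARNs and returns -EINVAL */
		printf("rounded=%u reject=%d\n",
		       round_down(max_hw_sectors, lbs), lbs > max_hw_sectors);
		return 0;
	}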
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 54906633566a2..5f3c1954d8e5d 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -45,8 +45,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ }
+ 
+diff --git a/crypto/cipher.c b/crypto/cipher.c
+index 47c77a3e59783..40cae908788ec 100644
+--- a/crypto/cipher.c
++++ b/crypto/cipher.c
+@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
+ 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+ 	memcpy(alignbuffer, key, keylen);
+ 	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
+-	memset(alignbuffer, 0, keylen);
+-	kfree(buffer);
++	kfree_sensitive(buffer);
+ 	return ret;
+ 
+ }
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 3ec611dc0c09f..a905e955bbfc7 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ 
+ 		if (quirks->max_write_len &&
+ 		    (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+-			max_write = quirks->max_write_len;
++			max_write = quirks->max_write_len -
++				(config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 
+ 		if (max_read || max_write) {
+ 			ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index 27928deccc643..74d0418ddac78 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -84,6 +84,17 @@ int null_init_zoned_dev(struct nullb_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * If a smaller zone capacity was requested, do not also allow a smaller
++	 * last zone, as such a zone configuration does not correspond to any
++	 * real zoned device.
++	 */
++	if (dev->zone_capacity != dev->zone_size &&
++	    dev->size & (dev->zone_size - 1)) {
++		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
++		return -EINVAL;
++	}
++
+ 	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+ 	dev_capacity_sects = mb_to_sects(dev->size);
+ 	dev->zone_size_sects = mb_to_sects(dev->zone_size);
+diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
+index 0c2f15235b4cd..d90858ea2fe59 100644
+--- a/drivers/bluetooth/hci_bcm4377.c
++++ b/drivers/bluetooth/hci_bcm4377.c
+@@ -495,6 +495,10 @@ struct bcm4377_data;
+  *                  extended scanning
+  * broken_mws_transport_config: Set to true if the chip erroneously claims to
+  *                              support MWS Transport Configuration
++ * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags into
++ *                               the reserved bits of Primary/Secondary_PHY
++ *                               in LE Extended Advertising Report events;
++ *                               these bits have to be ignored
+  * send_calibration: Optional callback to send calibration data
+  * send_ptb: Callback to send "PTB" regulatory/calibration data
+  */
+@@ -513,6 +517,7 @@ struct bcm4377_hw {
+ 	unsigned long broken_ext_scan : 1;
+ 	unsigned long broken_mws_transport_config : 1;
+ 	unsigned long broken_le_coded : 1;
++	unsigned long broken_le_ext_adv_report_phy : 1;
+ 
+ 	int (*send_calibration)(struct bcm4377_data *bcm4377);
+ 	int (*send_ptb)(struct bcm4377_data *bcm4377,
+@@ -716,7 +721,7 @@ static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
+ 		ring->events[msgid] = NULL;
+ 	}
+ 
+-	bitmap_release_region(ring->msgids, msgid, ring->n_entries);
++	bitmap_release_region(ring->msgids, msgid, 0);
+ 
+ unlock:
+ 	spin_unlock_irqrestore(&ring->lock, flags);
+@@ -2373,6 +2378,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ 	if (bcm4377->hw->broken_le_coded)
+ 		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
++	if (bcm4377->hw->broken_le_ext_adv_report_phy)
++		set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
+ 
+ 	pci_set_drvdata(pdev, bcm4377);
+ 	hci_set_drvdata(hdev, bcm4377);
+@@ -2477,6 +2484,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ 		.clear_pciecfg_subsystem_ctrl_bit19 = true,
+ 		.broken_mws_transport_config = true,
+ 		.broken_le_coded = true,
++		.broken_le_ext_adv_report_phy = true,
+ 		.send_calibration = bcm4387_send_calibration,
+ 		.send_ptb = bcm4378_send_ptb,
+ 	},
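In the bitmap_release_region() fix above, the third argument is an order, meaning the region spans 1 << order bits, not an entry count, so passing ring->n_entries released far more than the single message id. A small userspace sketch of the order semantics (a simplified re-implementation for illustration, not the kernel helper):

	#include <stdio.h>

	/* region spans (1 << order) bits starting at pos, as in the kernel API */
	static void release_region(unsigned long *map, unsigned int pos, int order)
	{
		for (unsigned int i = pos; i < pos + (1u << order); i++)
			map[i / (8 * sizeof(long))] &= ~(1ul << (i % (8 * sizeof(long))));
	}

	int main(void)
	{
		unsigned long map[1] = { 0xfful };	/* eight msgids in flight */
		release_region(map, 2, 0);		/* order 0 frees msgid 2 only */
		printf("%#lx\n", map[0]);		/* prints 0xfb */
		return 0;
	}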
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 0c9c9ee56592d..9a0bc86f9aace 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2450,15 +2450,27 @@ static void qca_serdev_shutdown(struct device *dev)
+ 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ 	struct hci_uart *hu = &qcadev->serdev_hu;
+ 	struct hci_dev *hdev = hu->hdev;
+-	struct qca_data *qca = hu->priv;
+ 	const u8 ibs_wake_cmd[] = { 0xFD };
+ 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+ 
+ 	if (qcadev->btsoc_type == QCA_QCA6390) {
+-		if (test_bit(QCA_BT_OFF, &qca->flags) ||
+-		    !test_bit(HCI_RUNNING, &hdev->flags))
++		/* The purpose of sending the VSC is to reset the SoC into an
++		 * initial state that ensures the next hdev->setup() succeeds.
++		 * If HCI_QUIRK_NON_PERSISTENT_SETUP is set, hdev->setup() can
++		 * do its job regardless of the SoC state, so there is no need
++		 * to send the VSC.
++		 * Likewise, if HCI_SETUP is set, hdev->setup() was never
++		 * invoked and the SoC is already in the initial state, so the
++		 * VSC is not needed either.
++		 */
++		if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
++		    hci_dev_test_flag(hdev, HCI_SETUP))
+ 			return;
+ 
++		/* The serdev must be in the open state when control flow
++		 * arrives here; this also fixes a use-after-free caused by
++		 * the serdev being flushed or written to after it is closed.
++		 */
+ 		serdev_device_write_flush(serdev);
+ 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+ 					      sizeof(ibs_wake_cmd));
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index a5e07270e0d41..20c90ebb3a3f6 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ 		return -EFAULT;
+ 
+ 	tmp_info.media_flags = 0;
+-	if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
++	if (cdi->last_media_change_ms > tmp_info.last_media_change)
+ 		tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+ 
+ 	tmp_info.last_media_change = cdi->last_media_change_ms;
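The rewritten test above avoids checking the sign of a subtraction between 64-bit timestamps: the difference is never negative for unsigned operands and can overflow for signed ones, while a direct comparison sidesteps both. Illustrated here with unsigned fields (an assumption made for the demo):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t reported = 100;	/* tmp_info.last_media_change */
		uint64_t changed_ms = 200;	/* cdi->last_media_change_ms */

		/* old test: the unsigned difference wraps, so this is never true */
		printf("old=%d\n", reported - changed_ms < 0);
		/* fixed test: direct comparison detects the newer media change */
		printf("new=%d\n", changed_ms > reported);
		return 0;
	}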
+diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+index ba504e19d4203..62d876e150e11 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
+ static const struct mtk_clk_desc mfg_desc = {
+ 	.clks = mfg_clks,
+ 	.num_clks = ARRAY_SIZE(mfg_clks),
++	.need_runtime_pm = true,
+ };
+ 
+ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index bd37ab4d1a9bb..ba1d1c495bc2b 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 	}
+ 
+ 
+-	devm_pm_runtime_enable(&pdev->dev);
+-	/*
+-	 * Do a pm_runtime_resume_and_get() to workaround a possible
+-	 * deadlock between clk_register() and the genpd framework.
+-	 */
+-	r = pm_runtime_resume_and_get(&pdev->dev);
+-	if (r)
+-		return r;
++	if (mcd->need_runtime_pm) {
++		devm_pm_runtime_enable(&pdev->dev);
++		/*
++		 * Do a pm_runtime_resume_and_get() to workaround a possible
++		 * deadlock between clk_register() and the genpd framework.
++		 */
++		r = pm_runtime_resume_and_get(&pdev->dev);
++		if (r)
++			return r;
++	}
+ 
+ 	/* Calculate how many clk_hw_onecell_data entries to allocate */
+ 	num_clks = mcd->num_clks + mcd->num_composite_clks;
+@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 			goto unregister_clks;
+ 	}
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 
+ 	return r;
+ 
+@@ -618,7 +621,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
+ 	if (mcd->shared_io && base)
+ 		iounmap(base);
+ 
+-	pm_runtime_put(&pdev->dev);
++	if (mcd->need_runtime_pm)
++		pm_runtime_put(&pdev->dev);
+ 	return r;
+ }
+ 
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 22096501a60a7..c17fe1c2d732d 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -237,6 +237,8 @@ struct mtk_clk_desc {
+ 
+ 	int (*clk_notifier_func)(struct device *dev, struct clk *clk);
+ 	unsigned int mfg_clk_idx;
++
++	bool need_runtime_pm;
+ };
+ 
+ int mtk_clk_pdev_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index be18ff983d35c..003308a288968 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -2555,6 +2555,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
+ 	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ 					a >> ALPHA_BITWIDTH);
+ 
++	regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
++			   PLL_ALPHA_EN, PLL_ALPHA_EN);
++
+ 	regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
+ 
+ 	/* Wait five microseconds or more */
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 0a3f846695b80..f8b9a1e93bef2 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ 
+ static struct clk_branch gcc_crypto_axi_clk = {
+ 	.halt_reg = 0x16010,
++	.halt_check = BRANCH_HALT_VOTED,
+ 	.clkr = {
+-		.enable_reg = 0x16010,
+-		.enable_mask = BIT(0),
++		.enable_reg = 0xb004,
++		.enable_mask = BIT(15),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_crypto_axi_clk",
+ 			.parent_hws = (const struct clk_hw *[]) {
+@@ -2156,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = {
+ 
+ static struct clk_branch gcc_crypto_ahb_clk = {
+ 	.halt_reg = 0x16014,
++	.halt_check = BRANCH_HALT_VOTED,
+ 	.clkr = {
+-		.enable_reg = 0x16014,
+-		.enable_mask = BIT(0),
++		.enable_reg = 0xb004,
++		.enable_mask = BIT(16),
+ 		.hw.init = &(const struct clk_init_data) {
+ 			.name = "gcc_crypto_ahb_clk",
+ 			.parent_hws = (const struct clk_hw *[]) {
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23a..0559a33faf00e 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
+ 		.enable_mask = BIT(6),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll6",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll6_out_even",
+ 		.parent_hws = (const struct clk_hw*[]){
+-			&gpll0.clkr.hw,
++			&gpll6.clkr.hw,
+ 		},
+ 		.num_parents = 1,
+ 		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gpll7",
+-			.parent_hws = (const struct clk_hw*[]){
+-				&gpll0.clkr.hw,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "bi_tcxo",
+ 			},
+ 			.num_parents = 1,
+ 			.ops = &clk_alpha_pll_fixed_fabia_ops,
+diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
+index ac0091b4ce242..be375ce0149c8 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.c
++++ b/drivers/clk/sunxi-ng/ccu_common.c
+@@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ 
+ 	for (i = 0; i < desc->hw_clks->num ; i++) {
+ 		struct clk_hw *hw = desc->hw_clks->hws[i];
+-		struct ccu_common *common = hw_to_ccu_common(hw);
+ 		const char *name;
+ 
+ 		if (!hw)
+@@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ 			pr_err("Couldn't register clock %d - %s\n", i, name);
+ 			goto err_clk_unreg;
+ 		}
++	}
++
++	for (i = 0; i < desc->num_ccu_clks; i++) {
++		struct ccu_common *cclk = desc->ccu_clks[i];
++
++		if (!cclk)
++			continue;
+ 
+-		if (common->max_rate)
+-			clk_hw_set_rate_range(hw, common->min_rate,
+-					      common->max_rate);
++		if (cclk->max_rate)
++			clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
++					      cclk->max_rate);
+ 		else
+-			WARN(common->min_rate,
++			WARN(cclk->min_rate,
+ 			     "No max_rate, ignoring min_rate of clock %d - %s\n",
+-			     i, name);
++			     i, clk_hw_get_name(&cclk->hw));
+ 	}
+ 
+ 	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index cd67fa348ca72..6351a452878dd 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -809,8 +809,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
+ {
+ 	int i;
+ 
++	if (!dregs)
++		return;
++
+ 	/* Set the pointer to NULL to prevent a double free */
+ 	for (i = 0; i < reg_len; i++) {
++		if (!dregs[i].regs)
++			continue;
++
+ 		kfree(dregs[i].regs);
+ 		dregs[i].regs = NULL;
+ 	}
+@@ -860,14 +866,21 @@ static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ static int qm_diff_regs_init(struct hisi_qm *qm,
+ 		struct dfx_diff_registers *dregs, u32 reg_len)
+ {
++	int ret;
++
+ 	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-	if (IS_ERR(qm->debug.qm_diff_regs))
+-		return PTR_ERR(qm->debug.qm_diff_regs);
++	if (IS_ERR(qm->debug.qm_diff_regs)) {
++		ret = PTR_ERR(qm->debug.qm_diff_regs);
++		qm->debug.qm_diff_regs = NULL;
++		return ret;
++	}
+ 
+ 	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+ 	if (IS_ERR(qm->debug.acc_diff_regs)) {
+ 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-		return PTR_ERR(qm->debug.acc_diff_regs);
++		ret = PTR_ERR(qm->debug.acc_diff_regs);
++		qm->debug.acc_diff_regs = NULL;
++		return ret;
+ 	}
+ 
+ 	return 0;
+@@ -908,7 +921,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
+ static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+ {
+ 	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++	qm->debug.acc_diff_regs = NULL;
+ 	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++	qm->debug.qm_diff_regs = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
+index c290d8937b19c..fabea0d650297 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_main.c
++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
+@@ -152,7 +152,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
+ 	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ 	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ 	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+-	{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
++	{SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ 	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
+ 	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ 	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 015c95a825d31..ac2a5d2d47463 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
+ 	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ 		const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
++		/*
++		 * If a short entry is found (less than 4 bytes), not only is
++		 * it invalid, but we cannot reliably locate the next entry.
++		 */
++		if (dm->length < sizeof(struct dmi_header)) {
++			pr_warn(FW_BUG
++				"Corrupted DMI table, offset %zd (only %d entries processed)\n",
++				data - buf, i);
++			break;
++		}
++
+ 		/*
+ 		 *  We want to know the total length (formatted area and
+ 		 *  strings) before decoding to make sure we won't run off the
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 880ffcb500887..921f61507ae83 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+ 	if (IS_ERR(pdev)) {
+ 		return ERR_CAST(pdev);
+ 	} else if (pdev) {
+-		if (!sysfb_pci_dev_is_enabled(pdev))
++		if (!sysfb_pci_dev_is_enabled(pdev)) {
++			pci_dev_put(pdev);
+ 			return ERR_PTR(-ENODEV);
++		}
+ 		return &pdev->dev;
+ 	}
+ 
+@@ -137,7 +139,7 @@ static __init int sysfb_init(void)
+ 	if (compatible) {
+ 		pd = sysfb_create_simplefb(si, &mode, parent);
+ 		if (!IS_ERR(pd))
+-			goto unlock_mutex;
++			goto put_device;
+ 	}
+ 
+ 	/* if the FB is incompatible, create a legacy framebuffer device */
+@@ -155,7 +157,7 @@ static __init int sysfb_init(void)
+ 	pd = platform_device_alloc(name, 0);
+ 	if (!pd) {
+ 		ret = -ENOMEM;
+-		goto unlock_mutex;
++		goto put_device;
+ 	}
+ 
+ 	pd->dev.parent = parent;
+@@ -170,9 +172,11 @@ static __init int sysfb_init(void)
+ 	if (ret)
+ 		goto err;
+ 
+-	goto unlock_mutex;
++	goto put_device;
+ err:
+ 	platform_device_put(pd);
++put_device:
++	put_device(parent);
+ unlock_mutex:
+ 	mutex_unlock(&disable_lock);
+ 	return ret;
+diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
+index 71e1af7c21847..d89e78f0ead31 100644
+--- a/drivers/gpio/gpio-mmio.c
++++ b/drivers/gpio/gpio-mmio.c
+@@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
+ 	ret = gpiochip_get_ngpios(gc, dev);
+ 	if (ret)
+ 		gc->ngpio = gc->bgpio_bits;
+-	else
+-		gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
+ 
+ 	ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ 	if (ret)
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index cb0cefaec37e8..5c4442200118a 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -202,6 +202,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
+ 		 * helper, and be consistent with what other drivers do.
+ 		 */
+ 		{ "qi,lb60",		"rb-gpios",	true },
++#endif
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
++		/*
++		 * According to the PCI specification, the RST# pin is an
++		 * active-low signal. However, most of the device trees that
++		 * have been widely used for a long time incorrectly describe
++		 * the reset GPIO as active-high, and also use the wrong name
++		 * for the property.
++		 */
++		{ "lantiq,pci-xway",	"gpio-reset",	false },
++#endif
++#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
++		/*
++		 * DTS for Nokia N900 incorrectly specified "active high"
++		 * polarity for the reset line, while the chip actually
++		 * treats it as "active low".
++		 */
++		{ "ti,tsc2005",		"reset-gpios",	false },
+ #endif
+ 	};
+ 	unsigned int i;
+@@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
+ 		{ "reset",	"reset-n-io",	"marvell,nfc-uart" },
+ 		{ "reset",	"reset-n-io",	"mrvl,nfc-uart" },
+ #endif
+-#if !IS_ENABLED(CONFIG_PCI_LANTIQ)
++#if IS_ENABLED(CONFIG_PCI_LANTIQ)
+ 		/* MIPS Lantiq PCI */
+-		{ "reset",	"gpios-reset",	"lantiq,pci-xway" },
++		{ "reset",	"gpio-reset",	"lantiq,pci-xway" },
+ #endif
+ 
+ 		/*
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index 576067d66bb9a..d0a8da67dc2a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -97,7 +97,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 35dd6effa9a34..7291c3fd8cf70 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -455,6 +455,9 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
+ 		else
+ 			mem_info->local_mem_size_private =
+ 					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
++	} else if (adev->flags & AMD_IS_APU) {
++		mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
++		mem_info->local_mem_size_private = 0;
+ 	} else {
+ 		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+ 		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+@@ -809,6 +812,8 @@ u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
+ 		}
+ 		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+ 		return ALIGN_DOWN(tmp, PAGE_SIZE);
++	} else if (adev->flags & AMD_IS_APU) {
++		return (ttm_tt_pages_limit() << PAGE_SHIFT);
+ 	} else {
+ 		return adev->gmc.real_vram_size;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 0535b07987d9d..8975cf41a91ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ 			return -EINVAL;
+ 
+ 		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
+-		if (adev->gmc.is_app_apu) {
++		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 			system_mem_needed = size;
+ 			ttm_mem_needed = size;
+ 		}
+@@ -232,7 +232,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ 		  "adev reference can't be null when vram is used");
+ 	if (adev && xcp_id >= 0) {
+ 		adev->kfd.vram_used[xcp_id] += vram_needed;
+-		adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
++		adev->kfd.vram_used_aligned[xcp_id] +=
++				(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+ 				vram_needed :
+ 				ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ 	}
+@@ -260,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
+ 
+ 		if (adev) {
+ 			adev->kfd.vram_used[xcp_id] -= size;
+-			if (adev->gmc.is_app_apu) {
++			if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 				adev->kfd.vram_used_aligned[xcp_id] -= size;
+ 				kfd_mem_limit.system_mem_used -= size;
+ 				kfd_mem_limit.ttm_mem_used -= size;
+@@ -889,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+ 	 * if peer device has large BAR. In contrast, access over xGMI is
+ 	 * allowed for both small and large BAR configurations of peer device
+ 	 */
+-	if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
++	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+ 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
+ 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
+ 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
+@@ -1657,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ 		- atomic64_read(&adev->vram_pin_size)
+ 		- reserved_for_pt;
+ 
+-	if (adev->gmc.is_app_apu) {
++	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 		system_mem_available = no_system_mem_limit ?
+ 					kfd_mem_limit.max_system_mem_limit :
+ 					kfd_mem_limit.max_system_mem_limit -
+@@ -1705,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+ 
+-		if (adev->gmc.is_app_apu) {
++		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+ 			domain = AMDGPU_GEM_DOMAIN_GTT;
+ 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ 			alloc_flags = 0;
+@@ -1952,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ 	if (size) {
+ 		if (!is_imported &&
+ 		   (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
+-		   (adev->gmc.is_app_apu &&
++		   ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+ 		    mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ 			*size = bo_size;
+ 		else
+@@ -2374,8 +2375,9 @@ static int import_obj_create(struct amdgpu_device *adev,
+ 	(*mem)->dmabuf = dma_buf;
+ 	(*mem)->bo = bo;
+ 	(*mem)->va = va;
+-	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
+-		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
++	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
++			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
++			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ 
+ 	(*mem)->mapped_to_gpu_memory = 0;
+ 	(*mem)->process_info = avm->process_info;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index f5d0fa207a88b..b62ae3c91a9db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -2065,12 +2065,13 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ 	char reg_offset[11];
+ 	uint32_t *new = NULL, *tmp = NULL;
+-	int ret, i = 0, len = 0;
++	unsigned int len = 0;
++	int ret, i = 0;
+ 
+ 	do {
+ 		memset(reg_offset, 0, 11);
+ 		if (copy_from_user(reg_offset, buf + len,
+-					min(10, ((int)size-len)))) {
++					min(10, (size-len)))) {
+ 			ret = -EFAULT;
+ 			goto error_free;
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 55d5508987ffe..1d955652f3ba6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1206,7 +1206,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ 		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ 		break;
+ 	default:
+-		break;
++		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
++		return;
+ 	}
+ 
+ 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 7e6d09730e6d3..665c63f552787 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -445,6 +445,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ 
+ 	entry.ih = ih;
+ 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++
++	/*
++	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
++	 * si and tonga), so initialize timestamp and timestamp_src to 0
++	 */
++	entry.timestamp = 0;
++	entry.timestamp_src = 0;
++
+ 	amdgpu_ih_decode_iv(adev, &entry);
+ 
+ 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+index e0f8ce9d84406..db9cb2b4e9823 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+@@ -43,7 +43,7 @@ struct amdgpu_iv_entry;
+ #define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x)		AMDGPU_GET_REG_FIELD(x, 7, 7)
+ #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x)			AMDGPU_GET_REG_FIELD(x, 10, 8)
+ #define AMDGPU_RAS_GPU_ERR_AID_ID(x)			AMDGPU_GET_REG_FIELD(x, 12, 11)
+-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)			AMDGPU_GET_REG_FIELD(x, 13, 13)
++#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)			AMDGPU_GET_REG_FIELD(x, 14, 13)
+ #define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x)		AMDGPU_GET_REG_FIELD(x, 31, 31)
+ 
+ #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT	1000
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+index 20436f81856ad..6f7451e3ee87e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+@@ -170,6 +170,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ 	}
+ 
+ 	kfree(err_data->err_addr);
++	err_data->err_addr = NULL;
+ 
+ 	mutex_unlock(&con->page_retirement_lock);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 59acf424a078f..968ca2c84ef7e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -743,7 +743,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ 	uint32_t created = 0;
+ 	uint32_t allocated = 0;
+ 	uint32_t tmp, handle = 0;
+-	uint32_t *size = &tmp;
++	uint32_t dummy = 0xffffffff;
++	uint32_t *size = &dummy;
+ 	unsigned int idx;
+ 	int i, r = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+index 93f6772d1b241..481217c32d853 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
++++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+@@ -92,7 +92,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+ 		adev->ip_blocks[i].status.hw = false;
+ 	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 5c8d81bfce7ab..ba651d12f1fa0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+ 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
+ 		return -EINVAL;
+ 
+-	if (adev->gmc.is_app_apu)
++	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+ 		return 0;
+ 
+ 	pgmap = &kfddev->pgmap;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 386875e6eb96b..069b81eeea03c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -2619,7 +2619,8 @@ svm_range_best_restore_location(struct svm_range *prange,
+ 		return -1;
+ 	}
+ 
+-	if (node->adev->gmc.is_app_apu)
++	if (node->adev->gmc.is_app_apu ||
++	    node->adev->flags & AMD_IS_APU)
+ 		return 0;
+ 
+ 	if (prange->preferred_loc == gpuid ||
+@@ -3337,7 +3338,8 @@ svm_range_best_prefetch_location(struct svm_range *prange)
+ 		goto out;
+ 	}
+ 
+-	if (bo_node->adev->gmc.is_app_apu) {
++	if (bo_node->adev->gmc.is_app_apu ||
++	    bo_node->adev->flags & AMD_IS_APU) {
+ 		best_loc = 0;
+ 		goto out;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+index 026863a0abcd3..9c37bd0567efa 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+@@ -201,7 +201,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
+  * is initialized to non-zero when page migration registers device memory.
+  */
+ #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
+-					(adev)->gmc.is_app_apu)
++					(adev)->gmc.is_app_apu ||\
++					((adev)->flags & AMD_IS_APU))
+ 
+ void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f866a02f4f489..2152e40ee1c27 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -274,7 +274,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ 				  u32 *vbl, u32 *position)
+ {
+-	u32 v_blank_start, v_blank_end, h_position, v_position;
++	u32 v_blank_start = 0, v_blank_end = 0, h_position = 0, v_position = 0;
+ 	struct amdgpu_crtc *acrtc = NULL;
+ 	struct dc *dc = adev->dm.dc;
+ 
+@@ -848,7 +848,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
+  */
+ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+ {
+-	struct dmub_notification notify;
++	struct dmub_notification notify = {0};
+ 	struct common_irq_params *irq_params = interrupt_params;
+ 	struct amdgpu_device *adev = irq_params->adev;
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+@@ -7192,7 +7192,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct dm_connector_state *dm_conn_state;
+ 	int i, j, ret;
+-	int vcpi, pbn_div, pbn, slot_num = 0;
++	int vcpi, pbn_div, pbn = 0, slot_num = 0;
+ 
+ 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ 
+@@ -10595,7 +10595,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ 	struct drm_dp_mst_topology_mgr *mgr;
+ 	struct drm_dp_mst_topology_state *mst_state;
+-	struct dsc_mst_fairness_vars vars[MAX_PIPES];
++	struct dsc_mst_fairness_vars vars[MAX_PIPES] = {0};
+ 
+ 	trace_amdgpu_dm_atomic_check_begin(state);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index c7715a17f388b..4d7a5d470b1ea 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -1249,7 +1249,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
+ 				 size_t size, loff_t *pos)
+ {
+ 	int r;
+-	uint8_t data[36];
++	uint8_t data[36] = {0};
+ 	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ 	struct dm_crtc_state *acrtc_state;
+ 	uint32_t write_size = 36;
+@@ -2960,7 +2960,7 @@ static int psr_read_residency(void *data, u64 *val)
+ {
+ 	struct amdgpu_dm_connector *connector = data;
+ 	struct dc_link *link = connector->dc_link;
+-	u32 residency;
++	u32 residency = 0;
+ 
+ 	link->dc->link_srv->edp_get_psr_residency(link, &residency);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+index 3271c8c7905dd..4e036356b6a89 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+@@ -560,11 +560,19 @@ void dcn3_clk_mgr_construct(
+ 	dce_clock_read_ss_info(clk_mgr);
+ 
+ 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
++	if (!clk_mgr->base.bw_params) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ 
+ 	/* need physical address of table to give to PMFW */
+ 	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
+ 			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
+ 			&clk_mgr->wm_range_table_addr);
++	if (!clk_mgr->wm_range_table) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ }
+ 
+ void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index e506e4f969ca9..dda1173be35ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -1208,11 +1208,19 @@ void dcn32_clk_mgr_construct(
+ 	clk_mgr->smu_present = false;
+ 
+ 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
++	if (!clk_mgr->base.bw_params) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ 
+ 	/* need physical address of table to give to PMFW */
+ 	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
+ 			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
+ 			&clk_mgr->wm_range_table_addr);
++	if (!clk_mgr->wm_range_table) {
++		BREAK_TO_DEBUGGER();
++		return;
++	}
+ }
+ 
+ void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ec4bf9432bdb1..ab598e1f088cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2168,50 +2168,91 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
+ 	}
+ }
+ 
+-void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
++static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
++		struct pipe_ctx *otg_master, int stream_idx)
+ {
+-	struct pipe_ctx *otg_master;
+ 	struct pipe_ctx *opp_heads[MAX_PIPES];
+ 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
+ 
+-	int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
++	int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
+ 	bool is_primary;
+ 	DC_LOGGER_INIT(dc->ctx->logger);
+ 
++	slice_count = resource_get_opp_heads_for_otg_master(otg_master,
++			&state->res_ctx, opp_heads);
++	for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
++		plane_idx = -1;
++		if (opp_heads[slice_idx]->plane_state) {
++			dpp_count = resource_get_dpp_pipes_for_opp_head(
++					opp_heads[slice_idx],
++					&state->res_ctx,
++					dpp_pipes);
++			for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
++				is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
++						dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
++				if (is_primary)
++					plane_idx++;
++				resource_log_pipe(dc, dpp_pipes[dpp_idx],
++						stream_idx, slice_idx,
++						plane_idx, slice_count,
++						is_primary);
++			}
++		} else {
++			resource_log_pipe(dc, opp_heads[slice_idx],
++					stream_idx, slice_idx, plane_idx,
++					slice_count, true);
++		}
++
++	}
++}
++
++static int resource_stream_to_stream_idx(struct dc_state *state,
++		struct dc_stream_state *stream)
++{
++	int i, stream_idx = -1;
++
++	for (i = 0; i < state->stream_count; i++)
++		if (state->streams[i] == stream) {
++			stream_idx = i;
++			break;
++		}
++
++	/* never return negative array index */
++	if (stream_idx == -1) {
++		ASSERT(0);
++		return 0;
++	}
++
++	return stream_idx;
++}
++
++void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
++{
++	struct pipe_ctx *otg_master;
++	int stream_idx, phantom_stream_idx;
++	DC_LOGGER_INIT(dc->ctx->logger);
++
+ 	DC_LOG_DC("    pipe topology update");
+ 	DC_LOG_DC("  ________________________");
+ 	for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
++		if (state->streams[stream_idx]->is_phantom)
++			continue;
++
+ 		otg_master = resource_get_otg_master_for_stream(
+ 				&state->res_ctx, state->streams[stream_idx]);
+-		if (!otg_master	|| otg_master->stream_res.tg == NULL) {
+-			DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+-			return;
+-		}
+-		slice_count = resource_get_opp_heads_for_otg_master(otg_master,
+-				&state->res_ctx, opp_heads);
+-		for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
+-			plane_idx = -1;
+-			if (opp_heads[slice_idx]->plane_state) {
+-				dpp_count = resource_get_dpp_pipes_for_opp_head(
+-						opp_heads[slice_idx],
+-						&state->res_ctx,
+-						dpp_pipes);
+-				for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
+-					is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
+-							dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
+-					if (is_primary)
+-						plane_idx++;
+-					resource_log_pipe(dc, dpp_pipes[dpp_idx],
+-							stream_idx, slice_idx,
+-							plane_idx, slice_count,
+-							is_primary);
+-				}
+-			} else {
+-				resource_log_pipe(dc, opp_heads[slice_idx],
+-						stream_idx, slice_idx, plane_idx,
+-						slice_count, true);
+-			}
++		resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
++	}
++	if (state->phantom_stream_count > 0) {
++		DC_LOG_DC(" |    (phantom pipes)     |");
++		for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
++			if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
++				continue;
+ 
++			phantom_stream_idx = resource_stream_to_stream_idx(state,
++					state->stream_status[stream_idx].mall_stream_config.paired_stream);
++			otg_master = resource_get_otg_master_for_stream(
++					&state->res_ctx, state->streams[phantom_stream_idx]);
++			resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ 		}
+ 	}
+ 	DC_LOG_DC(" |________________________|\n");
+@@ -3117,6 +3158,9 @@ static struct audio *find_first_free_audio(
+ {
+ 	int i, available_audio_count;
+ 
++	if (id == ENGINE_ID_UNKNOWN)
++		return NULL;
++
+ 	available_audio_count = pool->audio_count;
+ 
+ 	for (i = 0; i < available_audio_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index 9be5ebf3a8c0b..79cd4c4790439 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -9460,8 +9460,10 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 
+ 		/* Copy the calculated watermarks to mp.Watermark as the getter functions are
+ 		 * implemented by the DML team to copy the calculated values from the mp.Watermark interface.
++		 * &mode_lib->mp.Watermark and &locals->Watermark are the same address, so memcpy
++		 * may lead to unexpected behavior; memmove should be used.
+ 		 */
+-		memcpy(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
++		memmove(&mode_lib->mp.Watermark, CalculateWatermarks_params->Watermark, sizeof(struct Watermarks));
+ 
+ 		for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ 			if (mode_lib->ms.cache_display_cfg.writeback.WritebackEnable[k] == true) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+index a52c594e1ba4b..e1f1b5dd13203 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+@@ -88,7 +88,8 @@ static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *map
+ 			return  i;
+ 	}
+ 
+-	return -1;
++	ASSERT(false);
++	return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
+ }
+ 
+ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
+@@ -100,7 +101,8 @@ static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *ma
+ 			return  i;
+ 	}
+ 
+-	return -1;
++	ASSERT(false);
++	return __DML2_WRAPPER_MAX_STREAMS_PLANES__;
+ }
+ 
+ // The master pipe of a stream is defined as the top pipe in odm slice 0
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+index 1c0d89e675da5..bb576a9c5fdbd 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
+ 						   info->ext_id);
+ 	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+ 
+-	struct timing_generator *tg =
+-			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
++	struct timing_generator *tg;
++
++	if (pipe_offset >= MAX_PIPES)
++		return false;
++
++	tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ 
+ 	if (enable) {
+ 		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+index ecc477ef8e3b7..b427a98066c11 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+@@ -2050,6 +2050,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+index 2fb1d00ff9654..f38de5391176f 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+@@ -1311,6 +1311,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+@@ -1767,6 +1769,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+index c97391edb5ff7..2791fc45bb8c7 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+@@ -1384,6 +1384,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+@@ -1744,6 +1746,9 @@ bool dcn314_validate_bandwidth(struct dc *dc,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	if (filter_modes_for_single_channel_workaround(dc, context))
+ 		goto validate_fail;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+index 515ba435f759c..4ce0f4bf1d9bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+@@ -1309,6 +1309,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+index b9753d4606f89..efa5627b0c50a 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+@@ -1306,6 +1306,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ 	hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ 					&hpo_dp_link_enc_regs[inst],
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+index ce1754cc1f463..1f5a91b764828 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+@@ -1304,6 +1304,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+@@ -1751,6 +1753,9 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	if (!pipes)
++		goto validate_fail;
++
+ 	DC_FP_START();
+ 	out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+index 296a0a8e71459..e83d340ed6260 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+@@ -1288,6 +1288,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+index 5d52853cac96a..cf0cb5cf4b5b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+@@ -1368,6 +1368,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+index 909e14261f9b4..116b59123199f 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+@@ -1348,6 +1348,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ 
+ 	/* allocate HPO link encoder */
+ 	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
++	if (!hpo_dp_enc31)
++		return NULL; /* out of memory */
+ 
+ #undef REG_STRUCT
+ #define REG_STRUCT hpo_dp_link_enc_regs
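
All of the dcn31x/dcn32x/dcn35x hunks above add the same missing check: kzalloc() returns NULL under memory pressure, and the construct call that follows would otherwise dereference it. A minimal userspace sketch of the guard pattern, with calloc() standing in for kzalloc() and an invented struct purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct encoder {
	int inst;
};

/* Mirror of the fixed pattern: bail out before touching the object. */
static struct encoder *encoder_create(int inst)
{
	struct encoder *enc = calloc(1, sizeof(*enc));

	if (!enc)
		return NULL; /* out of memory, caller must cope with NULL */

	enc->inst = inst;
	return enc;
}

int main(void)
{
	struct encoder *enc = encoder_create(0);

	if (!enc)
		return 1;
	printf("encoder %d created\n", enc->inst);
	free(enc);
	return 0;
}

Returning NULL at the allocation site keeps a single failure path, which is the usual kernel idiom for *_create() helpers.
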
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index f7b5583ee609a..8e9caae7c9559 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++		return MOD_HDCP_STATUS_DDC_FAILURE;
++	}
++
+ 	if (is_dp_hdcp(hdcp)) {
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 1acb2d2c5597b..09cbc3afd6d89 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1
+ {
+   struct  atom_common_table_header  table_header;
+   /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut  */
+-  struct  atom_gpio_pin_assignment  gpio_pin[8];
++  struct  atom_gpio_pin_assignment  gpio_pin[];
+ };
+ 
+ 
+@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
+    uint8_t  phase_delay_us;                      // phase delay in unit of micro second
+    uint8_t  reserved;   
+    uint32_t gpio_mask_val;                         // GPIO Mask value
+-   struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
++   struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
+ };
+ 
+ struct  atom_svid2_voltage_object_v4
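
The two atomfirmware.h hunks replace fixed-size trailing arrays (gpio_pin[8], voltage_gpio_lut[1]) with C99 flexible array members; the second also adds __counted_by() so FORTIFY_SOURCE/UBSAN builds can bounds-check accesses against gpio_entry_num. A self-contained sketch of the allocation pattern — the struct and field names here are invented, and __counted_by is stubbed out for compilers without the attribute:

#include <stdio.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(m) /* no-op on compilers without the attribute */
#endif

struct gpio_map {
	unsigned char gpio_entry_num;
	/* flexible array member: length chosen at allocation time */
	unsigned int lut[] __counted_by(gpio_entry_num);
};

int main(void)
{
	unsigned char n = 4;
	/* equivalent of the kernel's struct_size(): header + n elements */
	struct gpio_map *m = malloc(sizeof(*m) + n * sizeof(m->lut[0]));

	if (!m)
		return 1;
	m->gpio_entry_num = n;
	for (int i = 0; i < n; i++)
		m->lut[i] = i;
	printf("%d entries, last = %u\n", m->gpio_entry_num, m->lut[n - 1]);
	free(m);
	return 0;
}
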
+diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
+index b4659cd6285ab..cbb7418b789f8 100644
+--- a/drivers/gpu/drm/drm_fbdev_generic.c
++++ b/drivers/gpu/drm/drm_fbdev_generic.c
+@@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
+ 		    sizes->surface_width, sizes->surface_height,
+ 		    sizes->surface_bpp);
+ 
+-	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
++	format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
++					     sizes->surface_depth);
+ 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ 					       sizes->surface_height, format);
+ 	if (IS_ERR(buffer))
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index aa93129c3397e..426bbee2d9f5e 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -421,6 +421,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* Valve Steam Deck */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
++		},
++		.driver_data = (void *)&lcd800x1280_rightside_up,
+ 	}, {	/* VIOS LTH17 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index e15295071533b..3282997a0358d 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -345,7 +345,9 @@ int lima_gp_init(struct lima_ip *ip)
+ 
+ void lima_gp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_gp_pipe_init(struct lima_device *dev)
+diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
+index e18317c5ca8c1..6611e2836bf0d 100644
+--- a/drivers/gpu/drm/lima/lima_mmu.c
++++ b/drivers/gpu/drm/lima/lima_mmu.c
+@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
+ 
+ void lima_mmu_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
++
++	if (ip->id == lima_ip_ppmmu_bcast)
++		return;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ void lima_mmu_flush_tlb(struct lima_ip *ip)
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index a4a2ffe6527c2..eaab4788dff49 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -286,7 +286,9 @@ int lima_pp_init(struct lima_ip *ip)
+ 
+ void lima_pp_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_pp_bcast_resume(struct lima_ip *ip)
+@@ -319,7 +321,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
+ 
+ void lima_pp_bcast_fini(struct lima_ip *ip)
+ {
++	struct lima_device *dev = ip->dev;
+ 
++	devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 856b3ef5edb89..0c71d761d3785 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 		struct drm_display_mode *mode;
+ 
+ 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++		if (!mode)
++			return 0;
++
+ 		drm_mode_probed_add(connector, mode);
+ 		ret = 1;
+ 	}
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 96a724e8f3ffe..c4feaacf17ac2 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref)
+ 		if (!dma_resv_test_signaled(bo->base.resv,
+ 					    DMA_RESV_USAGE_BOOKKEEP) ||
+ 		    (want_init_on_free() && (bo->ttm != NULL)) ||
++		    bo->type == ttm_bo_type_sg ||
+ 		    !dma_resv_trylock(bo->base.resv)) {
+ 			/* The BO is not idle, resurrect it for delayed destroy */
+ 			ttm_bo_flush_all_fences(bo);
+diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+index 9f6d571d7fa9c..a3d2dd42adf96 100644
+--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
++++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+@@ -12,6 +12,7 @@
+ #include "tests/xe_pci_test.h"
+ 
+ #include "xe_pci.h"
++#include "xe_pm.h"
+ 
+ static bool p2p_enabled(struct dma_buf_test_params *params)
+ {
+@@ -259,6 +260,7 @@ static int dma_buf_run_device(struct xe_device *xe)
+ 	const struct dma_buf_test_params *params;
+ 	struct kunit *test = xe_cur_kunit();
+ 
++	xe_pm_runtime_get(xe);
+ 	for (params = test_params; params->mem_mask; ++params) {
+ 		struct dma_buf_test_params p = *params;
+ 
+@@ -266,6 +268,7 @@ static int dma_buf_run_device(struct xe_device *xe)
+ 		test->priv = &p;
+ 		xe_test_dmabuf_import_same_driver(xe);
+ 	}
++	xe_pm_runtime_put(xe);
+ 
+ 	/* A non-zero return would halt iteration over driver devices */
+ 	return 0;
+diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
+index a7ab9ba645f99..c78fbb9bc5fc7 100644
+--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
++++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
+@@ -315,7 +315,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
+ 	else
+ 		gt->steering[OADDRM].group_target = 1;
+ 
+-	gt->steering[DSS].instance_target = 0;		/* unused */
++	gt->steering[OADDRM].instance_target = 0;	/* unused */
+ }
+ 
+ static void init_steering_sqidi_psmi(struct xe_gt *gt)
+@@ -330,8 +330,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
+ 
+ static void init_steering_inst0(struct xe_gt *gt)
+ {
+-	gt->steering[DSS].group_target = 0;		/* unused */
+-	gt->steering[DSS].instance_target = 0;		/* unused */
++	gt->steering[INSTANCE0].group_target = 0;	/* unused */
++	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
+ }
+ 
+ static const struct {
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index aca519f5b85d9..bebfdc8897813 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -1336,7 +1336,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 						 GFP_KERNEL, true, 0);
+ 			if (IS_ERR(sa_bo)) {
+ 				err = PTR_ERR(sa_bo);
+-				goto err;
++				goto err_bb;
+ 			}
+ 
+ 			ppgtt_ofs = NUM_KERNEL_PDE +
+@@ -1387,7 +1387,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 					 update_idx);
+ 	if (IS_ERR(job)) {
+ 		err = PTR_ERR(job);
+-		goto err_bb;
++		goto err_sa;
+ 	}
+ 
+ 	/* Wait on BO move */
+@@ -1436,12 +1436,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
+ 
+ err_job:
+ 	xe_sched_job_put(job);
++err_sa:
++	drm_suballoc_free(sa_bo, NULL);
+ err_bb:
+ 	if (!q)
+ 		mutex_unlock(&m->job_mutex);
+ 	xe_bb_free(bb, NULL);
+-err:
+-	drm_suballoc_free(sa_bo, NULL);
+ 	return ERR_PTR(err);
+ }
+ 
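
The xe_migrate reordering fixes the unwind chain: a failed drm_suballoc_new() used to land on a label that handed its ERR_PTR straight to drm_suballoc_free(). With err_sa slotted between err_job and err_bb, each failure now releases exactly what was acquired, in reverse acquisition order. A minimal sketch of the idiom, with malloc/free standing in for the driver objects:

#include <stdlib.h>

static int do_work(void)
{
	int err = 0;
	char *bb, *sa, *job;

	bb = malloc(16);
	if (!bb)
		return -1;

	sa = malloc(16);
	if (!sa) {
		err = -1;
		goto err_bb;	/* only the BB exists at this point */
	}

	job = malloc(16);
	if (!job) {
		err = -1;
		goto err_sa;	/* unwind in reverse acquisition order */
	}

	free(job);
	free(sa);
	free(bb);
	return 0;

err_sa:
	free(sa);
err_bb:
	free(bb);
	return err;
}

int main(void)
{
	return do_work() ? 1 : 0;
}
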
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index efcf78673e747..b6a995c852ab4 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -1530,6 +1530,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
+ 		},
+ 		.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ 	},
++	{
++		.ident = "Dell G15 5511",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
++		},
++		.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
++	},
+ 	{ }
+ };
+ 
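
Both DMI additions above (the Steam Deck "Galileo" orientation quirk and the Dell G15 5511 fan-control entry) extend sentinel-terminated match tables keyed on vendor/product strings. A toy lookup over the same kind of table — the struct layout and strings are simplified stand-ins, not the kernel's dmi_system_id:

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
	int driver_data;	/* stand-in for the kernel's .driver_data */
};

static const struct quirk quirks[] = {
	{ "Valve", "Galileo", 1 },
	{ "Dell Inc.", "Dell G15 5511", 2 },
	{ NULL, NULL, 0 }	/* sentinel, like the kernel's { } entry */
};

static const struct quirk *quirk_match(const char *vendor, const char *product)
{
	const struct quirk *q;

	for (q = quirks; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = quirk_match("Valve", "Galileo");

	if (q)
		printf("matched, data=%d\n", q->driver_data);
	else
		printf("no match\n");
	return 0;
}
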
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 79870dd7a0146..fafd999b4bcb2 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1059,7 +1059,7 @@ static const struct pci_device_id i801_ids[] = {
+ MODULE_DEVICE_TABLE(pci, i801_ids);
+ 
+ #if defined CONFIG_X86 && defined CONFIG_DMI
+-static unsigned char apanel_addr;
++static unsigned char apanel_addr __ro_after_init;
+ 
+ /* Scan the system ROM for the signature "FJKEYINF" */
+ static __init const void __iomem *bios_signature(const void __iomem *bios)
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index a12525b3186bc..f448505d54682 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -15,7 +15,6 @@
+ #include <linux/ioport.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+-#include <linux/timer.h>
+ #include <linux/completion.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
+ 	int			ret;		/* Return value */
+ 	int			mode;		/* Interface mode */
+ 	struct completion	complete;	/* I/O completion */
+-	struct timer_list	timer;		/* Timeout */
+ 	u8 *			buf;		/* Data buffer */
+ 	int			len;		/* Length of data buffer */
+ 	int			order;		/* RX Bytes to order via TX */
+@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+ 	return (timeout <= 0);
+ }
+ 
+-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
+-{
+-	struct timer_list *timer = &alg_data->mif.timer;
+-	unsigned long expires = msecs_to_jiffies(alg_data->timeout);
+-
+-	if (expires <= 1)
+-		expires = 2;
+-
+-	del_timer_sync(timer);
+-
+-	dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
+-		jiffies, expires);
+-
+-	timer->expires = jiffies + expires;
+-
+-	add_timer(timer);
+-}
+-
+ /**
+  * i2c_pnx_start - start a device
+  * @slave_addr:		slave address
+@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 				  I2C_REG_CTL(alg_data));
+ 
+-			del_timer_sync(&alg_data->mif.timer);
+-
+ 			dev_dbg(&alg_data->adapter.dev,
+ 				"%s(): Waking up xfer routine.\n",
+ 				__func__);
+@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+ 			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+ 			  I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		dev_dbg(&alg_data->adapter.dev,
+ 			"%s(): Waking up xfer routine after zero-xfer.\n",
+ 			__func__);
+@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
+ 				 mcntrl_drmie | mcntrl_daie);
+ 			iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-			/* Kill timer. */
+-			del_timer_sync(&alg_data->mif.timer);
+ 			complete(&alg_data->mif.complete);
+ 		}
+ 	}
+@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 			 mcntrl_drmie);
+ 		iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else if (stat & mstatus_nai) {
+ 		/* Slave did not acknowledge, generate a STOP */
+@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 		/* Our return value. */
+ 		alg_data->mif.ret = -EIO;
+ 
+-		/* Stop timer, to prevent timeout. */
+-		del_timer_sync(&alg_data->mif.timer);
+ 		complete(&alg_data->mif.complete);
+ 	} else {
+ 		/*
+@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void i2c_pnx_timeout(struct timer_list *t)
++static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
+ {
+-	struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
+ 	u32 ctl;
+ 
+ 	dev_err(&alg_data->adapter.dev,
+@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
+ 	iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 	wait_reset(alg_data);
+ 	alg_data->mif.ret = -EIO;
+-	complete(&alg_data->mif.complete);
+ }
+ 
+ static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
+@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	struct i2c_msg *pmsg;
+ 	int rc = 0, completed = 0, i;
+ 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
++	unsigned long time_left;
+ 	u32 stat;
+ 
+ 	dev_dbg(&alg_data->adapter.dev,
+@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+ 			__func__, alg_data->mif.mode, alg_data->mif.len);
+ 
+-		i2c_pnx_arm_timer(alg_data);
+ 
+ 		/* initialize the completion var */
+ 		init_completion(&alg_data->mif.complete);
+@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 			break;
+ 
+ 		/* Wait for completion */
+-		wait_for_completion(&alg_data->mif.complete);
++		time_left = wait_for_completion_timeout(&alg_data->mif.complete,
++							alg_data->timeout);
++		if (time_left == 0)
++			i2c_pnx_timeout(alg_data);
+ 
+ 		if (!(rc = alg_data->mif.ret))
+ 			completed++;
+@@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	alg_data->adapter.algo_data = alg_data;
+ 	alg_data->adapter.nr = pdev->id;
+ 
+-	alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
++	alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
++	if (alg_data->timeout <= 1)
++		alg_data->timeout = 2;
++
+ #ifdef CONFIG_OF
+ 	alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
+ 	if (pdev->dev.of_node) {
+@@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+ 	if (IS_ERR(alg_data->clk))
+ 		return PTR_ERR(alg_data->clk);
+ 
+-	timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
+-
+ 	snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
+ 		 "%s", pdev->name);
+ 
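
The i2c-pnx rework drops the driver's self-armed timer and instead bounds the wait itself: wait_for_completion_timeout() returns the time left, and only an expired wait triggers the (now completion-free) i2c_pnx_timeout() recovery. Probe converts the timeout to jiffies once and clamps it to a minimum of 2, as the old arming helper did. A rough userspace analogue of "wait with a deadline, then recover", using a POSIX semaphore (names are illustrative; link with -pthread on older glibc):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t done;

static void timeout_recovery(void)
{
	/* plays the role of i2c_pnx_timeout(): reset and report -EIO */
	fprintf(stderr, "transfer timed out, resetting controller\n");
}

int main(void)
{
	struct timespec ts;

	sem_init(&done, 0, 0);	/* nothing ever posts: forces the timeout */

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;		/* 1 s deadline, like alg_data->timeout */

	if (sem_timedwait(&done, &ts) == -1 && errno == ETIMEDOUT)
		timeout_recovery();

	sem_destroy(&done);
	return 0;
}
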
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index f5feca7fa9b9c..2ed749f50a29f 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++#define MAX_UMAD_RECV_LIST_SIZE 200000
++
+ enum {
+ 	IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
+ 	IB_UMAD_MAX_AGENTS = 32,
+@@ -113,6 +115,7 @@ struct ib_umad_file {
+ 	struct mutex		mutex;
+ 	struct ib_umad_port    *port;
+ 	struct list_head	recv_list;
++	atomic_t		recv_list_size;
+ 	struct list_head	send_list;
+ 	struct list_head	port_list;
+ 	spinlock_t		send_lock;
+@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+ 	return file->agents_dead ? NULL : file->agent[id];
+ }
+ 
+-static int queue_packet(struct ib_umad_file *file,
+-			struct ib_mad_agent *agent,
+-			struct ib_umad_packet *packet)
++static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
++			struct ib_umad_packet *packet, bool is_recv_mad)
+ {
+ 	int ret = 1;
+ 
+ 	mutex_lock(&file->mutex);
+ 
++	if (is_recv_mad &&
++	    atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
++		goto unlock;
++
+ 	for (packet->mad.hdr.id = 0;
+ 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+ 	     packet->mad.hdr.id++)
+ 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
+ 			list_add_tail(&packet->list, &file->recv_list);
++			atomic_inc(&file->recv_list_size);
+ 			wake_up_interruptible(&file->recv_wait);
+ 			ret = 0;
+ 			break;
+ 		}
+-
++unlock:
+ 	mutex_unlock(&file->mutex);
+ 
+ 	return ret;
+@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
+ 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
+ 		packet->length = IB_MGMT_MAD_HDR;
+ 		packet->mad.hdr.status = ETIMEDOUT;
+-		if (!queue_packet(file, agent, packet))
++		if (!queue_packet(file, agent, packet, false))
+ 			return;
+ 	}
+ 	kfree(packet);
+@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
+ 		rdma_destroy_ah_attr(&ah_attr);
+ 	}
+ 
+-	if (queue_packet(file, agent, packet))
++	if (queue_packet(file, agent, packet, true))
+ 		goto err2;
+ 	return;
+ 
+@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+ 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ 	list_del(&packet->list);
++	atomic_dec(&file->recv_list_size);
+ 
+ 	mutex_unlock(&file->mutex);
+ 
+@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 		/* Requeue packet */
+ 		mutex_lock(&file->mutex);
+ 		list_add(&packet->list, &file->recv_list);
++		atomic_inc(&file->recv_list_size);
+ 		mutex_unlock(&file->mutex);
+ 	} else {
+ 		if (packet->recv_wc)
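
The user_mad change caps the receive list at MAX_UMAD_RECV_LIST_SIZE so a userspace reader that never drains its file can no longer pin unbounded kernel memory; an atomic counter tracks the length because the read path also dequeues and occasionally requeues packets. A compact sketch of the same bounded-producer check using C11 atomics (the cap here is tiny just for demonstration):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_RECV_LIST_SIZE 4	/* tiny cap, for demonstration only */

static atomic_int recv_list_size;

/* Returns 0 on success, 1 when the packet must be dropped. */
static int queue_packet(int is_recv)
{
	if (is_recv &&
	    atomic_load(&recv_list_size) > MAX_RECV_LIST_SIZE)
		return 1;	/* over the cap: reject, as the fix does */

	atomic_fetch_add(&recv_list_size, 1);
	return 0;
}

int main(void)
{
	int dropped = 0;

	for (int i = 0; i < 10; i++)
		dropped += queue_packet(1);
	printf("queued %d, dropped %d\n",
	       atomic_load(&recv_list_size), dropped);
	return 0;
}
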
+diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
+index 16231fe080b00..609a5f01761bd 100644
+--- a/drivers/input/ff-core.c
++++ b/drivers/input/ff-core.c
+@@ -9,8 +9,10 @@
+ /* #define DEBUG */
+ 
+ #include <linux/input.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/overflow.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
+@@ -315,9 +317,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
+ 		return -EINVAL;
+ 	}
+ 
+-	ff_dev_size = sizeof(struct ff_device) +
+-				max_effects * sizeof(struct file *);
+-	if (ff_dev_size < max_effects) /* overflow */
++	ff_dev_size = struct_size(ff, effect_owners, max_effects);
++	if (ff_dev_size == SIZE_MAX) /* overflow */
+ 		return -EINVAL;
+ 
+ 	ff = kzalloc(ff_dev_size, GFP_KERNEL);
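
The ff-core fix replaces an open-coded size computation whose overflow test (ff_dev_size < max_effects) could miss wraps, with the kernel's struct_size() helper, which saturates to SIZE_MAX on overflow so the allocation fails cleanly. A standalone approximation using the GCC/Clang checked-arithmetic builtins — the saturating helper below is hand-rolled, not the kernel macro:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ff_device {
	int max_effects;
	void *effect_owners[];	/* flexible array, as in input_ff_create() */
};

/* Rough stand-in for struct_size(): SIZE_MAX on overflow. */
static size_t checked_struct_size(size_t hdr, size_t elem, size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(elem, n, &bytes) ||
	    __builtin_add_overflow(bytes, hdr, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	size_t ok = checked_struct_size(sizeof(struct ff_device),
					sizeof(void *), 96);
	size_t bad = checked_struct_size(sizeof(struct ff_device),
					 sizeof(void *), SIZE_MAX / 2);

	printf("ok=%zu bad-is-saturated=%d\n", ok, bad == SIZE_MAX);
	return 0;
}
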
+diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
+index 0216afed3b6e7..decfca447d8a7 100644
+--- a/drivers/leds/leds-an30259a.c
++++ b/drivers/leds/leds-an30259a.c
+@@ -283,7 +283,10 @@ static int an30259a_probe(struct i2c_client *client)
+ 	if (err < 0)
+ 		return err;
+ 
+-	mutex_init(&chip->mutex);
++	err = devm_mutex_init(&client->dev, &chip->mutex);
++	if (err)
++		return err;
++
+ 	chip->client = client;
+ 	i2c_set_clientdata(client, chip);
+ 
+@@ -317,17 +320,9 @@ static int an30259a_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ exit:
+-	mutex_destroy(&chip->mutex);
+ 	return err;
+ }
+ 
+-static void an30259a_remove(struct i2c_client *client)
+-{
+-	struct an30259a *chip = i2c_get_clientdata(client);
+-
+-	mutex_destroy(&chip->mutex);
+-}
+-
+ static const struct of_device_id an30259a_match_table[] = {
+ 	{ .compatible = "panasonic,an30259a", },
+ 	{ /* sentinel */ },
+@@ -347,7 +342,6 @@ static struct i2c_driver an30259a_driver = {
+ 		.of_match_table = an30259a_match_table,
+ 	},
+ 	.probe = an30259a_probe,
+-	.remove = an30259a_remove,
+ 	.id_table = an30259a_id,
+ };
+ 
+diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
+index 5595788d98d20..1b70de72376cc 100644
+--- a/drivers/leds/leds-mlxreg.c
++++ b/drivers/leds/leds-mlxreg.c
+@@ -256,6 +256,7 @@ static int mlxreg_led_probe(struct platform_device *pdev)
+ {
+ 	struct mlxreg_core_platform_data *led_pdata;
+ 	struct mlxreg_led_priv_data *priv;
++	int err;
+ 
+ 	led_pdata = dev_get_platdata(&pdev->dev);
+ 	if (!led_pdata) {
+@@ -267,26 +268,21 @@ static int mlxreg_led_probe(struct platform_device *pdev)
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	mutex_init(&priv->access_lock);
++	err = devm_mutex_init(&pdev->dev, &priv->access_lock);
++	if (err)
++		return err;
++
+ 	priv->pdev = pdev;
+ 	priv->pdata = led_pdata;
+ 
+ 	return mlxreg_led_config(priv);
+ }
+ 
+-static void mlxreg_led_remove(struct platform_device *pdev)
+-{
+-	struct mlxreg_led_priv_data *priv = dev_get_drvdata(&pdev->dev);
+-
+-	mutex_destroy(&priv->access_lock);
+-}
+-
+ static struct platform_driver mlxreg_led_driver = {
+ 	.driver = {
+ 	    .name = "leds-mlxreg",
+ 	},
+ 	.probe = mlxreg_led_probe,
+-	.remove_new = mlxreg_led_remove,
+ };
+ 
+ module_platform_driver(mlxreg_led_driver);
+diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
+index 297f9520ebf9d..8a4e392c88965 100644
+--- a/drivers/media/dvb-frontends/as102_fe_types.h
++++ b/drivers/media/dvb-frontends/as102_fe_types.h
+@@ -174,6 +174,6 @@ struct as10x_register_addr {
+ 	uint32_t addr;
+ 	/* register mode access */
+ 	uint8_t mode;
+-};
++} __packed;
+ 
+ #endif
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index 5d5e4e9e4422e..3e725cdcc66bd 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	struct tda10048_config *config = &state->config;
+ 	int i;
+ 	u32 if_freq_khz;
++	u64 sample_freq;
+ 
+ 	dprintk(1, "%s(bw = %d)\n", __func__, bw);
+ 
+@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+ 	dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+ 
+ 	/* Calculate the sample frequency */
+-	state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+-	state->sample_freq /= (state->pll_nfactor + 1);
+-	state->sample_freq /= (state->pll_pfactor + 4);
++	sample_freq = state->xtal_hz;
++	sample_freq *= state->pll_mfactor + 45;
++	do_div(sample_freq, state->pll_nfactor + 1);
++	do_div(sample_freq, state->pll_pfactor + 4);
++	state->sample_freq = sample_freq;
+ 	dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+ 
+ 	/* Update the I/F */
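
The tda10048 change widens the sample-frequency arithmetic: xtal_hz * (pll_mfactor + 45) can exceed 32 bits for plausible register values, so the intermediate product now lives in a u64 and do_div() performs the divisions. A self-contained illustration of the overflow being avoided (the register values below are made up):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t xtal_hz = 16000000, m = 255, n = 5, p = 0;

	/* 32-bit product wraps: 16e6 * 300 = 4.8e9 > UINT32_MAX */
	uint32_t narrow = xtal_hz * (m + 45);

	/* widen first, as the patch does before its do_div() calls */
	uint64_t wide = (uint64_t)xtal_hz * (m + 45);

	wide /= n + 1;
	wide /= p + 4;

	printf("narrow=%" PRIu32 " wide=%" PRIu64 "\n",
	       narrow / ((n + 1) * (p + 4)), wide);
	return 0;
}
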
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index a348344879433..fd928787207ed 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
+ 
+ 	OscFreq = (u64) freq * (u64) Div;
+ 	OscFreq *= (u64) 16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	MainDiv = OscFreq;
+ 
+ 	state->m_Regs[MPD] = PostDiv & 0x77;
+@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
+ 	OscFreq = (u64)freq * (u64)Div;
+ 	/* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ 	OscFreq *= (u64)16384;
+-	do_div(OscFreq, (u64)16000000);
++	do_div(OscFreq, 16000000);
+ 	CalDiv = OscFreq;
+ 
+ 	state->m_Regs[CPD] = PostDiv;
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index f250640729ca4..b947a55281f0e 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -326,7 +326,7 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
+ 	}
+ 
+ 	dev_dbg(&client->dev, "detect link_freq = %lld Hz", link_freq);
+-	do_div(ui_4, link_freq);
++	ui_4 = div64_u64(ui_4, link_freq);
+ 	bridge->r.clk_lane_reg1 |= ui_4 << 2;
+ 
+ 	return 0;
+diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
+index d676adc4401bb..edf79107adc51 100644
+--- a/drivers/media/i2c/tc358746.c
++++ b/drivers/media/i2c/tc358746.c
+@@ -844,8 +844,7 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
+ 			continue;
+ 
+ 		tmp = fout * postdiv;
+-		do_div(tmp, fin);
+-		mul = tmp;
++		mul = div64_ul(tmp, fin);
+ 		if (mul > 511)
+ 			continue;
+ 
+diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+index 2b6a5adbc4199..b0e2e59f61b5d 100644
+--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
+@@ -1023,18 +1023,26 @@ static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *i
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->mv[i]);
++		if (instance->mv[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->mv[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->seg); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->seg[i]);
++		if (instance->seg[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->seg[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(instance->cdf); i++)
+-		mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++		if (instance->cdf[i].va)
++			mtk_vcodec_mem_free(ctx, &instance->cdf[i]);
++
+ 
+-	mtk_vcodec_mem_free(ctx, &instance->tile);
+-	mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
+-	mtk_vcodec_mem_free(ctx, &instance->cdf_table);
+-	mtk_vcodec_mem_free(ctx, &instance->iq_table);
++	if (instance->tile.va)
++		mtk_vcodec_mem_free(ctx, &instance->tile);
++	if (instance->cdf_temp.va)
++		mtk_vcodec_mem_free(ctx, &instance->cdf_temp);
++	if (instance->cdf_table.va)
++		mtk_vcodec_mem_free(ctx, &instance->cdf_table);
++	if (instance->iq_table.va)
++		mtk_vcodec_mem_free(ctx, &instance->iq_table);
+ 
+ 	instance->level = AV1_RES_NONE;
+ }
+diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+index a68dac72c4e42..f8145998fcaf7 100644
+--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+@@ -301,11 +301,12 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
+ 	 * other buffers need to be freed by AP.
+ 	 */
+ 	for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+-		if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
++		if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME && inst->work_bufs[i].va)
+ 			mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ 	}
+ 
+-	mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
++	if (inst->pps_buf.va)
++		mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+ }
+ 
+ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 3af594134a6de..6ddc205133939 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2412,7 +2412,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+ 
+-	return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+@@ -2485,8 +2490,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+ 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+ 
+-	if (adap->fe_adap[0].fe == NULL)
++	if (!adap->fe_adap[0].fe) {
++		release_firmware(state->frontend_firmware);
+ 		return -ENODEV;
++	}
+ 
+ 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+ 	dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+@@ -2494,7 +2501,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+ 	fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+ 	dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
+ 
+-	return fe_slave == NULL ?  -ENODEV : 0;
++	if (!fe_slave) {
++		release_firmware(state->frontend_firmware);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+ 
+ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index b3bb1805829ad..f31d3835430e7 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+ 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ 	struct dw2102_state *state;
++	int j;
+ 
+ 	if (!d)
+ 		return -ENODEV;
+@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 		return -EAGAIN;
+ 	}
+ 
+-	switch (num) {
+-	case 1:
+-		switch (msg[0].addr) {
++	j = 0;
++	while (j < num) {
++		switch (msg[j].addr) {
+ 		case SU3000_STREAM_CTRL:
+-			state->data[0] = msg[0].buf[0] + 0x36;
++			state->data[0] = msg[j].buf[0] + 0x36;
+ 			state->data[1] = 3;
+ 			state->data[2] = 0;
+ 			if (dvb_usb_generic_rw(d, state->data, 3,
+@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ 			if (dvb_usb_generic_rw(d, state->data, 1,
+ 					state->data, 2, 0) < 0)
+ 				err("i2c transfer failed.");
+-			msg[0].buf[1] = state->data[0];
+-			msg[0].buf[0] = state->data[1];
++			msg[j].buf[1] = state->data[0];
++			msg[j].buf[0] = state->data[1];
+ 			break;
+ 		default:
+-			if (3 + msg[0].len > sizeof(state->data)) {
+-				warn("i2c wr: len=%d is too big!\n",
+-				     msg[0].len);
++			/* if the current write msg is followed by another
++			 * read msg to/from the same address
++			 */
++			if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
++			    (msg[j].addr == msg[j+1].addr)) {
++				/* join both i2c msgs to one usb read command */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: write len=%d is too big!\n",
++					    msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++				if (1 + msg[j+1].len > sizeof(state->data)) {
++					warn("i2c combined wr/rd: read len=%d is too big!\n",
++					    msg[j+1].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = msg[j].len;
++				state->data[2] = msg[j+1].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
++					state->data, msg[j+1].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
++				j++;
++				break;
++			}
++
++			if (msg[j].flags & I2C_M_RD) {
++				/* single read */
++				if (4 + msg[j].len > sizeof(state->data)) {
++					warn("i2c rd: len=%d is too big!\n", msg[j].len);
++					num = -EOPNOTSUPP;
++					break;
++				}
++
++				state->data[0] = 0x09;
++				state->data[1] = 0;
++				state->data[2] = msg[j].len;
++				state->data[3] = msg[j].addr;
++				memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++				if (dvb_usb_generic_rw(d, state->data, 4,
++					state->data, msg[j].len + 1, 0) < 0)
++					err("i2c transfer failed.");
++
++				memcpy(msg[j].buf, &state->data[1], msg[j].len);
++				break;
++			}
++
++			/* single write */
++			if (3 + msg[j].len > sizeof(state->data)) {
++				warn("i2c wr: len=%d is too big!\n", msg[j].len);
+ 				num = -EOPNOTSUPP;
+ 				break;
+ 			}
+ 
+-			/* always i2c write*/
+ 			state->data[0] = 0x08;
+-			state->data[1] = msg[0].addr;
+-			state->data[2] = msg[0].len;
++			state->data[1] = msg[j].addr;
++			state->data[2] = msg[j].len;
+ 
+-			memcpy(&state->data[3], msg[0].buf, msg[0].len);
++			memcpy(&state->data[3], msg[j].buf, msg[j].len);
+ 
+-			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
++			if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+ 						state->data, 1, 0) < 0)
+ 				err("i2c transfer failed.");
++		} // switch
++		j++;
+ 
+-		}
+-		break;
+-	case 2:
+-		/* always i2c read */
+-		if (4 + msg[0].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[0].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-		if (1 + msg[1].len > sizeof(state->data)) {
+-			warn("i2c rd: len=%d is too big!\n",
+-			     msg[1].len);
+-			num = -EOPNOTSUPP;
+-			break;
+-		}
+-
+-		state->data[0] = 0x09;
+-		state->data[1] = msg[0].len;
+-		state->data[2] = msg[1].len;
+-		state->data[3] = msg[0].addr;
+-		memcpy(&state->data[4], msg[0].buf, msg[0].len);
+-
+-		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+-					state->data, msg[1].len + 1, 0) < 0)
+-			err("i2c transfer failed.");
+-
+-		memcpy(msg[1].buf, &state->data[1], msg[1].len);
+-		break;
+-	default:
+-		warn("more than 2 i2c messages at a time is not handled yet.");
+-		break;
+-	}
++	} // while
+ 	mutex_unlock(&d->data_mutex);
+ 	mutex_unlock(&d->i2c_mutex);
+ 	return num;
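
The dw2102 rewrite generalizes su3000_i2c_transfer() from hard-coded "one or two messages" cases to a loop over an arbitrary message array, folding a write immediately followed by a read to the same address into a single combined USB command. A toy walk over a message list showing just the pairing decision (flags and structs heavily simplified):

#include <stdio.h>

struct msg {
	int addr;
	int is_read;	/* stand-in for I2C_M_RD */
};

int main(void)
{
	struct msg msgs[] = {
		{ 0x68, 0 },	/* write register index */
		{ 0x68, 1 },	/* read back from the same chip */
		{ 0x51, 0 },	/* unrelated single write */
	};
	int num = 3, j = 0;

	while (j < num) {
		if (j + 1 < num && msgs[j + 1].is_read &&
		    !msgs[j].is_read && msgs[j].addr == msgs[j + 1].addr) {
			printf("combined wr/rd @0x%02x\n", msgs[j].addr);
			j++;	/* consume the paired read too */
		} else if (msgs[j].is_read) {
			printf("single rd @0x%02x\n", msgs[j].addr);
		} else {
			printf("single wr @0x%02x\n", msgs[j].addr);
		}
		j++;
	}
	return 0;
}
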
+diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
+index 8e1de1e8bd127..a6e450181fd01 100644
+--- a/drivers/media/usb/s2255/s2255drv.c
++++ b/drivers/media/usb/s2255/s2255drv.c
+@@ -247,7 +247,7 @@ struct s2255_vc {
+ struct s2255_dev {
+ 	struct s2255_vc         vc[MAX_CHANNELS];
+ 	struct v4l2_device      v4l2_dev;
+-	atomic_t                num_channels;
++	refcount_t		num_channels;
+ 	int			frames;
+ 	struct mutex		lock;	/* channels[].vdev.lock */
+ 	struct mutex		cmdlock; /* protects cmdbuf */
+@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
+ 		container_of(vdev, struct s2255_vc, vdev);
+ 
+ 	dprintk(dev, 4, "%s, chnls: %d\n", __func__,
+-		atomic_read(&dev->num_channels));
++		refcount_read(&dev->num_channels));
+ 
+ 	v4l2_ctrl_handler_free(&vc->hdl);
+ 
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	return;
+ }
+@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 				"failed to register video device!\n");
+ 			break;
+ 		}
+-		atomic_inc(&dev->num_channels);
++		refcount_inc(&dev->num_channels);
+ 		v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ 			  video_device_node_name(&vc->vdev));
+ 
+@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+ 	pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+ 		S2255_VERSION);
+ 	/* if no channels registered, return error and probe will fail*/
+-	if (atomic_read(&dev->num_channels) == 0) {
++	if (refcount_read(&dev->num_channels) == 0) {
+ 		v4l2_device_unregister(&dev->v4l2_dev);
+ 		return ret;
+ 	}
+-	if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
++	if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
+ 		pr_warn("s2255: Not all channels available.\n");
+ 	return 0;
+ }
+@@ -2221,7 +2221,7 @@ static int s2255_probe(struct usb_interface *interface,
+ 		goto errorFWDATA1;
+ 	}
+ 
+-	atomic_set(&dev->num_channels, 0);
++	refcount_set(&dev->num_channels, 0);
+ 	dev->pid = id->idProduct;
+ 	dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
+ 	if (!dev->fw_data)
+@@ -2341,12 +2341,12 @@ static void s2255_disconnect(struct usb_interface *interface)
+ {
+ 	struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
+ 	int i;
+-	int channels = atomic_read(&dev->num_channels);
++	int channels = refcount_read(&dev->num_channels);
+ 	mutex_lock(&dev->lock);
+ 	v4l2_device_disconnect(&dev->v4l2_dev);
+ 	mutex_unlock(&dev->lock);
+ 	/*see comments in the uvc_driver.c usb disconnect function */
+-	atomic_inc(&dev->num_channels);
++	refcount_inc(&dev->num_channels);
+ 	/* unregister each video device. */
+ 	for (i = 0; i < channels; i++)
+ 		video_unregister_device(&dev->vc[i].vdev);
+@@ -2359,7 +2359,7 @@ static void s2255_disconnect(struct usb_interface *interface)
+ 		dev->vc[i].vidstatus_ready = 1;
+ 		wake_up(&dev->vc[i].wait_vidstatus);
+ 	}
+-	if (atomic_dec_and_test(&dev->num_channels))
++	if (refcount_dec_and_test(&dev->num_channels))
+ 		s2255_destroy(dev);
+ 	dev_info(&interface->dev, "%s\n", __func__);
+ }
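
The s2255 conversion swaps the bare atomic_t channel counter for refcount_t, whose saturating semantics turn underflow and overflow into warnings instead of silent wraps, with refcount_dec_and_test() still gating the final s2255_destroy(). A small C11 analogue of "destroy on last put" — without the saturation checks, which are exactly what refcount_t adds:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_channels;

static void destroy(void)
{
	printf("last reference dropped, destroying device\n");
}

static void put_channel(void)
{
	/* fetch_sub returns the old value: old == 1 means we hit zero */
	if (atomic_fetch_sub(&num_channels, 1) == 1)
		destroy();
}

int main(void)
{
	atomic_store(&num_channels, 2);
	put_channel();	/* 2 -> 1, nothing happens */
	put_channel();	/* 1 -> 0, destroy() runs exactly once */
	return 0;
}
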
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index d7dbbd469b892..53e16d39af4bf 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1093,28 +1093,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 				   unsigned int offset_in_page)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+-	/* Make sure the offset is less than the actual page size. */
+-	if (offset_in_page > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	/* Bypass all checks during NAND identification */
++	if (likely(!ident_stage)) {
++		/* Make sure the offset is less than the actual page size. */
++		if (offset_in_page > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/*
+-	 * On small page NANDs, there's a dedicated command to access the OOB
+-	 * area, and the column address is relative to the start of the OOB
+-	 * area, not the start of the page. Asjust the address accordingly.
+-	 */
+-	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+-		offset_in_page -= mtd->writesize;
++		/*
++		 * On small page NANDs, there's a dedicated command to access the OOB
++		 * area, and the column address is relative to the start of the OOB
++		 * area, not the start of the page. Adjust the address accordingly.
++		 */
++		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
++			offset_in_page -= mtd->writesize;
+ 
+-	/*
+-	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+-	 * wide, then it must be divided by 2.
+-	 */
+-	if (chip->options & NAND_BUSWIDTH_16) {
+-		if (WARN_ON(offset_in_page % 2))
+-			return -EINVAL;
++		/*
++		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
++		 * wide, then it must be divided by 2.
++		 */
++		if (chip->options & NAND_BUSWIDTH_16) {
++			if (WARN_ON(offset_in_page % 2))
++				return -EINVAL;
+ 
+-		offset_in_page /= 2;
++			offset_in_page /= 2;
++		}
+ 	}
+ 
+ 	addrs[0] = offset_in_page;
+@@ -1123,7 +1127,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ 	 * need 2
+ 	 */
+-	if (mtd->writesize <= 512)
++	if (!ident_stage && mtd->writesize <= 512)
+ 		return 1;
+ 
+ 	addrs[1] = offset_in_page >> 8;
+@@ -1436,16 +1440,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
+ 			       unsigned int len, bool force_8bit)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	bool ident_stage = !mtd->writesize;
+ 
+ 	if (len && !buf)
+ 		return -EINVAL;
+ 
+-	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+-		return -EINVAL;
++	if (!ident_stage) {
++		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++			return -EINVAL;
+ 
+-	/* Small page NANDs do not support column change. */
+-	if (mtd->writesize <= 512)
+-		return -ENOTSUPP;
++		/* Small page NANDs do not support column change. */
++		if (mtd->writesize <= 512)
++			return -ENOTSUPP;
++	}
+ 
+ 	if (nand_has_exec_op(chip)) {
+ 		const struct nand_interface_config *conf =
+@@ -2173,7 +2180,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
+ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ 		      bool force_8bit, bool check_only)
+ {
+-	if (!len || !buf)
++	if (!len || (!check_only && !buf))
+ 		return -EINVAL;
+ 
+ 	if (nand_has_exec_op(chip)) {
+@@ -6301,6 +6308,7 @@ static const struct nand_ops rawnand_ops = {
+ static int nand_scan_tail(struct nand_chip *chip)
+ {
+ 	struct mtd_info *mtd = nand_to_mtd(chip);
++	struct nand_device *base = &chip->base;
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 	int ret, i;
+ 
+@@ -6445,9 +6453,13 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	if (!ecc->write_oob_raw)
+ 		ecc->write_oob_raw = ecc->write_oob;
+ 
+-	/* propagate ecc info to mtd_info */
++	/* Propagate ECC info to the generic NAND and MTD layers */
+ 	mtd->ecc_strength = ecc->strength;
++	if (!base->ecc.ctx.conf.strength)
++		base->ecc.ctx.conf.strength = ecc->strength;
+ 	mtd->ecc_step_size = ecc->size;
++	if (!base->ecc.ctx.conf.step_size)
++		base->ecc.ctx.conf.step_size = ecc->size;
+ 
+ 	/*
+ 	 * Set the number of read / write steps for one page depending on ECC
+@@ -6455,6 +6467,8 @@ static int nand_scan_tail(struct nand_chip *chip)
+ 	 */
+ 	if (!ecc->steps)
+ 		ecc->steps = mtd->writesize / ecc->size;
++	if (!base->ecc.ctx.nsteps)
++		base->ecc.ctx.nsteps = ecc->steps;
+ 	if (ecc->steps * ecc->size != mtd->writesize) {
+ 		WARN(1, "Invalid ECC parameters\n");
+ 		ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index 7baaef69d70ad..55580447633be 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -420,13 +420,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+ 	u32 rate, tc2rw, trwpw, trw2c;
+ 	u32 temp;
+ 
+-	if (target < 0)
+-		return 0;
+-
+ 	timings = nand_get_sdr_timings(conf);
+ 	if (IS_ERR(timings))
+ 		return -EOPNOTSUPP;
+ 
++	if (target < 0)
++		return 0;
++
+ 	if (IS_ERR(nfc->nfc_clk))
+ 		rate = clk_get_rate(nfc->ahb_clk);
+ 	else
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 4cdbc7e084f4b..fea1d87a97539 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1214,9 +1214,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ 	__be32 target;
+ 
+ 	if (newval->string) {
+-		if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+-			netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+-				   &target);
++		if (strlen(newval->string) < 1 ||
++		    !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
++			netdev_err(bond->dev, "invalid ARP target specified\n");
+ 			return ret;
+ 		}
+ 		if (newval->string[0] == '+')
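
The bonding fix validates before dereferencing: with an empty option string, the old code read past the buffer at newval->string + 1 and then printed whatever in4_pton() left in target. The new order checks the length first and logs without echoing the unparsed value. A userspace sketch of the same parse, with inet_pton() standing in for in4_pton() (prefix handling simplified):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int parse_target(const char *s)
{
	struct in_addr target;

	/* length check first, so s + 1 can never run past the buffer */
	if (strlen(s) < 1 || inet_pton(AF_INET, s + 1, &target) != 1) {
		fprintf(stderr, "invalid ARP target specified\n");
		return -1;
	}
	printf("%s target %s\n", s[0] == '+' ? "adding" : "removing",
	       inet_ntoa(target));
	return 0;
}

int main(void)
{
	parse_target("+192.0.2.1");
	parse_target("");	/* rejected by the length check alone */
	return 0;
}
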
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 8faf8a462c055..ffc3e93292501 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -125,6 +125,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
+ 
+ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ 	.quirks = 0,
++	.family = KVASER_LEAF,
+ 	.ops = &kvaser_usb_leaf_dev_ops,
+ };
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5a202edfec371..80741e506f422 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+ {
+ 	struct mv88e6xxx_mdio_bus *mdio_bus;
+ 
+-	mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+-				    list);
++	mdio_bus = list_first_entry_or_null(&chip->mdios,
++					    struct mv88e6xxx_mdio_bus, list);
+ 	if (!mdio_bus)
+ 		return NULL;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index e2a4e1088b7f4..9580ab83d387c 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1262,7 +1262,7 @@ enum {
+ 
+ struct bnx2x_fw_stats_req {
+ 	struct stats_query_header hdr;
+-	struct stats_query_entry query[FP_SB_MAX_E1x+
++	struct stats_query_entry query[FP_SB_MAX_E2 +
+ 		BNX2X_FIRST_QUEUE_QUERY_IDX];
+ };
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 0fab62a56f3b3..2b7936b3fb3ef 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -12436,7 +12436,11 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
+ 	if (!BNXT_NEW_RM(bp))
+ 		return true;
+ 
+-	if (hwr.vnic == bp->hw_resc.resv_vnics &&
++	/* Do not reduce VNIC and RSS ctx reservations.  There is a FW
++	 * issue that will mess up the default VNIC if we reduce the
++	 * reservations.
++	 */
++	if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ 	    hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
+ 		return true;
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index 9aebfb843d9d1..ae90c09c56a89 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -8,6 +8,7 @@
+ #include "gve.h"
+ #include "gve_adminq.h"
+ #include "gve_dqo.h"
++#include "gve_utils.h"
+ 
+ static void gve_get_drvinfo(struct net_device *netdev,
+ 			    struct ethtool_drvinfo *info)
+@@ -165,6 +166,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 	struct stats *report_stats;
+ 	int *rx_qid_to_stats_idx;
+ 	int *tx_qid_to_stats_idx;
++	int num_stopped_rxqs = 0;
++	int num_stopped_txqs = 0;
+ 	struct gve_priv *priv;
+ 	bool skip_nic_stats;
+ 	unsigned int start;
+@@ -181,12 +184,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 					    sizeof(int), GFP_KERNEL);
+ 	if (!rx_qid_to_stats_idx)
+ 		return;
++	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
++		rx_qid_to_stats_idx[ring] = -1;
++		if (!gve_rx_was_added_to_block(priv, ring))
++			num_stopped_rxqs++;
++	}
+ 	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
+ 					    sizeof(int), GFP_KERNEL);
+ 	if (!tx_qid_to_stats_idx) {
+ 		kfree(rx_qid_to_stats_idx);
+ 		return;
+ 	}
++	for (ring = 0; ring < num_tx_queues; ring++) {
++		tx_qid_to_stats_idx[ring] = -1;
++		if (!gve_tx_was_added_to_block(priv, ring))
++			num_stopped_txqs++;
++	}
++
+ 	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ 	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ 	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+@@ -260,7 +274,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 	/* For rx cross-reporting stats, start from nic rx stats in report */
+ 	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
+ 		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+-	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
++	/* The boundary between driver stats and NIC stats shifts if there are
++	 * stopped queues.
++	 */
++	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
++		NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
++	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
++		(priv->rx_cfg.num_queues - num_stopped_rxqs) +
+ 		base_stats_idx;
+ 	/* Preprocess the stats report for rx, map queue id to start index */
+ 	skip_nic_stats = false;
+@@ -274,6 +294,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			skip_nic_stats = true;
+ 			break;
+ 		}
++		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
++			net_err_ratelimited("Invalid rxq id in NIC stats\n");
++			continue;
++		}
+ 		rx_qid_to_stats_idx[queue_id] = stats_idx;
+ 	}
+ 	/* walk RX rings */
+@@ -308,11 +332,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			data[i++] = rx->rx_copybreak_pkt;
+ 			data[i++] = rx->rx_copied_pkt;
+ 			/* stats from NIC */
+-			if (skip_nic_stats) {
++			stats_idx = rx_qid_to_stats_idx[ring];
++			if (skip_nic_stats || stats_idx < 0) {
+ 				/* skip NIC rx stats */
+ 				i += NIC_RX_STATS_REPORT_NUM;
+ 			} else {
+-				stats_idx = rx_qid_to_stats_idx[ring];
+ 				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ 					u64 value =
+ 						be64_to_cpu(report_stats[stats_idx + j].value);
+@@ -338,7 +362,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 
+ 	/* For tx cross-reporting stats, start from nic tx stats in report */
+ 	base_stats_idx = max_stats_idx;
+-	max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
++	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
++		(num_tx_queues - num_stopped_txqs) +
+ 		max_stats_idx;
+ 	/* Preprocess the stats report for tx, map queue id to start index */
+ 	skip_nic_stats = false;
+@@ -352,6 +377,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			skip_nic_stats = true;
+ 			break;
+ 		}
++		if (queue_id < 0 || queue_id >= num_tx_queues) {
++			net_err_ratelimited("Invalid txq id in NIC stats\n");
++			continue;
++		}
+ 		tx_qid_to_stats_idx[queue_id] = stats_idx;
+ 	}
+ 	/* walk TX rings */
+@@ -383,11 +412,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
+ 			data[i++] = gve_tx_load_event_counter(priv, tx);
+ 			data[i++] = tx->dma_mapping_error;
+ 			/* stats from NIC */
+-			if (skip_nic_stats) {
++			stats_idx = tx_qid_to_stats_idx[ring];
++			if (skip_nic_stats || stats_idx < 0) {
+ 				/* skip NIC tx stats */
+ 				i += NIC_TX_STATS_REPORT_NUM;
+ 			} else {
+-				stats_idx = tx_qid_to_stats_idx[ring];
+ 				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ 					u64 value =
+ 						be64_to_cpu(report_stats[stats_idx + j].value);
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 3692fce201959..334f652c60601 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6363,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ 		mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ 		ew32(EXTCNF_CTRL, mac_data);
+ 
+-		/* Enable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data |= BIT(22);
+-		ew32(FEXTNVM7, mac_data);
+-
+ 		/* Disable disconnected cable conditioning for Power Gating */
+ 		mac_data = er32(DPGFR);
+ 		mac_data |= BIT(2);
+ 		ew32(DPGFR, mac_data);
+ 
+-		/* Don't wake from dynamic Power Gating with clock request */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data |= BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+-		/* Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data &= ~BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Enable K1 off to enable mPHY Power Gating */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data |= BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Enable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data |= BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+ 		/* Enable the Dynamic Clock Gating in the DMA and MAC */
+ 		mac_data = er32(CTRL_EXT);
+ 		mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ 		ew32(CTRL_EXT, mac_data);
+-
+-		/* No MAC DPG gating SLP_S0 in modern standby
+-		 * Switch the logic of the lanphypc to use PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data |= BIT(7);
+-		ew32(FEXTNVM5, mac_data);
+ 	}
+ 
++	/* Enable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data |= BIT(22);
++	ew32(FEXTNVM7, mac_data);
++
++	/* Don't wake from dynamic Power Gating with clock request */
++	mac_data = er32(FEXTNVM12);
++	mac_data |= BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data &= ~BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Enable K1 off to enable mPHY Power Gating */
++	mac_data = er32(FEXTNVM6);
++	mac_data |= BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Enable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data |= BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* No MAC DPG gating SLP_S0 in modern standby
++	 * Switch the logic of the lanphypc to use PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data |= BIT(7);
++	ew32(FEXTNVM5, mac_data);
++
+ 	/* Disable the time synchronization clock */
+ 	mac_data = er32(FEXTNVM7);
+ 	mac_data |= BIT(31);
+@@ -6498,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	} else {
+ 		/* Request driver unconfigure the device from S0ix */
+ 
+-		/* Disable the Dynamic Power Gating in the MAC */
+-		mac_data = er32(FEXTNVM7);
+-		mac_data &= 0xFFBFFFFF;
+-		ew32(FEXTNVM7, mac_data);
+-
+-		/* Disable mPHY power gating for any link and speed */
+-		mac_data = er32(FEXTNVM8);
+-		mac_data &= ~BIT(9);
+-		ew32(FEXTNVM8, mac_data);
+-
+-		/* Disable K1 off */
+-		mac_data = er32(FEXTNVM6);
+-		mac_data &= ~BIT(31);
+-		ew32(FEXTNVM6, mac_data);
+-
+-		/* Disable Ungate PGCB clock */
+-		mac_data = er32(FEXTNVM9);
+-		mac_data |= BIT(28);
+-		ew32(FEXTNVM9, mac_data);
+-
+-		/* Cancel not waking from dynamic
+-		 * Power Gating with clock request
+-		 */
+-		mac_data = er32(FEXTNVM12);
+-		mac_data &= ~BIT(12);
+-		ew32(FEXTNVM12, mac_data);
+-
+ 		/* Cancel disable disconnected cable conditioning
+ 		 * for Power Gating
+ 		 */
+@@ -6537,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 		mac_data &= 0xFFF7FFFF;
+ 		ew32(CTRL_EXT, mac_data);
+ 
+-		/* Revert the lanphypc logic to use the internal Gbe counter
+-		 * and not the PMC counter
+-		 */
+-		mac_data = er32(FEXTNVM5);
+-		mac_data &= 0xFFFFFF7F;
+-		ew32(FEXTNVM5, mac_data);
+-
+ 		/* Enable the periodic inband message,
+ 		 * Request PCIe clock in K1 page770_17[10:9] =01b
+ 		 */
+@@ -6581,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	mac_data &= ~BIT(31);
+ 	mac_data |= BIT(0);
+ 	ew32(FEXTNVM7, mac_data);
++
++	/* Disable the Dynamic Power Gating in the MAC */
++	mac_data = er32(FEXTNVM7);
++	mac_data &= 0xFFBFFFFF;
++	ew32(FEXTNVM7, mac_data);
++
++	/* Disable mPHY power gating for any link and speed */
++	mac_data = er32(FEXTNVM8);
++	mac_data &= ~BIT(9);
++	ew32(FEXTNVM8, mac_data);
++
++	/* Disable K1 off */
++	mac_data = er32(FEXTNVM6);
++	mac_data &= ~BIT(31);
++	ew32(FEXTNVM6, mac_data);
++
++	/* Disable Ungate PGCB clock */
++	mac_data = er32(FEXTNVM9);
++	mac_data |= BIT(28);
++	ew32(FEXTNVM9, mac_data);
++
++	/* Cancel not waking from dynamic
++	 * Power Gating with clock request
++	 */
++	mac_data = er32(FEXTNVM12);
++	mac_data &= ~BIT(12);
++	ew32(FEXTNVM12, mac_data);
++
++	/* Revert the lanphypc logic to use the internal Gbe counter
++	 * and not the PMC counter
++	 */
++	mac_data = er32(FEXTNVM5);
++	mac_data &= 0xFFFFFF7F;
++	ew32(FEXTNVM5, mac_data);
+ }
+ 
+ static int e1000e_pm_freeze(struct device *dev)
+diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
+index e4c2c1bff6c08..b7aa6812510a4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_hwmon.c
++++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
+@@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+ 
+ 	unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+ 
+-	return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
++	return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+ };
+ 
+ void ice_hwmon_init(struct ice_pf *pf)
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index c11eba07283c6..f46d879c62d26 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -1578,6 +1578,10 @@ void ice_ptp_extts_event(struct ice_pf *pf)
+ 	u8 chan, tmr_idx;
+ 	u32 hi, lo;
+ 
++	/* Don't process timestamp events if PTP is not ready */
++	if (pf->ptp.state != ICE_PTP_READY)
++		return;
++
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 	/* Event time is captured by one of the two matched registers
+ 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
+@@ -1603,27 +1607,33 @@ void ice_ptp_extts_event(struct ice_pf *pf)
+ /**
+  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
+  * @pf: Board private structure
+- * @ena: true to enable; false to disable
+  * @chan: GPIO channel (0-3)
+- * @gpio_pin: GPIO pin
+- * @extts_flags: request flags from the ptp_extts_request.flags
++ * @config: desired EXTTS configuration
++ * @store: if true, save the configuration so it can be restored after reset
++ *
++ * Configure an external timestamp event on the requested channel.
++ *
++ * Return: 0 on success, -EOPNOTSUPP on unsupported flags
+  */
+-static int
+-ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+-		  unsigned int extts_flags)
++static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
++			     struct ice_extts_channel *config, bool store)
+ {
+ 	u32 func, aux_reg, gpio_reg, irq_reg;
+ 	struct ice_hw *hw = &pf->hw;
+ 	u8 tmr_idx;
+ 
+-	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
+-		return -EINVAL;
++	/* Reject requests with unsupported flags */
++	if (config->flags & ~(PTP_ENABLE_FEATURE |
++			      PTP_RISING_EDGE |
++			      PTP_FALLING_EDGE |
++			      PTP_STRICT_FLAGS))
++		return -EOPNOTSUPP;
+ 
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 
+ 	irq_reg = rd32(hw, PFINT_OICR_ENA);
+ 
+-	if (ena) {
++	if (config->ena) {
+ 		/* Enable the interrupt */
+ 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
+ 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
+@@ -1632,9 +1642,9 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+ #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
+ 
+ 		/* set event level to requested edge */
+-		if (extts_flags & PTP_FALLING_EDGE)
++		if (config->flags & PTP_FALLING_EDGE)
+ 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
+-		if (extts_flags & PTP_RISING_EDGE)
++		if (config->flags & PTP_RISING_EDGE)
+ 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
+ 
+ 		/* Write GPIO CTL reg.
+@@ -1655,11 +1665,51 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+ 
+ 	wr32(hw, PFINT_OICR_ENA, irq_reg);
+ 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
+-	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
++	wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
++
++	if (store)
++		memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
+ 
+ 	return 0;
+ }
+ 
++/**
++ * ice_ptp_disable_all_extts - Disable all EXTTS channels
++ * @pf: Board private structure
++ */
++static void ice_ptp_disable_all_extts(struct ice_pf *pf)
++{
++	struct ice_extts_channel extts_cfg = {};
++	int i;
++
++	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
++		if (pf->ptp.extts_channels[i].ena) {
++			extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
++			extts_cfg.ena = false;
++			ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
++		}
++	}
++
++	synchronize_irq(pf->oicr_irq.virq);
++}
++
++/**
++ * ice_ptp_enable_all_extts - Enable all EXTTS channels
++ * @pf: Board private structure
++ *
++ * Called during reset to restore user configuration.
++ */
++static void ice_ptp_enable_all_extts(struct ice_pf *pf)
++{
++	int i;
++
++	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
++		if (pf->ptp.extts_channels[i].ena)
++			ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
++					  false);
++	}
++}
++
+ /**
+  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+  * @pf: Board private structure
+@@ -1678,6 +1728,9 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
+ 	u32 func, val, gpio_pin;
+ 	u8 tmr_idx;
+ 
++	if (config && config->flags & ~PTP_PEROUT_PHASE)
++		return -EOPNOTSUPP;
++
+ 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ 
+ 	/* 0. Reset mode & out_en in AUX_OUT */
+@@ -1814,17 +1867,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			 struct ptp_clock_request *rq, int on)
+ {
+ 	struct ice_pf *pf = ptp_info_to_pf(info);
+-	struct ice_perout_channel clk_cfg = {0};
+ 	bool sma_pres = false;
+ 	unsigned int chan;
+ 	u32 gpio_pin;
+-	int err;
+ 
+ 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ 		sma_pres = true;
+ 
+ 	switch (rq->type) {
+ 	case PTP_CLK_REQ_PEROUT:
++	{
++		struct ice_perout_channel clk_cfg = {};
++
+ 		chan = rq->perout.index;
+ 		if (sma_pres) {
+ 			if (chan == ice_pin_desc_e810t[SMA1].chan)
+@@ -1844,15 +1898,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			clk_cfg.gpio_pin = chan;
+ 		}
+ 
++		clk_cfg.flags = rq->perout.flags;
+ 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
+ 				   rq->perout.period.nsec);
+ 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
+ 				       rq->perout.start.nsec);
+ 		clk_cfg.ena = !!on;
+ 
+-		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+-		break;
++		return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
++	}
+ 	case PTP_CLK_REQ_EXTTS:
++	{
++		struct ice_extts_channel extts_cfg = {};
++
+ 		chan = rq->extts.index;
+ 		if (sma_pres) {
+ 			if (chan < ice_pin_desc_e810t[SMA2].chan)
+@@ -1868,14 +1926,15 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+ 			gpio_pin = chan;
+ 		}
+ 
+-		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
+-					rq->extts.flags);
+-		break;
++		extts_cfg.flags = rq->extts.flags;
++		extts_cfg.gpio_pin = gpio_pin;
++		extts_cfg.ena = !!on;
++
++		return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
++	}
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+-
+-	return err;
+ }
+ 
+ /**
+@@ -1888,26 +1947,32 @@ static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
+ 				    struct ptp_clock_request *rq, int on)
+ {
+ 	struct ice_pf *pf = ptp_info_to_pf(info);
+-	struct ice_perout_channel clk_cfg = {0};
+-	int err;
+ 
+ 	switch (rq->type) {
+ 	case PTP_CLK_REQ_PPS:
++	{
++		struct ice_perout_channel clk_cfg = {};
++
++		clk_cfg.flags = rq->perout.flags;
+ 		clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ 		clk_cfg.period = NSEC_PER_SEC;
+ 		clk_cfg.ena = !!on;
+ 
+-		err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+-		break;
++		return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
++	}
+ 	case PTP_CLK_REQ_EXTTS:
+-		err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
+-					TIME_SYNC_PIN_INDEX, rq->extts.flags);
+-		break;
++	{
++		struct ice_extts_channel extts_cfg = {};
++
++		extts_cfg.flags = rq->extts.flags;
++		extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
++		extts_cfg.ena = !!on;
++
++		return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
++	}
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+-
+-	return err;
+ }
+ 
+ /**
+@@ -2745,6 +2810,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
+ 		ice_ptp_restart_all_phy(pf);
+ 	}
+ 
++	/* Re-enable all periodic outputs and external timestamp events */
++	ice_ptp_enable_all_clkout(pf);
++	ice_ptp_enable_all_extts(pf);
++
+ 	return 0;
+ }
+ 
+@@ -3300,6 +3369,8 @@ void ice_ptp_release(struct ice_pf *pf)
+ 
+ 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ 
++	ice_ptp_disable_all_extts(pf);
++
+ 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ 
+ 	ice_ptp_port_phy_stop(&pf->ptp.port);
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
+index 3af20025043a6..e2af9749061ca 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
+@@ -29,10 +29,17 @@ enum ice_ptp_pin_e810t {
+ struct ice_perout_channel {
+ 	bool ena;
+ 	u32 gpio_pin;
++	u32 flags;
+ 	u64 period;
+ 	u64 start_time;
+ };
+ 
++struct ice_extts_channel {
++	bool ena;
++	u32 gpio_pin;
++	u32 flags;
++};
++
+ /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
+  * is stored in a buffer of registers. Depending on the specific hardware,
+  * this buffer might be shared across multiple PHY ports.
+@@ -226,6 +233,7 @@ enum ice_ptp_state {
+  * @ext_ts_irq: the external timestamp IRQ in use
+  * @kworker: kwork thread for handling periodic work
+  * @perout_channels: periodic output data
++ * @extts_channels: channels for external timestamps
+  * @info: structure defining PTP hardware capabilities
+  * @clock: pointer to registered PTP clock device
+  * @tstamp_config: hardware timestamping configuration
+@@ -249,6 +257,7 @@ struct ice_ptp {
+ 	u8 ext_ts_irq;
+ 	struct kthread_worker *kworker;
+ 	struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
++	struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX];
+ 	struct ptp_clock_info info;
+ 	struct ptp_clock *clock;
+ 	struct hwtstamp_config tstamp_config;
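
The EXTTS flag validation above is driven from userspace via the PTP character device. As a usage illustration only (not part of the patch), here is a minimal sketch assuming a clock at /dev/ptp0; with the reworked ice_ptp_cfg_extts(), any flag outside PTP_ENABLE_FEATURE, PTP_RISING_EDGE, PTP_FALLING_EDGE and PTP_STRICT_FLAGS now fails with -EOPNOTSUPP instead of being silently accepted:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    /* Ask for rising-edge external timestamps on channel 0. */
    static int request_extts(void)
    {
            struct ptp_extts_request req;
            int fd, ret;

            fd = open("/dev/ptp0", O_RDWR);  /* assumed device node */
            if (fd < 0)
                    return -1;

            memset(&req, 0, sizeof(req));
            req.index = 0;
            req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                        PTP_STRICT_FLAGS;

            /* Unsupported flag bits now earn EOPNOTSUPP here. */
            ret = ioctl(fd, PTP_EXTTS_REQUEST2, &req);
            close(fd);
            return ret;
    }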
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+index c54fd01ea635a..3d274599015be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+@@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
+ 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ 	struct net *net = dev_net(x->xso.dev);
++	u64 trailer_packets = 0, trailer_bytes = 0;
++	u64 replay_packets = 0, replay_bytes = 0;
++	u64 auth_packets = 0, auth_bytes = 0;
++	u64 success_packets, success_bytes;
+ 	u64 packets, bytes, lastuse;
++	size_t headers;
+ 
+ 	lockdep_assert(lockdep_is_held(&x->lock) ||
+ 		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+@@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
+ 		return;
+ 
+ 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+-		mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+-		x->stats.integrity_failed += packets;
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+-
+-		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
++		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
++				     &auth_packets, &lastuse);
++		x->stats.integrity_failed += auth_packets;
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
++
++		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
++				     &trailer_packets, &lastuse);
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
+ 	}
+ 
+ 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ 		return;
+ 
+-	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+-	x->curlft.packets += packets;
+-	x->curlft.bytes += bytes;
+-
+ 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+-		mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+-		x->stats.replay += packets;
+-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
++		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
++				     &replay_packets, &lastuse);
++		x->stats.replay += replay_packets;
++		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
+ 	}
++
++	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
++	success_packets = packets - auth_packets - trailer_packets - replay_packets;
++	x->curlft.packets += success_packets;
++	/* NIC counts all bytes passed through flow steering and doesn't have
++	 * the ability to count the payload data size, which is what the SA
++	 * accounting needs.
++	 *
++	 * To overcome this HW limitation, approximate the payload size by
++	 * subtracting the always-present headers.
++	 */
++	headers = sizeof(struct ethhdr);
++	if (sa_entry->attrs.family == AF_INET)
++		headers += sizeof(struct iphdr);
++	else
++		headers += sizeof(struct ipv6hdr);
++
++	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
++	x->curlft.bytes += success_bytes - headers * success_packets;
+ }
+ 
+ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
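
To make the approximation above concrete: for 1,000 successfully decrypted IPv4 packets, headers = sizeof(struct ethhdr) + sizeof(struct iphdr) = 14 + 20 = 34 bytes per packet, so a flow-counter reading of 1,500,000 success bytes adds 1,500,000 - 34 * 1,000 = 1,466,000 bytes to x->curlft.bytes. For IPv6 the per-packet subtraction is 14 + 40 = 54 bytes. As the comment says, this is only an approximation — VLAN tags or IP options are not accounted for.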
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 981a3e058840d..cab1770aa476c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5732,6 +5732,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+ 		kfree(priv->htb_qos_sq_stats[i]);
+ 	kvfree(priv->htb_qos_sq_stats);
+ 
++	if (priv->mqprio_rl) {
++		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
++		mlx5e_mqprio_rl_free(priv->mqprio_rl);
++	}
++
+ 	memset(priv, 0, sizeof(*priv));
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index 50d2ea3239798..a436ce895e45a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -6,6 +6,9 @@
+ #include "helper.h"
+ #include "ofld.h"
+ 
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
++
+ static bool
+ esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
+ 				 const struct mlx5_vport *vport)
+@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ {
+ 	struct mlx5_flow_act flow_act = {};
+ 	struct mlx5_flow_handle *flow_rule;
++	bool created = false;
+ 	int err = 0;
+ 
++	if (!vport->ingress.acl) {
++		err = acl_ingress_ofld_setup(esw, vport);
++		if (err)
++			return err;
++		created = true;
++	}
++
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ 	flow_act.fg = vport->ingress.offloads.drop_grp;
+ 	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ 	if (IS_ERR(flow_rule)) {
+ 		err = PTR_ERR(flow_rule);
+-		goto out;
++		goto err_out;
+ 	}
+ 
+ 	vport->ingress.offloads.drop_rule = flow_rule;
+-out:
++
++	return 0;
++err_out:
++	/* Only destroy the ingress ACL if it was created in this function. */
++	if (created)
++		esw_acl_ingress_ofld_cleanup(esw, vport);
+ 	return err;
+ }
+ 
+@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+ 	}
+ }
+ 
+-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+-			       struct mlx5_vport *vport)
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
+ 	int num_ftes = 0;
+ 	int err;
+ 
+-	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+-	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
+-		return 0;
+-
+ 	esw_acl_ingress_allow_rule_destroy(vport);
+ 
+ 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ 	return err;
+ }
+ 
++int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
++{
++	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
++	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
++		return 0;
++
++	return acl_ingress_ofld_setup(esw, vport);
++}
++
+ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ 				  struct mlx5_vport *vport)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+index 025e0db983feb..b032d5a4b3b84 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+@@ -1484,6 +1484,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+ 	vfree(types_info->data);
+ err_data_alloc:
+ 	kfree(types_info);
++	linecards->types_info = NULL;
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index dcab638c57fe8..24c90d8f5a442 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -871,13 +871,13 @@ static void rswitch_tx_free(struct net_device *ndev)
+ 		dma_rmb();
+ 		skb = gq->skbs[gq->dirty];
+ 		if (skb) {
++			rdev->ndev->stats.tx_packets++;
++			rdev->ndev->stats.tx_bytes += skb->len;
+ 			dma_unmap_single(ndev->dev.parent,
+ 					 gq->unmap_addrs[gq->dirty],
+ 					 skb->len, DMA_TO_DEVICE);
+ 			dev_kfree_skb_any(gq->skbs[gq->dirty]);
+ 			gq->skbs[gq->dirty] = NULL;
+-			rdev->ndev->stats.tx_packets++;
+-			rdev->ndev->stats.tx_bytes += skb->len;
+ 		}
+ 		desc->desc.die_dt = DT_EEMPTY;
+ 	}
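
The reordering above follows a general rule worth spelling out: read everything you still need from an skb before any call that may free it. A minimal sketch of the pattern (names illustrative, not the driver's):

    unsigned int len = skb->len;    /* cache while the skb is valid */

    dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
    dev_kfree_skb_any(skb);         /* the skb may be gone after this */

    stats->tx_packets++;
    stats->tx_bytes += len;         /* use the cached value, never skb->len */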
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index 65d7370b47d57..466c4002f00d4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -272,7 +272,7 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
+ 
+ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
+ 	.por = emac_v4_0_0_por,
+-	.num_por = ARRAY_SIZE(emac_v3_0_0_por),
++	.num_por = ARRAY_SIZE(emac_v4_0_0_por),
+ 	.rgmii_config_loopback_en = false,
+ 	.has_emac_ge_3 = true,
+ 	.link_clk_name = "phyaux",
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7c6fb14b55550..39e8340446c71 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7662,9 +7662,10 @@ int stmmac_dvr_probe(struct device *device,
+ #ifdef STMMAC_VLAN_TAG_USED
+ 	/* Both mac100 and gmac support receive VLAN tag detection */
+ 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+-	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+-	priv->hw->hw_vlan_en = true;
+-
++	if (priv->plat->has_gmac4) {
++		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
++		priv->hw->hw_vlan_en = true;
++	}
+ 	if (priv->dma_cap.vlhash) {
+ 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index c09a6f7445754..db640ea63f034 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1959,6 +1959,7 @@ int wx_sw_init(struct wx *wx)
+ 	}
+ 
+ 	bitmap_zero(wx->state, WX_STATE_NBITS);
++	wx->misc_irq_domain = false;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 07ba3a270a14f..88e5e390770b5 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -1686,6 +1686,7 @@ static int wx_set_interrupt_capability(struct wx *wx)
+ 	}
+ 
+ 	pdev->irq = pci_irq_vector(pdev, 0);
++	wx->num_q_vectors = 1;
+ 
+ 	return 0;
+ }
+@@ -1996,7 +1997,8 @@ void wx_free_irq(struct wx *wx)
+ 	int vector;
+ 
+ 	if (!(pdev->msix_enabled)) {
+-		free_irq(pdev->irq, wx);
++		if (!wx->misc_irq_domain)
++			free_irq(pdev->irq, wx);
+ 		return;
+ 	}
+ 
+@@ -2011,7 +2013,7 @@ void wx_free_irq(struct wx *wx)
+ 		free_irq(entry->vector, q_vector);
+ 	}
+ 
+-	if (wx->mac.type == wx_mac_em)
++	if (!wx->misc_irq_domain)
+ 		free_irq(wx->msix_entry->vector, wx);
+ }
+ EXPORT_SYMBOL(wx_free_irq);
+@@ -2026,6 +2028,9 @@ int wx_setup_isb_resources(struct wx *wx)
+ {
+ 	struct pci_dev *pdev = wx->pdev;
+ 
++	if (wx->isb_mem)
++		return 0;
++
+ 	wx->isb_mem = dma_alloc_coherent(&pdev->dev,
+ 					 sizeof(u32) * 4,
+ 					 &wx->isb_dma,
+@@ -2385,7 +2390,6 @@ static void wx_free_all_tx_resources(struct wx *wx)
+ 
+ void wx_free_resources(struct wx *wx)
+ {
+-	wx_free_isb_resources(wx);
+ 	wx_free_all_rx_resources(wx);
+ 	wx_free_all_tx_resources(wx);
+ }
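
Two lifetime changes work together here: wx_setup_isb_resources() becomes idempotent, and wx_free_resources() stops freeing the ISB, so the outermost owners (ngbe_close() and txgbe_remove() below) release it instead — apparently so the misc interrupt path never observes a freed status block across an open/close cycle. A minimal sketch of the idempotent-setup half, reusing the same field names (the function name is illustrative):

    int example_setup_isb(struct wx *wx)
    {
            if (wx->isb_mem)        /* already allocated: calling again is a no-op */
                    return 0;

            wx->isb_mem = dma_alloc_coherent(&wx->pdev->dev, sizeof(u32) * 4,
                                             &wx->isb_dma, GFP_KERNEL);
            return wx->isb_mem ? 0 : -ENOMEM;
    }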
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index 5aaf7b1fa2db9..0df7f5712b6f7 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -1058,6 +1058,7 @@ struct wx {
+ 	dma_addr_t isb_dma;
+ 	u32 *isb_mem;
+ 	u32 isb_tag[WX_ISB_MAX];
++	bool misc_irq_domain;
+ 
+ #define WX_MAX_RETA_ENTRIES 128
+ #define WX_RSS_INDIR_TBL_MAX 64
+diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+index e894e01d030d1..af30ca0312b81 100644
+--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
++++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+@@ -387,6 +387,7 @@ static int ngbe_open(struct net_device *netdev)
+ err_free_irq:
+ 	wx_free_irq(wx);
+ err_free_resources:
++	wx_free_isb_resources(wx);
+ 	wx_free_resources(wx);
+ 	return err;
+ }
+@@ -408,6 +409,7 @@ static int ngbe_close(struct net_device *netdev)
+ 
+ 	ngbe_down(wx);
+ 	wx_free_irq(wx);
++	wx_free_isb_resources(wx);
+ 	wx_free_resources(wx);
+ 	phylink_disconnect_phy(wx->phylink);
+ 	wx_control_hw(wx, false);
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+index b3e3605d1edb3..a4cf682dca650 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+@@ -27,57 +27,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
+ }
+ 
+ /**
+- * txgbe_intr - msi/legacy mode Interrupt Handler
+- * @irq: interrupt number
+- * @data: pointer to a network interface device structure
+- **/
+-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+-{
+-	struct wx_q_vector *q_vector;
+-	struct wx *wx  = data;
+-	struct pci_dev *pdev;
+-	u32 eicr;
+-
+-	q_vector = wx->q_vector[0];
+-	pdev = wx->pdev;
+-
+-	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+-	if (!eicr) {
+-		/* shared interrupt alert!
+-		 * the interrupt that we masked before the ICR read.
+-		 */
+-		if (netif_running(wx->netdev))
+-			txgbe_irq_enable(wx, true);
+-		return IRQ_NONE;        /* Not our interrupt */
+-	}
+-	wx->isb_mem[WX_ISB_VEC0] = 0;
+-	if (!(pdev->msi_enabled))
+-		wr32(wx, WX_PX_INTA, 1);
+-
+-	wx->isb_mem[WX_ISB_MISC] = 0;
+-	/* would disable interrupts here but it is auto disabled */
+-	napi_schedule_irqoff(&q_vector->napi);
+-
+-	/* re-enable link(maybe) and non-queue interrupts, no flush.
+-	 * txgbe_poll will re-enable the queue interrupts
+-	 */
+-	if (netif_running(wx->netdev))
+-		txgbe_irq_enable(wx, false);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
++ * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
+  * @wx: board private structure
+  *
+- * Allocate MSI-X vectors and request interrupts from the kernel.
++ * Allocate MSI-X queue vectors and request interrupts from the kernel.
+  **/
+-static int txgbe_request_msix_irqs(struct wx *wx)
++int txgbe_request_queue_irqs(struct wx *wx)
+ {
+ 	struct net_device *netdev = wx->netdev;
+ 	int vector, err;
+ 
++	if (!wx->pdev->msix_enabled)
++		return 0;
++
+ 	for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ 		struct wx_q_vector *q_vector = wx->q_vector[vector];
+ 		struct msix_entry *entry = &wx->msix_q_entries[vector];
+@@ -110,34 +72,6 @@ static int txgbe_request_msix_irqs(struct wx *wx)
+ 	return err;
+ }
+ 
+-/**
+- * txgbe_request_irq - initialize interrupts
+- * @wx: board private structure
+- *
+- * Attempt to configure interrupts using the best available
+- * capabilities of the hardware and kernel.
+- **/
+-int txgbe_request_irq(struct wx *wx)
+-{
+-	struct net_device *netdev = wx->netdev;
+-	struct pci_dev *pdev = wx->pdev;
+-	int err;
+-
+-	if (pdev->msix_enabled)
+-		err = txgbe_request_msix_irqs(wx);
+-	else if (pdev->msi_enabled)
+-		err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+-				  netdev->name, wx);
+-	else
+-		err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+-				  netdev->name, wx);
+-
+-	if (err)
+-		wx_err(wx, "request_irq failed, Error %d\n", err);
+-
+-	return err;
+-}
+-
+ static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+ {
+ 	txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+@@ -177,6 +111,36 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ };
+ 
+ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
++{
++	struct wx_q_vector *q_vector;
++	struct txgbe *txgbe = data;
++	struct wx *wx = txgbe->wx;
++	u32 eicr;
++
++	if (wx->pdev->msix_enabled)
++		return IRQ_WAKE_THREAD;
++
++	eicr = wx_misc_isb(wx, WX_ISB_VEC0);
++	if (!eicr) {
++		/* shared interrupt alert!
++		 * the interrupt that we masked before the ICR read.
++		 */
++		if (netif_running(wx->netdev))
++			txgbe_irq_enable(wx, true);
++		return IRQ_NONE;        /* Not our interrupt */
++	}
++	wx->isb_mem[WX_ISB_VEC0] = 0;
++	if (!(wx->pdev->msi_enabled))
++		wr32(wx, WX_PX_INTA, 1);
++
++	/* would disable interrupts here but it is auto disabled */
++	q_vector = wx->q_vector[0];
++	napi_schedule_irqoff(&q_vector->napi);
++
++	return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+ {
+ 	struct txgbe *txgbe = data;
+ 	struct wx *wx = txgbe->wx;
+@@ -223,6 +187,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
+ 
+ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ {
++	unsigned long flags = IRQF_ONESHOT;
+ 	struct wx *wx = txgbe->wx;
+ 	int hwirq, err;
+ 
+@@ -236,14 +201,17 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 		irq_create_mapping(txgbe->misc.domain, hwirq);
+ 
+ 	txgbe->misc.chip = txgbe_irq_chip;
+-	if (wx->pdev->msix_enabled)
++	if (wx->pdev->msix_enabled) {
+ 		txgbe->misc.irq = wx->msix_entry->vector;
+-	else
++	} else {
+ 		txgbe->misc.irq = wx->pdev->irq;
++		if (!wx->pdev->msi_enabled)
++			flags |= IRQF_SHARED;
++	}
+ 
+-	err = request_threaded_irq(txgbe->misc.irq, NULL,
+-				   txgbe_misc_irq_handle,
+-				   IRQF_ONESHOT,
++	err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
++				   txgbe_misc_irq_thread_fn,
++				   flags,
+ 				   wx->netdev->name, txgbe);
+ 	if (err)
+ 		goto del_misc_irq;
+@@ -256,6 +224,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
+ 	if (err)
+ 		goto free_gpio_irq;
+ 
++	wx->misc_irq_domain = true;
++
+ 	return 0;
+ 
+ free_gpio_irq:
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+index b77945e7a0f26..e6285b94625ea 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+@@ -2,6 +2,6 @@
+ /* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+ 
+ void txgbe_irq_enable(struct wx *wx, bool queues);
+-int txgbe_request_irq(struct wx *wx);
++int txgbe_request_queue_irqs(struct wx *wx);
+ void txgbe_free_misc_irq(struct txgbe *txgbe);
+ int txgbe_setup_misc_irq(struct txgbe *txgbe);
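
The txgbe rework above replaces a dedicated legacy handler with one hard/thread pair on the misc interrupt. A hedged sketch of the request_threaded_irq() pattern being adopted — all names below are illustrative: the hard handler does only the non-sleeping work (claim the interrupt, kick NAPI) and returns IRQ_WAKE_THREAD so the slow path runs in a kernel thread.

    static irqreturn_t my_hard_handler(int irq, void *data)
    {
            struct my_dev *md = data;

            if (!my_irq_is_ours(md))         /* shared line: not our event */
                    return IRQ_NONE;

            napi_schedule_irqoff(&md->napi); /* fast path stays in hardirq */
            return IRQ_WAKE_THREAD;          /* defer the rest */
    }

    static irqreturn_t my_thread_handler(int irq, void *data)
    {
            struct my_dev *md = data;

            my_handle_misc_events(md);       /* may sleep; runs in a kthread */
            return IRQ_HANDLED;
    }

    /* IRQF_ONESHOT keeps the line masked until the thread finishes;
     * IRQF_SHARED is added only for legacy INTx, as in the patch. */
    err = request_threaded_irq(irq, my_hard_handler, my_thread_handler,
                               IRQF_ONESHOT, "my_dev", md);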
+diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+index 8c7a74981b907..ca74d9422065a 100644
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -294,9 +294,9 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ 	wx_configure(wx);
+ 
+-	err = txgbe_request_irq(wx);
++	err = txgbe_request_queue_irqs(wx);
+ 	if (err)
+-		goto err_free_isb;
++		goto err_free_resources;
+ 
+ 	/* Notify the stack of the actual queue counts. */
+ 	err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+@@ -313,8 +313,8 @@ static int txgbe_open(struct net_device *netdev)
+ 
+ err_free_irq:
+ 	wx_free_irq(wx);
+-err_free_isb:
+-	wx_free_isb_resources(wx);
++err_free_resources:
++	wx_free_resources(wx);
+ err_reset:
+ 	txgbe_reset(wx);
+ 
+@@ -729,6 +729,7 @@ static void txgbe_remove(struct pci_dev *pdev)
+ 
+ 	txgbe_remove_phy(txgbe);
+ 	txgbe_free_misc_irq(txgbe);
++	wx_free_isb_resources(wx);
+ 
+ 	pci_release_selected_regions(pdev,
+ 				     pci_select_bars(pdev, IORESOURCE_MEM));
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 536bd6564f8b8..dade51cf599c6 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ 	skb->protocol = eth_type_trans(skb, ndev);
+ 	skb->ip_summed = CHECKSUM_NONE;
+ 
+-	if (__netif_rx(skb) == NET_RX_DROP) {
++	if (netif_rx(skb) == NET_RX_DROP) {
+ 		ndev->stats.rx_errors++;
+ 		ndev->stats.rx_dropped++;
+ 	} else {
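
The substitution above is about calling context: __netif_rx() must only be called with bottom halves already disabled, while netif_rx() does that bookkeeping internally and is safe from any context. In sketch form, the two correct usages are:

    /* Variant 1: caller provides the BH-disabled section. */
    local_bh_disable();
    __netif_rx(skb);
    local_bh_enable();

    /* Variant 2: netif_rx() handles it, so no wrapper is needed. */
    netif_rx(skb);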
+diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
+index 1c19ae74ad2b4..4830b25e6c7d3 100644
+--- a/drivers/net/phy/aquantia/aquantia.h
++++ b/drivers/net/phy/aquantia/aquantia.h
+@@ -6,6 +6,9 @@
+  * Author: Heiner Kallweit <hkallweit1@gmail.com>
+  */
+ 
++#ifndef AQUANTIA_H
++#define AQUANTIA_H
++
+ #include <linux/device.h>
+ #include <linux/phy.h>
+ 
+@@ -120,3 +123,5 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+ #endif
+ 
+ int aqr_firmware_load(struct phy_device *phydev);
++
++#endif /* AQUANTIA_H */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index fb8bd50eb7de8..5521c0ea5b261 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -257,7 +257,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ 	};
+ 	u16 ntlv;
+ 
+-	ptlv = skb_put(skb, len);
++	ptlv = skb_put_zero(skb, len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+@@ -1670,7 +1670,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	set_bit(MT76_HW_SCANNING, &phy->state);
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 	req->bss_idx = mvif->idx;
+@@ -1798,7 +1798,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ 
+ 	mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-	req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
++	req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 	req->version = 1;
+ 	req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 
+@@ -2321,7 +2321,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
++	gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
+ 							 sizeof(*gtk_tlv));
+ 	gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+ 	gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+@@ -2446,7 +2446,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+ 		return -ENOMEM;
+ 
+ 	skb_put_data(skb, &hdr, sizeof(hdr));
+-	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
++	ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
+ 	ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+ 	ptlv->len = cpu_to_le16(sizeof(*ptlv));
+ 	ptlv->data_len = pattern->pattern_len;
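
These skb_put() -> skb_put_zero() conversions (continued in the mt7915 hunk below) close a potential information leak: skb_put() only advances the tail pointer and hands back whatever bytes the buffer already contained, so TLV fields that are never explicitly set would carry stale kernel memory onto the wire. skb_put_zero() zeroes the region first. In miniature:

    ptlv = skb_put_zero(skb, len);    /* reserve len bytes, zero-filled */
    memcpy(ptlv, &tlv, sizeof(tlv));  /* fill the header; everything past
                                       * sizeof(tlv) is guaranteed zero */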
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index d90f98c500399..b7157bdb3103f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -424,7 +424,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ 		.len = cpu_to_le16(sub_len),
+ 	};
+ 
+-	ptlv = skb_put(skb, sub_len);
++	ptlv = skb_put_zero(skb, sub_len);
+ 	memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+ 	le16_add_cpu(sub_ntlv, 1);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+index 9bd953586b041..62c03d088925c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+@@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val)
+ 	if (val > MT_RX_SEL2)
+ 		return -EINVAL;
+ 
++	if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
++		dev_err(dev->mt76.dev, "Background radar is not enabled\n");
++		return -EINVAL;
++	}
++
+ 	return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
+ 				  val, 0, 0);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index e86c05d0eecc9..bc3b2babb094d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -355,7 +355,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+ 	if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ 		return;
+ 
+-	if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
++	if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
++		return;
++
++	if (r->band_idx == MT_RX_SEL2)
+ 		mphy = dev->rdd2_phy->mt76;
+ 	else
+ 		mphy = dev->mt76.phys[r->band_idx];
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index f1085ccb7eedc..7719e4f3e2a23 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -382,7 +382,8 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ 	struct ieee80211_p2p_noa_attr noa_attr;
+ 	const struct cfg80211_bss_ies *ies;
+ 	struct wilc_join_bss_param *param;
+-	u8 rates_len = 0, ies_len;
++	u8 rates_len = 0;
++	int ies_len;
+ 	int ret;
+ 
+ 	param = kzalloc(sizeof(*param), GFP_KERNEL);
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 6c75ebbb21caa..ef86389545ffb 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -4646,6 +4646,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ 	u8 i, idx;
+ 
+ 	sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
++	if (!sband) {
++		option->prohib_chan = U64_MAX;
++		return;
++	}
+ 
+ 	for (i = 0; i < sband->n_channels; i++) {
+ 		chan = &sband->channels[i];
+diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
+index 590b038e449e5..6b89d596ba9af 100644
+--- a/drivers/nfc/virtual_ncidev.c
++++ b/drivers/nfc/virtual_ncidev.c
+@@ -125,6 +125,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
+ 		kfree_skb(skb);
+ 		return -EFAULT;
+ 	}
++	if (strnlen(skb->data, count) != count) {
++		kfree_skb(skb);
++		return -EINVAL;
++	}
+ 
+ 	nci_recv_frame(vdev->ndev, skb);
+ 	return count;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index a4e46eb20be63..1bee176fd850e 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -596,7 +596,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ 		int node, srcu_idx;
+ 
+ 		srcu_idx = srcu_read_lock(&head->srcu);
+-		for_each_node(node)
++		for_each_online_node(node)
+ 			__nvme_find_path(head, node);
+ 		srcu_read_unlock(&head->srcu, srcu_idx);
+ 	}
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 710043086dffa..102a9fb0c65ff 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -778,7 +778,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 		struct bio_vec bv = req_bvec(req);
+ 
+ 		if (!is_pci_p2pdma_page(bv.bv_page)) {
+-			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
++			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++			     bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+ 
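
Worked through with NVME_CTRL_PAGE_SIZE = 4096: a bvec with bv_offset = 10240 and bv_len = 6144 failed the old test (10240 + 6144 = 16384 > 8192), yet its data begins only 10240 & 4095 = 2048 bytes into a controller page, and 2048 + 6144 = 8192 <= 8192, so the transfer spans at most two controller pages and fits the two PRP entries the simple path can express. Masking the offset makes the check agree with the in-page offset the simple mapping actually uses; bv_offset alone can exceed the page size once large folios are involved.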
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 2fde22323622e..06f0c587f3437 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -818,6 +818,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+ 	percpu_ref_exit(&sq->ref);
+ 	nvmet_auth_sq_free(sq);
+ 
++	/*
++	 * We must re-read sq->ctrl after waiting for inflight IO to
++	 * complete, because an admin connect may have sneaked in after we
++	 * stored sq->ctrl locally but before we killed the percpu_ref. That
++	 * admin connect allocates and assigns sq->ctrl, which now needs a
++	 * final ref put, as this ctrl is going away.
++	 */
++	ctrl = sq->ctrl;
++
+ 	if (ctrl) {
+ 		/*
+ 		 * The teardown flow may take some time, and the host may not
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 16e941449b144..7d345009c3270 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3276,7 +3276,7 @@ static const char *find_hci_method(acpi_handle handle)
+  */
+ #define QUIRK_HCI_HOTKEY_QUICKSTART		BIT(1)
+ 
+-static const struct dmi_system_id toshiba_dmi_quirks[] = {
++static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ 	{
+ 	 /* Toshiba Portégé R700 */
+ 	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3311,8 +3311,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	struct toshiba_acpi_dev *dev;
+ 	const char *hci_method;
+ 	u32 dummy;
+-	const struct dmi_system_id *dmi_id;
+-	long quirks = 0;
+ 	int ret = 0;
+ 
+ 	if (toshiba_acpi)
+@@ -3465,16 +3463,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	}
+ #endif
+ 
+-	dmi_id = dmi_first_match(toshiba_dmi_quirks);
+-	if (dmi_id)
+-		quirks = (long)dmi_id->driver_data;
+-
+-	if (turn_on_panel_on_resume == -1)
+-		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+-
+-	if (hci_hotkey_quickstart == -1)
+-		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+-
+ 	toshiba_wwan_available(dev);
+ 	if (dev->wwan_supported)
+ 		toshiba_acpi_setup_wwan_rfkill(dev);
+@@ -3624,10 +3612,27 @@ static struct acpi_driver toshiba_acpi_driver = {
+ 	.drv.pm	= &toshiba_acpi_pm,
+ };
+ 
++static void __init toshiba_dmi_init(void)
++{
++	const struct dmi_system_id *dmi_id;
++	long quirks = 0;
++
++	dmi_id = dmi_first_match(toshiba_dmi_quirks);
++	if (dmi_id)
++		quirks = (long)dmi_id->driver_data;
++
++	if (turn_on_panel_on_resume == -1)
++		turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++	if (hci_hotkey_quickstart == -1)
++		hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
++}
++
+ static int __init toshiba_acpi_init(void)
+ {
+ 	int ret;
+ 
++	toshiba_dmi_init();
+ 	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+ 	if (!toshiba_proc_dir) {
+ 		pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
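
Marking the quirk table __initconst is what forces the second half of this change: init-only data is discarded once boot finishes, so it must not be referenced from toshiba_acpi_add(), which can run later. Hoisting the dmi_first_match() evaluation into an __init helper keeps every access inside the init phase. The pattern in miniature (names illustrative):

    static const struct dmi_system_id my_quirks[] __initconst = {
            /* ... DMI matches with quirk bits in .driver_data ... */
            { }
    };

    static long my_quirk_flags;     /* plain data, outlives init */

    static void __init my_dmi_init(void)
    {
            const struct dmi_system_id *id = dmi_first_match(my_quirks);

            if (id)
                    my_quirk_flags = (long)id->driver_data;
    }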
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index c6a10ec2c83f6..89e1be0815b52 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -897,6 +897,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
+ 	.properties	= schneider_sct101ctm_props,
+ };
+ 
++static const struct property_entry globalspace_solt_ivw116_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data globalspace_solt_ivw116_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= globalspace_solt_ivw116_props,
++};
++
+ static const struct property_entry techbite_arc_11_6_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+@@ -1385,6 +1401,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+ 		},
+ 	},
++	{
++		/* Jumper EZpad 6s Pro */
++		.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
++			/* Above matches are too generic, add bios match */
++			DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
++			DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
++		},
++	},
+ 	{
+ 		/* Jumper EZpad 6 m4 */
+ 		.driver_data = (void *)&jumper_ezpad_6_m4_data,
+@@ -1624,6 +1651,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ 		},
+ 	},
++	{
++		/* GlobalSpace SoLT IVW 11.6" */
++		.driver_data = (void *)&globalspace_solt_ivw116_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
++			DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
++		},
++	},
+ 	{
+ 		/* Techbite Arc 11.6 */
+ 		.driver_data = (void *)&techbite_arc_11_6_data,
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 180a008d38eaa..4118b64781cb5 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -4906,7 +4906,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 				ccw++;
+ 			if (dst) {
+ 				if (ccw->flags & CCW_FLAG_IDA)
+-					cda = *((char **)dma32_to_virt(ccw->cda));
++					cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
+ 				else
+ 					cda = dma32_to_virt(ccw->cda);
+ 				if (dst != cda) {
+@@ -5525,7 +5525,7 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
+ 
+ 		/* get pointer to data (consider IDALs) */
+ 		if (from->flags & CCW_FLAG_IDA)
+-			datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
++			datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
+ 		else
+ 			datap = dma32_to_virt(from->cda);
+ 
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
+index 361e9bd752570..9f2023a077c20 100644
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -585,7 +585,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 				ccw++;
+ 			if (dst) {
+ 				if (ccw->flags & CCW_FLAG_IDA)
+-					cda = *((char **)dma32_to_virt(ccw->cda));
++					cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
+ 				else
+ 					cda = dma32_to_virt(ccw->cda);
+ 				if (dst != cda) {
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 6e5c508b1e07c..5f6e102256276 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -490,13 +490,14 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
+ 			      struct channel_program *cp)
+ {
+ 	struct ccwchain *iter;
+-	u32 cda, ccw_head;
++	u32 offset, ccw_head;
+ 
+ 	list_for_each_entry(iter, &cp->ccwchain_list, next) {
+ 		ccw_head = iter->ch_iova;
+ 		if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
+-			cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+-			ccw->cda = u32_to_dma32(cda);
++			/* Calculate offset of TIC target */
++			offset = dma32_to_u32(ccw->cda) - ccw_head;
++			ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset);
+ 			return 0;
+ 		}
+ 	}
+@@ -914,7 +915,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
+ 	 * in the ioctl directly. Path status changes etc.
+ 	 */
+ 	list_for_each_entry(chain, &cp->ccwchain_list, next) {
+-		ccw_head = (u32)(u64)chain->ch_ccw;
++		ccw_head = dma32_to_u32(virt_to_dma32(chain->ch_ccw));
+ 		/*
+ 		 * On successful execution, cpa points just beyond the end
+ 		 * of the chain.
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index dccf664a3d957..ffc0b5db55c29 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1359,10 +1359,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = cca_genseckey(kgs.cardnr, kgs.domain,
+ 				   kgs.keytype, kgs.seckey.seckey);
+ 		pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
++			rc = -EFAULT;
++		memzero_explicit(&kgs, sizeof(kgs));
+ 		break;
+ 	}
+ 	case PKEY_CLR2SECK: {
+@@ -1374,10 +1373,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ 				    kcs.clrkey.clrkey, kcs.seckey.seckey);
+ 		pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
++			rc = -EFAULT;
+ 		memzero_explicit(&kcs, sizeof(kcs));
+ 		break;
+ 	}
+@@ -1392,10 +1389,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				     ksp.seckey.seckey, ksp.protkey.protkey,
+ 				     &ksp.protkey.len, &ksp.protkey.type);
+ 		pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
++			rc = -EFAULT;
++		memzero_explicit(&ksp, sizeof(ksp));
+ 		break;
+ 	}
+ 	case PKEY_CLR2PROTK: {
+@@ -1409,10 +1405,8 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				      kcp.protkey.protkey,
+ 				      &kcp.protkey.len, &kcp.protkey.type);
+ 		pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
++			rc = -EFAULT;
+ 		memzero_explicit(&kcp, sizeof(kcp));
+ 		break;
+ 	}
+@@ -1441,10 +1435,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
+ 				    &ksp.protkey.len, &ksp.protkey.type);
+ 		pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(usp, &ksp, sizeof(ksp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
++			rc = -EFAULT;
++		memzero_explicit(&ksp, sizeof(ksp));
+ 		break;
+ 	}
+ 	case PKEY_VERIFYKEY: {
+@@ -1456,10 +1449,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
+ 				    &kvk.keysize, &kvk.attributes);
+ 		pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
++			rc = -EFAULT;
++		memzero_explicit(&kvk, sizeof(kvk));
+ 		break;
+ 	}
+ 	case PKEY_GENPROTK: {
+@@ -1472,10 +1464,9 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
+ 				     &kgp.protkey.len, &kgp.protkey.type);
+ 		pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
+-		if (rc)
+-			break;
+-		if (copy_to_user(ugp, &kgp, sizeof(kgp)))
+-			return -EFAULT;
++		if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
++			rc = -EFAULT;
++		memzero_explicit(&kgp, sizeof(kgp));
+ 		break;
+ 	}
+ 	case PKEY_VERIFYPROTK: {
+@@ -1487,6 +1478,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_verifyprotkey(kvp.protkey.protkey,
+ 					kvp.protkey.len, kvp.protkey.type);
+ 		pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
++		memzero_explicit(&kvp, sizeof(kvp));
+ 		break;
+ 	}
+ 	case PKEY_KBLOB2PROTK: {
+@@ -1503,12 +1495,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
+ 				       &ktp.protkey.len, &ktp.protkey.type);
+ 		pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
+-		if (rc)
+-			break;
+-		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+-			return -EFAULT;
++		kfree_sensitive(kkey);
++		if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
++			rc = -EFAULT;
++		memzero_explicit(&ktp, sizeof(ktp));
+ 		break;
+ 	}
+ 	case PKEY_GENSECK2: {
+@@ -1534,23 +1524,23 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+ 		if (rc) {
+-			kfree(kkey);
++			kfree_sensitive(kkey);
+ 			break;
+ 		}
+ 		if (kgs.key) {
+ 			if (kgs.keylen < klen) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(kgs.key, kkey, klen)) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
+ 				return -EFAULT;
+ 			}
+ 		}
+ 		kgs.keylen = klen;
+ 		if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ 			rc = -EFAULT;
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		break;
+ 	}
+ 	case PKEY_CLR2SECK2: {
+@@ -1563,11 +1553,14 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ 			return -EFAULT;
+ 		apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+-		if (IS_ERR(apqns))
++		if (IS_ERR(apqns)) {
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			return PTR_ERR(apqns);
++		}
+ 		kkey = kzalloc(klen, GFP_KERNEL);
+ 		if (!kkey) {
+ 			kfree(apqns);
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			return -ENOMEM;
+ 		}
+ 		rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
+@@ -1576,16 +1569,19 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+ 		if (rc) {
+-			kfree(kkey);
++			kfree_sensitive(kkey);
++			memzero_explicit(&kcs, sizeof(kcs));
+ 			break;
+ 		}
+ 		if (kcs.key) {
+ 			if (kcs.keylen < klen) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
++				memzero_explicit(&kcs, sizeof(kcs));
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(kcs.key, kkey, klen)) {
+-				kfree(kkey);
++				kfree_sensitive(kkey);
++				memzero_explicit(&kcs, sizeof(kcs));
+ 				return -EFAULT;
+ 			}
+ 		}
+@@ -1593,7 +1589,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ 			rc = -EFAULT;
+ 		memzero_explicit(&kcs, sizeof(kcs));
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		break;
+ 	}
+ 	case PKEY_VERIFYKEY2: {
+@@ -1610,7 +1606,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 				     &kvk.cardnr, &kvk.domain,
+ 				     &kvk.type, &kvk.size, &kvk.flags);
+ 		pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc)
+ 			break;
+ 		if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+@@ -1640,12 +1636,10 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					&ktp.protkey.type);
+ 		pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ 		kfree(apqns);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
+-		if (rc)
+-			break;
+-		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+-			return -EFAULT;
++		kfree_sensitive(kkey);
++		if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
++			rc = -EFAULT;
++		memzero_explicit(&ktp, sizeof(ktp));
+ 		break;
+ 	}
+ 	case PKEY_APQNS4K: {
+@@ -1673,7 +1667,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
+ 				    apqns, &nr_apqns);
+ 		pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc && rc != -ENOSPC) {
+ 			kfree(apqns);
+ 			break;
+@@ -1759,7 +1753,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		protkey = kmalloc(protkeylen, GFP_KERNEL);
+ 		if (!protkey) {
+ 			kfree(apqns);
+-			kfree(kkey);
++			kfree_sensitive(kkey);
+ 			return -ENOMEM;
+ 		}
+ 		rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
+@@ -1767,23 +1761,22 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 					protkey, &protkeylen, &ktp.pkeytype);
+ 		pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ 		kfree(apqns);
+-		memzero_explicit(kkey, ktp.keylen);
+-		kfree(kkey);
++		kfree_sensitive(kkey);
+ 		if (rc) {
+-			kfree(protkey);
++			kfree_sensitive(protkey);
+ 			break;
+ 		}
+ 		if (ktp.pkey && ktp.pkeylen) {
+ 			if (protkeylen > ktp.pkeylen) {
+-				kfree(protkey);
++				kfree_sensitive(protkey);
+ 				return -EINVAL;
+ 			}
+ 			if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+-				kfree(protkey);
++				kfree_sensitive(protkey);
+ 				return -EFAULT;
+ 			}
+ 		}
+-		kfree(protkey);
++		kfree_sensitive(protkey);
+ 		ktp.pkeylen = protkeylen;
+ 		if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ 			return -EFAULT;
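
Most of this hunk is a mechanical conversion worth one clarifying sketch: kfree_sensitive() wipes a heap allocation before handing it back to the allocator, deriving the size internally, so callers no longer need to carry the length around; it is also NULL-safe. The on-stack ioctl argument structs keep using memzero_explicit(), since they are not heap allocations. Assuming keybuf came from kmalloc():

    /* Open-coded form being replaced: */
    memzero_explicit(keybuf, keylen);  /* wipe; won't be optimised away */
    kfree(keybuf);

    /* Equivalent single call: */
    kfree_sensitive(keybuf);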
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index d32ad46318cb0..5d261c2f2d200 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1355,11 +1355,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ 	    mr_sas_port->remote_identify.sas_address, hba_port);
+ 
++	if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
++		ioc_info(mrioc, "max port count %u could be too high\n",
++		    mr_sas_node->num_phys);
++
+ 	for (i = 0; i < mr_sas_node->num_phys; i++) {
+ 		if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ 		    mr_sas_port->remote_identify.sas_address) ||
+ 		    (mr_sas_node->phy[i].hba_port != hba_port))
+ 			continue;
++
++		if (i >= sizeof(mr_sas_port->phy_mask) * 8) {
++			ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
++			    i, sizeof(mr_sas_port->phy_mask) * 8);
++			goto out_fail;
++		}
+ 		list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ 		    &mr_sas_port->phy_list);
+ 		mr_sas_port->num_phys++;
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index bf921caaf6aea..054a51713d556 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -2324,9 +2324,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
+ 	io_req->fcport = fcport;
+ 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+ 
+-	/* Record which cpu this request is associated with */
+-	io_req->cpu = smp_processor_id();
+-
+ 	/* Set TM flags */
+ 	io_req->io_req_flags = QEDF_READ;
+ 	io_req->data_xfer_len = 0;
+@@ -2349,6 +2346,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
+ 
+ 	spin_lock_irqsave(&fcport->rport_lock, flags);
+ 
++	/* Record which cpu this request is associated with */
++	io_req->cpu = smp_processor_id();
++
+ 	sqe_idx = qedf_get_sqe_idx(fcport);
+ 	sqe = &fcport->sq[sqe_idx];
+ 	memset(sqe, 0, sizeof(struct fcoe_wqe));
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index 8648b8eb080dc..cdce2e280f663 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -145,6 +145,9 @@
+ #define CDNS_XSPI_STIG_DONE_FLAG		BIT(0)
+ #define CDNS_XSPI_TRD_STATUS			0x0104
+ 
++#define MODE_NO_OF_BYTES			GENMASK(25, 24)
++#define MODEBYTES_COUNT			1
++
+ /* Helper macros for filling command registers */
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+@@ -157,9 +160,10 @@
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+ 
+-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
++	FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+ 
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+@@ -173,12 +177,12 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+ 
+-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ 		((op)->data.nbytes >> 16) & 0xffff) | \
+ 	FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ 		  (op)->dummy.buswidth != 0 ? \
+-		  (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++		  (((dummybytes) * 8) / (op)->dummy.buswidth) : \
+ 		  0))
+ 
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+@@ -351,6 +355,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	u32 cmd_regs[6];
+ 	u32 cmd_status;
+ 	int ret;
++	int dummybytes = op->dummy.nbytes;
+ 
+ 	ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ 	if (ret < 0)
+@@ -365,7 +370,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 	memset(cmd_regs, 0, sizeof(cmd_regs));
+ 	cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ 	cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+-	cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
++	if (dummybytes != 0) {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, MODEBYTES_COUNT);
++		dummybytes--;
++	} else {
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
++	}
+ 	cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ 						       cdns_xspi->cur_cs);
+ 
+@@ -375,7 +385,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ 		cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ 		cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ 		cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+-		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
++		cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
+ 		cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ 							   cdns_xspi->cur_cs);
+ 
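For readers following the STIG path above: the change sends the first dummy byte as a mode byte (MODE_NO_OF_BYTES in instruction register 3) and programs only the remainder as dummy cycles in the data sequencer. A minimal sketch of that split, with illustrative names rather than the driver's own helpers:

	/* Sketch: mirror the dummy-byte handling in cdns_xspi_send_stig_command().
	 * One byte is emitted as a mode byte; the rest stay dummy bytes.
	 */
	static void split_dummy_bytes(unsigned int nbytes,
				      unsigned int *modebytes,
				      unsigned int *dummybytes)
	{
		*modebytes = nbytes ? 1 : 0;	/* MODEBYTES_COUNT */
		*dummybytes = nbytes ? nbytes - 1 : 0;
	}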
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index 6b9422bd8795d..25f836c00e226 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -1250,6 +1250,8 @@ static int lvts_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	lvts_data = of_device_get_match_data(dev);
++	if (!lvts_data)
++		return -ENODEV;
+ 
+ 	lvts_td->clk = devm_clk_get_enabled(dev, NULL);
+ 	if (IS_ERR(lvts_td->clk))
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 9552228d21614..f63cdd6794419 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1314,7 +1314,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+ 
+ }
+ 
+-#define TXTL_DEFAULT 2 /* reset default */
++#define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 48d745e9f9730..48028bab57e34 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2658,16 +2658,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 			else
+ 				xhci_handle_halted_endpoint(xhci, ep, NULL,
+ 							    EP_SOFT_RESET);
+-			goto cleanup;
++			break;
+ 		case COMP_RING_UNDERRUN:
+ 		case COMP_RING_OVERRUN:
+ 		case COMP_STOPPED_LENGTH_INVALID:
+-			goto cleanup;
++			break;
+ 		default:
+ 			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+ 				 slot_id, ep_index);
+ 			goto err_out;
+ 		}
++		return 0;
+ 	}
+ 
+ 	/* Count current td numbers if ep->skip is set */
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 282aac45c6909..f34f9895b8984 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -497,10 +497,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
+ 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+ }
+ 
+-static void vhost_scsi_evt_work(struct vhost_work *work)
++static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
+ {
+-	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+-					vs_event_work);
+ 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
+ 	struct vhost_scsi_evt *evt, *t;
+ 	struct llist_node *llnode;
+@@ -508,12 +506,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
+ 	mutex_lock(&vq->mutex);
+ 	llnode = llist_del_all(&vs->vs_event_list);
+ 	llist_for_each_entry_safe(evt, t, llnode, list) {
+-		vhost_scsi_do_evt_work(vs, evt);
++		if (!drop)
++			vhost_scsi_do_evt_work(vs, evt);
+ 		vhost_scsi_free_evt(vs, evt);
+ 	}
+ 	mutex_unlock(&vq->mutex);
+ }
+ 
++static void vhost_scsi_evt_work(struct vhost_work *work)
++{
++	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
++					     vs_event_work);
++	vhost_scsi_complete_events(vs, false);
++}
++
+ static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
+ {
+ 	struct iov_iter *iter = &cmd->saved_iter;
+@@ -1509,7 +1515,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ 	}
+ 
+ 	llist_add(&evt->list, &vs->vs_event_list);
+-	vhost_vq_work_queue(vq, &vs->vs_event_work);
++	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
++		vhost_scsi_complete_events(vs, true);
+ }
+ 
+ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 8995730ce0bfc..1740a5f1f35e7 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -276,21 +276,36 @@ void vhost_vq_flush(struct vhost_virtqueue *vq)
+ EXPORT_SYMBOL_GPL(vhost_vq_flush);
+ 
+ /**
+- * vhost_worker_flush - flush a worker
++ * __vhost_worker_flush - flush a worker
+  * @worker: worker to flush
+  *
+- * This does not use RCU to protect the worker, so the device or worker
+- * mutex must be held.
++ * The worker's flush_mutex must be held.
+  */
+-static void vhost_worker_flush(struct vhost_worker *worker)
++static void __vhost_worker_flush(struct vhost_worker *worker)
+ {
+ 	struct vhost_flush_struct flush;
+ 
++	if (!worker->attachment_cnt || worker->killed)
++		return;
++
+ 	init_completion(&flush.wait_event);
+ 	vhost_work_init(&flush.work, vhost_flush_work);
+ 
+ 	vhost_worker_queue(worker, &flush.work);
++	/*
++	 * Drop mutex in case our worker is killed and it needs to take the
++	 * mutex to force cleanup.
++	 */
++	mutex_unlock(&worker->mutex);
+ 	wait_for_completion(&flush.wait_event);
++	mutex_lock(&worker->mutex);
++}
++
++static void vhost_worker_flush(struct vhost_worker *worker)
++{
++	mutex_lock(&worker->mutex);
++	__vhost_worker_flush(worker);
++	mutex_unlock(&worker->mutex);
+ }
+ 
+ void vhost_dev_flush(struct vhost_dev *dev)
+@@ -298,15 +313,8 @@ void vhost_dev_flush(struct vhost_dev *dev)
+ 	struct vhost_worker *worker;
+ 	unsigned long i;
+ 
+-	xa_for_each(&dev->worker_xa, i, worker) {
+-		mutex_lock(&worker->mutex);
+-		if (!worker->attachment_cnt) {
+-			mutex_unlock(&worker->mutex);
+-			continue;
+-		}
++	xa_for_each(&dev->worker_xa, i, worker)
+ 		vhost_worker_flush(worker);
+-		mutex_unlock(&worker->mutex);
+-	}
+ }
+ EXPORT_SYMBOL_GPL(vhost_dev_flush);
+ 
+@@ -392,7 +400,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ 	__vhost_vq_meta_reset(vq);
+ }
+ 
+-static bool vhost_worker(void *data)
++static bool vhost_run_work_list(void *data)
+ {
+ 	struct vhost_worker *worker = data;
+ 	struct vhost_work *work, *work_next;
+@@ -417,6 +425,40 @@ static bool vhost_worker(void *data)
+ 	return !!node;
+ }
+ 
++static void vhost_worker_killed(void *data)
++{
++	struct vhost_worker *worker = data;
++	struct vhost_dev *dev = worker->dev;
++	struct vhost_virtqueue *vq;
++	int i, attach_cnt = 0;
++
++	mutex_lock(&worker->mutex);
++	worker->killed = true;
++
++	for (i = 0; i < dev->nvqs; i++) {
++		vq = dev->vqs[i];
++
++		mutex_lock(&vq->mutex);
++		if (worker ==
++		    rcu_dereference_check(vq->worker,
++					  lockdep_is_held(&vq->mutex))) {
++			rcu_assign_pointer(vq->worker, NULL);
++			attach_cnt++;
++		}
++		mutex_unlock(&vq->mutex);
++	}
++
++	worker->attachment_cnt -= attach_cnt;
++	if (attach_cnt)
++		synchronize_rcu();
++	/*
++	 * Finish vhost_worker_flush calls and any other work items that
++	 * snuck in before the synchronize_rcu.
++	 */
++	vhost_run_work_list(worker);
++	mutex_unlock(&worker->mutex);
++}
++
+ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
+ {
+ 	kfree(vq->indirect);
+@@ -631,9 +673,11 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+ 	if (!worker)
+ 		return NULL;
+ 
++	worker->dev = dev;
+ 	snprintf(name, sizeof(name), "vhost-%d", current->pid);
+ 
+-	vtsk = vhost_task_create(vhost_worker, worker, name);
++	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
++				 worker, name);
+ 	if (!vtsk)
+ 		goto free_worker;
+ 
+@@ -664,22 +708,37 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ {
+ 	struct vhost_worker *old_worker;
+ 
+-	old_worker = rcu_dereference_check(vq->worker,
+-					   lockdep_is_held(&vq->dev->mutex));
+-
+ 	mutex_lock(&worker->mutex);
+-	worker->attachment_cnt++;
+-	mutex_unlock(&worker->mutex);
++	if (worker->killed) {
++		mutex_unlock(&worker->mutex);
++		return;
++	}
++
++	mutex_lock(&vq->mutex);
++
++	old_worker = rcu_dereference_check(vq->worker,
++					   lockdep_is_held(&vq->mutex));
+ 	rcu_assign_pointer(vq->worker, worker);
++	worker->attachment_cnt++;
+ 
+-	if (!old_worker)
++	if (!old_worker) {
++		mutex_unlock(&vq->mutex);
++		mutex_unlock(&worker->mutex);
+ 		return;
++	}
++	mutex_unlock(&vq->mutex);
++	mutex_unlock(&worker->mutex);
++
+ 	/*
+ 	 * Take the worker mutex to make sure we see the work queued from
+ 	 * device-wide flushes, which don't use RCU for execution.
+ 	 */
+ 	mutex_lock(&old_worker->mutex);
+-	old_worker->attachment_cnt--;
++	if (old_worker->killed) {
++		mutex_unlock(&old_worker->mutex);
++		return;
++	}
++
+ 	/*
+ 	 * We don't want to call synchronize_rcu for every vq during setup
+ 	 * because it will slow down VM startup. If we haven't done
+@@ -690,6 +749,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ 	mutex_lock(&vq->mutex);
+ 	if (!vhost_vq_get_backend(vq) && !vq->kick) {
+ 		mutex_unlock(&vq->mutex);
++
++		old_worker->attachment_cnt--;
+ 		mutex_unlock(&old_worker->mutex);
+ 		/*
+ 		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
+@@ -705,7 +766,8 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
+ 	/* Make sure new vq queue/flush/poll calls see the new worker */
+ 	synchronize_rcu();
+ 	/* Make sure whatever was queued gets run */
+-	vhost_worker_flush(old_worker);
++	__vhost_worker_flush(old_worker);
++	old_worker->attachment_cnt--;
+ 	mutex_unlock(&old_worker->mutex);
+ }
+ 
+@@ -754,10 +816,16 @@ static int vhost_free_worker(struct vhost_dev *dev,
+ 		return -ENODEV;
+ 
+ 	mutex_lock(&worker->mutex);
+-	if (worker->attachment_cnt) {
++	if (worker->attachment_cnt || worker->killed) {
+ 		mutex_unlock(&worker->mutex);
+ 		return -EBUSY;
+ 	}
++	/*
++	 * A flush might have raced and snuck in before attachment_cnt was set
++	 * to zero. Make sure flushes are flushed from the queue before
++	 * freeing.
++	 */
++	__vhost_worker_flush(worker);
+ 	mutex_unlock(&worker->mutex);
+ 
+ 	vhost_worker_destroy(dev, worker);
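The unlock/wait/relock dance in __vhost_worker_flush() above is the crux of the fix: a killed worker must be able to take worker->mutex to clean up while a flusher waits. A minimal sketch of the pattern (illustrative names, not the vhost API):

	#include <linux/completion.h>
	#include <linux/mutex.h>

	/* Caller holds *lock and has already queued work that completes *done. */
	static void wait_unlocked(struct mutex *lock, struct completion *done)
	{
		mutex_unlock(lock);		/* let the dying worker take it */
		wait_for_completion(done);
		mutex_lock(lock);		/* restore the caller's locking */
	}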
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 9e942fcda5c3f..dc94e6a7d3c22 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -28,12 +28,14 @@ struct vhost_work {
+ 
+ struct vhost_worker {
+ 	struct vhost_task	*vtsk;
++	struct vhost_dev	*dev;
+ 	/* Used to serialize device wide flushing with worker swapping. */
+ 	struct mutex		mutex;
+ 	struct llist_head	work_list;
+ 	u64			kcov_handle;
+ 	u32			id;
+ 	int			attachment_cnt;
++	bool			killed;
+ };
+ 
+ /* Poll a file (eventfd or socket) */
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 584af7816532b..f6b0b00e4599f 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -236,7 +236,7 @@ void vp_del_vqs(struct virtio_device *vdev)
+ 	int i;
+ 
+ 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+-		if (vp_dev->is_avq(vdev, vq->index))
++		if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
+ 			continue;
+ 
+ 		if (vp_dev->per_vq_vectors) {
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 1a66be33bb048..60066822b5329 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1924,8 +1924,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ next:
+ 		if (ret) {
+ 			/* Refcount held by the reclaim_bgs list after splice. */
+-			btrfs_get_block_group(bg);
+-			list_add_tail(&bg->bg_list, &retry_list);
++			spin_lock(&fs_info->unused_bgs_lock);
++			/*
++			 * This block group might have been added to the
++			 * unused list during the above process. If it was
++			 * not, move it back to the reclaim retry list.
++			 */
++			if (list_empty(&bg->bg_list)) {
++				btrfs_get_block_group(bg);
++				list_add_tail(&bg->bg_list, &retry_list);
++			}
++			spin_unlock(&fs_info->unused_bgs_lock);
+ 		}
+ 		btrfs_put_block_group(bg);
+ 
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 41173701f1bef..1e020620748d6 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3526,7 +3526,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+ 	for (int i = 0; i < num_folios; i++) {
+ 		if (eb->folios[i]) {
+ 			detach_extent_buffer_folio(eb, eb->folios[i]);
+-			__folio_put(eb->folios[i]);
++			folio_put(eb->folios[i]);
+ 		}
+ 	}
+ 	__free_extent_buffer(eb);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 1167899a16d05..4caa078d972a3 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3065,8 +3065,6 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ 			       struct btrfs_qgroup_inherit *inherit,
+ 			       size_t size)
+ {
+-	if (!btrfs_qgroup_enabled(fs_info))
+-		return 0;
+ 	if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
+ 		return -EOPNOTSUPP;
+ 	if (size < sizeof(*inherit) || size > PAGE_SIZE)
+@@ -3090,6 +3088,14 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
+ 	if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
+ 		return -EINVAL;
+ 
++	/*
++	 * Skip the inherit source qgroups check if qgroup is not enabled.
++	 * Qgroups can still be enabled later, causing problems, but in that
++	 * case btrfs_qgroup_inherit() would just ignore those invalid ones.
++	 */
++	if (!btrfs_qgroup_enabled(fs_info))
++		return 0;
++
+ 	/*
+ 	 * Now check all the remaining qgroups, they should all:
+ 	 *
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 4b22cfe9a98cb..afd6932f5e895 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2100,7 +2100,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+ 	struct btrfs_fs_info *fs_info = sctx->fs_info;
+ 	const u64 logical_end = logical_start + logical_length;
+ 	u64 cur_logical = logical_start;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* The range must be inside the bg */
+ 	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index d620323d08eae..ae8c56442549c 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -373,11 +373,18 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
+ 	 * "optimal" chunk size based on the fs size.  However when we actually
+ 	 * allocate the chunk we will strip this down further, making it no more
+ 	 * than 10% of the disk or 1G, whichever is smaller.
++	 *
++	 * In zoned mode, we need to use zone_size (=
++	 * data_sinfo->chunk_size) as is.
+ 	 */
+ 	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+-	data_chunk_size = min(data_sinfo->chunk_size,
+-			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+-	data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
++	if (!btrfs_is_zoned(fs_info)) {
++		data_chunk_size = min(data_sinfo->chunk_size,
++				      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
++		data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
++	} else {
++		data_chunk_size = data_sinfo->chunk_size;
++	}
+ 
+ 	/*
+ 	 * Since data allocations immediately use block groups as part of the
+@@ -405,6 +412,17 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
+ 		avail >>= 3;
+ 	else
+ 		avail >>= 1;
++
++	/*
++	 * In zoned mode, we always allocate one zone as one chunk.
++	 * Returning a byte count that is not aligned to the zone size
++	 * here would put less pressure on the async metadata reclaim
++	 * process than needed, letting it over-commit too much and run
++	 * into ENOSPC. Align down to the zone size to avoid that.
++	 */
++	if (btrfs_is_zoned(fs_info))
++		avail = ALIGN_DOWN(avail, fs_info->zone_size);
++
+ 	return avail;
+ }
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 07b3675ea1694..6c60e43ec8e58 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -72,7 +72,7 @@ enum {
+ 
+ struct f2fs_fault_info {
+ 	atomic_t inject_ops;
+-	unsigned int inject_rate;
++	int inject_rate;
+ 	unsigned int inject_type;
+ };
+ 
+@@ -4597,10 +4597,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+ }
+ 
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type);
++extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type);
+ #else
+-#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
++static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
++					unsigned long rate, unsigned long type)
++{
++	return 0;
++}
+ #endif
+ 
+ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 2f75a7dfc311d..0c3ebe4d9026d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -66,21 +66,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
+ 	[FAULT_NO_SEGMENT]		= "no free segment",
+ };
+ 
+-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-							unsigned int type)
++int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++							unsigned long type)
+ {
+ 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ 
+ 	if (rate) {
++		if (rate > INT_MAX)
++			return -EINVAL;
+ 		atomic_set(&ffi->inject_ops, 0);
+-		ffi->inject_rate = rate;
++		ffi->inject_rate = (int)rate;
+ 	}
+ 
+-	if (type)
+-		ffi->inject_type = type;
++	if (type) {
++		if (type >= BIT(FAULT_MAX))
++			return -EINVAL;
++		ffi->inject_type = (unsigned int)type;
++	}
+ 
+ 	if (!rate && !type)
+ 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
++	else
++		f2fs_info(sbi,
++			"build fault injection attr: rate: %lu, type: 0x%lx",
++								rate, type);
++	return 0;
+ }
+ #endif
+ 
+@@ -886,14 +896,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+ 		case Opt_fault_injection:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
++			if (f2fs_build_fault_attr(sbi, arg,
++					F2FS_ALL_FAULT_TYPE))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ 
+ 		case Opt_fault_type:
+ 			if (args->from && match_int(args, &arg))
+ 				return -EINVAL;
+-			f2fs_build_fault_attr(sbi, 0, arg);
++			if (f2fs_build_fault_attr(sbi, 0, arg))
++				return -EINVAL;
+ 			set_opt(sbi, FAULT_INJECTION);
+ 			break;
+ #else
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index a568ce96cf563..7aa3844e7a808 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -484,10 +484,16 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+ 	if (ret < 0)
+ 		return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-	if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+-		return -EINVAL;
+-	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+-		return -EINVAL;
++	if (a->struct_type == FAULT_INFO_TYPE) {
++		if (f2fs_build_fault_attr(sbi, 0, t))
++			return -EINVAL;
++		return count;
++	}
++	if (a->struct_type == FAULT_INFO_RATE) {
++		if (f2fs_build_fault_attr(sbi, t, 0))
++			return -EINVAL;
++		return count;
++	}
+ #endif
+ 	if (a->struct_type == RESERVED_BLOCKS) {
+ 		spin_lock(&sbi->stat_lock);
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index aede1be4dc0cd..4545f885c41ef 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
+ 	struct jffs2_inode_info *f = foo;
+ 
+ 	mutex_init(&f->sem);
++	f->target = NULL;
+ 	inode_init_once(&f->vfs_inode);
+ }
+ 
+diff --git a/fs/locks.c b/fs/locks.c
+index 90c8746874ded..c360d1992d21f 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2448,8 +2448,9 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by releasing the
+-	 * lock that was just acquired. There is no need to do that when we're
++	 * Detect close/fcntl races and recover by zapping all POSIX locks
++	 * associated with this file and our files_struct, just like on
++	 * filp_flush(). There is no need to do that when we're
+ 	 * unlocking though, or for OFD locks.
+ 	 */
+ 	if (!error && file_lock->c.flc_type != F_UNLCK &&
+@@ -2464,9 +2465,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = files_lookup_fd_locked(files, fd);
+ 		spin_unlock(&files->file_lock);
+ 		if (f != filp) {
+-			file_lock->c.flc_type = F_UNLCK;
+-			error = do_lock_file_wait(filp, cmd, file_lock);
+-			WARN_ON_ONCE(error);
++			locks_remove_posix(filp, files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 89caef7513db3..ba50388ee4bf1 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
+  * @target: offset number of an entry in the group (start point)
+  * @bsize: size in bits
+  * @lock: spin lock protecting @bitmap
++ * @wrap: whether to wrap around
+  */
+ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 					    unsigned long target,
+ 					    unsigned int bsize,
+-					    spinlock_t *lock)
++					    spinlock_t *lock, bool wrap)
+ {
+ 	int pos, end = bsize;
+ 
+@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 
+ 		end = target;
+ 	}
++	if (!wrap)
++		return -ENOSPC;
+ 
+ 	/* wrap around */
+ 	for (pos = 0; pos < end; pos++) {
+@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
+  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+  * @inode: inode of metadata file using this allocator
+  * @req: nilfs_palloc_req structure exchanged for the allocation
++ * @wrap: whether to wrap around
+  */
+ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+-				     struct nilfs_palloc_req *req)
++				     struct nilfs_palloc_req *req, bool wrap)
+ {
+ 	struct buffer_head *desc_bh, *bitmap_bh;
+ 	struct nilfs_palloc_group_desc *desc;
+@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 	entries_per_group = nilfs_palloc_entries_per_group(inode);
+ 
+ 	for (i = 0; i < ngroups; i += n) {
+-		if (group >= ngroups) {
++		if (group >= ngroups && wrap) {
+ 			/* wrap around */
+ 			group = 0;
+ 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
+@@ -550,7 +554,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+ 			bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
+ 			bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+ 			pos = nilfs_palloc_find_available_slot(
+-				bitmap, group_offset, entries_per_group, lock);
++				bitmap, group_offset, entries_per_group, lock,
++				wrap);
++			/*
++			 * Since the search for a free slot in the second and
++			 * subsequent bitmap blocks always starts from the
++			 * beginning, the wrap flag only has an effect on the
++			 * first search.
++			 */
+ 			kunmap_local(bitmap_kaddr);
+ 			if (pos >= 0)
+ 				goto found;
+diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
+index b667e869ac076..d825a9faca6d9 100644
+--- a/fs/nilfs2/alloc.h
++++ b/fs/nilfs2/alloc.h
+@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
+ 	struct buffer_head *pr_entry_bh;
+ };
+ 
+-int nilfs_palloc_prepare_alloc_entry(struct inode *,
+-				     struct nilfs_palloc_req *);
++int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
++				     struct nilfs_palloc_req *req, bool wrap);
+ void nilfs_palloc_commit_alloc_entry(struct inode *,
+ 				     struct nilfs_palloc_req *);
+ void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
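The new wrap flag controls whether the slot search may circle back to the start of the bitmap; ifile inode allocation passes false so the reserved low inode numbers are never handed out. A simplified sketch of the forward-then-wrap scan over a byte bitmap (illustrative only, not the nilfs helpers):

	/* Scan [target, bsize) for a clear bit; optionally wrap to [0, target). */
	static int find_free_slot(const unsigned char *bitmap, unsigned long target,
				  unsigned int bsize, bool wrap)
	{
		unsigned int pos;

		for (pos = target; pos < bsize; pos++)
			if (!(bitmap[pos >> 3] & (1 << (pos & 7))))
				return pos;
		if (!wrap)
			return -1;	/* caller reports -ENOSPC */
		for (pos = 0; pos < target; pos++)
			if (!(bitmap[pos >> 3] & (1 << (pos & 7))))
				return pos;
		return -1;
	}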
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 180fc8d36213d..fc1caf63a42ae 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+ 	int ret;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
++	ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 35e6c55a0d231..d748d9dce74e4 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -135,6 +135,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
+ 			goto Enamelen;
+ 		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ 			goto Espan;
++		if (unlikely(p->inode &&
++			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
++			goto Einumber;
+ 	}
+ 	if (offs != limit)
+ 		goto Eend;
+@@ -160,6 +163,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
+ 	goto bad_entry;
+ Espan:
+ 	error = "directory entry across blocks";
++	goto bad_entry;
++Einumber:
++	error = "disallowed inode number";
+ bad_entry:
+ 	nilfs_error(sb,
+ 		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
+diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
+index 612e609158b52..1e86b9303b7ca 100644
+--- a/fs/nilfs2/ifile.c
++++ b/fs/nilfs2/ifile.c
+@@ -56,13 +56,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
+ 	struct nilfs_palloc_req req;
+ 	int ret;
+ 
+-	req.pr_entry_nr = 0;  /*
+-			       * 0 says find free inode from beginning
+-			       * of a group. dull code!!
+-			       */
++	req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
+ 	req.pr_entry_bh = NULL;
+ 
+-	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
++	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
+ 	if (!ret) {
+ 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
+ 						   &req.pr_entry_bh);
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index 2e29b98ba8bab..fe982c3e08770 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -116,9 +116,15 @@ enum {
+ #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
+ 
+ #define NILFS_MDT_INODE(sb, ino) \
+-	((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
++	((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
+ #define NILFS_VALID_INODE(sb, ino) \
+-	((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
++	((ino) >= NILFS_FIRST_INO(sb) ||				\
++	 ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
++
++#define NILFS_PRIVATE_INODE(ino) ({					\
++	ino_t __ino = (ino);						\
++	((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO &&	\
++	 (__ino) != NILFS_SKETCH_INO); })
+ 
+ /**
+  * struct nilfs_transaction_info: context information for synchronization
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 2ae2c1bbf6d17..5aac4f9118fd9 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ 	}
+ 
+ 	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
++	if (nilfs->ns_first_ino < NILFS_USER_INO) {
++		nilfs_err(nilfs->ns_sb,
++			  "too small lower limit for non-reserved inode numbers: %u",
++			  nilfs->ns_first_ino);
++		return -EINVAL;
++	}
+ 
+ 	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+ 	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index cd4ae1b8ae165..17fee562ee503 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -182,7 +182,7 @@ struct the_nilfs {
+ 	unsigned long		ns_nrsvsegs;
+ 	unsigned long		ns_first_data_block;
+ 	int			ns_inode_size;
+-	int			ns_first_ino;
++	unsigned int		ns_first_ino;
+ 	u32			ns_crc_seed;
+ 
+ 	/* /sys/fs/<nilfs>/<device> */
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 53e7d1fa036aa..73785dece7a7f 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -219,8 +219,11 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ 		if (!ea->name_len)
+ 			break;
+ 
+-		if (ea->name_len > ea_size)
++		if (ea->name_len > ea_size) {
++			ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++			err = -EINVAL; /* corrupted fs */
+ 			break;
++		}
+ 
+ 		if (buffer) {
+ 			/* Check if we can use field ea->name */
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 34849b4a3243c..907765673765c 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 		     (long)new_op->downcall.resp.statfs.files_avail);
+ 
+ 	buf->f_type = sb->s_magic;
+-	memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
++	buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
++	buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
+ 	buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+ 	buf->f_namelen = ORANGEFS_NAME_MAX;
+ 
+diff --git a/fs/super.c b/fs/super.c
+index 69ce6c6009684..4e52aba2fbea4 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1501,8 +1501,17 @@ static int fs_bdev_thaw(struct block_device *bdev)
+ 
+ 	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
+ 
++	/*
++	 * The block device may have been frozen before it was claimed by a
++	 * filesystem. Concurrently another process might try to mount that
++	 * frozen block device and has temporarily claimed the block device for
++	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
++	 * mounter is already about to abort mounting because it still saw an
++	 * elevated bdev->bd_fsfreeze_count, so get_bdev_super() will return
++	 * NULL in that case.
++	 */
+ 	sb = get_bdev_super(bdev);
+-	if (WARN_ON_ONCE(!sb))
++	if (!sb)
+ 		return -EINVAL;
+ 
+ 	if (sb->s_op->thaw_super)
+diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
+index 5693a4be0d9a9..ff9c65841ae8d 100644
+--- a/include/linux/dynamic_queue_limits.h
++++ b/include/linux/dynamic_queue_limits.h
+@@ -91,7 +91,8 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
+ {
+ 	unsigned long map, now, now_hi, i;
+ 
+-	BUG_ON(count > DQL_MAX_OBJECT);
++	if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
++		return;
+ 
+ 	dql->last_obj_cnt = count;
+ 
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 1a9de119a0f73..f79853d2778b7 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -99,7 +99,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
+ {
+ 	const struct path *path;
+ 
+-	if (file->f_mode & FMODE_NONOTIFY)
++	/*
++	 * FMODE_NONOTIFY marks fds generated by fanotify itself, which should
++	 * not generate new events. We also don't want to generate events for
++	 * FMODE_PATH fds (this would involve open & close events) as they are
++	 * just handle creation / destruction events and not "real" file events.
++	 */
++	if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+ 		return 0;
+ 
+ 	path = &file->f_path;
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 67edc4ca2beeb..a561c629d89f0 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -22,6 +22,8 @@
+ #include <linux/cleanup.h>
+ #include <linux/mutex_types.h>
+ 
++struct device;
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+ 		, .dep_map = {					\
+@@ -117,6 +119,31 @@ do {							\
+ } while (0)
+ #endif /* CONFIG_PREEMPT_RT */
+ 
++#ifdef CONFIG_DEBUG_MUTEXES
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock);
++
++#else
++
++static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	/*
++	 * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a no-op,
++	 * so there is no real need to register it with the devm subsystem.
++	 */
++	return 0;
++}
++
++#endif
++
++#define devm_mutex_init(dev, mutex)			\
++({							\
++	typeof(mutex) mutex_ = (mutex);			\
++							\
++	mutex_init(mutex_);				\
++	__devm_mutex_init(dev, mutex_);			\
++})
++
+ /*
+  * See kernel/locking/mutex.c for detailed documentation of these APIs.
+  * Also see Documentation/locking/mutex-design.rst.
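A minimal usage sketch for the new helper in a probe path (the driver structure here is hypothetical; only devm_mutex_init() itself comes from this patch):

	struct foo_priv {
		struct mutex lock;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		int ret;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* mutex_destroy() is scheduled automatically on unbind */
		ret = devm_mutex_init(&pdev->dev, &priv->lock);
		if (ret)
			return ret;

		platform_set_drvdata(pdev, priv);
		return 0;
	}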
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 3f68b8239bb11..a62d86bce1b63 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1121,7 +1121,7 @@ struct phy_driver {
+ 				  u8 index, enum led_brightness value);
+ 
+ 	/**
+-	 * @led_blink_set: Set a PHY LED brightness.  Index indicates
++	 * @led_blink_set: Set a PHY LED blinking.  Index indicates
+ 	 * which of the PHYs led should be configured to blink. Delays
+ 	 * are in milliseconds and if both are zero then a sensible
+ 	 * default should be chosen.  The call should adjust the
+diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
+index bc60243d43b36..25446c5d35081 100644
+--- a/include/linux/sched/vhost_task.h
++++ b/include/linux/sched/vhost_task.h
+@@ -4,7 +4,8 @@
+ 
+ struct vhost_task;
+ 
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++				     void (*handle_kill)(void *), void *arg,
+ 				     const char *name);
+ void vhost_task_start(struct vhost_task *vtsk);
+ void vhost_task_stop(struct vhost_task *vtsk);
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index f187510428ca6..d46320f7fd685 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -324,6 +324,17 @@ enum {
+ 	 * claim to support it.
+ 	 */
+ 	HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
++
++	/*
++	 * When this quirk is set, the reserved bits of Primary/Secondary_PHY
++	 * inside the LE Extended Advertising Report events are discarded.
++	 * This is required for some Apple/Broadcom controllers which
++	 * abuse these reserved bits for unrelated flags.
++	 *
++	 * This quirk can be set before hci_register_dev is called or
++	 * during the hdev->setup vendor callback.
++	 */
++	HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 2d7f87bc5324b..baaff7bc09119 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -395,7 +395,7 @@ enum ieee80211_bss_change {
+ 	BSS_CHANGED_HE_OBSS_PD		= 1<<28,
+ 	BSS_CHANGED_HE_BSS_COLOR	= 1<<29,
+ 	BSS_CHANGED_FILS_DISCOVERY      = 1<<30,
+-	BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
++	BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31),
+ 	BSS_CHANGED_MLD_VALID_LINKS	= BIT_ULL(33),
+ 	BSS_CHANGED_MLD_TTLM		= BIT_ULL(34),
+ 
+diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
+index f2afb7cc4926c..18e3745b86cd4 100644
+--- a/include/uapi/linux/cn_proc.h
++++ b/include/uapi/linux/cn_proc.h
+@@ -69,8 +69,7 @@ struct proc_input {
+ 
+ static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
+ {
+-	ev_type &= PROC_EVENT_ALL;
+-	return ev_type;
++	return (enum proc_cn_event)(ev_type & PROC_EVENT_ALL);
+ }
+ 
+ /*
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index f7f3d14fa69a7..4950e0b622b1f 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -256,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ 		 * dma_mask changed by benchmark
+ 		 */
+ 		dma_set_mask(map->dev, old_dma_mask);
++
++		if (ret)
++			return ret;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 41a12630cbbc9..2b9ef8abff79d 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -488,6 +488,8 @@ void mm_update_next_owner(struct mm_struct *mm)
+ 	 * Search through everything else, we should not get here often.
+ 	 */
+ 	for_each_process(g) {
++		if (atomic_read(&mm->mm_users) <= 1)
++			break;
+ 		if (g->flags & PF_KTHREAD)
+ 			continue;
+ 		for_each_thread(g, c) {
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index bc8abb8549d20..6e6f6071cfa27 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -12,6 +12,7 @@
+  */
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/poison.h>
+ #include <linux/sched.h>
+@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
+ 	lock->magic = lock;
+ }
+ 
++static void devm_mutex_release(void *res)
++{
++	mutex_destroy(res);
++}
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++	return devm_add_action_or_reset(dev, devm_mutex_release, lock);
++}
++EXPORT_SYMBOL_GPL(__devm_mutex_init);
++
+ /***
+  * mutex_destroy - mark a mutex unusable
+  * @lock: the mutex to be destroyed
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 5bc04bfe2db1d..c6f24d17866d8 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -1600,7 +1600,7 @@ int swsusp_check(bool exclusive)
+ 
+ put:
+ 		if (error)
+-			fput(hib_resume_bdev_file);
++			bdev_fput(hib_resume_bdev_file);
+ 		else
+ 			pr_debug("Image signature found, resuming\n");
+ 	} else {
+diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
+index da35e5b7f0473..8800f5acc0071 100644
+--- a/kernel/vhost_task.c
++++ b/kernel/vhost_task.c
+@@ -10,38 +10,32 @@
+ 
+ enum vhost_task_flags {
+ 	VHOST_TASK_FLAGS_STOP,
++	VHOST_TASK_FLAGS_KILLED,
+ };
+ 
+ struct vhost_task {
+ 	bool (*fn)(void *data);
++	void (*handle_sigkill)(void *data);
+ 	void *data;
+ 	struct completion exited;
+ 	unsigned long flags;
+ 	struct task_struct *task;
++	/* serialize SIGKILL and vhost_task_stop calls */
++	struct mutex exit_mutex;
+ };
+ 
+ static int vhost_task_fn(void *data)
+ {
+ 	struct vhost_task *vtsk = data;
+-	bool dead = false;
+ 
+ 	for (;;) {
+ 		bool did_work;
+ 
+-		if (!dead && signal_pending(current)) {
++		if (signal_pending(current)) {
+ 			struct ksignal ksig;
+-			/*
+-			 * Calling get_signal will block in SIGSTOP,
+-			 * or clear fatal_signal_pending, but remember
+-			 * what was set.
+-			 *
+-			 * This thread won't actually exit until all
+-			 * of the file descriptors are closed, and
+-			 * the release function is called.
+-			 */
+-			dead = get_signal(&ksig);
+-			if (dead)
+-				clear_thread_flag(TIF_SIGPENDING);
++
++			if (get_signal(&ksig))
++				break;
+ 		}
+ 
+ 		/* mb paired w/ vhost_task_stop */
+@@ -57,7 +51,19 @@ static int vhost_task_fn(void *data)
+ 			schedule();
+ 	}
+ 
++	mutex_lock(&vtsk->exit_mutex);
++	/*
++	 * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
++	 * When the vhost layer has called vhost_task_stop, it has already
++	 * stopped new work and flushed.
++	 */
++	if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
++		set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
++		vtsk->handle_sigkill(vtsk->data);
++	}
++	mutex_unlock(&vtsk->exit_mutex);
+ 	complete(&vtsk->exited);
++
+ 	do_exit(0);
+ }
+ 
+@@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake);
+  * @vtsk: vhost_task to stop
+  *
+  * vhost_task_fn ensures the worker thread exits after
+- * VHOST_TASK_FLAGS_SOP becomes true.
++ * VHOST_TASK_FLAGS_STOP becomes true.
+  */
+ void vhost_task_stop(struct vhost_task *vtsk)
+ {
+-	set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
+-	vhost_task_wake(vtsk);
++	mutex_lock(&vtsk->exit_mutex);
++	if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
++		set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
++		vhost_task_wake(vtsk);
++	}
++	mutex_unlock(&vtsk->exit_mutex);
++
+ 	/*
+ 	 * Make sure vhost_task_fn is no longer accessing the vhost_task before
+ 	 * freeing it below.
+@@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
+ /**
+  * vhost_task_create - create a copy of a task to be used by the kernel
+  * @fn: vhost worker function
+- * @arg: data to be passed to fn
++ * @handle_sigkill: vhost function called when the task is killed
++ * @arg: data to be passed to fn and handle_sigkill
+  * @name: the thread's name
+  *
+  * This returns a specialized task for use by the vhost layer or NULL on
+  * failure. The returned task is inactive, and the caller must fire it up
+  * through vhost_task_start().
+  */
+-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
++struct vhost_task *vhost_task_create(bool (*fn)(void *),
++				     void (*handle_sigkill)(void *), void *arg,
+ 				     const char *name)
+ {
+ 	struct kernel_clone_args args = {
+@@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
+ 	if (!vtsk)
+ 		return NULL;
+ 	init_completion(&vtsk->exited);
++	mutex_init(&vtsk->exit_mutex);
+ 	vtsk->data = arg;
+ 	vtsk->fn = fn;
++	vtsk->handle_sigkill = handle_sigkill;
+ 
+ 	args.fn_arg = vtsk;
+ 
+diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
+index fdba0eaf19a59..ad29721b956bc 100644
+--- a/lib/fortify_kunit.c
++++ b/lib/fortify_kunit.c
+@@ -15,10 +15,17 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++/* We don't need to fill dmesg with the fortify WARNs during testing. */
++#ifdef DEBUG
++# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
++#else
++# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
++#endif
++
+ /* Redefine fortify_panic() to track failures. */
+ void fortify_add_kunit_error(int write);
+ #define fortify_panic(func, write, avail, size, retfail) do {		\
+-	__fortify_report(FORTIFY_REASON(func, write), avail, size);	\
++	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
+ 	fortify_add_kunit_error(write);					\
+ 	return (retfail);						\
+ } while (0)
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index d9d1df28cc52e..9c9e4dcf06d96 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -78,7 +78,6 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 	time_remaining = wait_for_completion_timeout(&try_completion,
+ 						     kunit_test_timeout());
+ 	if (time_remaining == 0) {
+-		kunit_err(test, "try timed out\n");
+ 		try_catch->try_result = -ETIMEDOUT;
+ 		kthread_stop(task_struct);
+ 	}
+@@ -93,6 +92,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+ 		try_catch->try_result = 0;
+ 	else if (exit_code == -EINTR)
+ 		kunit_err(test, "wake_up_process() was never called\n");
++	else if (exit_code == -ETIMEDOUT)
++		kunit_err(test, "try timed out\n");
+ 	else if (exit_code)
+ 		kunit_err(test, "Unknown error: %d\n", exit_code);
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 3e19b87049db1..c9af72f292a8b 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+ 	else
+ 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+ 
+-	if (bg_thresh >= thresh)
+-		bg_thresh = thresh / 2;
+ 	tsk = current;
+ 	if (rt_task(tsk)) {
+ 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
+ 	}
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	if (thresh > UINT_MAX)
++		thresh = UINT_MAX;
++	/* This makes sure bg_thresh is within 32-bits as well */
++	if (bg_thresh >= thresh)
++		bg_thresh = thresh / 2;
+ 	dtc->thresh = thresh;
+ 	dtc->bg_thresh = bg_thresh;
+ 
+@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
+ 	if (rt_task(tsk))
+ 		dirty += dirty / 4;
+ 
+-	return dirty;
++	/*
++	 * Dirty throttling logic assumes the limits in page units fit into
++	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++	 */
++	return min_t(unsigned long, dirty, UINT_MAX);
+ }
+ 
+ /**
+@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+ 		void *buffer, size_t *lenp, loff_t *ppos)
+ {
+ 	int ret;
++	unsigned long old_bytes = dirty_background_bytes;
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+-	if (ret == 0 && write)
++	if (ret == 0 && write) {
++		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
++								UINT_MAX) {
++			dirty_background_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		dirty_background_ratio = 0;
++	}
+ 	return ret;
+ }
+ 
+@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
+ 
+ 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
++		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
++			vm_dirty_bytes = old_bytes;
++			return -ERANGE;
++		}
+ 		writeback_set_ratelimit();
+ 		vm_dirty_ratio = 0;
+ 	}
+@@ -1638,7 +1660,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
+ 	 */
+ 	dtc->wb_thresh = __wb_calc_thresh(dtc);
+ 	dtc->wb_bg_thresh = dtc->thresh ?
+-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
++		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ 
+ 	/*
+ 	 * In order to avoid the stacked BDI deadlock we need
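A quick check of the 16TB figure cited in the new comments, assuming the common 4 KiB page size:

	/*
	 * thresh is clamped to UINT_MAX pages:
	 *   2^32 pages * 4096 bytes/page = 2^44 bytes = 16 TiB
	 * Larger page sizes (e.g. 64 KiB) raise the effective cap accordingly.
	 */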
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 08ae30fd31551..baca48ce8d0c6 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -904,8 +904,8 @@ static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+ 			       U16_MAX, GFP_ATOMIC);
+ }
+ 
+-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+-			      u8 role, u16 handle)
++static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++				       u8 role, u16 handle)
+ {
+ 	struct hci_conn *conn;
+ 
+@@ -1046,7 +1046,16 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ 	if (unlikely(handle < 0))
+ 		return ERR_PTR(-ECONNREFUSED);
+ 
+-	return hci_conn_add(hdev, type, dst, role, handle);
++	return __hci_conn_add(hdev, type, dst, role, handle);
++}
++
++struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
++			      u8 role, u16 handle)
++{
++	if (handle > HCI_CONN_HANDLE_MAX)
++		return ERR_PTR(-EINVAL);
++
++	return __hci_conn_add(hdev, type, dst, role, handle);
+ }
+ 
+ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 1ed734a7fb313..069f109d973b2 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6312,6 +6312,13 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
+ 
+ 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
+ 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
++
++		if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
++			     &hdev->quirks)) {
++			info->primary_phy &= 0x1f;
++			info->secondary_phy &= 0x1f;
++		}
++
+ 		if (legacy_evt_type != LE_ADV_INVALID) {
+ 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
+ 					   info->bdaddr_type, NULL, 0,
+@@ -6661,6 +6668,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 	struct bt_iso_qos *qos;
+ 	bool pending = false;
+ 	u16 handle = __le16_to_cpu(ev->handle);
++	u32 c_sdu_interval, p_sdu_interval;
+ 
+ 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ 
+@@ -6685,12 +6693,25 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 
+ 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+ 
+-	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
+-	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
+-	qos->ucast.out.interval = qos->ucast.in.interval;
++	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 6, Part G
++	 * page 3075:
++	 * Transport_Latency_C_To_P = CIG_Sync_Delay + (FT_C_To_P) ×
++	 * ISO_Interval + SDU_Interval_C_To_P
++	 * ...
++	 * SDU_Interval = (CIG_Sync_Delay + (FT) × ISO_Interval) -
++	 *					Transport_Latency
++	 */
++	c_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++			 (ev->c_ft * le16_to_cpu(ev->interval) * 1250)) -
++			get_unaligned_le24(ev->c_latency);
++	p_sdu_interval = (get_unaligned_le24(ev->cig_sync_delay) +
++			 (ev->p_ft * le16_to_cpu(ev->interval) * 1250)) -
++			get_unaligned_le24(ev->p_latency);
+ 
+ 	switch (conn->role) {
+ 	case HCI_ROLE_SLAVE:
++		qos->ucast.in.interval = c_sdu_interval;
++		qos->ucast.out.interval = p_sdu_interval;
+ 		/* Convert Transport Latency (us) to Latency (msec) */
+ 		qos->ucast.in.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+@@ -6704,6 +6725,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ 		qos->ucast.out.phy = ev->p_phy;
+ 		break;
+ 	case HCI_ROLE_MASTER:
++		qos->ucast.in.interval = p_sdu_interval;
++		qos->ucast.out.interval = c_sdu_interval;
+ 		/* Convert Transport Latency (us) to Latency (msec) */
+ 		qos->ucast.out.latency =
+ 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
+@@ -6894,6 +6917,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 
+ 		bis = hci_conn_hash_lookup_handle(hdev, handle);
+ 		if (!bis) {
++			if (handle > HCI_CONN_HANDLE_MAX) {
++				bt_dev_dbg(hdev, "ignore too large handle %u", handle);
++				continue;
++			}
+ 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+ 					   HCI_ROLE_SLAVE, handle);
+ 			if (IS_ERR(bis))
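A worked example of the SDU interval computation above, using made-up but plausible values (none of these numbers come from the patch):

	/*
	 * SDU_Interval = CIG_Sync_Delay + FT * ISO_Interval - Transport_Latency
	 * With CIG_Sync_Delay = 5000 us, FT = 2,
	 * ISO_Interval = 8 units * 1250 us = 10000 us,
	 * Transport_Latency = 15000 us:
	 *   5000 + 2 * 10000 - 15000 = 10000 us, i.e. a 10 ms SDU interval.
	 */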
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 00c0d8413c638..dd33400c21822 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1356,8 +1356,7 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		lock_sock(sk);
+ 		switch (sk->sk_state) {
+ 		case BT_CONNECT2:
+-			if (pi->conn->hcon &&
+-			    test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
++			if (test_bit(BT_SK_PA_SYNC, &pi->flags)) {
+ 				iso_conn_big_sync(sk);
+ 				sk->sk_state = BT_LISTEN;
+ 			} else {
+diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
+index de33dc1b0daad..7236349cf0598 100644
+--- a/net/bpf/bpf_dummy_struct_ops.c
++++ b/net/bpf/bpf_dummy_struct_ops.c
+@@ -79,6 +79,51 @@ static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
+ 		    args->args[3], args->args[4]);
+ }
+ 
++static const struct bpf_ctx_arg_aux *find_ctx_arg_info(struct bpf_prog_aux *aux, int offset)
++{
++	int i;
++
++	for (i = 0; i < aux->ctx_arg_info_size; i++)
++		if (aux->ctx_arg_info[i].offset == offset)
++			return &aux->ctx_arg_info[i];
++
++	return NULL;
++}
++
++/* There is only one check at the moment:
++ * - zero should not be passed for pointer parameters not marked as nullable.
++ */
++static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_args *args)
++{
++	const struct btf_type *func_proto = prog->aux->attach_func_proto;
++
++	for (u32 arg_no = 0; arg_no < btf_type_vlen(func_proto) ; ++arg_no) {
++		const struct btf_param *param = &btf_params(func_proto)[arg_no];
++		const struct bpf_ctx_arg_aux *info;
++		const struct btf_type *t;
++		int offset;
++
++		if (args->args[arg_no] != 0)
++			continue;
++
++		/* Program is validated already, so there is no need
++		 * to check if t is NULL.
++		 */
++		t = btf_type_skip_modifiers(bpf_dummy_ops_btf, param->type, NULL);
++		if (!btf_type_is_ptr(t))
++			continue;
++
++		offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
++		info = find_ctx_arg_info(prog->aux, offset);
++		if (info && (info->reg_type & PTR_MAYBE_NULL))
++			continue;
++
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ extern const struct bpf_link_ops bpf_struct_ops_link_lops;
+ 
+ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+@@ -87,7 +132,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ 	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
+ 	const struct btf_type *func_proto;
+ 	struct bpf_dummy_ops_test_args *args;
+-	struct bpf_tramp_links *tlinks;
++	struct bpf_tramp_links *tlinks = NULL;
+ 	struct bpf_tramp_link *link = NULL;
+ 	void *image = NULL;
+ 	unsigned int op_idx;
+@@ -109,6 +154,10 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ 	if (IS_ERR(args))
+ 		return PTR_ERR(args);
+ 
++	err = check_test_run_args(prog, args);
++	if (err)
++		goto out;
++
+ 	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+ 	if (!tlinks) {
+ 		err = -ENOMEM;
+@@ -230,7 +279,7 @@ static void bpf_dummy_unreg(void *kdata)
+ {
+ }
+ 
+-static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb)
++static int bpf_dummy_ops__test_1(struct bpf_dummy_ops_state *cb__nullable)
+ {
+ 	return 0;
+ }
+@@ -247,7 +296,7 @@ static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
+ }
+ 
+ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
+-	.test_1 = bpf_dummy_test_1,
++	.test_1 = bpf_dummy_ops__test_1,
+ 	.test_2 = bpf_dummy_test_2,
+ 	.test_sleepable = bpf_dummy_test_sleepable,
+ };
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index a8b625abe242c..cb72923acc21c 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -435,15 +435,22 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 
+ 		end = start + skb_frag_size(frag);
+ 		if ((copy = end - offset) > 0) {
+-			struct page *page = skb_frag_page(frag);
+-			u8 *vaddr = kmap(page);
++			u32 p_off, p_len, copied;
++			struct page *p;
++			u8 *vaddr;
+ 
+ 			if (copy > len)
+ 				copy = len;
+-			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+-					vaddr + skb_frag_off(frag) + offset - start,
+-					copy, data, to);
+-			kunmap(page);
++
++			skb_frag_foreach_page(frag,
++					      skb_frag_off(frag) + offset - start,
++					      copy, p, p_off, p_len, copied) {
++				vaddr = kmap_local_page(p);
++				n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++					vaddr + p_off, p_len, data, to);
++				kunmap_local(vaddr);
++			}
++
+ 			offset += n;
+ 			if (n != copy)
+ 				goto short_copy;
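Replacing kmap() of the whole fragment with skb_frag_foreach_page() + kmap_local_page() matters once frags can be compound (multi-page): each page must be mapped individually. The general per-page copy pattern, sketched with illustrative names:

	/* Copy from a buffer that may span several pages, one mapping at a time. */
	static void copy_per_page(struct page **pages, size_t off, size_t len,
				  char *dst)
	{
		while (len) {
			struct page *p = pages[off / PAGE_SIZE];
			size_t p_off = off % PAGE_SIZE;
			size_t chunk = min_t(size_t, len, PAGE_SIZE - p_off);
			char *vaddr = kmap_local_page(p);

			memcpy(dst, vaddr + p_off, chunk);
			kunmap_local(vaddr);
			dst += chunk;
			off += chunk;
			len -= chunk;
		}
	}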
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 7adace541fe29..9712cdb8087c2 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -1383,6 +1383,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
+ 	req.sdiag_family = AF_UNSPEC; /* compatibility */
+ 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+@@ -1398,6 +1399,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+ 	req.sdiag_family = rc->idiag_family;
+ 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+ 	req.idiag_ext = rc->idiag_ext;
++	req.pad = 0;
+ 	req.idiag_states = rc->idiag_states;
+ 	req.id = rc->id;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 0953c915bb4de..7b692bcb61d4a 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3074,7 +3074,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+ 			return;
+ 
+ 		if (tcp_try_undo_dsack(sk))
+-			tcp_try_keep_open(sk);
++			tcp_try_to_open(sk, flag);
+ 
+ 		tcp_identify_packet_loss(sk, ack_flag);
+ 		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+@@ -4220,6 +4220,13 @@ void tcp_parse_options(const struct net *net,
+ 				 * checked (see tcp_v{4,6}_rcv()).
+ 				 */
+ 				break;
++#endif
++#ifdef CONFIG_TCP_AO
++			case TCPOPT_AO:
++				/* TCP AO has already been checked
++				 * (see tcp_inbound_ao_hash()).
++				 */
++				break;
+ #endif
+ 			case TCPOPT_FASTOPEN:
+ 				tcp_parse_fastopen_option(
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index c2a925538542b..e0883ba709b0b 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -619,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
+ 	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
+ 	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
+ 					    .len = sizeof(struct in6_addr), },
++	[TCP_METRICS_ATTR_SADDR_IPV4]	= { .type = NLA_U32, },
+ 	/* Following attributes are not received for GET/DEL,
+ 	 * we keep them for reference
+ 	 */
+diff --git a/net/mac802154/main.c b/net/mac802154/main.c
+index 9ab7396668d22..21b7c3b280b45 100644
+--- a/net/mac802154/main.c
++++ b/net/mac802154/main.c
+@@ -161,8 +161,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy,
+ 	}
+ 
+ 	phy->symbol_duration = duration;
+-	phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+-	phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
++	phy->lifs_period =
++		(IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
++	phy->sifs_period =
++		(IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ EXPORT_SYMBOL(ieee802154_configure_durations);
+ 
+@@ -184,10 +186,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+ 	 * Should be done when all drivers sets this value.
+ 	 */
+ 
+-	wpan_phy->lifs_period =
+-		(IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+-	wpan_phy->sifs_period =
+-		(IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
++	wpan_phy->lifs_period =	(IEEE802154_LIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
++	wpan_phy->sifs_period =	(IEEE802154_SIFS_PERIOD *
++				 wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ 
+ int ieee802154_register_hw(struct ieee802154_hw *hw)
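As a sanity check of the NSEC_PER_USEC fix (symbol_duration is held in nanoseconds, the IFS periods in microseconds), take the 2.4 GHz O-QPSK PHY; the constants below are assumed for illustration:

	/*
	 * symbol_duration = 16000 ns (16 us symbol)
	 * LIFS = 40 symbols, SIFS = 12 symbols:
	 *   lifs_period = 40 * 16000 / NSEC_PER_USEC = 640 us
	 *   sifs_period = 12 * 16000 / NSEC_PER_USEC = 192 us
	 * Dividing by NSEC_PER_SEC instead truncates both results to 0.
	 */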
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index faa77b031d1f3..0f77ba3306c23 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -11479,8 +11479,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 
+ 	gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-	if (!list_empty(&nf_tables_destroy_list))
+-		nf_tables_trans_destroy_flush_work();
++	nf_tables_trans_destroy_flush_work();
+ again:
+ 	list_for_each_entry(table, &nft_net->tables, list) {
+ 		if (nft_table_has_owner(table) &&
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c67679a41044f..13b3998c6177f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7119,6 +7119,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 	struct sctp_sock *sp = sctp_sk(sk);
+ 	struct sctp_association *asoc;
+ 	struct sctp_assoc_ids *ids;
++	size_t ids_size;
+ 	u32 num = 0;
+ 
+ 	if (sctp_style(sk, TCP))
+@@ -7131,11 +7132,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ 		num++;
+ 	}
+ 
+-	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
++	ids_size = struct_size(ids, gaids_assoc_id, num);
++	if (len < ids_size)
+ 		return -EINVAL;
+ 
+-	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+-
++	len = ids_size;
+ 	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+ 	if (unlikely(!ids))
+ 		return -ENOMEM;
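
struct_size() from <linux/overflow.h> computes sizeof(*ids) plus num trailing elements with overflow checking, saturating at SIZE_MAX so the allocation fails cleanly rather than under-allocating. A userspace sketch of the wraparound the open-coded form permitted:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t hdr = 8, elem = 4;		/* stand-ins for the real sizes */
	size_t num = SIZE_MAX / 2 + 1;		/* hostile element count */
	size_t open_coded = hdr + elem * num;	/* wraps to a small value */

	printf("open-coded size: %zu\n", open_coded);
	/* struct_size() would return SIZE_MAX here, so kmalloc() fails */
	return 0;
}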
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 65c416e8d25eb..c9866db2ea468 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -468,6 +468,10 @@ static const struct netlink_range_validation nl80211_punct_bitmap_range = {
+ 	.max = 0xffff,
+ };
+ 
++static const struct netlink_range_validation q_range = {
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
+ 	[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
+@@ -754,7 +758,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 
+ 	[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
+-	[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
++	[NL80211_ATTR_TXQ_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &q_range),
+ 	[NL80211_ATTR_HE_CAPABILITY] =
+ 		NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa,
+ 				       NL80211_HE_MAX_CAPABILITY_LEN),
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 7862a81017477..a9434a72cac4f 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -182,7 +182,7 @@ kallsyms_step()
+ 	mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms ${kallsymso_prev}
+ 	kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+ 
+-	info AS ${kallsyms_S}
++	info AS ${kallsymso}
+ 	${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+ 	      ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ 	      -c -o ${kallsymso} ${kallsyms_S}
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 117c7ecc48563..3f61220c23b4e 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -967,6 +967,14 @@ int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump)
+ 	if (err < 0)
+ 		ump_dbg(ump, "Unable to get UMP EP stream config\n");
+ 
++	/* If no protocol is set for some reason, assume a valid one */
++	if (!(ump->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK)) {
++		if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++			ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI2;
++		else if (ump->info.protocol_caps & SNDRV_UMP_EP_INFO_PROTO_MIDI1)
++			ump->info.protocol |= SNDRV_UMP_EP_INFO_PROTO_MIDI1;
++	}
++
+ 	/* Query and create blocks from Function Blocks */
+ 	for (blk = 0; blk < ump->info.num_blocks; blk++) {
+ 		err = create_block_from_fb_info(ump, blk);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3a56434c86bd9..c0530d4aa3fc3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -12029,6 +12029,7 @@ enum {
+ 	ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ 	ALC897_FIXUP_HEADSET_MIC_PIN2,
+ 	ALC897_FIXUP_UNIS_H3C_X500S,
++	ALC897_FIXUP_HEADSET_MIC_PIN3,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -12475,10 +12476,18 @@ static const struct hda_fixup alc662_fixups[] = {
+ 			{}
+ 		},
+ 	},
++	[ALC897_FIXUP_HEADSET_MIC_PIN3] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 }, /* use as headset mic */
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
++	SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 1ce738d91685a..670726353aa50 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -104,6 +104,7 @@ enum bpf_enum_value_kind {
+ 	case 2: val = *(const unsigned short *)p; break;		      \
+ 	case 4: val = *(const unsigned int *)p; break;			      \
+ 	case 8: val = *(const unsigned long long *)p; break;		      \
++	default: val = 0; break;					      \
+ 	}								      \
+ 	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+ 	if (__CORE_RELO(s, field, SIGNED))				      \
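
The added default arm matters because val is assigned only inside the switch: for a byte size outside {1, 2, 4, 8} the macro would otherwise shift and use an uninitialized value, and compilers may warn. The same pattern as a standalone function:

unsigned long long read_sized(const void *p, int sz)
{
	unsigned long long val;

	switch (sz) {
	case 1: val = *(const unsigned char *)p; break;
	case 2: val = *(const unsigned short *)p; break;
	case 4: val = *(const unsigned int *)p; break;
	case 8: val = *(const unsigned long long *)p; break;
	default: val = 0; break;	/* unexpected sizes read as zero */
	}
	return val;
}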
+diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
+index a336786a22a38..50befe125ddc5 100644
+--- a/tools/lib/bpf/features.c
++++ b/tools/lib/bpf/features.c
+@@ -392,11 +392,41 @@ static int probe_uprobe_multi_link(int token_fd)
+ 	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ 	err = -errno; /* close() can clobber errno */
+ 
++	if (link_fd >= 0 || err != -EBADF) {
++		if (link_fd >= 0)
++			close(link_fd);
++		close(prog_fd);
++		return 0;
++	}
++
++	/* Initial multi-uprobe support in kernel didn't handle PID filtering
++	 * correctly (it was doing thread filtering, not process filtering).
++	 * So now we'll detect if PID filtering logic was fixed, and, if not,
++	 * we'll pretend multi-uprobes are not supported.
++	 * Multi-uprobes are used in USDT attachment logic, and we need to be
++	 * conservative here, because multi-uprobe selection happens early at
++	 * load time, while the use of PID filtering is known late at
++	 * attachment time, at which point it's too late to undo multi-uprobe
++	 * selection.
++	 *
++	 * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
++	 * early with -EINVAL on kernels with fixed PID filtering logic;
++	 * otherwise -ESRCH would be returned if passed correct binary path
++	 * (but we'll just get -EBADF, of course).
++	 */
++	link_opts.uprobe_multi.pid = -1; /* invalid PID */
++	link_opts.uprobe_multi.path = "/"; /* invalid path */
++	link_opts.uprobe_multi.offsets = &offset;
++	link_opts.uprobe_multi.cnt = 1;
++
++	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
++	err = -errno; /* close() can clobber errno */
++
+ 	if (link_fd >= 0)
+ 		close(link_fd);
+ 	close(prog_fd);
+ 
+-	return link_fd < 0 && err == -EBADF;
++	return link_fd < 0 && err == -EINVAL;
+ }
+ 
+ static int probe_kern_bpf_cookie(int token_fd)
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 8071a3ef2a2e8..5d80d193e5bee 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -1022,6 +1022,7 @@ struct rapl_counter_info_t {
+ 
+ /* struct rapl_counter_info_t for each RAPL domain */
+ struct rapl_counter_info_t *rapl_counter_info_perdomain;
++unsigned int rapl_counter_info_perdomain_size;
+ 
+ #define RAPL_COUNTER_FLAG_USE_MSR_SUM (1u << 1)
+ 
+@@ -1415,6 +1416,9 @@ struct topo_params {
+ 	int allowed_cpus;
+ 	int allowed_cores;
+ 	int max_cpu_num;
++	int max_core_id;
++	int max_package_id;
++	int max_die_id;
+ 	int max_node_num;
+ 	int nodes_per_pkg;
+ 	int cores_per_node;
+@@ -3368,15 +3372,18 @@ void write_rapl_counter(struct rapl_counter *rc, struct rapl_counter_info_t *rci
+ 	rc->scale = rci->scale[idx];
+ }
+ 
+-int get_rapl_counters(int cpu, int domain, struct core_data *c, struct pkg_data *p)
++int get_rapl_counters(int cpu, unsigned int domain, struct core_data *c, struct pkg_data *p)
+ {
+ 	unsigned long long perf_data[NUM_RAPL_COUNTERS + 1];
+-	struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain];
++	struct rapl_counter_info_t *rci;
+ 
+ 	if (debug)
+ 		fprintf(stderr, "%s: cpu%d domain%d\n", __func__, cpu, domain);
+ 
+ 	assert(rapl_counter_info_perdomain);
++	assert(domain < rapl_counter_info_perdomain_size);
++
++	rci = &rapl_counter_info_perdomain[domain];
+ 
+ 	/*
+ 	 * If we have any perf counters to read, read them all now, in bulk
+@@ -4180,7 +4187,7 @@ void free_fd_rapl_percpu(void)
+ 	if (!rapl_counter_info_perdomain)
+ 		return;
+ 
+-	const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
++	const int num_domains = rapl_counter_info_perdomain_size;
+ 
+ 	for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ 		if (rapl_counter_info_perdomain[domain_id].fd_perf != -1)
+@@ -4188,6 +4195,8 @@ void free_fd_rapl_percpu(void)
+ 	}
+ 
+ 	free(rapl_counter_info_perdomain);
++	rapl_counter_info_perdomain = NULL;
++	rapl_counter_info_perdomain_size = 0;
+ }
+ 
+ void free_all_buffers(void)
+@@ -6478,17 +6487,18 @@ void linux_perf_init(void)
+ 
+ void rapl_perf_init(void)
+ {
+-	const int num_domains = platform->has_per_core_rapl ? topo.num_cores : topo.num_packages;
++	const unsigned int num_domains = (platform->has_per_core_rapl ? topo.max_core_id : topo.max_package_id) + 1;
+ 	bool *domain_visited = calloc(num_domains, sizeof(bool));
+ 
+ 	rapl_counter_info_perdomain = calloc(num_domains, sizeof(*rapl_counter_info_perdomain));
+ 	if (rapl_counter_info_perdomain == NULL)
+ 		err(-1, "calloc rapl_counter_info_percpu");
++	rapl_counter_info_perdomain_size = num_domains;
+ 
+ 	/*
+ 	 * Initialize rapl_counter_info_percpu
+ 	 */
+-	for (int domain_id = 0; domain_id < num_domains; ++domain_id) {
++	for (unsigned int domain_id = 0; domain_id < num_domains; ++domain_id) {
+ 		struct rapl_counter_info_t *rci = &rapl_counter_info_perdomain[domain_id];
+ 
+ 		rci->fd_perf = -1;
+@@ -6508,7 +6518,7 @@ void rapl_perf_init(void)
+ 		bool has_counter = 0;
+ 		double scale;
+ 		enum rapl_unit unit;
+-		int next_domain;
++		unsigned int next_domain;
+ 
+ 		memset(domain_visited, 0, num_domains * sizeof(*domain_visited));
+ 
+@@ -6521,6 +6531,8 @@ void rapl_perf_init(void)
+ 			next_domain =
+ 			    platform->has_per_core_rapl ? cpus[cpu].physical_core_id : cpus[cpu].physical_package_id;
+ 
++			assert(next_domain < num_domains);
++
+ 			if (domain_visited[next_domain])
+ 				continue;
+ 
+@@ -6967,7 +6979,6 @@ void topology_probe(bool startup)
+ 	int i;
+ 	int max_core_id = 0;
+ 	int max_package_id = 0;
+-	int max_die_id = 0;
+ 	int max_siblings = 0;
+ 
+ 	/* Initialize num_cpus, max_cpu_num */
+@@ -7084,8 +7095,8 @@ void topology_probe(bool startup)
+ 
+ 		/* get die information */
+ 		cpus[i].die_id = get_die_id(i);
+-		if (cpus[i].die_id > max_die_id)
+-			max_die_id = cpus[i].die_id;
++		if (cpus[i].die_id > topo.max_die_id)
++			topo.max_die_id = cpus[i].die_id;
+ 
+ 		/* get numa node information */
+ 		cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
+@@ -7104,6 +7115,8 @@ void topology_probe(bool startup)
+ 		if (cpus[i].thread_id == 0)
+ 			topo.num_cores++;
+ 	}
++	topo.max_core_id = max_core_id;
++	topo.max_package_id = max_package_id;
+ 
+ 	topo.cores_per_node = max_core_id + 1;
+ 	if (debug > 1)
+@@ -7111,9 +7124,9 @@ void topology_probe(bool startup)
+ 	if (!summary_only && topo.cores_per_node > 1)
+ 		BIC_PRESENT(BIC_Core);
+ 
+-	topo.num_die = max_die_id + 1;
++	topo.num_die = topo.max_die_id + 1;
+ 	if (debug > 1)
+-		fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die);
++		fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
+ 	if (!summary_only && topo.num_die > 1)
+ 		BIC_PRESENT(BIC_Die);
+ 
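
The rule behind this change: an array indexed by physical ID must be sized max_id + 1, not by the entry count, since IDs can be sparse (offlined cores leave holes). A compressed sketch with hypothetical IDs:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int ids[] = { 0, 1, 3 };	/* three domains, but max id is 3 */
	int count = 3, max_id = 3;
	int table[3 + 1];		/* sized max_id + 1, not count */

	for (int i = 0; i < count; i++) {
		assert(ids[i] <= max_id);	/* index 3 overflows a count-sized array */
		table[ids[i]] = i;
	}
	printf("sized %d slots for %d domains\n", max_id + 1, count);
	return 0;
}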
+diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+index f43fcb13d2c46..d3d94596ab79c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
++++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+@@ -98,7 +98,8 @@ static void test_dummy_init_ptr_arg(void)
+ 
+ static void test_dummy_multiple_args(void)
+ {
+-	__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
++	struct bpf_dummy_ops_state st = { 7 };
++	__u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
+ 	LIBBPF_OPTS(bpf_test_run_opts, attr,
+ 		.ctx_in = args,
+ 		.ctx_size_in = sizeof(args),
+@@ -115,6 +116,7 @@ static void test_dummy_multiple_args(void)
+ 	fd = bpf_program__fd(skel->progs.test_2);
+ 	err = bpf_prog_test_run_opts(fd, &attr);
+ 	ASSERT_OK(err, "test_run");
++	args[0] = 7;
+ 	for (i = 0; i < ARRAY_SIZE(args); i++) {
+ 		snprintf(name, sizeof(name), "arg %zu", i);
+ 		ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
+@@ -125,7 +127,8 @@ static void test_dummy_multiple_args(void)
+ 
+ static void test_dummy_sleepable(void)
+ {
+-	__u64 args[1] = {0};
++	struct bpf_dummy_ops_state st;
++	__u64 args[1] = {(__u64)&st};
+ 	LIBBPF_OPTS(bpf_test_run_opts, attr,
+ 		.ctx_in = args,
+ 		.ctx_size_in = sizeof(args),
+@@ -144,6 +147,31 @@ static void test_dummy_sleepable(void)
+ 	dummy_st_ops_success__destroy(skel);
+ }
+ 
++/* dummy_st_ops.test_sleepable() parameter is not marked as nullable,
++ * thus bpf_prog_test_run_opts() below should be rejected as it tries
++ * to pass NULL for this parameter.
++ */
++static void test_dummy_sleepable_reject_null(void)
++{
++	__u64 args[1] = {0};
++	LIBBPF_OPTS(bpf_test_run_opts, attr,
++		.ctx_in = args,
++		.ctx_size_in = sizeof(args),
++	);
++	struct dummy_st_ops_success *skel;
++	int fd, err;
++
++	skel = dummy_st_ops_success__open_and_load();
++	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
++		return;
++
++	fd = bpf_program__fd(skel->progs.test_sleepable);
++	err = bpf_prog_test_run_opts(fd, &attr);
++	ASSERT_EQ(err, -EINVAL, "test_run");
++
++	dummy_st_ops_success__destroy(skel);
++}
++
+ void test_dummy_st_ops(void)
+ {
+ 	if (test__start_subtest("dummy_st_ops_attach"))
+@@ -156,6 +184,8 @@ void test_dummy_st_ops(void)
+ 		test_dummy_multiple_args();
+ 	if (test__start_subtest("dummy_sleepable"))
+ 		test_dummy_sleepable();
++	if (test__start_subtest("dummy_sleepable_reject_null"))
++		test_dummy_sleepable_reject_null();
+ 
+ 	RUN_TESTS(dummy_st_ops_fail);
+ }
+diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+index 1efa746c25dc7..ec0c595d47af8 100644
+--- a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
++++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+@@ -11,8 +11,17 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+ {
+ 	int ret;
+ 
+-	if (!state)
+-		return 0xf2f3f4f5;
++	/* Check that 'state' nullable status is detected correctly.
++	 * If the 'state' argument were assumed non-null by the verifier,
++	 * the code below would be deleted as dead (which it shouldn't be).
++	 * Hide it from the compiler behind 'asm' block to avoid
++	 * unnecessary optimizations.
++	 */
++	asm volatile (
++		"if %[state] != 0 goto +2;"
++		"r0 = 0xf2f3f4f5;"
++		"exit;"
++	::[state]"p"(state));
+ 
+ 	ret = state->val;
+ 	state->val = 0x5a;
+@@ -25,7 +34,7 @@ SEC("struct_ops/test_2")
+ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
+ 	     char a3, unsigned long a4)
+ {
+-	test_2_args[0] = (unsigned long)state;
++	test_2_args[0] = state->val;
+ 	test_2_args[1] = a1;
+ 	test_2_args[2] = a2;
+ 	test_2_args[3] = a3;
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index b634969cbb6f1..40723a6a083f4 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -66,8 +66,6 @@
+ #include <sys/wait.h>
+ #include <unistd.h>
+ #include <setjmp.h>
+-#include <syscall.h>
+-#include <linux/sched.h>
+ 
+ #include "kselftest.h"
+ 
+@@ -82,17 +80,6 @@
+ #  define TH_LOG_ENABLED 1
+ #endif
+ 
+-/* Wait for the child process to end but without sharing memory mapping. */
+-static inline pid_t clone3_vfork(void)
+-{
+-	struct clone_args args = {
+-		.flags = CLONE_VFORK,
+-		.exit_signal = SIGCHLD,
+-	};
+-
+-	return syscall(__NR_clone3, &args, sizeof(args));
+-}
+-
+ /**
+  * TH_LOG()
+  *
+@@ -437,7 +424,7 @@ static inline pid_t clone3_vfork(void)
+ 		} \
+ 		if (setjmp(_metadata->env) == 0) { \
+ 			/* _metadata and potentially self are shared with all forks. */ \
+-			child = clone3_vfork(); \
++			child = fork(); \
+ 			if (child == 0) { \
+ 				fixture_name##_setup(_metadata, self, variant->data); \
+ 				/* Let setup failure terminate early. */ \
+@@ -1016,7 +1003,14 @@ void __wait_for_test(struct __test_metadata *t)
+ 		.sa_flags = SA_SIGINFO,
+ 	};
+ 	struct sigaction saved_action;
+-	int status;
++	/*
++	 * Sets status so that WIFEXITED(status) returns true and
++	 * WEXITSTATUS(status) returns KSFT_FAIL.  This safe default value
++	 * should never be evaluated because of the waitpid(2) check and
++	 * SIGALRM handling.
++	 */
++	int status = KSFT_FAIL << 8;
++	int child;
+ 
+ 	if (sigaction(SIGALRM, &action, &saved_action)) {
+ 		t->exit_code = KSFT_FAIL;
+@@ -1028,7 +1022,15 @@ void __wait_for_test(struct __test_metadata *t)
+ 	__active_test = t;
+ 	t->timed_out = false;
+ 	alarm(t->timeout);
+-	waitpid(t->pid, &status, 0);
++	child = waitpid(t->pid, &status, 0);
++	if (child == -1 && errno != EINTR) {
++		t->exit_code = KSFT_FAIL;
++		fprintf(TH_LOG_STREAM,
++			"# %s: Failed to wait for PID %d (errno: %d)\n",
++			t->name, t->pid, errno);
++		return;
++	}
++
+ 	alarm(0);
+ 	if (sigaction(SIGALRM, &saved_action, NULL)) {
+ 		t->exit_code = KSFT_FAIL;
+@@ -1083,6 +1085,7 @@ void __wait_for_test(struct __test_metadata *t)
+ 				WTERMSIG(status));
+ 		}
+ 	} else {
++		t->exit_code = KSFT_FAIL;
+ 		fprintf(TH_LOG_STREAM,
+ 			"# %s: Test ended in some other way [%u]\n",
+ 			t->name,
+@@ -1218,6 +1221,7 @@ void __run_test(struct __fixture_metadata *f,
+ 	struct __test_xfail *xfail;
+ 	char test_name[1024];
+ 	const char *diagnostic;
++	int child;
+ 
+ 	/* reset test struct */
+ 	t->exit_code = KSFT_PASS;
+@@ -1236,15 +1240,16 @@ void __run_test(struct __fixture_metadata *f,
+ 	fflush(stdout);
+ 	fflush(stderr);
+ 
+-	t->pid = clone3_vfork();
+-	if (t->pid < 0) {
++	child = fork();
++	if (child < 0) {
+ 		ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
+ 		t->exit_code = KSFT_FAIL;
+-	} else if (t->pid == 0) {
++	} else if (child == 0) {
+ 		setpgrp();
+ 		t->fn(t, variant);
+ 		_exit(t->exit_code);
+ 	} else {
++		t->pid = child;
+ 		__wait_for_test(t);
+ 	}
+ 	ksft_print_msg("         %4s  %s\n",
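
On Linux a normal exit code lands in bits 15:8 of the waitpid() status word, so KSFT_FAIL << 8 (KSFT_FAIL is 1) decodes through the standard macros exactly as the comment above promises. This can be checked in a few lines:

#include <stdio.h>
#include <sys/wait.h>

int main(void)
{
	int status = 1 << 8;	/* KSFT_FAIL << 8 */

	printf("exited=%d code=%d\n", WIFEXITED(status), WEXITSTATUS(status));
	/* prints: exited=1 code=1 */
	return 0;
}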
+diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c
+index 353e1e867fbb2..6038b96ecee88 100644
+--- a/tools/testing/selftests/net/gro.c
++++ b/tools/testing/selftests/net/gro.c
+@@ -119,6 +119,9 @@ static void setup_sock_filter(int fd)
+ 		next_off = offsetof(struct ipv6hdr, nexthdr);
+ 	ipproto_off = ETH_HLEN + next_off;
+ 
++	/* Overridden later if exthdrs are used: */
++	opt_ipproto_off = ipproto_off;
++
+ 	if (strcmp(testname, "ip") == 0) {
+ 		if (proto == PF_INET)
+ 			optlen = sizeof(struct ip_timestamp);
+diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
+index 193b82745fd87..29451d2244b75 100644
+--- a/tools/testing/selftests/net/ip_local_port_range.c
++++ b/tools/testing/selftests/net/ip_local_port_range.c
+@@ -359,7 +359,7 @@ TEST_F(ip_local_port_range, late_bind)
+ 		struct sockaddr_in v4;
+ 		struct sockaddr_in6 v6;
+ 	} addr;
+-	socklen_t addr_len;
++	socklen_t addr_len = 0;
+ 	const int one = 1;
+ 	int fd, err;
+ 	__u32 range;
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 7426a2cbd4a03..7ad5a59adff2b 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -1276,7 +1276,7 @@ int add_listener(int argc, char *argv[])
+ 	struct sockaddr_storage addr;
+ 	struct sockaddr_in6 *a6;
+ 	struct sockaddr_in *a4;
+-	u_int16_t family;
++	u_int16_t family = AF_UNSPEC;
+ 	int enable = 1;
+ 	int sock;
+ 	int err;
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index bdc03a2097e85..7ea5fb28c93db 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -85,6 +85,7 @@ static bool cfg_rx;
+ static int  cfg_runtime_ms	= 4200;
+ static int  cfg_verbose;
+ static int  cfg_waittime_ms	= 500;
++static int  cfg_notification_limit = 32;
+ static bool cfg_zerocopy;
+ 
+ static socklen_t cfg_alen;
+@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
+ static long packets, bytes, completions, expected_completions;
+ static int  zerocopied = -1;
+ static uint32_t next_completion;
++static uint32_t sends_since_notify;
+ 
+ static unsigned long gettimeofday_ms(void)
+ {
+@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
+ 		error(1, errno, "send");
+ 	if (cfg_verbose && ret != len)
+ 		fprintf(stderr, "send: ret=%u != %u\n", ret, len);
++	sends_since_notify++;
+ 
+ 	if (len) {
+ 		packets++;
+@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
+ 	/* Detect notification gaps. These should not happen often, if at all.
+ 	 * Gaps can occur due to drops, reordering and retransmissions.
+ 	 */
+-	if (lo != next_completion)
++	if (cfg_verbose && lo != next_completion)
+ 		fprintf(stderr, "gap: %u..%u does not append to %u\n",
+ 			lo, hi, next_completion);
+ 	next_completion = hi + 1;
+@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
+ static void do_recv_completions(int fd, int domain)
+ {
+ 	while (do_recv_completion(fd, domain)) {}
++	sends_since_notify = 0;
+ }
+ 
+ /* Wait for all remaining completions on the errqueue */
+@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
+ 		else
+ 			do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+ 
++		if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
++			do_recv_completions(fd, domain);
++
+ 		while (!do_poll(fd, POLLOUT)) {
+ 			if (cfg_zerocopy)
+ 				do_recv_completions(fd, domain);
+@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
+ 
+ 	cfg_payload_len = max_payload_len;
+ 
+-	while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
++	while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+ 		switch (c) {
+ 		case '4':
+ 			if (cfg_family != PF_UNSPEC)
+@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
+ 			if (cfg_ifindex == 0)
+ 				error(1, errno, "invalid iface: %s", optarg);
+ 			break;
++		case 'l':
++			cfg_notification_limit = strtoul(optarg, NULL, 0);
++			break;
+ 		case 'm':
+ 			cfg_cork_mixed = true;
+ 			break;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 4cb991be8e31b..178a41d4bb1be 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -294,11 +294,30 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
+ 	return ret;
+ }
+ 
++static bool arch_supports_noncont_cat(const struct resctrl_test *test)
++{
++	unsigned int eax, ebx, ecx, edx;
++
++	/* AMD always supports non-contiguous CBM. */
++	if (get_vendor() == ARCH_AMD)
++		return true;
++
++	/* Intel support for non-contiguous CBM needs to be discovered. */
++	if (!strcmp(test->resource, "L3"))
++		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
++	else if (!strcmp(test->resource, "L2"))
++		__cpuid_count(0x10, 2, eax, ebx, ecx, edx);
++	else
++		return false;
++
++	return ((ecx >> 3) & 1);
++}
++
+ static int noncont_cat_run_test(const struct resctrl_test *test,
+ 				const struct user_params *uparams)
+ {
+ 	unsigned long full_cache_mask, cont_mask, noncont_mask;
+-	unsigned int eax, ebx, ecx, edx, sparse_masks;
++	unsigned int sparse_masks;
+ 	int bit_center, ret;
+ 	char schemata[64];
+ 
+@@ -307,15 +326,8 @@ static int noncont_cat_run_test(const struct resctrl_test *test,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!strcmp(test->resource, "L3"))
+-		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
+-	else if (!strcmp(test->resource, "L2"))
+-		__cpuid_count(0x10, 2, eax, ebx, ecx, edx);
+-	else
+-		return -EINVAL;
+-
+-	if (sparse_masks != ((ecx >> 3) & 1)) {
+-		ksft_print_msg("CPUID output doesn't match 'sparse_masks' file content!\n");
++	if (arch_supports_noncont_cat(test) != sparse_masks) {
++		ksft_print_msg("Hardware and kernel differ on non-contiguous CBM support!\n");
+ 		return 1;
+ 	}
+ 
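
The helper reads CPUID leaf 0x10 (cache allocation), subleaf 1 for L3 or 2 for L2, where ECX bit 3 advertises non-contiguous CBM support on Intel. A userspace equivalent using the compiler's <cpuid.h>, assuming an x86 host:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(0x10, 1, eax, ebx, ecx, edx);	/* L3 CAT subleaf */
	printf("non-contiguous CBM: %u\n", (ecx >> 3) & 1);
	return 0;
}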



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-18 12:14 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-18 12:14 UTC (permalink / raw
  To: gentoo-commits

commit:     cb8f187d3fecf7b8999130fe1e869021f88a0153
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 18 12:14:09 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 18 12:14:09 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb8f187d

Linux patch 6.9.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1009_linux-6.9.10.patch | 4439 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4443 insertions(+)

diff --git a/0000_README b/0000_README
index 7f674d82..e8d9065b 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-6.9.9.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.9
 
+Patch:  1009_linux-6.9.10.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.10
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1009_linux-6.9.10.patch b/1009_linux-6.9.10.patch
new file mode 100644
index 00000000..18927134
--- /dev/null
+++ b/1009_linux-6.9.10.patch
@@ -0,0 +1,4439 @@
+diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
+index aa8290a29dc88..fd4b56c0996f4 100644
+--- a/Documentation/admin-guide/cifs/usage.rst
++++ b/Documentation/admin-guide/cifs/usage.rst
+@@ -723,40 +723,26 @@ Configuration pseudo-files:
+ ======================= =======================================================
+ SecurityFlags		Flags which control security negotiation and
+ 			also packet signing. Authentication (may/must)
+-			flags (e.g. for NTLM and/or NTLMv2) may be combined with
++			flags (e.g. for NTLMv2) may be combined with
+ 			the signing flags.  Specifying two different password
+ 			hashing mechanisms (as "must use") on the other hand
+ 			does not make much sense. Default flags are::
+ 
+-				0x07007
+-
+-			(NTLM, NTLMv2 and packet signing allowed).  The maximum
+-			allowable flags if you want to allow mounts to servers
+-			using weaker password hashes is 0x37037 (lanman,
+-			plaintext, ntlm, ntlmv2, signing allowed).  Some
+-			SecurityFlags require the corresponding menuconfig
+-			options to be enabled.  Enabling plaintext
+-			authentication currently requires also enabling
+-			lanman authentication in the security flags
+-			because the cifs module only supports sending
+-			laintext passwords using the older lanman dialect
+-			form of the session setup SMB.  (e.g. for authentication
+-			using plain text passwords, set the SecurityFlags
+-			to 0x30030)::
++				0x00C5
++
++			(NTLMv2 and packet signing allowed).  Some SecurityFlags
++			may require enabling a corresponding menuconfig option.
+ 
+ 			  may use packet signing			0x00001
+ 			  must use packet signing			0x01001
+-			  may use NTLM (most common password hash)	0x00002
+-			  must use NTLM					0x02002
+ 			  may use NTLMv2				0x00004
+ 			  must use NTLMv2				0x04004
+-			  may use Kerberos security			0x00008
+-			  must use Kerberos				0x08008
+-			  may use lanman (weak) password hash		0x00010
+-			  must use lanman password hash			0x10010
+-			  may use plaintext passwords			0x00020
+-			  must use plaintext passwords			0x20020
+-			  (reserved for future packet encryption)	0x00040
++			  may use Kerberos security (krb5)		0x00008
++			  must use Kerberos                             0x08008
++			  may use NTLMSSP               		0x00080
++			  must use NTLMSSP           			0x80080
++			  seal (packet encryption)			0x00040
++			  must seal (not implemented yet)               0x40040
+ 
+ cifsFYI			If set to non-zero value, additional debug information
+ 			will be logged to the system error log.  This field
+diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+index 07ccbda4a0ab5..b9a9f2cf32a1b 100644
+--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
++++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+@@ -66,7 +66,6 @@ allOf:
+         compatible:
+           contains:
+             enum:
+-              - qcom,qdu1000-llcc
+               - qcom,sc7180-llcc
+               - qcom,sm6350-llcc
+     then:
+@@ -104,6 +103,7 @@ allOf:
+         compatible:
+           contains:
+             enum:
++              - qcom,qdu1000-llcc
+               - qcom,sc8180x-llcc
+               - qcom,sc8280xp-llcc
+               - qcom,x1e80100-llcc
+diff --git a/Makefile b/Makefile
+index cbe3a580ff480..5471f554f95ec 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
+index 8aa39db095d76..2c5155bd376ba 100644
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
+ 
+ 	/* Configure sleep count in deep sleep register */
+ 	val = __raw_readl(pm_config.deepsleep_reg);
+-	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
++	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
+ 	val |= pm_config.sleepcount;
+ 	__raw_writel(val, pm_config.deepsleep_reg);
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
+index b6e3c169797f0..0dba413963776 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
+@@ -191,7 +191,7 @@ axp803: pmic@3a3 {
+ 		compatible = "x-powers,axp803";
+ 		reg = <0x3a3>;
+ 		interrupt-parent = <&r_intc>;
+-		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_LOW>;
++		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+ 		x-powers,drive-vbus-en;
+ 
+ 		vin1-supply = <&reg_vcc5v>;
+diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+index 832f472c4b7a5..ceed9c4e8fcd6 100644
+--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
++++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+@@ -1459,9 +1459,23 @@ gem_noc: interconnect@19100000 {
+ 
+ 		system-cache-controller@19200000 {
+ 			compatible = "qcom,qdu1000-llcc";
+-			reg = <0 0x19200000 0 0xd80000>,
++			reg = <0 0x19200000 0 0x80000>,
++			      <0 0x19300000 0 0x80000>,
++			      <0 0x19600000 0 0x80000>,
++			      <0 0x19700000 0 0x80000>,
++			      <0 0x19a00000 0 0x80000>,
++			      <0 0x19b00000 0 0x80000>,
++			      <0 0x19e00000 0 0x80000>,
++			      <0 0x19f00000 0 0x80000>,
+ 			      <0 0x1a200000 0 0x80000>;
+ 			reg-names = "llcc0_base",
++				    "llcc1_base",
++				    "llcc2_base",
++				    "llcc3_base",
++				    "llcc4_base",
++				    "llcc5_base",
++				    "llcc6_base",
++				    "llcc7_base",
+ 				    "llcc_broadcast_base";
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index 231cea1f0fa8f..beb203964315b 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -3605,7 +3605,7 @@ arch_timer: timer {
+ 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-			     <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
++			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ 	};
+ 
+ 	pcie0: pcie@1c00000 {
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index 053f7861c3cec..b594938c757bf 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -2608,11 +2608,14 @@ usb_sec_dpphy: dp-phy@88ef200 {
+ 
+ 		system-cache-controller@9200000 {
+ 			compatible = "qcom,sc8180x-llcc";
+-			reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
+-			      <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
+-			      <0 0x09600000 0 0x50000>;
++			reg = <0 0x09200000 0 0x58000>, <0 0x09280000 0 0x58000>,
++			      <0 0x09300000 0 0x58000>, <0 0x09380000 0 0x58000>,
++			      <0 0x09400000 0 0x58000>, <0 0x09480000 0 0x58000>,
++			      <0 0x09500000 0 0x58000>, <0 0x09580000 0 0x58000>,
++			      <0 0x09600000 0 0x58000>;
+ 			reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
+-				    "llcc3_base", "llcc_broadcast_base";
++				    "llcc3_base", "llcc4_base", "llcc5_base",
++				    "llcc6_base", "llcc7_base",  "llcc_broadcast_base";
+ 			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 15ae94c1602d5..4ddca2ff7a11f 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -618,15 +618,16 @@ &i2c4 {
+ 
+ 	status = "okay";
+ 
+-	/* FIXME: verify */
+ 	touchscreen@10 {
+-		compatible = "hid-over-i2c";
++		compatible = "elan,ekth5015m", "elan,ekth6915";
+ 		reg = <0x10>;
+ 
+-		hid-descr-addr = <0x1>;
+ 		interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
+-		vdd-supply = <&vreg_misc_3p3>;
+-		vddl-supply = <&vreg_s10b>;
++		reset-gpios = <&tlmm 99 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
++		no-reset-on-power-off;
++
++		vcc33-supply = <&vreg_misc_3p3>;
++		vccio-supply = <&vreg_misc_3p3>;
+ 
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&ts0_default>;
+@@ -1417,8 +1418,8 @@ int-n-pins {
+ 		reset-n-pins {
+ 			pins = "gpio99";
+ 			function = "gpio";
+-			output-high;
+-			drive-strength = <16>;
++			drive-strength = <2>;
++			bias-disable;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index aca0a87092e45..9ed062150aaf2 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -1090,6 +1090,7 @@ sdhc_1: mmc@4744000 {
+ 
+ 			power-domains = <&rpmpd SM6115_VDDCX>;
+ 			operating-points-v2 = <&sdhc1_opp_table>;
++			iommus = <&apps_smmu 0x00c0 0x0>;
+ 			interconnects = <&system_noc MASTER_SDCC_1 RPM_ALWAYS_TAG
+ 					 &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>,
+ 					<&bimc MASTER_AMPSS_M0 RPM_ALWAYS_TAG
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index 6a0a54532e5fe..c5442396dcec0 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -48,6 +48,15 @@ chosen {
+ 		stdout-path = "serial0:115200n8";
+ 	};
+ 
++	reserved-memory {
++		linux,cma {
++			compatible = "shared-dma-pool";
++			size = <0x0 0x8000000>;
++			reusable;
++			linux,cma-default;
++		};
++	};
++
+ 	sound {
+ 		compatible = "qcom,x1e80100-sndcard";
+ 		model = "X1E80100-CRD";
+@@ -92,7 +101,7 @@ cpu {
+ 			};
+ 
+ 			codec {
+-				sound-dai = <&wcd938x 1>, <&swr2 0>, <&lpass_txmacro 0>;
++				sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
+ 			};
+ 
+ 			platform {
+@@ -727,7 +736,7 @@ &swr2 {
+ 	wcd_tx: codec@0,3 {
+ 		compatible = "sdw20217010d00";
+ 		reg = <0 3>;
+-		qcom,tx-port-mapping = <1 1 2 3>;
++		qcom,tx-port-mapping = <2 2 3 4>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+index e76d29053d79b..49e19a64455b8 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+@@ -22,6 +22,15 @@ chosen {
+ 		stdout-path = "serial0:115200n8";
+ 	};
+ 
++	reserved-memory {
++		linux,cma {
++			compatible = "shared-dma-pool";
++			size = <0x0 0x8000000>;
++			reusable;
++			linux,cma-default;
++		};
++	};
++
+ 	vph_pwr: vph-pwr-regulator {
+ 		compatible = "regulator-fixed";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 6b40082bac68c..ee78185ca5387 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -2737,15 +2737,17 @@ pcie6a: pci@1bf8000 {
+ 			device_type = "pci";
+ 			compatible = "qcom,pcie-x1e80100";
+ 			reg = <0 0x01bf8000 0 0x3000>,
+-			      <0 0x70000000 0 0xf1d>,
+-			      <0 0x70000f20 0 0xa8>,
++			      <0 0x70000000 0 0xf20>,
++			      <0 0x70000f40 0 0xa8>,
+ 			      <0 0x70001000 0 0x1000>,
+-			      <0 0x70100000 0 0x100000>;
++			      <0 0x70100000 0 0x100000>,
++			      <0 0x01bfb000 0 0x1000>;
+ 			reg-names = "parf",
+ 				    "dbi",
+ 				    "elbi",
+ 				    "atu",
+-				    "config";
++				    "config",
++				    "mhi";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 			ranges = <0x01000000 0 0x00000000 0 0x70200000 0 0x100000>,
+diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
+index abb629d7e1319..7e3e767ab87d6 100644
+--- a/arch/s390/mm/pgalloc.c
++++ b/arch/s390/mm/pgalloc.c
+@@ -55,6 +55,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
+ 
+ void crst_table_free(struct mm_struct *mm, unsigned long *table)
+ {
++	if (!table)
++		return;
+ 	pagetable_free(virt_to_ptdesc(table));
+ }
+ 
+@@ -262,6 +264,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
+ 
+ static void base_crst_free(unsigned long *table)
+ {
++	if (!table)
++		return;
+ 	pagetable_free(virt_to_ptdesc(table));
+ }
+ 
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index c779046cc3fe7..2e8ead6090393 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -90,10 +90,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ 
+ 	cld
+ 
+-	IBRS_ENTER
+-	UNTRAIN_RET
+-	CLEAR_BRANCH_HISTORY
+-
+ 	/*
+ 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
+ 	 * ourselves.  To save a few cycles, we can check whether
+@@ -117,6 +113,16 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+ 	jnz	.Lsysenter_fix_flags
+ .Lsysenter_flags_fixed:
+ 
++	/*
++	 * CPU bug mitigation mechanisms can call other functions. They
++	 * should be invoked after making sure TF is cleared because
++	 * single-step is ignored only for instructions inside the
++	 * entry_SYSENTER_compat function.
++	 */
++	IBRS_ENTER
++	UNTRAIN_RET
++	CLEAR_BRANCH_HISTORY
++
+ 	movq	%rsp, %rdi
+ 	call	do_SYSENTER_32
+ 	jmp	sysret32_from_system_call
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index bd6a7857ce058..831fa4a121598 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,7 +16,6 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h>       /* need_resched() */
+-#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ 	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ }
+ 
+-static int acpi_cst_latency_cmp(const void *a, const void *b)
++static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
+ {
+-	const struct acpi_processor_cx *x = a, *y = b;
++	int i, j, k;
+ 
+-	if (!(x->valid && y->valid))
+-		return 0;
+-	if (x->latency > y->latency)
+-		return 1;
+-	if (x->latency < y->latency)
+-		return -1;
+-	return 0;
+-}
+-static void acpi_cst_latency_swap(void *a, void *b, int n)
+-{
+-	struct acpi_processor_cx *x = a, *y = b;
++	for (i = 1; i < length; i++) {
++		if (!states[i].valid)
++			continue;
+ 
+-	if (!(x->valid && y->valid))
+-		return;
+-	swap(x->latency, y->latency);
++		for (j = i - 1, k = i; j >= 0; j--) {
++			if (!states[j].valid)
++				continue;
++
++			if (states[j].latency > states[k].latency)
++				swap(states[j].latency, states[k].latency);
++
++			k = j;
++		}
++	}
+ }
+ 
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+ 
+ 	if (buggy_latency) {
+ 		pr_notice("FW issue: working around C-state latencies out of order\n");
+-		sort(&pr->power.states[1], max_cstate,
+-		     sizeof(struct acpi_processor_cx),
+-		     acpi_cst_latency_cmp,
+-		     acpi_cst_latency_swap);
++		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
+ 	}
+ 
+ 	lapic_timer_propagate_broadcast(pr);
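
sort() is dropped because a comparator that returns 0 whenever either state is invalid is not a valid total order, which sort() requires; the replacement bubbles only the latency values of valid states into place and leaves invalid slots alone. The same loop as a standalone program:

#include <stdio.h>

struct cx { int valid; int latency; };

static void latency_sort(struct cx *s, int n)
{
	for (int i = 1; i < n; i++) {
		if (!s[i].valid)
			continue;
		for (int j = i - 1, k = i; j >= 0; j--) {
			if (!s[j].valid)
				continue;
			if (s[j].latency > s[k].latency) {
				int t = s[j].latency;
				s[j].latency = s[k].latency;
				s[k].latency = t;
			}
			k = j;
		}
	}
}

int main(void)
{
	struct cx s[] = { {1, 30}, {0, 99}, {1, 10}, {1, 20} };

	latency_sort(s, 4);
	for (int i = 0; i < 4; i++)
		printf("%d ", s[i].latency);
	printf("\n");	/* 10 99 20 30: invalid slot untouched */
	return 0;
}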
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index d51fc8321d411..da32e8ed08309 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -269,8 +269,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ 	if (!devp->hd_ireqfreq)
+ 		return -EIO;
+ 
+-	if (count < sizeof(unsigned long))
+-		return -EINVAL;
++	if (in_compat_syscall()) {
++		if (count < sizeof(compat_ulong_t))
++			return -EINVAL;
++	} else {
++		if (count < sizeof(unsigned long))
++			return -EINVAL;
++	}
+ 
+ 	add_wait_queue(&devp->hd_waitqueue, &wait);
+ 
+@@ -294,9 +299,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ 		schedule();
+ 	}
+ 
+-	retval = put_user(data, (unsigned long __user *)buf);
+-	if (!retval)
+-		retval = sizeof(unsigned long);
++	if (in_compat_syscall()) {
++		retval = put_user(data, (compat_ulong_t __user *)buf);
++		if (!retval)
++			retval = sizeof(compat_ulong_t);
++	} else {
++		retval = put_user(data, (unsigned long __user *)buf);
++		if (!retval)
++			retval = sizeof(unsigned long);
++	}
++
+ out:
+ 	__set_current_state(TASK_RUNNING);
+ 	remove_wait_queue(&devp->hd_waitqueue, &wait);
+@@ -651,12 +663,24 @@ struct compat_hpet_info {
+ 	unsigned short hi_timer;
+ };
+ 
++/* 32-bit types would lead to different command codes which should be
++ * translated into 64-bit ones before being passed to hpet_ioctl_common()
++ */
++#define COMPAT_HPET_INFO       _IOR('h', 0x03, struct compat_hpet_info)
++#define COMPAT_HPET_IRQFREQ    _IOW('h', 0x6, compat_ulong_t)
++
+ static long
+ hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ 	struct hpet_info info;
+ 	int err;
+ 
++	if (cmd == COMPAT_HPET_INFO)
++		cmd = HPET_INFO;
++
++	if (cmd == COMPAT_HPET_IRQFREQ)
++		cmd = HPET_IRQFREQ;
++
+ 	mutex_lock(&hpet_mutex);
+ 	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
+ 	mutex_unlock(&hpet_mutex);
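
The remapping is needed because _IOR/_IOW encode sizeof(arg) into the ioctl number: a 32-bit process builds HPET_IRQFREQ with a 4-byte long and so computes a different code than the kernel's 8-byte native definition. The difference is easy to print from userspace:

#include <stdint.h>
#include <stdio.h>
#include <linux/ioctl.h>

int main(void)
{
	/* same magic and number, different size field */
	printf("native: %#lx\n", (unsigned long)_IOW('h', 0x6, uint64_t));
	printf("compat: %#lx\n", (unsigned long)_IOW('h', 0x6, uint32_t));
	return 0;
}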
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 37f1cdf46d291..4ac3a35dcd983 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ 
+-	if (acpi_cpufreq_driver.set_boost)
++	if (acpi_cpufreq_driver.set_boost) {
+ 		set_boost(policy, acpi_cpufreq_driver.boost_enabled);
++		policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
++	}
+ 
+ 	return result;
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index fd9c3ed21f49c..d7630d9cdb2f4 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1431,7 +1431,8 @@ static int cpufreq_online(unsigned int cpu)
+ 		}
+ 
+ 		/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+-		policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
++		if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
++			policy->boost_enabled = true;
+ 
+ 		/*
+ 		 * The initialization has succeeded and the policy is online.
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 9f3d665cfdcf7..a1da7581adb03 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1053,9 +1053,16 @@ struct cs_dsp_coeff_parsed_coeff {
+ 	int len;
+ };
+ 
+-static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
++static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail,
++				     const u8 **str)
+ {
+-	int length;
++	int length, total_field_len;
++
++	/* String fields are at least one __le32 */
++	if (sizeof(__le32) > avail) {
++		*pos = NULL;
++		return 0;
++	}
+ 
+ 	switch (bytes) {
+ 	case 1:
+@@ -1068,10 +1075,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+ 		return 0;
+ 	}
+ 
++	total_field_len = ((length + bytes) + 3) & ~0x03;
++	if ((unsigned int)total_field_len > avail) {
++		*pos = NULL;
++		return 0;
++	}
++
+ 	if (str)
+ 		*str = *pos + bytes;
+ 
+-	*pos += ((length + bytes) + 3) & ~0x03;
++	*pos += total_field_len;
+ 
+ 	return length;
+ }
+@@ -1096,71 +1109,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+ 	return val;
+ }
+ 
+-static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+-					  struct cs_dsp_coeff_parsed_alg *blk)
++static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp,
++				  const struct wmfw_region *region,
++				  struct cs_dsp_coeff_parsed_alg *blk)
+ {
+ 	const struct wmfw_adsp_alg_data *raw;
++	unsigned int data_len = le32_to_cpu(region->len);
++	unsigned int pos;
++	const u8 *tmp;
++
++	raw = (const struct wmfw_adsp_alg_data *)region->data;
+ 
+ 	switch (dsp->fw_ver) {
+ 	case 0:
+ 	case 1:
+-		raw = (const struct wmfw_adsp_alg_data *)*data;
+-		*data = raw->data;
++		if (sizeof(*raw) > data_len)
++			return -EOVERFLOW;
+ 
+ 		blk->id = le32_to_cpu(raw->id);
+ 		blk->name = raw->name;
+-		blk->name_len = strlen(raw->name);
++		blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ 		blk->ncoeff = le32_to_cpu(raw->ncoeff);
++
++		pos = sizeof(*raw);
+ 		break;
+ 	default:
+-		blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+-		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
++		if (sizeof(raw->id) > data_len)
++			return -EOVERFLOW;
++
++		tmp = region->data;
++		blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp);
++		pos = tmp - region->data;
++
++		tmp = &region->data[pos];
++		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ 							  &blk->name);
+-		cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+-		blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		if (sizeof(raw->ncoeff) > (data_len - pos))
++			return -EOVERFLOW;
++
++		blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp);
++		pos += sizeof(raw->ncoeff);
+ 		break;
+ 	}
+ 
++	if ((int)blk->ncoeff < 0)
++		return -EOVERFLOW;
++
+ 	cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ 	cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ 	cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
++
++	return pos;
+ }
+ 
+-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+-					    struct cs_dsp_coeff_parsed_coeff *blk)
++static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp,
++				    const struct wmfw_region *region,
++				    unsigned int pos,
++				    struct cs_dsp_coeff_parsed_coeff *blk)
+ {
+ 	const struct wmfw_adsp_coeff_data *raw;
++	unsigned int data_len = le32_to_cpu(region->len);
++	unsigned int blk_len, blk_end_pos;
+ 	const u8 *tmp;
+-	int length;
++
++	raw = (const struct wmfw_adsp_coeff_data *)&region->data[pos];
++	if (sizeof(raw->hdr) > (data_len - pos))
++		return -EOVERFLOW;
++
++	blk_len = le32_to_cpu(raw->hdr.size);
++	if (blk_len > S32_MAX)
++		return -EOVERFLOW;
++
++	if (blk_len > (data_len - pos - sizeof(raw->hdr)))
++		return -EOVERFLOW;
++
++	blk_end_pos = pos + sizeof(raw->hdr) + blk_len;
++
++	blk->offset = le16_to_cpu(raw->hdr.offset);
++	blk->mem_type = le16_to_cpu(raw->hdr.type);
+ 
+ 	switch (dsp->fw_ver) {
+ 	case 0:
+ 	case 1:
+-		raw = (const struct wmfw_adsp_coeff_data *)*data;
+-		*data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
++		if (sizeof(*raw) > (data_len - pos))
++			return -EOVERFLOW;
+ 
+-		blk->offset = le16_to_cpu(raw->hdr.offset);
+-		blk->mem_type = le16_to_cpu(raw->hdr.type);
+ 		blk->name = raw->name;
+-		blk->name_len = strlen(raw->name);
++		blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name));
+ 		blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ 		blk->flags = le16_to_cpu(raw->flags);
+ 		blk->len = le32_to_cpu(raw->len);
+ 		break;
+ 	default:
+-		tmp = *data;
+-		blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+-		blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+-		length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+-		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
++		pos += sizeof(raw->hdr);
++		tmp = &region->data[pos];
++		blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos,
+ 							  &blk->name);
+-		cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+-		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL);
++		if (!tmp)
++			return -EOVERFLOW;
++
++		pos = tmp - region->data;
++		if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) >
++		    (data_len - pos))
++			return -EOVERFLOW;
++
+ 		blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
++		pos += sizeof(raw->ctl_type);
+ 		blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
++		pos += sizeof(raw->flags);
+ 		blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+-
+-		*data = *data + sizeof(raw->hdr) + length;
+ 		break;
+ 	}
+ 
+@@ -1170,6 +1246,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ 	cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ 	cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ 	cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
++
++	return blk_end_pos;
+ }
+ 
+ static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+@@ -1193,12 +1271,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ 	struct cs_dsp_alg_region alg_region = {};
+ 	struct cs_dsp_coeff_parsed_alg alg_blk;
+ 	struct cs_dsp_coeff_parsed_coeff coeff_blk;
+-	const u8 *data = region->data;
+-	int i, ret;
++	int i, pos, ret;
++
++	pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk);
++	if (pos < 0)
++		return pos;
+ 
+-	cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ 	for (i = 0; i < alg_blk.ncoeff; i++) {
+-		cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
++		pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk);
++		if (pos < 0)
++			return pos;
+ 
+ 		switch (coeff_blk.ctl_type) {
+ 		case WMFW_CTL_TYPE_BYTES:
+@@ -1267,6 +1349,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ 	const struct wmfw_adsp1_sizes *adsp1_sizes;
+ 
+ 	adsp1_sizes = (void *)&firmware->data[pos];
++	if (sizeof(*adsp1_sizes) > firmware->size - pos) {
++		cs_dsp_err(dsp, "%s: file truncated\n", file);
++		return 0;
++	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ 		   le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+@@ -1283,6 +1369,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ 	const struct wmfw_adsp2_sizes *adsp2_sizes;
+ 
+ 	adsp2_sizes = (void *)&firmware->data[pos];
++	if (sizeof(*adsp2_sizes) > firmware->size - pos) {
++		cs_dsp_err(dsp, "%s: file truncated\n", file);
++		return 0;
++	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ 		   le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+@@ -1322,7 +1412,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	struct regmap *regmap = dsp->regmap;
+ 	unsigned int pos = 0;
+ 	const struct wmfw_header *header;
+-	const struct wmfw_adsp1_sizes *adsp1_sizes;
+ 	const struct wmfw_footer *footer;
+ 	const struct wmfw_region *region;
+ 	const struct cs_dsp_region *mem;
+@@ -1338,10 +1427,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 
+ 	ret = -EINVAL;
+ 
+-	pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+-	if (pos >= firmware->size) {
+-		cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+-			   file, firmware->size);
++	if (sizeof(*header) >= firmware->size) {
++		ret = -EOVERFLOW;
+ 		goto out_fw;
+ 	}
+ 
+@@ -1369,22 +1456,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 
+ 	pos = sizeof(*header);
+ 	pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
++	if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) {
++		ret = -EOVERFLOW;
++		goto out_fw;
++	}
+ 
+ 	footer = (void *)&firmware->data[pos];
+ 	pos += sizeof(*footer);
+ 
+ 	if (le32_to_cpu(header->len) != pos) {
+-		cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+-			   file, le32_to_cpu(header->len));
++		ret = -EOVERFLOW;
+ 		goto out_fw;
+ 	}
+ 
+ 	cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ 		   le64_to_cpu(footer->timestamp));
+ 
+-	while (pos < firmware->size &&
+-	       sizeof(*region) < firmware->size - pos) {
++	while (pos < firmware->size) {
++		/* Is there enough data for a complete block header? */
++		if (sizeof(*region) > firmware->size - pos) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		region = (void *)&(firmware->data[pos]);
++
++		if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		region_name = "Unknown";
+ 		reg = 0;
+ 		text = NULL;
+@@ -1441,16 +1542,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 			   regions, le32_to_cpu(region->len), offset,
+ 			   region_name);
+ 
+-		if (le32_to_cpu(region->len) >
+-		    firmware->size - pos - sizeof(*region)) {
+-			cs_dsp_err(dsp,
+-				   "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+-				   file, regions, region_name,
+-				   le32_to_cpu(region->len), firmware->size);
+-			ret = -EINVAL;
+-			goto out_fw;
+-		}
+-
+ 		if (text) {
+ 			memcpy(text, region->data, le32_to_cpu(region->len));
+ 			cs_dsp_info(dsp, "%s: %s\n", file, text);
+@@ -1501,6 +1592,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	cs_dsp_buf_free(&buf_list);
+ 	kfree(text);
+ 
++	if (ret == -EOVERFLOW)
++		cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ 	return ret;
+ }
+ 
+@@ -2068,10 +2162,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	pos = le32_to_cpu(hdr->len);
+ 
+ 	blocks = 0;
+-	while (pos < firmware->size &&
+-	       sizeof(*blk) < firmware->size - pos) {
++	while (pos < firmware->size) {
++		/* Is there enough data for a complete block header? */
++		if (sizeof(*blk) > firmware->size - pos) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		blk = (void *)(&firmware->data[pos]);
+ 
++		if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) {
++			ret = -EOVERFLOW;
++			goto out_fw;
++		}
++
+ 		type = le16_to_cpu(blk->type);
+ 		offset = le16_to_cpu(blk->offset);
+ 		version = le32_to_cpu(blk->ver) >> 8;
+@@ -2168,17 +2272,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 		}
+ 
+ 		if (reg) {
+-			if (le32_to_cpu(blk->len) >
+-			    firmware->size - pos - sizeof(*blk)) {
+-				cs_dsp_err(dsp,
+-					   "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+-					   file, blocks, region_name,
+-					   le32_to_cpu(blk->len),
+-					   firmware->size);
+-				ret = -EINVAL;
+-				goto out_fw;
+-			}
+-
+ 			buf = cs_dsp_buf_alloc(blk->data,
+ 					       le32_to_cpu(blk->len),
+ 					       &buf_list);
+@@ -2218,6 +2311,10 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 	kfree(text);
++
++	if (ret == -EOVERFLOW)
++		cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
++
+ 	return ret;
+ }
+ 
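+The two cs_dsp hunks above replace post-hoc length validation with up-front, subtraction-based bounds checks ("need > remaining"), which cannot wrap even when a length field in the firmware file is hostile. A minimal userspace sketch of the same pattern follows; the header layout and sizes are hypothetical stand-ins, not the real wmfw structures:
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+struct blk_hdr { uint32_t type; uint32_t len; }; /* hypothetical, 8 bytes */
+
+/* Return 0 when a complete header plus payload fits at 'pos'; the
+ * caller's loop guarantees pos < file_size, so the subtractions here
+ * cannot wrap. */
+static int check_block(size_t file_size, size_t pos, uint32_t payload_len)
+{
+	if (sizeof(struct blk_hdr) > file_size - pos)
+		return -1;	/* truncated header: -EOVERFLOW upstream */
+	if (payload_len > file_size - pos - sizeof(struct blk_hdr))
+		return -1;	/* payload runs past end of file */
+	return 0;
+}
+
+int main(void)
+{
+	printf("%d\n", check_block(20, 8, 4));	/* 0: 8 + 8 + 4 <= 20 */
+	printf("%d\n", check_block(16, 12, 0));	/* -1: header truncated */
+	return 0;
+}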
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 828aa2ea0fe4c..185a5d60f1019 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -257,6 +257,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
+ 	}
+ }
+ 
++static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv)
++{
++	rcar_i2c_write(priv, ICSIER, 0);
++	rcar_i2c_write(priv, ICSSR, 0);
++	rcar_i2c_write(priv, ICSCR, SDBS);
++	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++}
++
+ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
+ {
+ 	int ret;
+@@ -875,6 +883,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+ {
+ 	int ret;
+ 
++	/* Don't reset if a slave instance is currently running */
++	if (priv->slave)
++		return -EISCONN;
++
+ 	ret = reset_control_reset(priv->rstc);
+ 	if (ret)
+ 		return ret;
+@@ -903,10 +915,10 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+ 
+ 	/* Gen3+ needs a reset. That also allows RXDMA once */
+ 	if (priv->devtype >= I2C_RCAR_GEN3) {
+-		priv->flags &= ~ID_P_NO_RXDMA;
+ 		ret = rcar_i2c_do_reset(priv);
+ 		if (ret)
+ 			goto out;
++		priv->flags &= ~ID_P_NO_RXDMA;
+ 	}
+ 
+ 	rcar_i2c_init(priv);
+@@ -1033,11 +1045,8 @@ static int rcar_unreg_slave(struct i2c_client *slave)
+ 
+ 	/* ensure no irq is running before clearing ptr */
+ 	disable_irq(priv->irq);
+-	rcar_i2c_write(priv, ICSIER, 0);
+-	rcar_i2c_write(priv, ICSSR, 0);
++	rcar_i2c_reset_slave(priv);
+ 	enable_irq(priv->irq);
+-	rcar_i2c_write(priv, ICSCR, SDBS);
+-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ 
+ 	priv->slave = NULL;
+ 
+@@ -1152,7 +1161,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 		goto out_pm_disable;
+ 	}
+ 
+-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
++	/* Bring hardware to known state */
++	rcar_i2c_init(priv);
++	rcar_i2c_reset_slave(priv);
+ 
+ 	if (priv->devtype < I2C_RCAR_GEN3) {
+ 		irqflags |= IRQF_NO_THREAD;
+@@ -1168,6 +1179,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 	if (of_property_read_bool(dev->of_node, "smbus"))
+ 		priv->flags |= ID_P_HOST_NOTIFY;
+ 
++	/* R-Car Gen3+ needs a reset before every transfer */
+ 	if (priv->devtype >= I2C_RCAR_GEN3) {
+ 		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 		if (IS_ERR(priv->rstc)) {
+@@ -1178,6 +1190,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 		ret = reset_control_status(priv->rstc);
+ 		if (ret < 0)
+ 			goto out_pm_put;
++
++		/* hard reset disturbs HostNotify local target, so disable it */
++		priv->flags &= ~ID_P_HOST_NOTIFY;
+ 	}
+ 
+ 	ret = platform_get_irq(pdev, 0);
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index db0d1ac82910e..7e7b15440832b 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1067,6 +1067,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode);
+ 
+ static const struct i2c_device_id dummy_id[] = {
+ 	{ "dummy", 0 },
++	{ "smbus_host_notify", 0 },
+ 	{ },
+ };
+ 
+diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
+index ca43e98cae1b2..23a11e4e92567 100644
+--- a/drivers/i2c/i2c-slave-testunit.c
++++ b/drivers/i2c/i2c-slave-testunit.c
+@@ -118,6 +118,13 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ 			queue_delayed_work(system_long_wq, &tu->worker,
+ 					   msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ 		}
++
++		/*
++		 * Reset reg_idx so the work does not get queued again in case
++		 * of a STOP after a following read message. But do not clear TU regs
++		 * here because we still need them in the workqueue!
++		 */
++		tu->reg_idx = 0;
+ 		break;
+ 
+ 	case I2C_SLAVE_WRITE_REQUESTED:
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index 18f83158f637f..b5fed8a000ea9 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -322,7 +322,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ 	 * this is the case if the IIO device and the trigger device share the
+ 	 * same parent device.
+ 	 */
+-	if (iio_validate_own_trigger(pf->indio_dev, trig))
++	if (!iio_validate_own_trigger(pf->indio_dev, trig))
+ 		trig->attached_own_device = true;
+ 
+ 	return ret;
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 4c67e2c5a82e1..a7a2bcedb37e4 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 	struct fastrpc_phy_page pages[1];
+ 	char *name;
+ 	int err;
++	bool scm_done = false;
+ 	struct {
+ 		int pgid;
+ 		u32 namelen;
+@@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 					fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
+ 				goto err_map;
+ 			}
++			scm_done = true;
+ 		}
+ 	}
+ 
+@@ -1320,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
+ 		goto err_invoke;
+ 
+ 	kfree(args);
++	kfree(name);
+ 
+ 	return 0;
+ err_invoke:
+-	if (fl->cctx->vmcount) {
++	if (fl->cctx->vmcount && scm_done) {
+ 		u64 src_perms = 0;
+ 		struct qcom_scm_vmperm dst_perms;
+ 		u32 i;
+@@ -1693,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
+ {
+ 	struct fastrpc_invoke_args args[2] = { 0 };
+ 
+-	/* Capability filled in userspace */
++	/*
++	 * Capability filled in userspace. This carries the information
++	 * about the remoteproc support which is fetched from the remoteproc
++	 * sysfs node by userspace.
++	 */
+ 	dsp_attr_buf[0] = 0;
++	dsp_attr_buf_len -= 1;
+ 
+ 	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
+ 	args[0].length = sizeof(dsp_attr_buf_len);
+ 	args[0].fd = -1;
+ 	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
+-	args[1].length = dsp_attr_buf_len;
++	args[1].length = dsp_attr_buf_len * sizeof(u32);
+ 	args[1].fd = -1;
+-	fl->pd = USER_PD;
+ 
+ 	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
+ 				       FASTRPC_SCALARS(0, 1, 1), args);
+@@ -1730,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
+ 	if (!dsp_attributes)
+ 		return -ENOMEM;
+ 
+-	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
++	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
+ 	if (err == DSP_UNSUPPORTED_API) {
+ 		dev_info(&cctx->rpdev->dev,
+ 			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
+@@ -1783,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
+ 	if (err)
+ 		return err;
+ 
+-	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
++	if (copy_to_user(argp, &cap, sizeof(cap)))
+ 		return -EFAULT;
+ 
+ 	return 0;
+@@ -2080,6 +2087,16 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
+ 	return err;
+ }
+ 
++static int is_attach_rejected(struct fastrpc_user *fl)
++{
++	/* Check if the device node is non-secure */
++	if (!fl->is_secure_dev) {
++		dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n");
++		return -EACCES;
++	}
++	return 0;
++}
++
+ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
+ 				 unsigned long arg)
+ {
+@@ -2092,13 +2109,19 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
+ 		err = fastrpc_invoke(fl, argp);
+ 		break;
+ 	case FASTRPC_IOCTL_INIT_ATTACH:
+-		err = fastrpc_init_attach(fl, ROOT_PD);
++		err = is_attach_rejected(fl);
++		if (!err)
++			err = fastrpc_init_attach(fl, ROOT_PD);
+ 		break;
+ 	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
+-		err = fastrpc_init_attach(fl, SENSORS_PD);
++		err = is_attach_rejected(fl);
++		if (!err)
++			err = fastrpc_init_attach(fl, SENSORS_PD);
+ 		break;
+ 	case FASTRPC_IOCTL_INIT_CREATE_STATIC:
+-		err = fastrpc_init_create_static_process(fl, argp);
++		err = is_attach_rejected(fl);
++		if (!err)
++			err = fastrpc_init_create_static_process(fl, argp);
+ 		break;
+ 	case FASTRPC_IOCTL_INIT_CREATE:
+ 		err = fastrpc_init_create_process(fl, argp);
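+The capability hunk above fixes two sizing bugs: dsp_attr_buf is an array of u32, so the invoke argument length must be elements * sizeof(u32), and element 0 is filled locally, so one element is subtracted first. A small userspace sketch of the corrected arithmetic, with illustrative names and sizes:
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#define MAX_ATTRS 256	/* stands in for FASTRPC_MAX_DSP_ATTRIBUTES */
+
+int main(void)
+{
+	uint32_t nelems = MAX_ATTRS - 1;	/* element 0 is filled locally */
+
+	/* Before the fix the byte length passed to the DSP was just
+	 * 'nelems', i.e. a 4x undersized buffer. */
+	size_t byte_len = (size_t)nelems * sizeof(uint32_t);
+
+	printf("%u elements -> %zu bytes\n", (unsigned)nelems, byte_len);
+	return 0;
+}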
+diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+index 16695cb5e69c7..7c3d8bedf90ba 100644
+--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
+@@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off,
+ 
+ 		buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
+ 	}
+-	ret = byte;
+ error:
+ 	release_sys_lock(priv);
+ 	return ret;
+@@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off,
+ 			goto error;
+ 		}
+ 	}
+-	ret = byte;
+ error:
+ 	release_sys_lock(priv);
+ 	return ret;
+@@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off,
+ 
+ 		buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
+ 	}
+-	ret = byte;
+ error:
+ 	release_sys_lock(priv);
+ 	return ret;
+@@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off,
+ 			goto error;
+ 		}
+ 	}
+-	ret = byte;
+ error:
+ 	release_sys_lock(priv);
+ 	return ret;
+diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
+index 1ec65d87488a3..d02f6e881139f 100644
+--- a/drivers/misc/mei/platform-vsc.c
++++ b/drivers/misc/mei/platform-vsc.c
+@@ -28,8 +28,8 @@
+ 
+ #define MEI_VSC_MAX_MSG_SIZE		512
+ 
+-#define MEI_VSC_POLL_DELAY_US		(50 * USEC_PER_MSEC)
+-#define MEI_VSC_POLL_TIMEOUT_US		(200 * USEC_PER_MSEC)
++#define MEI_VSC_POLL_DELAY_US		(100 * USEC_PER_MSEC)
++#define MEI_VSC_POLL_TIMEOUT_US		(400 * USEC_PER_MSEC)
+ 
+ #define mei_dev_to_vsc_hw(dev)		((struct mei_vsc_hw *)((dev)->hw))
+ 
+diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
+index e6a98dba8a735..876330474444b 100644
+--- a/drivers/misc/mei/vsc-tp.c
++++ b/drivers/misc/mei/vsc-tp.c
+@@ -336,7 +336,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+ 		return ret;
+ 
+ 	if (ibuf)
+-		cpu_to_be32_array(ibuf, tp->rx_buf, words);
++		be32_to_cpu_array(ibuf, tp->rx_buf, words);
+ 
+ 	return ret;
+ }
+@@ -568,6 +568,19 @@ static void vsc_tp_remove(struct spi_device *spi)
+ 	free_irq(spi->irq, tp);
+ }
+ 
++static void vsc_tp_shutdown(struct spi_device *spi)
++{
++	struct vsc_tp *tp = spi_get_drvdata(spi);
++
++	platform_device_unregister(tp->pdev);
++
++	mutex_destroy(&tp->mutex);
++
++	vsc_tp_reset(tp);
++
++	free_irq(spi->irq, tp);
++}
++
+ static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+ 	{ "INTC1009" }, /* Raptor Lake */
+ 	{ "INTC1058" }, /* Tiger Lake */
+@@ -580,6 +593,7 @@ MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
+ static struct spi_driver vsc_tp_driver = {
+ 	.probe = vsc_tp_probe,
+ 	.remove = vsc_tp_remove,
++	.shutdown = vsc_tp_shutdown,
+ 	.driver = {
+ 		.name = "vsc-tp",
+ 		.acpi_match_table = vsc_tp_acpi_ids,
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index d7427894e0bc9..c302eb380e427 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -224,6 +224,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
+ 	}
+ 	p = sgm->addr;
+ 
++	if (n > sgm->length)
++		n = sgm->length;
++
+ 	/* NOTE:  we never transfer more than rw_threshold bytes
+ 	 * to/from the fifo here; there's no I/O overlap.
+ 	 * This also assumes that access width (i.e. ACCWD) is 4 bytes
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 112584aa07723..fbf7a91bed356 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -4727,6 +4727,21 @@ int sdhci_setup_host(struct sdhci_host *host)
+ 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
+ 			host->max_adma = 65532; /* 32-bit alignment */
+ 			mmc->max_seg_size = 65535;
++			/*
++			 * sdhci_adma_table_pre() expects to define 1 DMA
++			 * descriptor per segment, so the maximum segment size
++			 * is set accordingly. SDHCI allows up to 64KiB per DMA
++			 * descriptor (16-bit field), but some controllers do
++			 * not support "zero means 65536" reducing the maximum
++			 * for them to 65535. That is a problem if PAGE_SIZE is
++			 * 64KiB because the block layer does not support
++			 * max_seg_size < PAGE_SIZE, however
++			 * sdhci_adma_table_pre() has a workaround to handle
++			 * that case, and split the descriptor. Refer also to the
++			 * comment in sdhci_adma_table_pre().
++			 */
++			if (mmc->max_seg_size < PAGE_SIZE)
++				mmc->max_seg_size = PAGE_SIZE;
+ 		} else {
+ 			mmc->max_seg_size = 65536;
+ 		}
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index fcb20eac332a6..b47841e67e988 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1048,31 +1048,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
+ 	return ARRAY_SIZE(lan9303_mib);
+ }
+ 
+-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
++static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
+ {
+ 	struct lan9303 *chip = ds->priv;
+ 	int phy_base = chip->phy_addr_base;
+ 
+-	if (phy == phy_base)
++	if (port == 0)
+ 		return lan9303_virt_phy_reg_read(chip, regnum);
+-	if (phy > phy_base + 2)
++	if (port > 2)
+ 		return -ENODEV;
+ 
+-	return chip->ops->phy_read(chip, phy, regnum);
++	return chip->ops->phy_read(chip, phy_base + port, regnum);
+ }
+ 
+-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
++static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
+ 			     u16 val)
+ {
+ 	struct lan9303 *chip = ds->priv;
+ 	int phy_base = chip->phy_addr_base;
+ 
+-	if (phy == phy_base)
++	if (port == 0)
+ 		return lan9303_virt_phy_reg_write(chip, regnum, val);
+-	if (phy > phy_base + 2)
++	if (port > 2)
+ 		return -ENODEV;
+ 
+-	return chip->ops->phy_write(chip, phy, regnum, val);
++	return chip->ops->phy_write(chip, phy_base + port, regnum, val);
+ }
+ 
+ static int lan9303_port_enable(struct dsa_switch *ds, int port,
+@@ -1100,7 +1100,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
+ 	vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
+ 
+ 	lan9303_disable_processing_port(chip, port);
+-	lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
++	lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
+ }
+ 
+ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
+@@ -1355,8 +1355,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
+ 
+ static int lan9303_register_switch(struct lan9303 *chip)
+ {
+-	int base;
+-
+ 	chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
+ 	if (!chip->ds)
+ 		return -ENOMEM;
+@@ -1365,8 +1363,7 @@ static int lan9303_register_switch(struct lan9303 *chip)
+ 	chip->ds->num_ports = LAN9303_NUM_PORTS;
+ 	chip->ds->priv = chip;
+ 	chip->ds->ops = &lan9303_switch_ops;
+-	base = chip->phy_addr_base;
+-	chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
++	chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
+ 
+ 	return dsa_register_switch(chip->ds);
+ }
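+The lan9303 hunks above change the phy_read/phy_write callbacks to take a switch port number (0..2) and translate it to an MDIO address internally, instead of expecting pre-offset PHY addresses from callers. A userspace sketch of the mapping, assuming an illustrative address base of 4:
+
+#include <stdio.h>
+
+static int phy_addr_for_port(int phy_addr_base, int port)
+{
+	if (port == 0)
+		return -1;	/* port 0 is the virtual PHY, handled apart */
+	if (port > 2)
+		return -2;	/* no such port: -ENODEV in the driver */
+	return phy_addr_base + port;
+}
+
+int main(void)
+{
+	/* With a base of 4, ports 1 and 2 map to MDIO addresses 5 and 6. */
+	for (int port = 0; port <= 3; port++)
+		printf("port %d -> %d\n", port, phy_addr_for_port(4, port));
+	return 0;
+}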
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+index a806dadc41965..20c6529ec1350 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+@@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev)
+ 			dev_err(dev, "Cannot create eth interface %d\n", i);
+ 			bcmasp_remove_intfs(priv);
+ 			of_node_put(intf_node);
++			ret = -ENOMEM;
+ 			goto of_put_exit;
+ 		}
+ 		list_add_tail(&intf->list, &priv->intfs);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+index ee86d2c53079e..55b5bb884d736 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+@@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+ 		-EFBIG,      /* I40E_AQ_RC_EFBIG */
+ 	};
+ 
+-	/* aq_rc is invalid if AQ timed out */
+-	if (aq_ret == -EIO)
+-		return -EAGAIN;
+-
+ 	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ 		return -ERANGE;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index ffb9f9f15c523..3a2d4d0697955 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -13264,6 +13264,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ 	bool need_reset;
+ 	int i;
+ 
++	/* VSI shall be deleted in a moment, block loading new programs */
++	if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
++		return -EINVAL;
++
+ 	/* Don't allow frames that span over multiple buffers */
+ 	if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
+@@ -13272,14 +13276,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ 
+ 	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
+ 	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
+-
+ 	if (need_reset)
+ 		i40e_prep_for_reset(pf);
+ 
+-	/* VSI shall be deleted in a moment, just return EINVAL */
+-	if (test_bit(__I40E_IN_REMOVE, pf->state))
+-		return -EINVAL;
+-
+ 	old_prog = xchg(&vsi->xdp_prog, prog);
+ 
+ 	if (need_reset) {
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index 1d5b7bb6380f9..8a810e69cb338 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+ 	if (ch->dma.irq)
+ 		free_irq(ch->dma.irq, priv);
+ 	if (IS_RX(ch->idx)) {
+-		int desc;
++		struct ltq_dma_channel *dma = &ch->dma;
+ 
+-		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
++		for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
+ 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ 	}
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index eb2a20b5a0d0c..f92dfc65a0ffc 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -1746,7 +1746,7 @@ struct cpt_lf_alloc_req_msg {
+ 	u16 nix_pf_func;
+ 	u16 sso_pf_func;
+ 	u16 eng_grpmsk;
+-	int blkaddr;
++	u8 blkaddr;
+ 	u8 ctx_ilen_valid : 1;
+ 	u8 ctx_ilen : 7;
+ };
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index d883157393ea0..6c3aca6f278db 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
+ 	NPC_LT_LB_CUSTOM1 = 0xF,
+ };
+ 
++/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
++ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
++ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
++ */
+ enum npc_kpu_lc_ltype {
+-	NPC_LT_LC_IP = 1,
++	NPC_LT_LC_PTP = 1,
++	NPC_LT_LC_IP,
+ 	NPC_LT_LC_IP_OPT,
+ 	NPC_LT_LC_IP6,
+ 	NPC_LT_LC_IP6_EXT,
+@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
+ 	NPC_LT_LC_RARP,
+ 	NPC_LT_LC_MPLS,
+ 	NPC_LT_LC_NSH,
+-	NPC_LT_LC_PTP,
+ 	NPC_LT_LC_FCOE,
+ 	NPC_LT_LC_NGIO,
+ 	NPC_LT_LC_CUSTOM0 = 0xE,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index ff78251f92d44..5f661e67ccbcf 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+ 		if (req->ssow > block->lf.max) {
+ 			dev_err(&rvu->pdev->dev,
+ 				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
+-				 pcifunc, req->sso, block->lf.max);
++				 pcifunc, req->ssow, block->lf.max);
+ 			return -EINVAL;
+ 		}
+ 		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+index f047185f38e0f..3e09d22858147 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+@@ -696,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 					struct cpt_rd_wr_reg_msg *req,
+ 					struct cpt_rd_wr_reg_msg *rsp)
+ {
+-	int blkaddr;
++	u64 offset = req->reg_offset;
++	int blkaddr, lf;
+ 
+ 	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ 	if (blkaddr < 0)
+@@ -707,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ 	    !is_cpt_vf(rvu, req->hdr.pcifunc))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
+-	rsp->reg_offset = req->reg_offset;
+-	rsp->ret_val = req->ret_val;
+-	rsp->is_write = req->is_write;
+-
+ 	if (!is_valid_offset(rvu, req))
+ 		return CPT_AF_ERR_ACCESS_DENIED;
+ 
++	/* Translate local LF used by VFs to global CPT LF */
++	lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc,
++			(offset & 0xFFF) >> 3);
++
++	/* Translate local LF's offset to global CPT LF's offset */
++	offset &= 0xFF000;
++	offset += lf << 3;
++
++	rsp->reg_offset = offset;
++	rsp->ret_val = req->ret_val;
++	rsp->is_write = req->is_write;
++
+ 	if (req->is_write)
+-		rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
++		rvu_write64(rvu, blkaddr, offset, req->val);
+ 	else
+-		rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
++		rsp->val = rvu_read64(rvu, blkaddr, offset);
+ 
+ 	return 0;
+ }
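+The rvu_cpt.c hunk above translates a VF's local LF register offset into the global CPT LF offset: the low 12 bits encode the local LF slot (each register is 8 bytes, hence the >> 3), and that slot is replaced with the global LF index resolved by rvu_get_lf(). A worked userspace sketch with illustrative numbers:
+
+#include <stdint.h>
+#include <stdio.h>
+
+static uint64_t xlate(uint64_t req_offset, uint64_t global_lf)
+{
+	/* (req_offset & 0xFFF) >> 3 is the local LF the VF asked for;
+	 * the caller resolves it to 'global_lf' via rvu_get_lf(). */
+	uint64_t offset = req_offset & 0xFF000; /* keep the register block */
+
+	offset += global_lf << 3;               /* select the global LF */
+	return offset;
+}
+
+int main(void)
+{
+	/* Local LF 1 (offset 0x10008) owned by a VF whose global LF is 5
+	 * lands on 0x10028. */
+	printf("0x%llx\n", (unsigned long long)xlate(0x10008, 5));
+	return 0;
+}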
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 00af8888e3291..3dc828cf6c5a6 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -3864,6 +3864,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
+ 	return -ERANGE;
+ }
+ 
++/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
++#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
++/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
++#define NPC_LT_LC_IP_MATCH_MSK  ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
++
+ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ {
+ 	int idx, nr_field, key_off, field_marker, keyoff_marker;
+@@ -3933,7 +3938,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 			field->hdr_offset = 9; /* offset */
+ 			field->bytesm1 = 0; /* 1 byte */
+ 			field->ltype_match = NPC_LT_LC_IP;
+-			field->ltype_mask = 0xF;
++			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_IPV4:
+ 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
+@@ -3960,8 +3965,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 					field->bytesm1 = 3; /* DIP, 4 bytes */
+ 				}
+ 			}
+-
+-			field->ltype_mask = 0xF; /* Match only IPv4 */
++			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
+ 			keyoff_marker = false;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_IPV6:
+@@ -3990,7 +3994,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+ 					field->bytesm1 = 15; /* DIP,16 bytes */
+ 				}
+ 			}
+-			field->ltype_mask = 0xF; /* Match only IPv6 */
++			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
+ 			break;
+ 		case NIX_FLOW_KEY_TYPE_TCP:
+ 		case NIX_FLOW_KEY_TYPE_UDP:
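+The NPC_LT_LC_IP*_MATCH_MSK macros introduced above exploit the enum layout guaranteed by the npc.h hunk: the plain and extended ltype codes differ only in bit 0, so clearing that bit in the match mask catches both with a single value/mask pair. A userspace sketch using the post-patch ltype values (2/3 for IPv4, 4/5 for IPv6):
+
+#include <stdio.h>
+
+enum { LT_IP = 2, LT_IP_OPT = 3, LT_IP6 = 4, LT_IP6_EXT = 5 };
+
+#define MATCH_MSK(a, b) ((~((a) ^ (b))) & 0xf)
+
+int main(void)
+{
+	int msk = MATCH_MSK(LT_IP6, LT_IP6_EXT);	/* 0xE */
+
+	/* IP6 and IP6_EXT compare equal under the mask... */
+	printf("msk=0x%x ip6_ext=%d\n", msk,
+	       (LT_IP6_EXT & msk) == (LT_IP6 & msk));	/* 1 */
+	/* ...while plain IPv4 does not. */
+	printf("ip4=%d\n", (LT_IP & msk) == (LT_IP6 & msk));	/* 0 */
+	return 0;
+}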
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 31aebeb2e2858..25989c79c92e6 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *of_node;
+ 	struct mtk_star_priv *priv;
++	struct phy_device *phydev;
+ 	struct net_device *ndev;
+ 	struct device *dev;
+ 	void __iomem *base;
+@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
+ 	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ 	netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+ 
++	phydev = of_phy_find_device(priv->phy_node);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++
+ 	return devm_register_netdev(dev, ndev);
+ }
+ 
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index 6453c92f0fa7c..7fa1820db9cce 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
+ 		netif_dbg(ks, intr, ks->netdev,
+ 			  "%s: txspace %d\n", __func__, tx_space);
+ 
+-		spin_lock(&ks->statelock);
++		spin_lock_bh(&ks->statelock);
+ 		ks->tx_space = tx_space;
+ 		if (netif_queue_stopped(ks->netdev))
+ 			netif_wake_queue(ks->netdev);
+-		spin_unlock(&ks->statelock);
++		spin_unlock_bh(&ks->statelock);
+ 	}
+ 
+ 	if (status & IRQ_SPIBEI) {
+@@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev)
+ 	ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
+ 
+ 	ks->queued_len = 0;
++	ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+ 	netif_start_queue(ks->netdev);
+ 
+ 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+@@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
+ 
+ 	/* schedule work to do the actual set of the data if needed */
+ 
+-	spin_lock(&ks->statelock);
++	spin_lock_bh(&ks->statelock);
+ 
+ 	if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+ 		memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+ 		schedule_work(&ks->rxctrl_work);
+ 	}
+ 
+-	spin_unlock(&ks->statelock);
++	spin_unlock_bh(&ks->statelock);
+ }
+ 
+ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+@@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ 	int ret;
+ 
+ 	ks->netdev = netdev;
+-	ks->tx_space = 6144;
+ 
+ 	ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ 	ret = PTR_ERR_OR_ZERO(ks->gpio);
+diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
+index 670c1de966db8..3062cc0f91992 100644
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work)
+ 
+ 	tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
+ 
+-	spin_lock(&ks->statelock);
++	spin_lock_bh(&ks->statelock);
+ 	ks->queued_len -= dequeued_len;
+ 	ks->tx_space = tx_space;
+-	spin_unlock(&ks->statelock);
++	spin_unlock_bh(&ks->statelock);
+ 
+ 	ks8851_unlock_spi(ks, &flags);
+ }
+diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
+index a838b61cd844b..a35528497a576 100644
+--- a/drivers/net/phy/microchip_t1.c
++++ b/drivers/net/phy/microchip_t1.c
+@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
+ 	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ 				lan87xx_cable_test_report_trans(detect));
+ 
+-	return 0;
++	return phy_init_hw(phydev);
+ }
+ 
+ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index fe380fe196e7b..996dee54d751d 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -70,6 +70,7 @@
+ #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
+ 
+ #define PPP_PROTO_LEN	2
++#define PPP_LCP_HDRLEN	4
+ 
+ /*
+  * An instance of /dev/ppp can be associated with either a ppp
+@@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
+ 	return ret;
+ }
+ 
++static bool ppp_check_packet(struct sk_buff *skb, size_t count)
++{
++	/* LCP packets must include the LCP header, which is 4 bytes long:
++	 * 1-byte code, 1-byte identifier, and 2-byte length.
++	 */
++	return get_unaligned_be16(skb->data) != PPP_LCP ||
++		count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
++}
++
+ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ 			 size_t count, loff_t *ppos)
+ {
+@@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
+ 		kfree_skb(skb);
+ 		goto out;
+ 	}
++	ret = -EINVAL;
++	if (unlikely(!ppp_check_packet(skb, count))) {
++		kfree_skb(skb);
++		goto out;
++	}
+ 
+ 	switch (pf->kind) {
+ 	case INTERFACE:
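+ppp_check_packet() above rejects userspace writes that claim protocol PPP_LCP but are too short to carry the 4-byte LCP header (code, identifier, 16-bit length) after the 2-byte protocol field. A standalone userspace sketch of the same predicate:
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#define PPP_LCP		0xc021
+#define PPP_PROTO_LEN	2
+#define PPP_LCP_HDRLEN	4
+
+static bool packet_ok(const uint8_t *data, size_t count)
+{
+	uint16_t proto = (uint16_t)data[0] << 8 | data[1]; /* big endian */
+
+	return proto != PPP_LCP || count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
+}
+
+int main(void)
+{
+	uint8_t runt[] = { 0xc0, 0x21, 0x01 };		/* truncated LCP */
+	uint8_t ok[]   = { 0xc0, 0x21, 0x01, 0x00, 0x00, 0x04 };
+
+	printf("%d %d\n", packet_ok(runt, sizeof(runt)),
+	       packet_ok(ok, sizeof(ok)));		/* 0 1 */
+	return 0;
+}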
+diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
+index 0ba714ca5185c..4b8528206cc8a 100644
+--- a/drivers/net/wireguard/allowedips.c
++++ b/drivers/net/wireguard/allowedips.c
+@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
+ 	if (bits == 32) {
+ 		*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
+ 	} else if (bits == 128) {
+-		((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
+-		((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
++		((u64 *)dst)[0] = get_unaligned_be64(src);
++		((u64 *)dst)[1] = get_unaligned_be64(src + 8);
+ 	}
+ }
+ 
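+The allowedips.c hunk above replaces dereferences of a cast __be64 pointer with get_unaligned_be64(): the source bytes may sit at any alignment, and a direct 64-bit load is undefined behaviour (or a fault) on strict-alignment CPUs. A portable userspace sketch of what the unaligned helper does, assembling the value byte by byte:
+
+#include <stdint.h>
+#include <stdio.h>
+
+static uint64_t get_unaligned_be64_sketch(const uint8_t *p)
+{
+	uint64_t v = 0;
+
+	for (int i = 0; i < 8; i++)
+		v = v << 8 | p[i];	/* big-endian byte order */
+	return v;
+}
+
+int main(void)
+{
+	uint8_t buf[17] = { 0 };
+
+	buf[1] = 0x12;			/* deliberately misaligned start */
+	printf("0x%llx\n",
+	       (unsigned long long)get_unaligned_be64_sketch(buf + 1));
+	return 0;
+}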
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index 1ea4f874e367e..7eb76724b3edb 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+  */
+ static inline int wg_cpumask_next_online(int *last_cpu)
+ {
+-	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
++	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
+ 	if (cpu >= nr_cpu_ids)
+ 		cpu = cpumask_first(cpu_online_mask);
+-	*last_cpu = cpu;
++	WRITE_ONCE(*last_cpu, cpu);
+ 	return cpu;
+ }
+ 
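+The READ_ONCE()/WRITE_ONCE() annotations above mark *last_cpu as shared between threads without a lock, forcing each access to be a single untorn load or store. A rough userspace analogue using C11 relaxed atomics (simplified: it wraps with a plain counter rather than a real cpumask):
+
+#include <stdatomic.h>
+#include <stdio.h>
+
+static int next_online(atomic_int *last_cpu, int nr_cpus)
+{
+	int cpu = atomic_load_explicit(last_cpu, memory_order_relaxed) + 1;
+
+	if (cpu >= nr_cpus)
+		cpu = 0;		/* wrap like cpumask_first() */
+	atomic_store_explicit(last_cpu, cpu, memory_order_relaxed);
+	return cpu;
+}
+
+int main(void)
+{
+	atomic_int last = 3;
+
+	printf("%d %d\n", next_online(&last, 4), next_online(&last, 4));
+	return 0;
+}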
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index 0d48e0f4a1ba3..26e09c30d596c 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
+ {
+ 	struct sk_buff *skb;
+ 
+-	if (skb_queue_empty(&peer->staged_packet_queue)) {
++	if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
+ 		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
+ 				GFP_ATOMIC);
+ 		if (unlikely(!skb))
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 2c6b99402df8a..3a3f9b5018ca4 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -396,10 +396,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ 	if (!config->base_dev)
+ 		return -EINVAL;
+ 
+-	if (config->type == NVMEM_TYPE_FRAM)
+-		bin_attr_nvmem_eeprom_compat.attr.name = "fram";
+-
+ 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
++	if (config->type == NVMEM_TYPE_FRAM)
++		nvmem->eeprom.attr.name = "fram";
+ 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
+ 	nvmem->eeprom.size = nvmem->size;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -463,7 +462,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
+ 						    "%s@%x,%x", entry->name,
+ 						    entry->offset,
+ 						    entry->bit_offset);
+-		attrs[i].attr.mode = 0444;
++		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
+ 		attrs[i].size = entry->bytes;
+ 		attrs[i].read = &nvmem_cell_attr_read;
+ 		attrs[i].private = entry;
+diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
+index 33678d0af2c24..6c2f80e166e28 100644
+--- a/drivers/nvmem/meson-efuse.c
++++ b/drivers/nvmem/meson-efuse.c
+@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset,
+ 			    void *val, size_t bytes)
+ {
+ 	struct meson_sm_firmware *fw = context;
++	int ret;
+ 
+-	return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+-				  bytes, 0, 0, 0);
++	ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
++				 bytes, 0, 0, 0);
++
++	return ret < 0 ? ret : 0;
+ }
+ 
+ static int meson_efuse_write(void *context, unsigned int offset,
+ 			     void *val, size_t bytes)
+ {
+ 	struct meson_sm_firmware *fw = context;
++	int ret;
++
++	ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
++				  bytes, 0, 0, 0);
+ 
+-	return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+-				   bytes, 0, 0, 0);
++	return ret < 0 ? ret : 0;
+ }
+ 
+ static const struct of_device_id meson_efuse_match[] = {
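+Both meson-efuse hunks above enforce the nvmem reg_read/reg_write convention: the callback must return 0 on success and a negative errno on failure, not the byte count that the secure-monitor call reports. A tiny userspace sketch of the squashing, with a stub standing in for meson_sm_call_read():
+
+#include <stdio.h>
+
+/* stands in for meson_sm_call_read(), which returns bytes or -errno */
+static int sm_call_read_stub(void) { return 16; }
+
+static int efuse_read(void)
+{
+	int ret = sm_call_read_stub();
+
+	return ret < 0 ? ret : 0;	/* squash byte counts to 0 */
+}
+
+int main(void)
+{
+	printf("%d\n", efuse_read());	/* 0 */
+	return 0;
+}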
+diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c
+index 752d0bf4445ee..7f907c5a445e7 100644
+--- a/drivers/nvmem/rmem.c
++++ b/drivers/nvmem/rmem.c
+@@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset,
+ 
+ 	memunmap(addr);
+ 
+-	return count;
++	if (count < 0)
++		return count;
++
++	return count == bytes ? 0 : -EIO;
+ }
+ 
+ static int rmem_probe(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 7d345009c3270..5574cea0c2ef3 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3304,6 +3304,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+ 		},
+ 	 .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
+ 	},
++	{ }
+ };
+ 
+ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
+index de9121ef4216b..d2cb4271a1cad 100644
+--- a/drivers/pmdomain/qcom/rpmhpd.c
++++ b/drivers/pmdomain/qcom/rpmhpd.c
+@@ -40,6 +40,7 @@
+  * @addr:		Resource address as looked up using resource name from
+  *			cmd-db
+  * @state_synced:	Indicator that sync_state has been invoked for the rpmhpd resource
++ * @skip_retention_level: Indicate that retention level should not be used for the power domain
+  */
+ struct rpmhpd {
+ 	struct device	*dev;
+@@ -56,6 +57,7 @@ struct rpmhpd {
+ 	const char	*res_name;
+ 	u32		addr;
+ 	bool		state_synced;
++	bool            skip_retention_level;
+ };
+ 
+ struct rpmhpd_desc {
+@@ -173,6 +175,7 @@ static struct rpmhpd mxc = {
+ 	.pd = { .name = "mxc", },
+ 	.peer = &mxc_ao,
+ 	.res_name = "mxc.lvl",
++	.skip_retention_level = true,
+ };
+ 
+ static struct rpmhpd mxc_ao = {
+@@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = {
+ 	.active_only = true,
+ 	.peer = &mxc,
+ 	.res_name = "mxc.lvl",
++	.skip_retention_level = true,
+ };
+ 
+ static struct rpmhpd nsp = {
+@@ -819,6 +823,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+ 		return -EINVAL;
+ 
+ 	for (i = 0; i < rpmhpd->level_count; i++) {
++		if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION)
++			continue;
++
+ 		rpmhpd->level[i] = buf[i];
+ 
+ 		/* Remember the first corner with non-zero level */
+diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
+index e358ac5b45097..96a524772549e 100644
+--- a/drivers/spi/spi-axi-spi-engine.c
++++ b/drivers/spi/spi-axi-spi-engine.c
+@@ -164,16 +164,20 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
+ }
+ 
+ static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
+-				 int delay_ns, u32 sclk_hz)
++				 int delay_ns, int inst_ns, u32 sclk_hz)
+ {
+ 	unsigned int t;
+ 
+-	/* negative delay indicates error, e.g. from spi_delay_to_ns() */
+-	if (delay_ns <= 0)
++	/*
++	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
++	 * delay is less than the instruction execution time, there is no need
++	 * for an extra sleep instruction since the instruction execution time
++	 * will already cover the required delay.
++	 */
++	if (delay_ns < 0 || delay_ns <= inst_ns)
+ 		return;
+ 
+-	/* rounding down since executing the instruction adds a couple of ticks delay */
+-	t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
++	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
+ 	while (t) {
+ 		unsigned int n = min(t, 256U);
+ 
+@@ -220,10 +224,16 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
+ 	struct spi_device *spi = msg->spi;
+ 	struct spi_controller *host = spi->controller;
+ 	struct spi_transfer *xfer;
+-	int clk_div, new_clk_div;
++	int clk_div, new_clk_div, inst_ns;
+ 	bool keep_cs = false;
+ 	u8 bits_per_word = 0;
+ 
++	/*
++	 * Take into account instruction execution time for more accurate sleep
++	 * times, especially when the delay is small.
++	 */
++	inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);
++
+ 	clk_div = 1;
+ 
+ 	spi_engine_program_add_cmd(p, dry,
+@@ -252,7 +262,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
+ 
+ 		spi_engine_gen_xfer(p, dry, xfer);
+ 		spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
+-				     xfer->effective_speed_hz);
++				     inst_ns, xfer->effective_speed_hz);
+ 
+ 		if (xfer->cs_change) {
+ 			if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+@@ -262,7 +272,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
+ 					spi_engine_gen_cs(p, dry, spi, false);
+ 
+ 				spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
+-					&xfer->cs_change_delay, xfer),
++					&xfer->cs_change_delay, xfer), inst_ns,
+ 					xfer->effective_speed_hz);
+ 
+ 				if (!list_next_entry(xfer, transfer_list)->cs_off)
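+The spi-engine hunks above subtract one instruction time from the requested delay before converting it to SCLK ticks, and round up instead of down so the generated sleep is never shorter than requested. A userspace sketch of the corrected conversion, with illustrative timings:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define NSEC_PER_SEC 1000000000ULL
+
+static unsigned int sleep_ticks(int64_t delay_ns, int64_t inst_ns,
+				uint32_t sclk_hz)
+{
+	if (delay_ns < 0 || delay_ns <= inst_ns)
+		return 0;	/* error, or covered by the instruction */
+
+	/* DIV_ROUND_UP of the remaining delay, in SCLK ticks */
+	return (unsigned int)(((uint64_t)(delay_ns - inst_ns) * sclk_hz +
+			       NSEC_PER_SEC - 1) / NSEC_PER_SEC);
+}
+
+int main(void)
+{
+	/* 1000 ns delay, 10 ns instruction time, 50 MHz SCLK: 49.5 ticks
+	 * of remaining delay round up to 50. */
+	printf("%u\n", sleep_ticks(1000, 10, 50000000));
+	return 0;
+}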
+diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
+index bd988f53753e2..031b5795d1060 100644
+--- a/drivers/spi/spi-mux.c
++++ b/drivers/spi/spi-mux.c
+@@ -162,6 +162,7 @@ static int spi_mux_probe(struct spi_device *spi)
+ 	ctlr->bus_num = -1;
+ 	ctlr->dev.of_node = spi->dev.of_node;
+ 	ctlr->must_async = true;
++	ctlr->defer_optimize_message = true;
+ 
+ 	ret = devm_spi_register_controller(&spi->dev, ctlr);
+ 	if (ret)
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index c349d6012625a..9304fd03bf764 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2137,7 +2137,8 @@ static void __spi_unoptimize_message(struct spi_message *msg)
+  */
+ static void spi_maybe_unoptimize_message(struct spi_message *msg)
+ {
+-	if (!msg->pre_optimized && msg->optimized)
++	if (!msg->pre_optimized && msg->optimized &&
++	    !msg->spi->controller->defer_optimize_message)
+ 		__spi_unoptimize_message(msg);
+ }
+ 
+@@ -4285,6 +4286,11 @@ static int __spi_optimize_message(struct spi_device *spi,
+ static int spi_maybe_optimize_message(struct spi_device *spi,
+ 				      struct spi_message *msg)
+ {
++	if (spi->controller->defer_optimize_message) {
++		msg->spi = spi;
++		return 0;
++	}
++
+ 	if (msg->pre_optimized)
+ 		return 0;
+ 
+@@ -4315,6 +4321,13 @@ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
+ {
+ 	int ret;
+ 
++	/*
++	 * Pre-optimization is not supported and optimization is deferred e.g.
++	 * when using spi-mux.
++	 */
++	if (spi->controller->defer_optimize_message)
++		return 0;
++
+ 	ret = __spi_optimize_message(spi, msg);
+ 	if (ret)
+ 		return ret;
+@@ -4341,6 +4354,9 @@ EXPORT_SYMBOL_GPL(spi_optimize_message);
+  */
+ void spi_unoptimize_message(struct spi_message *msg)
+ {
++	if (msg->spi->controller->defer_optimize_message)
++		return;
++
+ 	__spi_unoptimize_message(msg);
+ 	msg->pre_optimized = false;
+ }
+@@ -4423,8 +4439,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
+ 
+ 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+ 
+-	spi_maybe_unoptimize_message(message);
+-
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_async);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index f63cdd6794419..8fde71138c5d7 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1560,6 +1560,7 @@ static void imx_uart_shutdown(struct uart_port *port)
+ 	struct imx_port *sport = (struct imx_port *)port;
+ 	unsigned long flags;
+ 	u32 ucr1, ucr2, ucr4, uts;
++	int loops;
+ 
+ 	if (sport->dma_is_enabled) {
+ 		dmaengine_terminate_sync(sport->dma_chan_tx);
+@@ -1622,6 +1623,56 @@ static void imx_uart_shutdown(struct uart_port *port)
+ 	ucr4 &= ~UCR4_TCEN;
+ 	imx_uart_writel(sport, ucr4, UCR4);
+ 
++	/*
++	 * We have to ensure the tx state machine ends up in OFF. This
++	 * is especially important for rs485 where we must not leave
++	 * the RTS signal high, blocking the bus indefinitely.
++	 *
++	 * All interrupts are now disabled, so imx_uart_stop_tx() will
++	 * no longer be called from imx_uart_transmit_buffer(). It may
++	 * still be called via the hrtimers, and if those are in play,
++	 * we have to honour the delays.
++	 */
++	if (sport->tx_state == WAIT_AFTER_RTS || sport->tx_state == SEND)
++		imx_uart_stop_tx(port);
++
++	/*
++	 * In many cases (rs232 mode, or if tx_state was
++	 * WAIT_AFTER_RTS, or if tx_state was SEND and there is no
++	 * delay_rts_after_send), this will have moved directly to
++	 * OFF. In rs485 mode, tx_state might already have been
++	 * WAIT_AFTER_SEND and the hrtimer thus already started, or
++	 * the above imx_uart_stop_tx() call could have started it. In
++	 * those cases, we have to wait for the hrtimer to fire and
++	 * complete the transition to OFF.
++	 */
++	loops = port->rs485.flags & SER_RS485_ENABLED ?
++		port->rs485.delay_rts_after_send : 0;
++	while (sport->tx_state != OFF && loops--) {
++		uart_port_unlock_irqrestore(&sport->port, flags);
++		msleep(1);
++		uart_port_lock_irqsave(&sport->port, &flags);
++	}
++
++	if (sport->tx_state != OFF) {
++		dev_warn(sport->port.dev, "unexpected tx_state %d\n",
++			 sport->tx_state);
++		/*
++		 * This machine may be busted, but ensure the RTS
++		 * signal is inactive in order not to block other
++		 * devices.
++		 */
++		if (port->rs485.flags & SER_RS485_ENABLED) {
++			ucr2 = imx_uart_readl(sport, UCR2);
++			if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
++				imx_uart_rts_active(sport, &ucr2);
++			else
++				imx_uart_rts_inactive(sport, &ucr2);
++			imx_uart_writel(sport, ucr2, UCR2);
++		}
++		sport->tx_state = OFF;
++	}
++
+ 	uart_port_unlock_irqrestore(&sport->port, flags);
+ 
+ 	clk_disable_unprepare(sport->clk_per);
+diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
+index 19f0a305cc430..3b4206e815fe9 100644
+--- a/drivers/tty/serial/ma35d1_serial.c
++++ b/drivers/tty/serial/ma35d1_serial.c
+@@ -688,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev)
+ 	struct uart_ma35d1_port *up;
+ 	int ret = 0;
+ 
+-	if (pdev->dev.of_node) {
+-		ret = of_alias_get_id(pdev->dev.of_node, "serial");
+-		if (ret < 0) {
+-			dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
+-			return ret;
+-		}
++	if (!pdev->dev.of_node)
++		return -ENODEV;
++
++	ret = of_alias_get_id(pdev->dev.of_node, "serial");
++	if (ret < 0) {
++		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret);
++		return ret;
+ 	}
+ 	up = &ma35d1serial_ports[ret];
+ 	up->port.line = ret;
+diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
+index 8944548c30fa1..c532416aec229 100644
+--- a/drivers/ufs/core/ufs-mcq.c
++++ b/drivers/ufs/core/ufs-mcq.c
+@@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
+  * @hba: per adapter instance
+  * @req: pointer to the request to be issued
+  *
+- * Return: the hardware queue instance on which the request would
+- * be queued.
++ * Return: the hardware queue instance on which the request will be or has
++ * been queued. %NULL if the request has already been freed.
+  */
+ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ 					 struct request *req)
+ {
+-	u32 utag = blk_mq_unique_tag(req);
+-	u32 hwq = blk_mq_unique_tag_to_hwq(utag);
++	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
+ 
+-	return &hba->uhq[hwq];
++	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
+ }
+ 
+ /**
+@@ -515,6 +514,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+ 		if (!cmd)
+ 			return -EINVAL;
+ 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
++		if (!hwq)
++			return 0;
+ 	} else {
+ 		hwq = hba->dev_cmd_queue;
+ 	}
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index f7d04f7c0017d..ad192b74536a2 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -6506,6 +6506,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
+ 	/* Release cmd in MCQ mode if abort succeeds */
+ 	if (is_mcq_enabled(hba) && (*ret == 0)) {
+ 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
++		if (!hwq)
++			return 0;
+ 		spin_lock_irqsave(&hwq->cq_lock, flags);
+ 		if (ufshcd_cmd_inflight(lrbp->cmd))
+ 			ufshcd_release_scsi_cmd(hba, lrbp);
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 7f8d33f92ddb5..847dd32c0f5e2 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 	if (ifp->desc.bNumEndpoints >= num_ep)
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 
++	/* Save a copy of the descriptor and use it instead of the original */
++	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++	memcpy(&endpoint->desc, d, n);
++	d = &endpoint->desc;
++
++	/* Clear the reserved bits in bEndpointAddress */
++	i = d->bEndpointAddress &
++			(USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);
++	if (i != d->bEndpointAddress) {
++		dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n",
++		    cfgno, inum, asnum, d->bEndpointAddress, i);
++		endpoint->desc.bEndpointAddress = i;
++	}
++
+ 	/* Check for duplicate endpoint addresses */
+ 	if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ 		dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		}
+ 	}
+ 
+-	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++	/* Accept this endpoint */
+ 	++ifp->desc.bNumEndpoints;
+-
+-	memcpy(&endpoint->desc, d, n);
+ 	INIT_LIST_HEAD(&endpoint->urb_list);
+ 
+ 	/*
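+The usb_parse_endpoint() hunk above copies the descriptor before validation and clears the reserved bits 4..6 of bEndpointAddress, keeping only the direction bit and the 4-bit endpoint number (the mask values below are the real USB ones). A minimal userspace sketch of the sanitizing:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define USB_ENDPOINT_NUMBER_MASK 0x0f
+#define USB_ENDPOINT_DIR_MASK    0x80
+
+int main(void)
+{
+	uint8_t bad = 0xf1;	/* IN, ep 1, with reserved bits 4..6 set */
+	uint8_t fixed = bad & (USB_ENDPOINT_DIR_MASK |
+			       USB_ENDPOINT_NUMBER_MASK);
+
+	printf("0x%02x -> 0x%02x\n", bad, fixed);	/* 0xf1 -> 0x81 */
+	return 0;
+}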
+diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
+index f1a499ee482c3..763e4122ed5b3 100644
+--- a/drivers/usb/core/of.c
++++ b/drivers/usb/core/of.c
+@@ -84,9 +84,12 @@ static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
+ 	if (of_graph_is_present(np))
+ 		return true;
+ 
+-	for_each_child_of_node(np, child)
+-		if (of_property_present(child, "reg"))
++	for_each_child_of_node(np, child) {
++		if (of_property_present(child, "reg")) {
++			of_node_put(child);
+ 			return true;
++		}
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index b4783574b8e66..13171454f9591 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+ 	  USB_QUIRK_DELAY_CTRL_MSG },
+ 
++	/* START BP-850k Printer */
++	{ USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF },
++
+ 	/* MIDI keyboard WORLDE MINI */
+ 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 9ef821ca2fc71..052852f801467 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -54,6 +54,10 @@
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+ #define PCI_DEVICE_ID_INTEL_ARLH_PCH		0x777e
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
++#define PCI_DEVICE_ID_INTEL_PTLH		0xe332
++#define PCI_DEVICE_ID_INTEL_PTLH_PCH		0xe37e
++#define PCI_DEVICE_ID_INTEL_PTLU		0xe432
++#define PCI_DEVICE_ID_INTEL_PTLU_PCH		0xe47e
+ #define PCI_DEVICE_ID_AMD_MR			0x163a
+ 
+ #define PCI_INTEL_BXT_DSM_GUID		"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+@@ -430,6 +434,10 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
+ 	{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
++	{ PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) },
++	{ PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) },
++	{ PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
++	{ PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) },
+ 
+ 	{ PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
+ 	{ PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) },
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index ce3cfa1f36f51..0e7c1e947c0a0 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy)
+ 	int ret;
+ 	char *str;
+ 	char *copy = *s_copy;
++
+ 	ret = strlen(s);
+ 	if (ret > USB_MAX_STRING_LEN)
+ 		return -EOVERFLOW;
++	if (ret < 1)
++		return -EINVAL;
+ 
+ 	if (copy) {
+ 		str = copy;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 8579603edaff1..26d6ac940b694 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1125,10 +1125,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 			xhci_dbg(xhci, "Start the secondary HCD\n");
+ 			retval = xhci_run(xhci->shared_hcd);
+ 		}
+-
++		if (retval)
++			return retval;
++		/*
++		 * Resume roothubs unconditionally as PORTSC change bits are not
++		 * immediately visible after xHC reset.
++		 */
+ 		hcd->state = HC_STATE_SUSPENDED;
+-		if (xhci->shared_hcd)
++
++		if (xhci->shared_hcd) {
+ 			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
++			usb_hcd_resume_root_hub(xhci->shared_hcd);
++		}
++		usb_hcd_resume_root_hub(hcd);
++
+ 		goto done;
+ 	}
+ 
+@@ -1152,7 +1162,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 
+ 	xhci_dbc_resume(xhci);
+ 
+- done:
+ 	if (retval == 0) {
+ 		/*
+ 		 * Resume roothubs only if there are pending events.
+@@ -1178,6 +1187,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+ 			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
++done:
+ 	/*
+ 	 * If system is subject to the Quirk, Compliance Mode Timer needs to
+ 	 * be re-initialized Always after a system resume. Ports are subject
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 8b0308d84270f..85697466b1476 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port)
+ 	kfree(mos7840_port);
+ }
+ 
++static int mos7840_suspend(struct usb_serial *serial, pm_message_t message)
++{
++	struct moschip_port *mos7840_port;
++	struct usb_serial_port *port;
++	int i;
++
++	for (i = 0; i < serial->num_ports; ++i) {
++		port = serial->port[i];
++		if (!tty_port_initialized(&port->port))
++			continue;
++
++		mos7840_port = usb_get_serial_port_data(port);
++
++		usb_kill_urb(mos7840_port->read_urb);
++		mos7840_port->read_urb_busy = false;
++	}
++
++	return 0;
++}
++
++static int mos7840_resume(struct usb_serial *serial)
++{
++	struct moschip_port *mos7840_port;
++	struct usb_serial_port *port;
++	int res;
++	int i;
++
++	for (i = 0; i < serial->num_ports; ++i) {
++		port = serial->port[i];
++		if (!tty_port_initialized(&port->port))
++			continue;
++
++		mos7840_port = usb_get_serial_port_data(port);
++
++		mos7840_port->read_urb_busy = true;
++		res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO);
++		if (res)
++			mos7840_port->read_urb_busy = false;
++	}
++
++	return 0;
++}
++
+ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.driver = {
+ 		   .owner = THIS_MODULE,
+@@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.port_probe = mos7840_port_probe,
+ 	.port_remove = mos7840_port_remove,
+ 	.read_bulk_callback = mos7840_bulk_in_callback,
++	.suspend = mos7840_suspend,
++	.resume = mos7840_resume,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8a5846d4adf67..311040f9b9352 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
+ 	  .driver_info = NCTRL(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff),	/* Telit FN912 */
++	  .driver_info = RSVD(0) | NCTRL(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff),	/* Telit FN912 */
++	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),	/* Telit LE910-S1 (RNDIS) */
+ 	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
+@@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff),	/* Telit LE910R1 (ECM) */
+ 	  .driver_info = NCTRL(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff),	/* Telit generic core-dump device */
++	  .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
+ 	  .driver_info = NCTRL(0) | ZLP },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200),				/* Telit LE910S1 flashing device */
+@@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00),
++	  .driver_info = NCTRL(2) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
++	  .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ 	  .driver_info = RSVD(1) | RSVD(4) },
+@@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff),			/* Foxconn T99W373 MBIM */
+ 	  .driver_info = RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),			/* Foxconn T99W651 RNDIS */
++	  .driver_info = RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
+@@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),			/* Rolling RW135-GL (laptop MBIM) */
+ 	  .driver_info = RSVD(5) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff),			/* Rolling RW350-GL (laptop MBIM) */
++	  .driver_info = RSVD(5) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for Global SKU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for China SKU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for SA */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for EU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for NA */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for China EDU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global EDU */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index d8c95cc16be81..ea36d2139590f 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
+ 	struct vfio_pci_hot_reset_info hdr;
+ 	struct vfio_pci_fill_info fill = {};
+ 	bool slot = false;
+-	int ret, count;
++	int ret, count = 0;
+ 
+ 	if (copy_from_user(&hdr, arg, minsz))
+ 		return -EFAULT;
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 06cdf1a8a16f6..89b11336a8369 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -366,14 +366,14 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
+ 
+ 	if (cachefiles_in_ondemand_mode(cache)) {
+ 		if (!xa_empty(&cache->reqs)) {
+-			rcu_read_lock();
++			xas_lock(&xas);
+ 			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
+ 				if (!cachefiles_ondemand_is_reopening_read(req)) {
+ 					mask |= EPOLLIN;
+ 					break;
+ 				}
+ 			}
+-			rcu_read_unlock();
++			xas_unlock(&xas);
+ 		}
+ 	} else {
+ 		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index 6845a90cdfcce..7b99bd98de75b 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -48,6 +48,7 @@ enum cachefiles_object_state {
+ 	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
+ 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
+ 	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
++	CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
+ };
+ 
+ struct cachefiles_ondemand_info {
+@@ -128,6 +129,7 @@ struct cachefiles_cache {
+ 	unsigned long			req_id_next;
+ 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
+ 	u32				ondemand_id_next;
++	u32				msg_id_next;
+ };
+ 
+ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
+@@ -335,6 +337,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
+ CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
+ CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
+ CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
++CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
+ 
+ static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
+ {
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 89f118d68d125..7e4874f60de10 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -494,7 +494,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		 */
+ 		xas_lock(&xas);
+ 
+-		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
++		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
++		    cachefiles_ondemand_object_is_dropping(object)) {
+ 			xas_unlock(&xas);
+ 			ret = -EIO;
+ 			goto out;
+@@ -504,20 +505,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		smp_mb();
+ 
+ 		if (opcode == CACHEFILES_OP_CLOSE &&
+-			!cachefiles_ondemand_object_is_open(object)) {
++		    !cachefiles_ondemand_object_is_open(object)) {
+ 			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
+ 			xas_unlock(&xas);
+ 			ret = -EIO;
+ 			goto out;
+ 		}
+ 
+-		xas.xa_index = 0;
++		/*
++		 * Cyclically find a free xas to avoid msg_id reuse that would
++		 * cause the daemon to successfully copen a stale msg_id.
++		 */
++		xas.xa_index = cache->msg_id_next;
+ 		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
++		if (xas.xa_node == XAS_RESTART) {
++			xas.xa_index = 0;
++			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
++		}
+ 		if (xas.xa_node == XAS_RESTART)
+ 			xas_set_err(&xas, -EBUSY);
++
+ 		xas_store(&xas, req);
+-		xas_clear_mark(&xas, XA_FREE_MARK);
+-		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++		if (xas_valid(&xas)) {
++			cache->msg_id_next = xas.xa_index + 1;
++			xas_clear_mark(&xas, XA_FREE_MARK);
++			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
++		}
+ 		xas_unlock(&xas);
+ 	} while (xas_nomem(&xas, GFP_KERNEL));
+ 
+@@ -535,7 +548,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 	 * If error occurs after creating the anonymous fd,
+ 	 * cachefiles_ondemand_fd_release() will set object to close.
+ 	 */
+-	if (opcode == CACHEFILES_OP_OPEN)
++	if (opcode == CACHEFILES_OP_OPEN &&
++	    !cachefiles_ondemand_object_is_dropping(object))
+ 		cachefiles_ondemand_set_object_close(object);
+ 	kfree(req);
+ 	return ret;
+@@ -634,8 +648,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
+ 
+ void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
+ {
++	unsigned long index;
++	struct cachefiles_req *req;
++	struct cachefiles_cache *cache;
++
++	if (!object->ondemand)
++		return;
++
+ 	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
+ 			cachefiles_ondemand_init_close_req, NULL);
++
++	if (!object->ondemand->ondemand_id)
++		return;
++
++	/* Cancel all requests for the object that is being dropped. */
++	cache = object->volume->cache;
++	xa_lock(&cache->reqs);
++	cachefiles_ondemand_set_object_dropping(object);
++	xa_for_each(&cache->reqs, index, req) {
++		if (req->object == object) {
++			req->error = -EIO;
++			complete(&req->done);
++			__xa_erase(&cache->reqs, index);
++		}
++	}
++	xa_unlock(&cache->reqs);
++
++	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
++	cancel_work_sync(&object->ondemand->ondemand_work);
+ }
+ 
+ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
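
The hunk above replaces a fixed xa_index of 0 with a cyclic search starting at cache->msg_id_next, so a just-freed msg_id is not handed out again while the daemon may still copen the stale id. A minimal userspace sketch of that allocation pattern, assuming a small fixed-size table (all names here are illustrative, not from the patch):

#include <stdio.h>
#include <stdbool.h>

#define NSLOTS 8

static bool used[NSLOTS];
static unsigned int next_id;

static int alloc_id(void)
{
	for (unsigned int n = 0; n < NSLOTS; n++) {
		unsigned int id = (next_id + n) % NSLOTS;

		if (!used[id]) {
			used[id] = true;
			next_id = id + 1;	/* resume after this slot */
			return (int)id;
		}
	}
	return -1;	/* table full, like -EBUSY in the hunk */
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	used[a] = false;	/* free the first id ... */
	printf("%d %d %d\n", a, b, alloc_id());	/* ... yet 2 is handed out next */
	return 0;
}
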
+diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
+index bcb6173943ee4..4dd8a993c60a8 100644
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
+ 	if (xlen == 0)
+ 		xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
+ 	if (xlen != tlen) {
+-		if (xlen < 0)
++		if (xlen < 0) {
++			ret = xlen;
+ 			trace_cachefiles_vfs_error(object, file_inode(file), xlen,
+ 						   cachefiles_trace_getxattr_error);
++		}
+ 		if (xlen == -EIO)
+ 			cachefiles_io_error_obj(
+ 				object,
+@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
+ 		xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
+ 	if (xlen != len) {
+ 		if (xlen < 0) {
++			ret = xlen;
+ 			trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
+ 						   cachefiles_trace_getxattr_error);
+ 			if (xlen == -EIO)
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 71a8e943a0fa5..66515fbc9dd70 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -355,7 +355,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
+ 	flags &= ~DCACHE_ENTRY_TYPE;
+ 	WRITE_ONCE(dentry->d_flags, flags);
+ 	dentry->d_inode = NULL;
+-	if (dentry->d_flags & DCACHE_LRU_LIST)
++	/*
++	 * The negative counter only tracks dentries on the LRU. Don't inc if
++	 * d_lru is on another list.
++	 */
++	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ 		this_cpu_inc(nr_dentry_negative);
+ }
+ 
+@@ -1844,9 +1848,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+ 
+ 	spin_lock(&dentry->d_lock);
+ 	/*
+-	 * Decrement negative dentry count if it was in the LRU list.
++	 * The negative counter only tracks dentries on the LRU. Don't dec if
++	 * d_lru is on another list.
+ 	 */
+-	if (dentry->d_flags & DCACHE_LRU_LIST)
++	if ((dentry->d_flags &
++	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
+ 		this_cpu_dec(nr_dentry_negative);
+ 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ 	raw_write_seqcount_begin(&dentry->d_seq);
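
Both dcache hunks change a single-bit test into a mask-and-compare: DCACHE_LRU_LIST stays set while a dentry sits on a shrink list, so the negative-dentry counter must only be touched when the LRU bit is set and the shrink bit is clear. A small standalone sketch of the idiom (flag values are made up):

#include <stdio.h>

#define LRU_LIST	(1u << 0)
#define SHRINK_LIST	(1u << 1)

static int on_lru_only(unsigned int flags)
{
	/* Masking both bits and comparing against LRU_LIST rejects
	 * entries that also carry the shrink bit. */
	return (flags & (LRU_LIST | SHRINK_LIST)) == LRU_LIST;
}

int main(void)
{
	printf("%d\n", on_lru_only(LRU_LIST));			/* 1 */
	printf("%d\n", on_lru_only(LRU_LIST | SHRINK_LIST));	/* 0 */
	printf("%d\n", on_lru_only(0));				/* 0 */
	return 0;
}
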
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 63cbda3700ea9..d65dccb44ed59 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -473,6 +473,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
+ 			*((unsigned int *) ptr) = t;
+ 		return len;
+ 	case attr_clusters_in_group:
++		if (!ptr)
++			return 0;
+ 		ret = kstrtouint(skip_spaces(buf), 0, &t);
+ 		if (ret)
+ 			return ret;
+diff --git a/fs/locks.c b/fs/locks.c
+index c360d1992d21f..bdd94c32256f5 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1367,9 +1367,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
+ 		locks_wake_up_blocks(&left->c);
+ 	}
+  out:
++	trace_posix_lock_inode(inode, request, error);
+ 	spin_unlock(&ctx->flc_lock);
+ 	percpu_up_read(&file_rwsem);
+-	trace_posix_lock_inode(inode, request, error);
+ 	/*
+ 	 * Free any unused locks.
+ 	 */
+diff --git a/fs/minix/namei.c b/fs/minix/namei.c
+index d6031acc34f0c..a944a0f17b537 100644
+--- a/fs/minix/namei.c
++++ b/fs/minix/namei.c
+@@ -213,8 +213,7 @@ static int minix_rename(struct mnt_idmap *idmap,
+ 		if (!new_de)
+ 			goto out_dir;
+ 		err = minix_set_link(new_de, new_page, old_inode);
+-		kunmap(new_page);
+-		put_page(new_page);
++		unmap_and_put_page(new_page, new_de);
+ 		if (err)
+ 			goto out_dir;
+ 		inode_set_ctime_current(new_inode);
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index d748d9dce74e4..69ac866332acd 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -384,11 +384,39 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *dir,
+ 
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop)
+ {
+-	struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop);
++	struct folio *folio;
++	struct nilfs_dir_entry *de, *next_de;
++	size_t limit;
++	char *msg;
+ 
++	de = nilfs_get_folio(dir, 0, &folio);
+ 	if (IS_ERR(de))
+ 		return NULL;
+-	return nilfs_next_entry(de);
++
++	limit = nilfs_last_byte(dir, 0);  /* is a multiple of chunk size */
++	if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
++		     !nilfs_match(1, ".", de))) {
++		msg = "missing '.'";
++		goto fail;
++	}
++
++	next_de = nilfs_next_entry(de);
++	/*
++	 * If "next_de" has not reached the end of the chunk, there is
++	 * at least one more record.  Check whether it matches "..".
++	 */
++	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
++		     !nilfs_match(2, "..", next_de))) {
++		msg = "missing '..'";
++		goto fail;
++	}
++	*foliop = folio;
++	return next_de;
++
++fail:
++	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
++	folio_release_kmap(folio, de);
++	return NULL;
+ }
+ 
+ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 6ff35570db813..37b58c04b6b87 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1938,8 +1938,8 @@ require use of the stronger protocol */
+ #define   CIFSSEC_MUST_SEAL	0x40040 /* not supported yet */
+ #define   CIFSSEC_MUST_NTLMSSP	0x80080 /* raw ntlmssp with ntlmv2 */
+ 
+-#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP)
+-#define   CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2)
++#define   CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
++#define   CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL)
+ #define   CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)
+ /*
+  *****************************************************************
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index e7e07891781b3..7d26fdcebbf98 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2051,15 +2051,22 @@ int smb2_tree_connect(struct ksmbd_work *work)
+  * @access:		file access flags
+  * @disposition:	file disposition flags
+  * @may_flags:		set with MAY_ flags
++ * @is_dir:		is creating open flags for directory
+  *
+  * Return:      file open flags
+  */
+ static int smb2_create_open_flags(bool file_present, __le32 access,
+ 				  __le32 disposition,
+-				  int *may_flags)
++				  int *may_flags,
++				  bool is_dir)
+ {
+ 	int oflags = O_NONBLOCK | O_LARGEFILE;
+ 
++	if (is_dir) {
++		access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
++		ksmbd_debug(SMB, "Discard write access to a directory\n");
++	}
++
+ 	if (access & FILE_READ_DESIRED_ACCESS_LE &&
+ 	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
+ 		oflags |= O_RDWR;
+@@ -3167,7 +3174,9 @@ int smb2_open(struct ksmbd_work *work)
+ 
+ 	open_flags = smb2_create_open_flags(file_present, daccess,
+ 					    req->CreateDisposition,
+-					    &may_flags);
++					    &may_flags,
++		req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
++		(file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
+ 
+ 	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+ 		if (open_flags & (O_CREAT | O_TRUNC)) {
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 292f5fd501047..5583214cdf5cf 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -2053,7 +2053,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ 		goto out;
+ 	features = uffdio_api.features;
+ 	ret = -EINVAL;
+-	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++	if (uffdio_api.api != UFFD_API)
+ 		goto err_out;
+ 	ret = -EPERM;
+ 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+@@ -2077,6 +2077,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ 	uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
+ 	uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
+ #endif
++
++	ret = -EINVAL;
++	if (features & ~uffdio_api.features)
++		goto err_out;
++
+ 	uffdio_api.ioctls = UFFD_API_IOCTLS;
+ 	ret = -EFAULT;
+ 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index a4f6f1fecc6f3..f8d89a021abc9 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1976,8 +1976,9 @@ static inline int subsection_map_index(unsigned long pfn)
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+ {
+ 	int idx = subsection_map_index(pfn);
++	struct mem_section_usage *usage = READ_ONCE(ms->usage);
+ 
+-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
++	return usage ? test_bit(idx, usage->subsection_map) : 0;
+ }
+ #else
+ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
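
The fix above snapshots ms->usage once with READ_ONCE() and NULL-checks the snapshot, instead of dereferencing a fresh load that a racing memory hot-remove may have cleared in between. A hedged userspace sketch of the snapshot-then-check pattern (types and names are invented for illustration):

#include <stdio.h>

struct usage { unsigned long map; };

static int section_valid(struct usage *volatile *usage_ptr, int bit)
{
	struct usage *usage = *usage_ptr;	/* single snapshot */

	return usage ? !!(usage->map & (1ul << bit)) : 0;
}

int main(void)
{
	struct usage u = { .map = 0x2 };
	struct usage *volatile p = &u;

	printf("%d\n", section_valid(&p, 1));	/* 1 */
	p = NULL;				/* concurrent unplug */
	printf("%d\n", section_valid(&p, 1));	/* 0, no crash */
	return 0;
}
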
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index d31e59e2e411a..365a649ef10eb 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -352,11 +352,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+  * a good order (that's 1MB if you're using 4kB pages)
+  */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
++#define PREFERRED_MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+ #else
+-#define MAX_PAGECACHE_ORDER	8
++#define PREFERRED_MAX_PAGECACHE_ORDER	8
+ #endif
+ 
++/*
++ * xas_split_alloc() does not support arbitrary orders. This implies no
++ * 512MB THP on ARM64 with 64KB base page size.
++ */
++#define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
++#define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
++
+ /**
+  * mapping_set_large_folios() - Indicate the file supports large folios.
+  * @mapping: The file.
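
The new clamp caps the page-cache folio order at what xas_split_alloc() can handle, MAX_XAS_ORDER = XA_CHUNK_SHIFT * 2 - 1. A quick worked example, assuming the default XA_CHUNK_SHIFT of 6 and arm64 with 64KB base pages, where HPAGE_PMD_ORDER is 13 (a 512MB PMD):

#include <stdio.h>

#define XA_CHUNK_SHIFT	6
#define MAX_XAS_ORDER	(XA_CHUNK_SHIFT * 2 - 1)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int hpage_pmd_order = 13;	/* arm64, 64KB base pages */

	/* 512MB THP (order 13) exceeds what xas_split_alloc() can
	 * split, so the page cache is clamped to order 11. */
	printf("max pagecache order = %d\n",
	       MIN(MAX_XAS_ORDER, hpage_pmd_order));	/* 11 */
	return 0;
}
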
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index c459809efee4f..64a4deb18dd00 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -532,6 +532,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
+  * @queue_empty: signal green light for opportunistically skipping the queue
+  *	for spi_sync transfers.
+  * @must_async: disable all fast paths in the core
++ * @defer_optimize_message: set to true if controller cannot pre-optimize messages
++ *	and needs to defer the optimization step until the message is actually
++ *	being transferred
+  *
+  * Each SPI controller can communicate with one or more @spi_device
+  * children.  These make a small bus, sharing MOSI, MISO and SCK signals
+@@ -775,6 +778,7 @@ struct spi_controller {
+ 	/* Flag for enabling opportunistic skipping of the queue in spi_sync */
+ 	bool			queue_empty;
+ 	bool			must_async;
++	bool			defer_optimize_message;
+ };
+ 
+ static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index f53d608daa013..7b5c33e5d8465 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -344,7 +344,8 @@ static inline swp_entry_t page_swap_entry(struct page *page)
+ }
+ 
+ /* linux/mm/workingset.c */
+-bool workingset_test_recent(void *shadow, bool file, bool *workingset);
++bool workingset_test_recent(void *shadow, bool file, bool *workingset,
++				bool flush);
+ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
+ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
+ void workingset_refault(struct folio *folio, void *shadow);
+diff --git a/include/net/tcx.h b/include/net/tcx.h
+index 04be9377785d7..0a5f40a91c42f 100644
+--- a/include/net/tcx.h
++++ b/include/net/tcx.h
+@@ -13,7 +13,7 @@ struct mini_Qdisc;
+ struct tcx_entry {
+ 	struct mini_Qdisc __rcu *miniq;
+ 	struct bpf_mprog_bundle bundle;
+-	bool miniq_active;
++	u32 miniq_active;
+ 	struct rcu_head rcu;
+ };
+ 
+@@ -124,11 +124,16 @@ static inline void tcx_skeys_dec(bool ingress)
+ 	tcx_dec();
+ }
+ 
+-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
+-					const bool active)
++static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry)
+ {
+ 	ASSERT_RTNL();
+-	tcx_entry(entry)->miniq_active = active;
++	tcx_entry(entry)->miniq_active++;
++}
++
++static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry)
++{
++	ASSERT_RTNL();
++	tcx_entry(entry)->miniq_active--;
+ }
+ 
+ static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
+diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
+index f33d914d8f469..91583690bddc5 100644
+--- a/include/uapi/misc/fastrpc.h
++++ b/include/uapi/misc/fastrpc.h
+@@ -8,11 +8,14 @@
+ #define FASTRPC_IOCTL_ALLOC_DMA_BUFF	_IOWR('R', 1, struct fastrpc_alloc_dma_buf)
+ #define FASTRPC_IOCTL_FREE_DMA_BUFF	_IOWR('R', 2, __u32)
+ #define FASTRPC_IOCTL_INVOKE		_IOWR('R', 3, struct fastrpc_invoke)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_ATTACH	_IO('R', 4)
+ #define FASTRPC_IOCTL_INIT_CREATE	_IOWR('R', 5, struct fastrpc_init_create)
+ #define FASTRPC_IOCTL_MMAP		_IOWR('R', 6, struct fastrpc_req_mmap)
+ #define FASTRPC_IOCTL_MUNMAP		_IOWR('R', 7, struct fastrpc_req_munmap)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_ATTACH_SNS	_IO('R', 8)
++/* This ioctl is only supported with secure device nodes */
+ #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static)
+ #define FASTRPC_IOCTL_MEM_MAP		_IOWR('R', 10, struct fastrpc_mem_map)
+ #define FASTRPC_IOCTL_MEM_UNMAP		_IOWR('R', 11, struct fastrpc_mem_unmap)
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index bdea1a459153c..bea5873d96d15 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -782,8 +782,8 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
+ 	nbuckets = max_t(u32, 2, nbuckets);
+ 	smap->bucket_log = ilog2(nbuckets);
+ 
+-	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+-					 nbuckets, GFP_USER | __GFP_NOWARN);
++	smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,
++					 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);
+ 	if (!smap->buckets) {
+ 		err = -ENOMEM;
+ 		goto free_smap;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 449b9a5d3fe3f..6ad7a61c7617f 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1079,11 +1079,23 @@ const struct bpf_func_proto bpf_snprintf_proto = {
+ 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
+ };
+ 
++struct bpf_async_cb {
++	struct bpf_map *map;
++	struct bpf_prog *prog;
++	void __rcu *callback_fn;
++	void *value;
++	union {
++		struct rcu_head rcu;
++		struct work_struct delete_work;
++	};
++	u64 flags;
++};
++
+ /* BPF map elements can contain 'struct bpf_timer'.
+  * Such map owns all of its BPF timers.
+  * 'struct bpf_timer' is allocated as part of map element allocation
+  * and it's zero initialized.
+- * That space is used to keep 'struct bpf_timer_kern'.
++ * That space is used to keep 'struct bpf_async_kern'.
+  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
+  * remembers 'struct bpf_map *' pointer it's part of.
+  * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
+@@ -1096,17 +1108,17 @@ const struct bpf_func_proto bpf_snprintf_proto = {
+  * freeing the timers when inner map is replaced or deleted by user space.
+  */
+ struct bpf_hrtimer {
++	struct bpf_async_cb cb;
+ 	struct hrtimer timer;
+-	struct bpf_map *map;
+-	struct bpf_prog *prog;
+-	void __rcu *callback_fn;
+-	void *value;
+-	struct rcu_head rcu;
++	atomic_t cancelling;
+ };
+ 
+ /* the actual struct hidden inside uapi struct bpf_timer */
+-struct bpf_timer_kern {
+-	struct bpf_hrtimer *timer;
++struct bpf_async_kern {
++	union {
++		struct bpf_async_cb *cb;
++		struct bpf_hrtimer *timer;
++	};
+ 	/* bpf_spin_lock is used here instead of spinlock_t to make
+ 	 * sure that it always fits into space reserved by struct bpf_timer
+ 	 * regardless of LOCKDEP and spinlock debug flags.
+@@ -1114,19 +1126,23 @@ struct bpf_timer_kern {
+ 	struct bpf_spin_lock lock;
+ } __attribute__((aligned(8)));
+ 
++enum bpf_async_type {
++	BPF_ASYNC_TYPE_TIMER = 0,
++};
++
+ static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
+ 
+ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
+ {
+ 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
+-	struct bpf_map *map = t->map;
+-	void *value = t->value;
++	struct bpf_map *map = t->cb.map;
++	void *value = t->cb.value;
+ 	bpf_callback_t callback_fn;
+ 	void *key;
+ 	u32 idx;
+ 
+ 	BTF_TYPE_EMIT(struct bpf_timer);
+-	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
++	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
+ 	if (!callback_fn)
+ 		goto out;
+ 
+@@ -1155,46 +1171,72 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
+ 	return HRTIMER_NORESTART;
+ }
+ 
+-BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
+-	   u64, flags)
++static void bpf_timer_delete_work(struct work_struct *work)
+ {
+-	clockid_t clockid = flags & (MAX_CLOCKS - 1);
++	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
++
++	/* Cancel the timer and wait for callback to complete if it was running.
++	 * If hrtimer_cancel() can be safely called it's safe to call
++	 * kfree_rcu(t) right after for both preallocated and non-preallocated
++	 * maps.  The async->cb = NULL was already done and no code path can see
++	 * address 't' anymore. Any timer armed for an existing bpf_hrtimer
++	 * before bpf_timer_cancel_and_free() will have been cancelled by now.
++	 */
++	hrtimer_cancel(&t->timer);
++	kfree_rcu(t, cb.rcu);
++}
++
++static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
++			    enum bpf_async_type type)
++{
++	struct bpf_async_cb *cb;
+ 	struct bpf_hrtimer *t;
++	clockid_t clockid;
++	size_t size;
+ 	int ret = 0;
+ 
+-	BUILD_BUG_ON(MAX_CLOCKS != 16);
+-	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
+-	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
+-
+ 	if (in_nmi())
+ 		return -EOPNOTSUPP;
+ 
+-	if (flags >= MAX_CLOCKS ||
+-	    /* similar to timerfd except _ALARM variants are not supported */
+-	    (clockid != CLOCK_MONOTONIC &&
+-	     clockid != CLOCK_REALTIME &&
+-	     clockid != CLOCK_BOOTTIME))
++	switch (type) {
++	case BPF_ASYNC_TYPE_TIMER:
++		size = sizeof(struct bpf_hrtimer);
++		break;
++	default:
+ 		return -EINVAL;
+-	__bpf_spin_lock_irqsave(&timer->lock);
+-	t = timer->timer;
++	}
++
++	__bpf_spin_lock_irqsave(&async->lock);
++	t = async->timer;
+ 	if (t) {
+ 		ret = -EBUSY;
+ 		goto out;
+ 	}
++
+ 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
+-	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
+-	if (!t) {
++	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
++	if (!cb) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	t->value = (void *)timer - map->record->timer_off;
+-	t->map = map;
+-	t->prog = NULL;
+-	rcu_assign_pointer(t->callback_fn, NULL);
+-	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+-	t->timer.function = bpf_timer_cb;
+-	WRITE_ONCE(timer->timer, t);
+-	/* Guarantee the order between timer->timer and map->usercnt. So
++
++	if (type == BPF_ASYNC_TYPE_TIMER) {
++		clockid = flags & (MAX_CLOCKS - 1);
++		t = (struct bpf_hrtimer *)cb;
++
++		atomic_set(&t->cancelling, 0);
++		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
++		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
++		t->timer.function = bpf_timer_cb;
++		cb->value = (void *)async - map->record->timer_off;
++	}
++	cb->map = map;
++	cb->prog = NULL;
++	cb->flags = flags;
++	rcu_assign_pointer(cb->callback_fn, NULL);
++
++	WRITE_ONCE(async->cb, cb);
++	/* Guarantee the order between async->cb and map->usercnt. So
+ 	 * when there are concurrent uref release and bpf timer init, either
+ 	 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
+ 	 * timer or atomic64_read() below returns a zero usercnt.
+@@ -1204,15 +1246,34 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
+ 		/* maps with timers must be either held by user space
+ 		 * or pinned in bpffs.
+ 		 */
+-		WRITE_ONCE(timer->timer, NULL);
+-		kfree(t);
++		WRITE_ONCE(async->cb, NULL);
++		kfree(cb);
+ 		ret = -EPERM;
+ 	}
+ out:
+-	__bpf_spin_unlock_irqrestore(&timer->lock);
++	__bpf_spin_unlock_irqrestore(&async->lock);
+ 	return ret;
+ }
+ 
++BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
++	   u64, flags)
++{
++	clock_t clockid = flags & (MAX_CLOCKS - 1);
++
++	BUILD_BUG_ON(MAX_CLOCKS != 16);
++	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
++	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
++
++	if (flags >= MAX_CLOCKS ||
++	    /* similar to timerfd except _ALARM variants are not supported */
++	    (clockid != CLOCK_MONOTONIC &&
++	     clockid != CLOCK_REALTIME &&
++	     clockid != CLOCK_BOOTTIME))
++		return -EINVAL;
++
++	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
++}
++
+ static const struct bpf_func_proto bpf_timer_init_proto = {
+ 	.func		= bpf_timer_init,
+ 	.gpl_only	= true,
+@@ -1222,7 +1283,7 @@ static const struct bpf_func_proto bpf_timer_init_proto = {
+ 	.arg3_type	= ARG_ANYTHING,
+ };
+ 
+-BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
++BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
+ 	   struct bpf_prog_aux *, aux)
+ {
+ 	struct bpf_prog *prev, *prog = aux->prog;
+@@ -1237,7 +1298,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+-	if (!atomic64_read(&t->map->usercnt)) {
++	if (!atomic64_read(&t->cb.map->usercnt)) {
+ 		/* maps with timers must be either held by user space
+ 		 * or pinned in bpffs. Otherwise timer might still be
+ 		 * running even when bpf prog is detached and user space
+@@ -1246,7 +1307,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ 		ret = -EPERM;
+ 		goto out;
+ 	}
+-	prev = t->prog;
++	prev = t->cb.prog;
+ 	if (prev != prog) {
+ 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
+ 		 * can pick different callback_fn-s within the same prog.
+@@ -1259,9 +1320,9 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
+ 		if (prev)
+ 			/* Drop prev prog refcnt when swapping with new prog */
+ 			bpf_prog_put(prev);
+-		t->prog = prog;
++		t->cb.prog = prog;
+ 	}
+-	rcu_assign_pointer(t->callback_fn, callback_fn);
++	rcu_assign_pointer(t->cb.callback_fn, callback_fn);
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	return ret;
+@@ -1275,7 +1336,7 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = {
+ 	.arg2_type	= ARG_PTR_TO_FUNC,
+ };
+ 
+-BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
++BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
+ {
+ 	struct bpf_hrtimer *t;
+ 	int ret = 0;
+@@ -1287,7 +1348,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
+ 		return -EINVAL;
+ 	__bpf_spin_lock_irqsave(&timer->lock);
+ 	t = timer->timer;
+-	if (!t || !t->prog) {
++	if (!t || !t->cb.prog) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1315,20 +1376,21 @@ static const struct bpf_func_proto bpf_timer_start_proto = {
+ 	.arg3_type	= ARG_ANYTHING,
+ };
+ 
+-static void drop_prog_refcnt(struct bpf_hrtimer *t)
++static void drop_prog_refcnt(struct bpf_async_cb *async)
+ {
+-	struct bpf_prog *prog = t->prog;
++	struct bpf_prog *prog = async->prog;
+ 
+ 	if (prog) {
+ 		bpf_prog_put(prog);
+-		t->prog = NULL;
+-		rcu_assign_pointer(t->callback_fn, NULL);
++		async->prog = NULL;
++		rcu_assign_pointer(async->callback_fn, NULL);
+ 	}
+ }
+ 
+-BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
++BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
+ {
+-	struct bpf_hrtimer *t;
++	struct bpf_hrtimer *t, *cur_t;
++	bool inc = false;
+ 	int ret = 0;
+ 
+ 	if (in_nmi())
+@@ -1340,21 +1402,50 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+-	if (this_cpu_read(hrtimer_running) == t) {
++
++	cur_t = this_cpu_read(hrtimer_running);
++	if (cur_t == t) {
+ 		/* If bpf callback_fn is trying to bpf_timer_cancel()
+ 		 * its own timer the hrtimer_cancel() will deadlock
+-		 * since it waits for callback_fn to finish
++		 * since it waits for callback_fn to finish.
++		 */
++		ret = -EDEADLK;
++		goto out;
++	}
++
++	/* Only account in-flight cancellations when invoked from a timer
++	 * callback, since we want to avoid waiting only if other _callbacks_
++	 * are waiting on us, to avoid introducing lockups. Non-callback paths
++	 * are ok, since nobody would synchronously wait for their completion.
++	 */
++	if (!cur_t)
++		goto drop;
++	atomic_inc(&t->cancelling);
++	/* Need full barrier after relaxed atomic_inc */
++	smp_mb__after_atomic();
++	inc = true;
++	if (atomic_read(&cur_t->cancelling)) {
++		/* We're cancelling timer t, while some other timer callback is
++		 * attempting to cancel us. In such a case, it might be possible
++		 * that timer t belongs to the other callback, or some other
++		 * callback waiting upon it (creating transitive dependencies
++		 * upon us), and we will enter a deadlock if we continue
++		 * cancelling and waiting for it synchronously, since it might
++		 * do the same. Bail!
+ 		 */
+ 		ret = -EDEADLK;
+ 		goto out;
+ 	}
+-	drop_prog_refcnt(t);
++drop:
++	drop_prog_refcnt(&t->cb);
+ out:
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	/* Cancel the timer and wait for associated callback to finish
+ 	 * if it was running.
+ 	 */
+ 	ret = ret ?: hrtimer_cancel(&t->timer);
++	if (inc)
++		atomic_dec(&t->cancelling);
+ 	rcu_read_unlock();
+ 	return ret;
+ }
+@@ -1371,7 +1462,7 @@ static const struct bpf_func_proto bpf_timer_cancel_proto = {
+  */
+ void bpf_timer_cancel_and_free(void *val)
+ {
+-	struct bpf_timer_kern *timer = val;
++	struct bpf_async_kern *timer = val;
+ 	struct bpf_hrtimer *t;
+ 
+ 	/* Performance optimization: read timer->timer without lock first. */
+@@ -1383,7 +1474,7 @@ void bpf_timer_cancel_and_free(void *val)
+ 	t = timer->timer;
+ 	if (!t)
+ 		goto out;
+-	drop_prog_refcnt(t);
++	drop_prog_refcnt(&t->cb);
+ 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
+ 	 * this timer, since it won't be initialized.
+ 	 */
+@@ -1392,25 +1483,39 @@ void bpf_timer_cancel_and_free(void *val)
+ 	__bpf_spin_unlock_irqrestore(&timer->lock);
+ 	if (!t)
+ 		return;
+-	/* Cancel the timer and wait for callback to complete if it was running.
+-	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
+-	 * right after for both preallocated and non-preallocated maps.
+-	 * The timer->timer = NULL was already done and no code path can
+-	 * see address 't' anymore.
+-	 *
+-	 * Check that bpf_map_delete/update_elem() wasn't called from timer
+-	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
+-	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
+-	 * return -1). Though callback_fn is still running on this cpu it's
++	/* We check that bpf_map_delete/update_elem() was called from timer
++	 * callback_fn. In such a case we don't call hrtimer_cancel() (since it
++	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
++	 * just return -1). Though callback_fn is still running on this cpu it's
+ 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
+ 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
+ 	 * since timer->timer = NULL was already done. The timer will be
+ 	 * effectively cancelled because bpf_timer_cb() will return
+ 	 * HRTIMER_NORESTART.
++	 *
++	 * However, it is possible the timer callback_fn calling us armed the
++	 * timer _before_ calling us, such that failing to cancel it here will
++	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
++	 * Therefore, we _need_ to cancel any outstanding timers before we do
++	 * kfree_rcu, even though no more timers can be armed.
++	 *
++	 * Moreover, we need to schedule work even if timer does not belong to
++	 * the calling callback_fn, as on two different CPUs, we can end up in a
++	 * situation where both sides run in parallel, try to cancel one
++	 * another, and we end up waiting on both sides in hrtimer_cancel
++	 * without making forward progress, since timer1 depends on timer2
++	 * callback to finish, and vice versa.
++	 *
++	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
++	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
++	 *
++	 * To avoid these issues, punt to workqueue context when we are in a
++	 * timer callback.
+ 	 */
+-	if (this_cpu_read(hrtimer_running) != t)
+-		hrtimer_cancel(&t->timer);
+-	kfree_rcu(t, rcu);
++	if (this_cpu_read(hrtimer_running))
++		queue_work(system_unbound_wq, &t->cb.delete_work);
++	else
++		bpf_timer_delete_work(&t->cb.delete_work);
+ }
+ 
+ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
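
The refactor above punts the blocking hrtimer_cancel() to a workqueue whenever bpf_timer_cancel_and_free() runs inside any timer callback, since two callbacks cancelling each other's timers on different CPUs could otherwise wait on one another forever. A simplified userspace sketch of that decision, not the kernel implementation (the per-thread flag stands in for the per-CPU hrtimer_running pointer):

#include <stdio.h>
#include <stdbool.h>

static __thread bool in_timer_callback;	/* per-CPU in the kernel */

static void delete_work(void)
{
	/* Runs in workqueue context: safe to block in hrtimer_cancel(). */
	printf("worker: cancel timer, then free\n");
}

static void cancel_and_free(void)
{
	if (in_timer_callback)
		printf("callback context: queue delete_work instead\n");
	else
		delete_work();	/* no callback can be waiting on us */
}

int main(void)
{
	cancel_and_free();		/* direct path */
	in_timer_callback = true;	/* as if inside bpf_timer_cb() */
	cancel_and_free();		/* deferred path */
	return 0;
}
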
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index a04a436af8cc4..dce51bf2d3229 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1805,8 +1805,13 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
+ 			 * The replenish timer needs to be canceled. No
+ 			 * problem if it fires concurrently: boosted threads
+ 			 * are ignored in dl_task_timer().
++			 *
++			 * If the timer callback was running (hrtimer_try_to_cancel == -1),
++			 * it will eventually call put_task_struct().
+ 			 */
+-			hrtimer_try_to_cancel(&p->dl.dl_timer);
++			if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
++			    !dl_server(&p->dl))
++				put_task_struct(p);
+ 			p->dl.dl_throttled = 0;
+ 		}
+ 	} else if (!dl_prio(p->normal_prio)) {
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 213c94d027a4c..98d03b34a8175 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -9106,12 +9106,8 @@ static int detach_tasks(struct lb_env *env)
+ 			break;
+ 
+ 		env->loop++;
+-		/*
+-		 * We've more or less seen every task there is, call it quits
+-		 * unless we haven't found any movable task yet.
+-		 */
+-		if (env->loop > env->loop_max &&
+-		    !(env->flags & LBF_ALL_PINNED))
++		/* We've more or less seen every task there is, call it quits */
++		if (env->loop > env->loop_max)
+ 			break;
+ 
+ 		/* take a breather every nr_migrate tasks */
+@@ -11363,9 +11359,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ 
+ 		if (env.flags & LBF_NEED_BREAK) {
+ 			env.flags &= ~LBF_NEED_BREAK;
+-			/* Stop if we tried all running tasks */
+-			if (env.loop < busiest->nr_running)
+-				goto more_balance;
++			goto more_balance;
+ 		}
+ 
+ 		/*
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 6d503c1c125ef..d6f7e14abd6d2 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1357,14 +1357,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
+  * access frequencies are similar.  This is for minimizing the monitoring
+  * overhead under the dynamically changeable access pattern.  If a merge was
+  * unnecessarily made, later 'kdamond_split_regions()' will revert it.
++ *
++ * The total number of regions could be higher than the user-defined limit,
++ * max_nr_regions, in some cases.  For example, the user can update
++ * max_nr_regions to a number lower than the current number of regions
++ * while DAMON is running.  For such a case, repeat merging until the limit is
++ * met while increasing @threshold up to the possible maximum level.
+  */
+ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ 				  unsigned long sz_limit)
+ {
+ 	struct damon_target *t;
+-
+-	damon_for_each_target(t, c)
+-		damon_merge_regions_of(t, threshold, sz_limit);
++	unsigned int nr_regions;
++	unsigned int max_thres;
++
++	max_thres = c->attrs.aggr_interval /
++		(c->attrs.sample_interval ?  c->attrs.sample_interval : 1);
++	do {
++		nr_regions = 0;
++		damon_for_each_target(t, c) {
++			damon_merge_regions_of(t, threshold, sz_limit);
++			nr_regions += damon_nr_regions(t);
++		}
++		threshold = max(1, threshold * 2);
++	} while (nr_regions > c->attrs.max_nr_regions &&
++			threshold / 2 < max_thres);
+ }
+ 
+ /*
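
kdamond_merge_regions() now loops, doubling the merge threshold each pass, until the region count drops under max_nr_regions or the threshold reaches its ceiling. A toy model of that loop; the halving of regions per pass is purely an assumption for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int nr_regions = 1000, max_nr_regions = 100;
	unsigned int threshold = 1, max_thres = 100;

	do {
		nr_regions /= 2;	/* stand-in for one merge pass */
		printf("threshold %u -> %u regions\n", threshold, nr_regions);
		threshold = threshold ? threshold * 2 : 1;	/* max(1, 2t) */
	} while (nr_regions > max_nr_regions && threshold / 2 < max_thres);

	return 0;
}
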
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 30de18c4fd28a..41bf94f7dbd1b 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3100,7 +3100,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	/* Use the readahead code, even if readahead is disabled */
+-	if (vm_flags & VM_HUGEPAGE) {
++	if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
+ 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
+ 		ra->size = HPAGE_PMD_NR;
+@@ -3207,7 +3207,8 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
+ 	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
+ 		return 0;
+ 
+-	ptep = pte_offset_map(vmf->pmd, vmf->address);
++	ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
++				     &vmf->ptl);
+ 	if (unlikely(!ptep))
+ 		return VM_FAULT_NOPAGE;
+ 
+@@ -4153,6 +4154,9 @@ static void filemap_cachestat(struct address_space *mapping,
+ 	XA_STATE(xas, &mapping->i_pages, first_index);
+ 	struct folio *folio;
+ 
++	/* Flush stats (and potentially sleep) outside the RCU read section. */
++	mem_cgroup_flush_stats_ratelimited(NULL);
++
+ 	rcu_read_lock();
+ 	xas_for_each(&xas, folio, last_index) {
+ 		int order;
+@@ -4216,7 +4220,7 @@ static void filemap_cachestat(struct address_space *mapping,
+ 					goto resched;
+ 			}
+ #endif
+-			if (workingset_test_recent(shadow, true, &workingset))
++			if (workingset_test_recent(shadow, true, &workingset, false))
+ 				cs->nr_recently_evicted += nr_pages;
+ 
+ 			goto resched;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 612558f306f4a..d960151da50c8 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -7609,17 +7609,6 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
+ 
+ 	/* Transfer the charge and the css ref */
+ 	commit_charge(new, memcg);
+-	/*
+-	 * If the old folio is a large folio and is in the split queue, it needs
+-	 * to be removed from the split queue now, in case getting an incorrect
+-	 * split queue in destroy_large_folio() after the memcg of the old folio
+-	 * is cleared.
+-	 *
+-	 * In addition, the old folio is about to be freed after migration, so
+-	 * removing from the split queue a bit earlier seems reasonable.
+-	 */
+-	if (folio_test_large(old) && folio_test_large_rmappable(old))
+-		folio_undo_large_rmappable(old);
+ 	old->memcg_data = 0;
+ }
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 73a052a382f13..8f99fcea99e44 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -415,6 +415,15 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 		if (folio_ref_count(folio) != expected_count)
+ 			return -EAGAIN;
+ 
++		/* Take off deferred split queue while frozen and memcg set */
++		if (folio_test_large(folio) &&
++		    folio_test_large_rmappable(folio)) {
++			if (!folio_ref_freeze(folio, expected_count))
++				return -EAGAIN;
++			folio_undo_large_rmappable(folio);
++			folio_ref_unfreeze(folio, expected_count);
++		}
++
+ 		/* No turning back from here */
+ 		newfolio->index = folio->index;
+ 		newfolio->mapping = folio->mapping;
+@@ -433,6 +442,10 @@ int folio_migrate_mapping(struct address_space *mapping,
+ 		return -EAGAIN;
+ 	}
+ 
++	/* Take off deferred split queue while frozen and memcg set */
++	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
++		folio_undo_large_rmappable(folio);
++
+ 	/*
+ 	 * Now we know that no one else is looking at the folio:
+ 	 * no turning back from here.
+diff --git a/mm/readahead.c b/mm/readahead.c
+index d55138e9560b5..e5d0a56218de2 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -499,11 +499,11 @@ void page_cache_ra_order(struct readahead_control *ractl,
+ 
+ 	limit = min(limit, index + ra->size - 1);
+ 
+-	if (new_order < MAX_PAGECACHE_ORDER) {
++	if (new_order < MAX_PAGECACHE_ORDER)
+ 		new_order += 2;
+-		new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
+-		new_order = min_t(unsigned int, new_order, ilog2(ra->size));
+-	}
++
++	new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order);
++	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
+ 
+ 	/* See comment in page_cache_ra_unbounded() */
+ 	nofs = memalloc_nofs_save();
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b66e99e2c70f3..3248432246906 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -541,8 +541,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
+ 
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+ 
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+-		   struct mm_struct *mm, unsigned long vm_flags)
++static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
++			    bool shmem_huge_force, struct mm_struct *mm,
++			    unsigned long vm_flags)
+ {
+ 	loff_t i_size;
+ 
+@@ -573,6 +574,16 @@ bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ 	}
+ }
+ 
++bool shmem_is_huge(struct inode *inode, pgoff_t index,
++		   bool shmem_huge_force, struct mm_struct *mm,
++		   unsigned long vm_flags)
++{
++	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
++		return false;
++
++	return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
++}
++
+ #if defined(CONFIG_SYSFS)
+ static int shmem_parse_huge(const char *str)
+ {
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 2cd015e976102..03c78fae06f3b 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -2519,7 +2519,15 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
+ static struct xarray *
+ addr_to_vb_xa(unsigned long addr)
+ {
+-	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
++	int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
++
++	/*
++	 * Please note, nr_cpu_ids points on a highest set
++	 * possible bit, i.e. we never invoke cpumask_next()
++	 * if an index points on it which is nr_cpu_ids - 1.
++	 */
++	if (!cpu_possible(index))
++		index = cpumask_next(index, cpu_possible_mask);
+ 
+ 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
+ }
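
addr_to_vb_xa() now hashes modulo nr_cpu_ids and skips holes in the possible mask, since on sparse-CPU systems an index below nr_cpu_ids need not be a possible CPU. A small sketch of the fixup, with a made-up possible-CPU layout:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPU_IDS 4
static const bool possible[NR_CPU_IDS] = { true, false, true, true };

static int fixup(int index)
{
	while (!possible[index])
		index++;	/* id NR_CPU_IDS - 1 is always possible */
	return index;
}

int main(void)
{
	for (unsigned long addr = 0; addr < 8; addr++) {
		int index = (int)(addr % NR_CPU_IDS);

		printf("addr %lu -> cpu %d\n", addr, fixup(index));
	}
	return 0;
}
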
+diff --git a/mm/workingset.c b/mm/workingset.c
+index f2a0ecaf708d7..8a044921ed7a2 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -412,10 +412,12 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
+  * @file: whether the corresponding folio is from the file lru.
+  * @workingset: where the workingset value unpacked from shadow should
+  * be stored.
++ * @flush: whether to flush cgroup rstat.
+  *
+  * Return: true if the shadow is for a recently evicted folio; false otherwise.
+  */
+-bool workingset_test_recent(void *shadow, bool file, bool *workingset)
++bool workingset_test_recent(void *shadow, bool file, bool *workingset,
++				bool flush)
+ {
+ 	struct mem_cgroup *eviction_memcg;
+ 	struct lruvec *eviction_lruvec;
+@@ -467,10 +469,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
+ 
+ 	/*
+ 	 * Flush stats (and potentially sleep) outside the RCU read section.
++	 *
++	 * Note that workingset_test_recent() itself might be called in RCU read
++	 * section (for e.g, in cachestat) - these callers need to skip flushing
++	 * stats (via the flush argument).
++	 *
+ 	 * XXX: With per-memcg flushing and thresholding, is ratelimiting
+ 	 * still needed here?
+ 	 */
+-	mem_cgroup_flush_stats_ratelimited(eviction_memcg);
++	if (flush)
++		mem_cgroup_flush_stats_ratelimited(eviction_memcg);
+ 
+ 	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
+ 	refault = atomic_long_read(&eviction_lruvec->nonresident_age);
+@@ -558,7 +566,7 @@ void workingset_refault(struct folio *folio, void *shadow)
+ 
+ 	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
+ 
+-	if (!workingset_test_recent(shadow, file, &workingset))
++	if (!workingset_test_recent(shadow, file, &workingset, true))
+ 		return;
+ 
+ 	folio_set_active(folio);
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index f263f7e91a219..ab66b599ac479 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
+ 	struct ceph_mon_client *monc =
+ 		container_of(work, struct ceph_mon_client, delayed_work.work);
+ 
+-	dout("monc delayed_work\n");
+ 	mutex_lock(&monc->mutex);
++	dout("%s mon%d\n", __func__, monc->cur_mon);
++	if (monc->cur_mon < 0) {
++		goto out;
++	}
++
+ 	if (monc->hunting) {
+ 		dout("%s continuing hunt\n", __func__);
+ 		reopen_session(monc);
+ 	} else {
+ 		int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++		dout("%s is_authed %d\n", __func__, is_auth);
+ 		if (ceph_con_keepalive_expired(&monc->con,
+ 					       CEPH_MONC_PING_TIMEOUT)) {
+ 			dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
+ 		}
+ 	}
+ 	__schedule_delayed(monc);
++
++out:
+ 	mutex_unlock(&monc->mutex);
+ }
+ 
+@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+ 	dout("stop\n");
+-	cancel_delayed_work_sync(&monc->delayed_work);
+ 
+ 	mutex_lock(&monc->mutex);
+ 	__close_session(monc);
++	monc->hunting = false;
+ 	monc->cur_mon = -1;
+ 	mutex_unlock(&monc->mutex);
+ 
++	cancel_delayed_work_sync(&monc->delayed_work);
++
+ 	/*
+ 	 * flush msgr queue before we destroy ourselves to ensure that:
+ 	 *  - any work that references our embedded con is finished.
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index cb72923acc21c..99abfafb0b439 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -442,11 +442,12 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 			if (copy > len)
+ 				copy = len;
+ 
++			n = 0;
+ 			skb_frag_foreach_page(frag,
+ 					      skb_frag_off(frag) + offset - start,
+ 					      copy, p, p_off, p_len, copied) {
+ 				vaddr = kmap_local_page(p);
+-				n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++				n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ 					vaddr + p_off, p_len, data, to);
+ 				kunmap_local(vaddr);
+ 			}
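
The fix above matters when one skb fragment spans several pages: the inner loop then runs more than once, and assigning to n kept only the final page's byte count, while n += accumulates the true total. A toy demonstration with invented page sizes:

#include <stdio.h>

int main(void)
{
	int pages[] = { 4096, 4096, 512 };	/* one frag, three pages */
	int overwrite = 0, accumulate = 0;

	for (int i = 0; i < 3; i++) {
		overwrite = pages[i];	/* old code: last page wins */
		accumulate += pages[i];	/* fixed code: running total */
	}
	printf("overwrite=%d accumulate=%d\n", overwrite, accumulate);
	return 0;
}
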
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index fd20aae30be23..bbf40b9997138 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -434,7 +434,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ 			page = sg_page(sge);
+ 			if (copied + copy > len)
+ 				copy = len - copied;
+-			copy = copy_page_to_iter(page, sge->offset, copy, iter);
++			if (copy)
++				copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ 			if (!copy) {
+ 				copied = copied ? copied : -EFAULT;
+ 				goto out;
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index e645d751a5e89..223dcd25d88a2 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -1306,7 +1306,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+ 	if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR &&
+ 	    rxfh.input_xfrm != RXH_XFRM_NO_CHANGE)
+ 		return -EINVAL;
+-	if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
++	if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE &&
++	    (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) &&
+ 	    !ops->cap_rss_sym_xor_supported)
+ 		return -EOPNOTSUPP;
+ 
+diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
+index b2de2108b356a..34d76e87847d0 100644
+--- a/net/ethtool/linkstate.c
++++ b/net/ethtool/linkstate.c
+@@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev)
+ 	mutex_lock(&phydev->lock);
+ 	if (!phydev->drv || !phydev->drv->get_sqi)
+ 		ret = -EOPNOTSUPP;
++	else if (!phydev->link)
++		ret = -ENETDOWN;
+ 	else
+ 		ret = phydev->drv->get_sqi(phydev);
+ 	mutex_unlock(&phydev->lock);
+@@ -55,6 +57,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ 	mutex_lock(&phydev->lock);
+ 	if (!phydev->drv || !phydev->drv->get_sqi_max)
+ 		ret = -EOPNOTSUPP;
++	else if (!phydev->link)
++		ret = -ENETDOWN;
+ 	else
+ 		ret = phydev->drv->get_sqi_max(phydev);
+ 	mutex_unlock(&phydev->lock);
+@@ -62,6 +66,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ 	return ret;
+ };
+ 
++static bool linkstate_sqi_critical_error(int sqi)
++{
++	return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
++}
++
++static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
++{
++	return data->sqi >= 0 && data->sqi_max >= 0 &&
++	       data->sqi <= data->sqi_max;
++}
++
+ static int linkstate_get_link_ext_state(struct net_device *dev,
+ 					struct linkstate_reply_data *data)
+ {
+@@ -93,12 +108,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ 	data->link = __ethtool_get_link(dev);
+ 
+ 	ret = linkstate_get_sqi(dev);
+-	if (ret < 0 && ret != -EOPNOTSUPP)
++	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi = ret;
+ 
+ 	ret = linkstate_get_sqi_max(dev);
+-	if (ret < 0 && ret != -EOPNOTSUPP)
++	if (linkstate_sqi_critical_error(ret))
+ 		goto out;
+ 	data->sqi_max = ret;
+ 
+@@ -136,11 +151,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
+ 	len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ 		+ 0;
+ 
+-	if (data->sqi != -EOPNOTSUPP)
+-		len += nla_total_size(sizeof(u32));
+-
+-	if (data->sqi_max != -EOPNOTSUPP)
+-		len += nla_total_size(sizeof(u32));
++	if (linkstate_sqi_valid(data)) {
++		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
++		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
++	}
+ 
+ 	if (data->link_ext_state_provided)
+ 		len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
+@@ -164,13 +178,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
+ 	    nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
+ 		return -EMSGSIZE;
+ 
+-	if (data->sqi != -EOPNOTSUPP &&
+-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+-		return -EMSGSIZE;
++	if (linkstate_sqi_valid(data)) {
++		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
++			return -EMSGSIZE;
+ 
+-	if (data->sqi_max != -EOPNOTSUPP &&
+-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
+-		return -EMSGSIZE;
++		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
++				data->sqi_max))
++			return -EMSGSIZE;
++	}
+ 
+ 	if (data->link_ext_state_provided) {
+ 		if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 7b692bcb61d4a..c765d479869dc 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2126,8 +2126,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
+ static inline void tcp_init_undo(struct tcp_sock *tp)
+ {
+ 	tp->undo_marker = tp->snd_una;
++
+ 	/* Retransmission still in flight may cause DSACKs later. */
+-	tp->undo_retrans = tp->retrans_out ? : -1;
++	/* First, account for regular retransmits in flight: */
++	tp->undo_retrans = tp->retrans_out;
++	/* Next, account for TLP retransmits in flight: */
++	if (tp->tlp_high_seq && tp->tlp_retrans)
++		tp->undo_retrans++;
++	/* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
++	if (!tp->undo_retrans)
++		tp->undo_retrans = -1;
+ }
+ 
+ static bool tcp_is_rack(const struct sock *sk)
+@@ -2206,6 +2214,7 @@ void tcp_enter_loss(struct sock *sk)
+ 
+ 	tcp_set_ca_state(sk, TCP_CA_Loss);
+ 	tp->high_seq = tp->snd_nxt;
++	tp->tlp_high_seq = 0;
+ 	tcp_ecn_queue_cwr(tp);
+ 
+ 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
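
tcp_init_undo() now counts an in-flight TLP retransmit toward undo_retrans and keeps the sentinel -1 for the zero case, since undo_retrans == 0 means "can undo now". A worked example of the arithmetic (a plain function, not the kernel's):

#include <stdio.h>

static long init_undo(long retrans_out, int tlp_in_flight)
{
	long undo_retrans = retrans_out;	/* regular retransmits */

	if (tlp_in_flight)
		undo_retrans++;			/* plus the TLP one */
	if (!undo_retrans)
		undo_retrans = -1;		/* avoid the 0 sentinel */
	return undo_retrans;
}

int main(void)
{
	printf("%ld\n", init_undo(3, 1));	/* 4 */
	printf("%ld\n", init_undo(0, 1));	/* 1 */
	printf("%ld\n", init_undo(0, 0));	/* -1 */
	return 0;
}
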
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index f96f68cf7961c..cceb4fabd4c85 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -481,15 +481,26 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ 				     const struct sk_buff *skb,
+ 				     u32 rtx_delta)
+ {
++	const struct inet_connection_sock *icsk = inet_csk(sk);
++	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
+ 	const struct tcp_sock *tp = tcp_sk(sk);
+-	const int timeout = TCP_RTO_MAX * 2;
++	int timeout = TCP_RTO_MAX * 2;
+ 	s32 rcv_delta;
+ 
++	if (user_timeout) {
++		/* If user application specified a TCP_USER_TIMEOUT,
++		 * it does not want win 0 packets to 'reset the timer'
++		 * while retransmits are not making progress.
++		 */
++		if (rtx_delta > user_timeout)
++			return true;
++		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
++	}
+ 	/* Note: timer interrupt might have been delayed by at least one jiffy,
+ 	 * and tp->rcv_tstamp might very well have been written recently.
+ 	 * rcv_delta can thus be negative.
+ 	 */
+-	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
++	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
+ 	if (rcv_delta <= timeout)
+ 		return false;
+ 
+@@ -534,8 +545,6 @@ void tcp_retransmit_timer(struct sock *sk)
+ 	if (WARN_ON_ONCE(!skb))
+ 		return;
+ 
+-	tp->tlp_high_seq = 0;
+-
+ 	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
+ 	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
+ 		/* Receiver dastardly shrinks window. Our retransmits
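
With the change above, a zero-window probe honours TCP_USER_TIMEOUT: once retransmissions have been failing longer than the user timeout the connection is declared dead, and the fallback timeout is capped at the user value. A simplified sketch in milliseconds; the rcv_tstamp comparison from the real function is omitted:

#include <stdio.h>
#include <stdbool.h>

static bool probe0_timed_out(unsigned int rtx_delta, unsigned int user_timeout)
{
	unsigned int timeout = 2 * 120000;	/* stand-in for 2 * TCP_RTO_MAX */

	if (user_timeout) {
		if (rtx_delta > user_timeout)
			return true;		/* no progress for too long */
		if (user_timeout < timeout)
			timeout = user_timeout;	/* cap the fallback */
	}
	return rtx_delta > timeout;
}

int main(void)
{
	printf("%d\n", probe0_timed_out(30000, 10000));	/* 1 */
	printf("%d\n", probe0_timed_out(30000, 0));	/* 0 */
	return 0;
}
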
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 72d3bf136810d..fb71bf3b12b47 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -326,6 +326,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 			goto fail_unlock;
+ 		}
+ 
++		sock_set_flag(sk, SOCK_RCU_FREE);
++
+ 		sk_add_node_rcu(sk, &hslot->head);
+ 		hslot->count++;
+ 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -342,7 +344,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ 		hslot2->count++;
+ 		spin_unlock(&hslot2->lock);
+ 	}
+-	sock_set_flag(sk, SOCK_RCU_FREE);
++
+ 	error = 0;
+ fail_unlock:
+ 	spin_unlock_bh(&hslot->lock);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0f77ba3306c23..d129b826924eb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3823,6 +3823,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r
+ 	nf_tables_rule_destroy(ctx, rule);
+ }
+ 
++/** nft_chain_validate - loop detection and hook validation
++ *
++ * @ctx: context containing call depth and base chain
++ * @chain: chain to validate
++ *
++ * Walk through the rules of the given chain and chase all jumps/gotos
++ * and set lookups until either the jump limit is hit or all reachable
++ * chains have been validated.
++ */
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ {
+ 	struct nft_expr *expr, *last;
+@@ -3844,6 +3853,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
+ 			if (!expr->ops->validate)
+ 				continue;
+ 
++			/* This may call nft_chain_validate() recursively,
++			 * callers that do so must increment ctx->level.
++			 */
+ 			err = expr->ops->validate(ctx, expr, &data);
+ 			if (err < 0)
+ 				return err;
+@@ -10805,150 +10817,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
+ }
+ EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+ 
+-/*
+- * Loop detection - walk through the ruleset beginning at the destination chain
+- * of a new jump until either the source chain is reached (loop) or all
+- * reachable chains have been traversed.
+- *
+- * The loop check is performed whenever a new jump verdict is added to an
+- * expression or verdict map or a verdict map is bound to a new chain.
+- */
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+-				 const struct nft_chain *chain);
+-
+-static int nft_check_loops(const struct nft_ctx *ctx,
+-			   const struct nft_set_ext *ext)
+-{
+-	const struct nft_data *data;
+-	int ret;
+-
+-	data = nft_set_ext_data(ext);
+-	switch (data->verdict.code) {
+-	case NFT_JUMP:
+-	case NFT_GOTO:
+-		ret = nf_tables_check_loops(ctx, data->verdict.chain);
+-		break;
+-	default:
+-		ret = 0;
+-		break;
+-	}
+-
+-	return ret;
+-}
+-
+-static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+-					struct nft_set *set,
+-					const struct nft_set_iter *iter,
+-					struct nft_elem_priv *elem_priv)
+-{
+-	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
+-
+-	if (!nft_set_elem_active(ext, iter->genmask))
+-		return 0;
+-
+-	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+-	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+-		return 0;
+-
+-	return nft_check_loops(ctx, ext);
+-}
+-
+-static int nft_set_catchall_loops(const struct nft_ctx *ctx,
+-				  struct nft_set *set)
+-{
+-	u8 genmask = nft_genmask_next(ctx->net);
+-	struct nft_set_elem_catchall *catchall;
+-	struct nft_set_ext *ext;
+-	int ret = 0;
+-
+-	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+-		ext = nft_set_elem_ext(set, catchall->elem);
+-		if (!nft_set_elem_active(ext, genmask))
+-			continue;
+-
+-		ret = nft_check_loops(ctx, ext);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	return ret;
+-}
+-
+-static int nf_tables_check_loops(const struct nft_ctx *ctx,
+-				 const struct nft_chain *chain)
+-{
+-	const struct nft_rule *rule;
+-	const struct nft_expr *expr, *last;
+-	struct nft_set *set;
+-	struct nft_set_binding *binding;
+-	struct nft_set_iter iter;
+-
+-	if (ctx->chain == chain)
+-		return -ELOOP;
+-
+-	if (fatal_signal_pending(current))
+-		return -EINTR;
+-
+-	list_for_each_entry(rule, &chain->rules, list) {
+-		nft_rule_for_each_expr(expr, last, rule) {
+-			struct nft_immediate_expr *priv;
+-			const struct nft_data *data;
+-			int err;
+-
+-			if (strcmp(expr->ops->type->name, "immediate"))
+-				continue;
+-
+-			priv = nft_expr_priv(expr);
+-			if (priv->dreg != NFT_REG_VERDICT)
+-				continue;
+-
+-			data = &priv->data;
+-			switch (data->verdict.code) {
+-			case NFT_JUMP:
+-			case NFT_GOTO:
+-				err = nf_tables_check_loops(ctx,
+-							data->verdict.chain);
+-				if (err < 0)
+-					return err;
+-				break;
+-			default:
+-				break;
+-			}
+-		}
+-	}
+-
+-	list_for_each_entry(set, &ctx->table->sets, list) {
+-		if (!nft_is_active_next(ctx->net, set))
+-			continue;
+-		if (!(set->flags & NFT_SET_MAP) ||
+-		    set->dtype != NFT_DATA_VERDICT)
+-			continue;
+-
+-		list_for_each_entry(binding, &set->bindings, list) {
+-			if (!(binding->flags & NFT_SET_MAP) ||
+-			    binding->chain != chain)
+-				continue;
+-
+-			iter.genmask	= nft_genmask_next(ctx->net);
+-			iter.type	= NFT_ITER_UPDATE;
+-			iter.skip 	= 0;
+-			iter.count	= 0;
+-			iter.err	= 0;
+-			iter.fn		= nf_tables_loop_check_setelem;
+-
+-			set->ops->walk(ctx, set, &iter);
+-			if (!iter.err)
+-				iter.err = nft_set_catchall_loops(ctx, set);
+-
+-			if (iter.err < 0)
+-				return iter.err;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+ /**
+  *	nft_parse_u32_check - fetch u32 attribute and check for maximum value
+  *
+@@ -11061,7 +10929,7 @@ static int nft_validate_register_store(const struct nft_ctx *ctx,
+ 		if (data != NULL &&
+ 		    (data->verdict.code == NFT_GOTO ||
+ 		     data->verdict.code == NFT_JUMP)) {
+-			err = nf_tables_check_loops(ctx, data->verdict.chain);
++			err = nft_chain_validate(ctx, data->verdict.chain);
+ 			if (err < 0)
+ 				return err;
+ 		}
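Routing the jump/goto check through nft_chain_validate() replaces unbounded graph traversal with the same depth-limited recursion used for hook validation, which is why the bespoke nf_tables_check_loops() machinery above can be deleted. A toy model of the bounded walk, simplified to one jump target per chain (the real walk also descends into rules, expressions, and verdict-map elements, and the limit is assumed to match the kernel's NFT_JUMP_STACK_SIZE):

	enum { JUMP_STACK_SIZE = 16 };	/* assumed kernel limit */

	struct chain {
		struct chain *jump_target;	/* NULL: chain terminates */
	};

	static int chain_validate(const struct chain *c, int level)
	{
		if (level == JUMP_STACK_SIZE)
			return -1;	/* stands in for -EMLINK */
		if (!c->jump_target)
			return 0;
		return chain_validate(c->jump_target, level + 1);
	}

A cycle simply keeps recursing until the depth limit trips, so loops are rejected without tracking visited chains.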
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index f1c31757e4969..55e28e1da66ec 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -325,7 +325,7 @@ static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+ 	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+ 
+ 	i = entry->hook_index;
+-	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
++	if (!hooks || i >= hooks->num_hook_entries) {
+ 		kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+ 		nf_queue_entry_free(entry);
+ 		return;
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 2a96d9c1db65b..6fa3cca87d346 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1077,6 +1077,14 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ 		 */
+ 		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ 			goto drop;
++
++		/* The ct may be dropped if a clash has been resolved,
++		 * so it's necessary to retrieve it from skb again to
++		 * prevent UAF.
++		 */
++		ct = nf_ct_get(skb, &ctinfo);
++		if (!ct)
++			skip_add = true;
+ 	}
+ 
+ 	if (!skip_add)
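The act_ct fix is a classic conntrack use-after-free: nf_conntrack_confirm() can resolve a clash by dropping the entry the skb originally pointed at, so any ct pointer cached from before confirmation is stale. In sketch form:

	/* Anti-pattern (what the old code effectively did):
	 *	ct = nf_ct_get(skb, &ctinfo);
	 *	nf_conntrack_confirm(skb);	// may free/replace the entry
	 *	use(ct);			// potential use-after-free
	 *
	 * Pattern (what the hunk introduces): confirm first, then
	 * re-read the entry from the skb and tolerate its absence.
	 */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		skip_add = true;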
+diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
+index c2ef9dcf91d2d..cc6051d4f2ef8 100644
+--- a/net/sched/sch_ingress.c
++++ b/net/sched/sch_ingress.c
+@@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+ 	entry = tcx_entry_fetch_or_create(dev, true, &created);
+ 	if (!entry)
+ 		return -ENOMEM;
+-	tcx_miniq_set_active(entry, true);
++	tcx_miniq_inc(entry);
+ 	mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
+ 	if (created)
+ 		tcx_entry_update(dev, entry, true);
+@@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch)
+ 	tcf_block_put_ext(q->block, sch, &q->block_info);
+ 
+ 	if (entry) {
+-		tcx_miniq_set_active(entry, false);
++		tcx_miniq_dec(entry);
+ 		if (!tcx_entry_is_active(entry)) {
+ 			tcx_entry_update(dev, NULL, true);
+ 			tcx_entry_free(entry);
+@@ -257,7 +257,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ 	entry = tcx_entry_fetch_or_create(dev, true, &created);
+ 	if (!entry)
+ 		return -ENOMEM;
+-	tcx_miniq_set_active(entry, true);
++	tcx_miniq_inc(entry);
+ 	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
+ 	if (created)
+ 		tcx_entry_update(dev, entry, true);
+@@ -276,7 +276,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ 	entry = tcx_entry_fetch_or_create(dev, false, &created);
+ 	if (!entry)
+ 		return -ENOMEM;
+-	tcx_miniq_set_active(entry, true);
++	tcx_miniq_inc(entry);
+ 	mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
+ 	if (created)
+ 		tcx_entry_update(dev, entry, false);
+@@ -302,7 +302,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ 	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
+ 
+ 	if (ingress_entry) {
+-		tcx_miniq_set_active(ingress_entry, false);
++		tcx_miniq_dec(ingress_entry);
+ 		if (!tcx_entry_is_active(ingress_entry)) {
+ 			tcx_entry_update(dev, NULL, true);
+ 			tcx_entry_free(ingress_entry);
+@@ -310,7 +310,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ 	}
+ 
+ 	if (egress_entry) {
+-		tcx_miniq_set_active(egress_entry, false);
++		tcx_miniq_dec(egress_entry);
+ 		if (!tcx_entry_is_active(egress_entry)) {
+ 			tcx_entry_update(dev, NULL, false);
+ 			tcx_entry_free(egress_entry);
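tcx_miniq_set_active() was a boolean, which goes wrong once more than one qdisc lifetime can overlap on the same tcx entry: the second "false" would deactivate a miniq another user still needs. Counting references fixes that; a minimal self-contained sketch with a hypothetical struct (the real counter lives in struct tcx_entry):

	struct miniq_ref {
		int miniq_active;	/* number of qdiscs using the miniq */
	};

	static void miniq_inc(struct miniq_ref *e)
	{
		e->miniq_active++;
	}

	static void miniq_dec(struct miniq_ref *e)
	{
		e->miniq_active--;
	}

	static int miniq_in_use(const struct miniq_ref *e)
	{
		return e->miniq_active > 0;	/* free only at zero */
	}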
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index ce18716491c8f..b9121adef8b76 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2442,6 +2442,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 		transport->srcport = 0;
+ 		status = -EAGAIN;
+ 		break;
++	case -EPERM:
++		/* Happens, for instance, if a BPF program is preventing
++		 * the connect. Remap the error so upper layers can better
++		 * deal with it.
++		 */
++		status = -ECONNREFUSED;
++		fallthrough;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+ 		 * local IPv6 address without a scope-id.
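Remapping -EPERM to -ECONNREFUSED keeps the RPC state machine on a path it already knows how to retry; a bare EPERM (per the hunk's comment, typically a BPF program denying the connect) would otherwise surface as an unexpected transport error. Reduced to a helper:

	#include <errno.h>

	/* Normalize connect() errors before retry logic inspects them;
	 * a BPF-denied connect behaves like an ordinary refusal.
	 */
	static int normalize_connect_error(int err)
	{
		return err == -EPERM ? -ECONNREFUSED : err;
	}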
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index a78b804b680cf..b9513d224476f 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -57,9 +57,11 @@ else
+ 	fi
+ fi
+ 
+-# Some distributions append a package release number, as in 2.34-4.fc32
+-# Trim the hyphen and any characters that follow.
+-version=${version%-*}
++# There may be something after the version, such as a distribution's package
++# release number (like Fedora's "2.34-4.fc32") or punctuation (like LLD briefly
++# added before the "compatible with GNU linkers" string), so remove everything
++# after just numbers and periods.
++version=${version%%[!0-9.]*}
+ 
+ cversion=$(get_canonical_version $version)
+ min_cversion=$(get_canonical_version $min_version)
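The new expansion is stricter than trimming at a hyphen: ${version%%[!0-9.]*} deletes everything from the first character that is neither a digit nor a dot. A package suffix such as "2.34-4.fc32" still reduces to "2.34", and stray punctuation after the number (the LLD case the comment mentions, e.g. a hypothetical "17.0.6,") now reduces to "17.0.6" instead of reaching get_canonical_version() unparsed.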
+diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
+index fffc8af8deb17..c52d517b93647 100644
+--- a/scripts/package/kernel.spec
++++ b/scripts/package/kernel.spec
+@@ -83,7 +83,6 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
+ 	done
+ 
+ 	if [ -d "%{buildroot}/lib/modules/%{KERNELRELEASE}/dtb" ];then
+-		echo "/lib/modules/%{KERNELRELEASE}/dtb"
+ 		find "%{buildroot}/lib/modules/%{KERNELRELEASE}/dtb" -printf "%%%ghost /boot/dtb-%{KERNELRELEASE}/%%P\n"
+ 	fi
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c0530d4aa3fc3..98f580e273e48 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9999,6 +9999,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++	SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+@@ -10327,6 +10328,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++	SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -10424,6 +10426,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -10594,6 +10597,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ 	SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 6a39ca632f55e..4a6beddb0f6c7 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -566,12 +566,6 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ 			sdai = swidget->private;
+ 			ops = sdai->platform_private;
+ 
+-			ret = hda_link_dma_cleanup(hext_stream->link_substream,
+-						   hext_stream,
+-						   cpu_dai);
+-			if (ret < 0)
+-				return ret;
+-
+ 			/* for consistency with TRIGGER_SUSPEND  */
+ 			if (ops->post_trigger) {
+ 				ret = ops->post_trigger(sdev, cpu_dai,
+@@ -580,6 +574,12 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ 				if (ret < 0)
+ 					return ret;
+ 			}
++
++			ret = hda_link_dma_cleanup(hext_stream->link_substream,
++						   hext_stream,
++						   cpu_dai);
++			if (ret < 0)
++				return ret;
+ 		}
+ 	}
+ 
+diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
+index e95bd56b332f7..35856b11c1435 100644
+--- a/tools/testing/selftests/wireguard/qemu/Makefile
++++ b/tools/testing/selftests/wireguard/qemu/Makefile
+@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(HOST_ARCH),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
++QEMU_MACHINE := -cpu max -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),i686)
+ CHOST := i686-linux-musl
+@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
++QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),mips64)
+ CHOST := mips64-linux-musl



* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-25 12:08 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-25 12:08 UTC (permalink / raw
  To: gentoo-commits

commit:     23c525cf347a41a5a069e3a19cc1c1e92a254571
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 25 12:08:09 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 25 12:08:09 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=23c525cf

Linux patch 6.9.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1010_linux-6.9.11.patch | 6453 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6457 insertions(+)

diff --git a/0000_README b/0000_README
index e8d9065b..dd499825 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-6.9.10.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.10
 
+Patch:  1010_linux-6.9.11.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.11
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1010_linux-6.9.11.patch b/1010_linux-6.9.11.patch
new file mode 100644
index 00000000..aa466b72
--- /dev/null
+++ b/1010_linux-6.9.11.patch
@@ -0,0 +1,6453 @@
+diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst
+index 7964fe134277b..6c1303cff159e 100644
+--- a/Documentation/cdrom/cdrom-standard.rst
++++ b/Documentation/cdrom/cdrom-standard.rst
+@@ -217,7 +217,7 @@ current *struct* is::
+ 		int (*media_changed)(struct cdrom_device_info *, int);
+ 		int (*tray_move)(struct cdrom_device_info *, int);
+ 		int (*lock_door)(struct cdrom_device_info *, int);
+-		int (*select_speed)(struct cdrom_device_info *, int);
++		int (*select_speed)(struct cdrom_device_info *, unsigned long);
+ 		int (*get_last_session) (struct cdrom_device_info *,
+ 					 struct cdrom_multisession *);
+ 		int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *);
+@@ -396,7 +396,7 @@ action need be taken, and the return value should be 0.
+ 
+ ::
+ 
+-	int select_speed(struct cdrom_device_info *cdi, int speed)
++	int select_speed(struct cdrom_device_info *cdi, unsigned long speed)
+ 
+ Some CD-ROM drives are capable of changing their head-speed. There
+ are several reasons for changing the speed of a CD-ROM drive. Badly
+diff --git a/Makefile b/Makefile
+index 5471f554f95ec..46457f645921e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
+index dd6ce86d4332b..b776e7424fe91 100644
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu)
+ 	for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
+ 		struct insn_emulation *insn = insn_emulations[i];
+ 		bool enable = READ_ONCE(insn->current_mode) == INSN_HW;
++		if (insn->status == INSN_UNAVAILABLE)
++			continue;
++
+ 		if (insn->set_hw_mode && insn->set_hw_mode(enable)) {
+ 			pr_warn("CPU[%u] cannot support the emulation of %s",
+ 				cpu, insn->name);
+diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+index 8aefb0c126722..a34734a6c3ce8 100644
+--- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
++++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
+@@ -44,14 +44,14 @@ linux,cma {
+ &gmac0 {
+ 	status = "okay";
+ 
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	bus_id = <0x0>;
+ };
+ 
+ &gmac1 {
+ 	status = "okay";
+ 
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	bus_id = <0x1>;
+ };
+ 
+diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+index ed4d324340411..aaf41b565805a 100644
+--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
++++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+@@ -43,7 +43,7 @@ linux,cma {
+ &gmac0 {
+ 	status = "okay";
+ 
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&phy0>;
+ 	mdio {
+ 		compatible = "snps,dwmac-mdio";
+@@ -58,7 +58,7 @@ phy0: ethernet-phy@0 {
+ &gmac1 {
+ 	status = "okay";
+ 
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&phy1>;
+ 	mdio {
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+index 74b99bd234cc3..ea9e6985d0e9f 100644
+--- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
++++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
+@@ -92,7 +92,7 @@ phy1: ethernet-phy@1 {
+ &gmac2 {
+ 	status = "okay";
+ 
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&phy2>;
+ 	mdio {
+ 		compatible = "snps,dwmac-mdio";
+diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
+index 0352c07c608e9..fe76282a353f5 100644
+--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
+@@ -27,7 +27,7 @@
+ 17	o32	break				sys_ni_syscall
+ # 18 was sys_stat
+ 18	o32	unused18			sys_ni_syscall
+-19	o32	lseek				sys_lseek
++19	o32	lseek				sys_lseek			compat_sys_lseek
+ 20	o32	getpid				sys_getpid
+ 21	o32	mount				sys_mount
+ 22	o32	umount				sys_oldumount
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index e0ce812796241..7d1b50599dd6c 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ {
+ 	struct eeh_dev *edev;
+ 	struct pci_dev *pdev;
++	struct pci_bus *bus = NULL;
+ 
+ 	if (pe->type & EEH_PE_PHB)
+ 		return pe->phb->bus;
+@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ 
+ 	/* Retrieve the parent PCI bus of first (top) PCI device */
+ 	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
++	pci_lock_rescan_remove();
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (pdev)
+-		return pdev->bus;
++		bus = pdev->bus;
++	pci_unlock_rescan_remove();
+ 
+-	return NULL;
++	return bus;
+ }
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index b569ebaa590e2..3ff3de9a52acf 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -130,14 +130,16 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 	}
+ 	rcu_read_unlock();
+ 
+-	fdput(f);
+-
+-	if (!found)
++	if (!found) {
++		fdput(f);
+ 		return -EINVAL;
++	}
+ 
+ 	table_group = iommu_group_get_iommudata(grp);
+-	if (WARN_ON(!table_group))
++	if (WARN_ON(!table_group)) {
++		fdput(f);
+ 		return -EFAULT;
++	}
+ 
+ 	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ 		struct iommu_table *tbltmp = table_group->tables[i];
+@@ -158,8 +160,10 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 			break;
+ 		}
+ 	}
+-	if (!tbl)
++	if (!tbl) {
++		fdput(f);
+ 		return -EINVAL;
++	}
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+@@ -170,6 +174,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 			/* stit is being destroyed */
+ 			iommu_tce_table_put(tbl);
+ 			rcu_read_unlock();
++			fdput(f);
+ 			return -ENOTTY;
+ 		}
+ 		/*
+@@ -177,6 +182,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 		 * its KVM reference counter and can return.
+ 		 */
+ 		rcu_read_unlock();
++		fdput(f);
+ 		return 0;
+ 	}
+ 	rcu_read_unlock();
+@@ -184,6 +190,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
+ 	if (!stit) {
+ 		iommu_tce_table_put(tbl);
++		fdput(f);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -192,6 +199,7 @@ long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ 
+ 	list_add_rcu(&stit->next, &stt->iommu_tables);
+ 
++	fdput(f);
+ 	return 0;
+ }
+ 
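Each early return added above pairs the fdget() at the top of kvm_spapr_tce_attach_iommu_group() with an fdput(); the old code dropped the reference up front, before the iommu table pointers derived from it were used. The underlying idiom, sketched with a hypothetical do_work() so there is a single release point:

	struct fd f = fdget(tablefd);
	int ret;

	if (!f.file)
		return -EBADF;

	ret = do_work(f.file);	/* hypothetical; the fd pins the group here */
	fdput(f);		/* exactly one release on every path */
	return ret;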
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index b44de0f0822f0..b10a253252387 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
+ {
+ 	void (*ctor)(void *) = get_dtl_cache_ctor();
+ 
+-	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+-						DISPATCH_LOG_BYTES, 0, ctor);
++	dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
++						DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
+ 	if (!dtl_cache) {
+ 		pr_warn("Failed to create dispatch trace log buffer cache\n");
+ 		pr_warn("Stolen time statistics will be unreliable\n");
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index 0d3f00eb0baee..10e311b2759d3 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			     bool (*fn)(void *, unsigned long), void *arg)
+ {
+ 	unsigned long fp, sp, pc;
++	int graph_idx = 0;
+ 	int level = 0;
+ 
+ 	if (regs) {
+@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ 			pc = regs->ra;
+ 		} else {
+ 			fp = frame->fp;
+-			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
++			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
+ 						   &frame->ra);
+ 			if (pc == (unsigned long)ret_from_exception) {
+ 				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
+index 2d4a35e6dd18d..09a87fa222c78 100644
+--- a/drivers/acpi/ac.c
++++ b/drivers/acpi/ac.c
+@@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+ 						  dev_name(&adev->dev), event,
+ 						  (u32) ac->state);
+ 		acpi_notifier_call_chain(adev, event, (u32) ac->state);
+-		kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
++		power_supply_changed(ac->charger);
+ 	}
+ }
+ 
+@@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev)
+ 	if (acpi_ac_get_state(ac))
+ 		return 0;
+ 	if (old_state != ac->state)
+-		kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
++		power_supply_changed(ac->charger);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 1cec29ab64ce8..299ec653388ce 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ 	if (ec->busy_polling || bits > 8)
+ 		acpi_ec_burst_enable(ec);
+ 
+-	for (i = 0; i < bytes; ++i, ++address, ++value)
++	for (i = 0; i < bytes; ++i, ++address, ++value) {
+ 		result = (function == ACPI_READ) ?
+ 			acpi_ec_read(ec, address, value) :
+ 			acpi_ec_write(ec, address, *value);
++		if (result < 0)
++			break;
++	}
+ 
+ 	if (ec->busy_polling || bits > 8)
+ 		acpi_ec_burst_disable(ec);
+@@ -1348,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ 		return AE_NOT_FOUND;
+ 	case -ETIME:
+ 		return AE_TIME;
+-	default:
++	case 0:
+ 		return AE_OK;
++	default:
++		return AE_ERROR;
+ 	}
+ }
+ 
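Two things change in the EC address-space handler: the loop now stops at the first failed byte instead of overwriting result each iteration, and the errno-to-ACPI-status mapping gains an explicit success case so that only result == 0 becomes AE_OK. Condensed, with the earlier cases elided as in the hunk:

	switch (result) {
	/* ... specific errnos map to specific ACPI statuses ... */
	case -ETIME:
		return AE_TIME;
	case 0:
		return AE_OK;	/* success only when every byte transferred */
	default:
		return AE_ERROR;	/* previously any other errno became AE_OK */
	}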
+diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
+index 94e3c000df2e1..dc8164b182dcc 100644
+--- a/drivers/acpi/sbs.c
++++ b/drivers/acpi/sbs.c
+@@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context)
+ 	if (sbs->charger_exists) {
+ 		acpi_ac_get_present(sbs);
+ 		if (sbs->charger_present != saved_charger_state)
+-			kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE);
++			power_supply_changed(sbs->charger);
+ 	}
+ 
+ 	if (sbs->manager_present) {
+@@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context)
+ 			acpi_battery_read(bat);
+ 			if (saved_battery_state == bat->present)
+ 				continue;
+-			kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE);
++			power_supply_changed(bat->bat);
+ 		}
+ 	}
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 28a95fd366fea..95a468eaa7013 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
+ 	return 0;
+ }
+ 
++static void loop_clear_limits(struct loop_device *lo, int mode)
++{
++	struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
++
++	if (mode & FALLOC_FL_ZERO_RANGE)
++		lim.max_write_zeroes_sectors = 0;
++
++	if (mode & FALLOC_FL_PUNCH_HOLE) {
++		lim.max_hw_discard_sectors = 0;
++		lim.discard_granularity = 0;
++	}
++
++	queue_limits_commit_update(lo->lo_queue, &lim);
++}
++
+ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ 			int mode)
+ {
+@@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ 	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
+ 	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+ 		return -EIO;
++
++	/*
++	 * We initially configure the limits in a hope that fallocate is
++	 * supported and clear them here if that turns out not to be true.
++	 */
++	if (unlikely(ret == -EOPNOTSUPP))
++		loop_clear_limits(lo, mode);
++
+ 	return ret;
+ }
+ 
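loop_clear_limits() leans on the queue_limits start/commit API, so the optimistically configured discard and write-zeroes limits are retracted atomically the first time fallocate() returns -EOPNOTSUPP. The idiom, in isolation:

	struct queue_limits lim = queue_limits_start_update(q);

	/* Mutate the private snapshot; readers still see old limits. */
	lim.max_hw_discard_sectors = 0;
	lim.discard_granularity = 0;

	queue_limits_commit_update(q, &lim);	/* publish atomically */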
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 620679a0ac381..26e2c22a87e1c 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1810,8 +1810,8 @@ static int null_validate_conf(struct nullb_device *dev)
+ 		dev->queue_mode = NULL_Q_MQ;
+ 	}
+ 
+-	dev->blocksize = round_down(dev->blocksize, 512);
+-	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
++	if (blk_validate_block_size(dev->blocksize))
++		return -EINVAL;
+ 
+ 	if (dev->use_per_node_hctx) {
+ 		if (dev->submit_queues != nr_online_nodes)
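Instead of silently rounding and clamping a bad block size, null_blk now rejects it via the shared helper. blk_validate_block_size() accepts only a power of two between 512 bytes and the page size; an equivalent standalone check, with 4096 standing in for PAGE_SIZE:

	#include <errno.h>

	static int validate_block_size(unsigned long bsize)
	{
		/* must be a power of two in [512, PAGE_SIZE] */
		if (bsize < 512 || bsize > 4096 || (bsize & (bsize - 1)))
			return -EINVAL;
		return 0;
	}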
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index 9d0c7e278114b..9bfa9a6ad56c8 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -281,7 +281,7 @@ static u8 crc8_table[CRC8_TABLE_SIZE];
+ 
+ /* Default configurations */
+ #define DEFAULT_H2C_WAKEUP_MODE	WAKEUP_METHOD_BREAK
+-#define DEFAULT_PS_MODE		PS_MODE_DISABLE
++#define DEFAULT_PS_MODE		PS_MODE_ENABLE
+ #define FW_INIT_BAUDRATE	HCI_NXP_PRI_BAUDRATE
+ 
+ static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index d7ab5bd5d4b41..e12bb9abf6b6a 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -100,7 +100,6 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
+ static const struct alpha_pll_config ipq5018_pll_config = {
+ 	.l = 0x2a,
+ 	.config_ctl_val = 0x4001075b,
+-	.config_ctl_hi_val = 0x304,
+ 	.main_output_mask = BIT(0),
+ 	.aux_output_mask = BIT(1),
+ 	.early_output_mask = BIT(3),
+@@ -114,7 +113,6 @@ static const struct alpha_pll_config ipq5018_pll_config = {
+ static const struct alpha_pll_config ipq5332_pll_config = {
+ 	.l = 0x2d,
+ 	.config_ctl_val = 0x4001075b,
+-	.config_ctl_hi_val = 0x304,
+ 	.main_output_mask = BIT(0),
+ 	.aux_output_mask = BIT(1),
+ 	.early_output_mask = BIT(3),
+diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
+index ac8c0ef851581..af2c82f7bd902 100644
+--- a/drivers/firmware/efi/libstub/zboot.lds
++++ b/drivers/firmware/efi/libstub/zboot.lds
+@@ -41,6 +41,7 @@ SECTIONS
+ 	}
+ 
+ 	/DISCARD/ : {
++		*(.discard .discard.*)
+ 		*(.modinfo .init.modinfo)
+ 	}
+ }
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 00ffa168e4056..f2f40393e3695 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -758,6 +758,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
+ 	int level;
+ 
+ 	if (chip->driver_data & PCA_PCAL) {
++		guard(mutex)(&chip->i2c_lock);
++
+ 		/* Enable latch on interrupt-enabled inputs */
+ 		pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index d89d6829f1df4..b10fdd8b54144 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -4187,9 +4187,10 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_i
+ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ 				 struct amdgpu_cu_info *cu_info)
+ {
+-	int i, j, k, counter, xcc_id, active_cu_number = 0;
+-	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
++	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
++	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
+ 	unsigned disable_masks[4 * 4];
++	bool is_symmetric_cus;
+ 
+ 	if (!adev || !cu_info)
+ 		return -EINVAL;
+@@ -4207,6 +4208,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ 
+ 	mutex_lock(&adev->grbm_idx_mutex);
+ 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
++		is_symmetric_cus = true;
+ 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ 				mask = 1;
+@@ -4234,6 +4236,15 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
+ 			}
++			if (i && is_symmetric_cus && prev_counter != counter)
++				is_symmetric_cus = false;
++			prev_counter = counter;
++		}
++		if (is_symmetric_cus) {
++			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
++			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
++			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
++			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
+ 		}
+ 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ 					    xcc_id);
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+index 78a95f8f370be..238abd98072ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+@@ -32,7 +32,9 @@
+ #include "mp/mp_14_0_2_sh_mask.h"
+ 
+ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
++MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
+ MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
++MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+ 
+ /* For large FW files the time to complete can be very long */
+ #define USBC_PD_POLLING_LIMIT_S 240
+@@ -64,6 +66,9 @@ static int psp_v14_0_init_microcode(struct psp_context *psp)
+ 	case IP_VERSION(14, 0, 2):
+ 	case IP_VERSION(14, 0, 3):
+ 		err = psp_init_sos_microcode(psp, ucode_prefix);
++		if (err)
++			return err;
++		err = psp_init_ta_microcode(psp, ucode_prefix);
+ 		if (err)
+ 			return err;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2152e40ee1c27..c29d271579ad3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -11161,6 +11161,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ 	return ret;
+ }
+ 
++static void parse_edid_displayid_vrr(struct drm_connector *connector,
++		struct edid *edid)
++{
++	u8 *edid_ext = NULL;
++	int i;
++	int j = 0;
++	u16 min_vfreq;
++	u16 max_vfreq;
++
++	if (edid == NULL || edid->extensions == 0)
++		return;
++
++	/* Find DisplayID extension */
++	for (i = 0; i < edid->extensions; i++) {
++		edid_ext = (void *)(edid + (i + 1));
++		if (edid_ext[0] == DISPLAYID_EXT)
++			break;
++	}
++
++	if (edid_ext == NULL)
++		return;
++
++	while (j < EDID_LENGTH) {
++		/* Get dynamic video timing range from DisplayID if available */
++		if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25	&&
++		    (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) {
++			min_vfreq = edid_ext[j+9];
++			if (edid_ext[j+1] & 7)
++				max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8);
++			else
++				max_vfreq = edid_ext[j+10];
++
++			if (max_vfreq && min_vfreq) {
++				connector->display_info.monitor_range.max_vfreq = max_vfreq;
++				connector->display_info.monitor_range.min_vfreq = min_vfreq;
++
++				return;
++			}
++		}
++		j++;
++	}
++}
++
+ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ 			  struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ {
+@@ -11282,6 +11325,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ 	if (!adev->dm.freesync_module)
+ 		goto update;
+ 
++	/* Some eDP panels only have the refresh rate range info in DisplayID */
++	if ((connector->display_info.monitor_range.min_vfreq == 0 ||
++	     connector->display_info.monitor_range.max_vfreq == 0))
++		parse_edid_displayid_vrr(connector, edid);
++
+ 	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ 		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ 		bool edid_check_required = false;
+@@ -11289,9 +11337,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ 		if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ 						     amdgpu_dm_connector)) {
+ 			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+-				freesync_capable = true;
+ 				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ 				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
++				if (amdgpu_dm_connector->max_vfreq -
++				    amdgpu_dm_connector->min_vfreq > 10)
++					freesync_capable = true;
+ 			} else {
+ 				edid_check_required = edid->version > 1 ||
+ 						      (edid->version == 1 &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 6c84b0fa40f44..0782a34689a00 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							&mode_lib->vba.UrgentBurstFactorLumaPre[k],
+ 							&mode_lib->vba.UrgentBurstFactorChromaPre[k],
+ 							&mode_lib->vba.NotUrgentLatencyHidingPre[k]);
++
++					v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] /
++							8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k];
+ 				}
+ 
+ 				{
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+index 53e40d3c48d4b..6716696df7719 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+@@ -177,7 +177,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ 	.urgent_latency_vm_data_only_us = 4.0,
+-	.dram_clock_change_latency_us = 11.72,
++	.dram_clock_change_latency_us = 34.0,
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+index b3ffab77cf889..40ca38dd1b23e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+@@ -215,7 +215,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ 	.urgent_latency_vm_data_only_us = 4.0,
+-	.dram_clock_change_latency_us = 11.72,
++	.dram_clock_change_latency_us = 34,
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+index a20f28a5d2e7b..3af759dca6ebf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+@@ -233,6 +233,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
+ 		out->round_trip_ping_latency_dcfclk_cycles = 106;
+ 		out->smn_latency_us = 2;
+ 		out->dispclk_dppclk_vco_speed_mhz = 3600;
++		out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
+ 		break;
+ 
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+index b72ed3e78df05..bb4e812248aec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+@@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
+ 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000;
+ 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ 
+-	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported)
++	if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported)
+ 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
+ 	else
+ 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
+diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
+index 2e8e6c9875f6c..f83ace2d7ec30 100644
+--- a/drivers/gpu/drm/amd/include/pptable.h
++++ b/drivers/gpu/drm/amd/include/pptable.h
+@@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2
+ } ATOM_PPLIB_STATE_V2;
+ 
+ typedef struct _StateArray{
+-    //how many states we have 
+-    UCHAR ucNumEntries;
+-    
+-    ATOM_PPLIB_STATE_V2 states[1];
++	//how many states we have
++	UCHAR ucNumEntries;
++
++	ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
+ }StateArray;
+ 
+ 
+ typedef struct _ClockInfoArray{
+-    //how many clock levels we have
+-    UCHAR ucNumEntries;
+-    
+-    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+-    UCHAR ucEntrySize;
+-    
+-    UCHAR clockInfo[];
++	//how many clock levels we have
++	UCHAR ucNumEntries;
++
++	//sizeof(ATOM_PPLIB_CLOCK_INFO)
++	UCHAR ucEntrySize;
++
++	UCHAR clockInfo[];
+ }ClockInfoArray;
+ 
+ typedef struct _NonClockInfoArray{
++	//how many non-clock levels we have. normally should be same as number of states
++	UCHAR ucNumEntries;
++	//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
++	UCHAR ucEntrySize;
+ 
+-    //how many non-clock levels we have. normally should be same as number of states
+-    UCHAR ucNumEntries;
+-    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+-    UCHAR ucEntrySize;
+-    
+-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
++	ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
+ }NonClockInfoArray;
+ 
+ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+@@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+ 
+ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+ {
+-    UCHAR ucNumEntries;                                                // Number of entries.
+-    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
++	// Number of entries.
++	UCHAR ucNumEntries;
++	// Dynamically allocate entries.
++	ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries);
+ }ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+ 
+ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+@@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+ 
+ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+ {
+-    UCHAR ucNumEntries;                                                // Number of entries.
+-    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
++	// Number of entries.
++	UCHAR ucNumEntries;
++	// Dynamically allocate entries.
++	ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries);
+ }ATOM_PPLIB_Clock_Voltage_Limit_Table;
+ 
+ union _ATOM_PPLIB_CAC_Leakage_Record
+@@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+ 
+ typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+ {
+-    UCHAR ucNumEntries;                                                 // Number of entries.
+-    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
++	// Number of entries.
++	UCHAR ucNumEntries;
++	// Dynamically allocate entries.
++	ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries);
+ }ATOM_PPLIB_CAC_Leakage_Table;
+ 
+ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+@@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+ 
+ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+ {
+-    UCHAR ucNumEntries;                                                 // Number of entries.
+-    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
++	// Number of entries.
++	UCHAR ucNumEntries;
++	// Dynamically allocate entries.
++	ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries);
+ }ATOM_PPLIB_PhaseSheddingLimits_Table;
+ 
+ typedef struct _VCEClockInfo{
+@@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{
+ }VCEClockInfo;
+ 
+ typedef struct _VCEClockInfoArray{
+-    UCHAR ucNumEntries;
+-    VCEClockInfo entries[1];
++	UCHAR ucNumEntries;
++	VCEClockInfo entries[] __counted_by(ucNumEntries);
+ }VCEClockInfoArray;
+ 
+ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+@@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+ 
+ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+ {
+-    UCHAR numEntries;
+-    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
++	UCHAR numEntries;
++	ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
+ }ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+ 
+ typedef struct _ATOM_PPLIB_VCE_State_Record
+@@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record
+ 
+ typedef struct _ATOM_PPLIB_VCE_State_Table
+ {
+-    UCHAR numEntries;
+-    ATOM_PPLIB_VCE_State_Record entries[1];
++	UCHAR numEntries;
++	ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries);
+ }ATOM_PPLIB_VCE_State_Table;
+ 
+ 
+@@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{
+ }UVDClockInfo;
+ 
+ typedef struct _UVDClockInfoArray{
+-    UCHAR ucNumEntries;
+-    UVDClockInfo entries[1];
++	UCHAR ucNumEntries;
++	UVDClockInfo entries[] __counted_by(ucNumEntries);
+ }UVDClockInfoArray;
+ 
+ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+@@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+ 
+ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+ {
+-    UCHAR numEntries;
+-    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
++	UCHAR numEntries;
++	ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries);
+ }ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+ 
+ typedef struct _ATOM_PPLIB_UVD_Table
+@@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+ }ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+ 
+ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+-    UCHAR numEntries;
+-    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
++	UCHAR numEntries;
++	ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
+ }ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+ 
+ typedef struct _ATOM_PPLIB_SAMU_Table
+@@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+ }ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+ 
+ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+-    UCHAR numEntries;
+-    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
++	UCHAR numEntries;
++	ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries);
+ }ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+ 
+ typedef struct _ATOM_PPLIB_ACP_Table
+@@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{
+ } ATOM_PPLIB_VQ_Budgeting_Record;
+ 
+ typedef struct ATOM_PPLIB_VQ_Budgeting_Table {
+-    UCHAR revid;
+-    UCHAR numEntries;
+-    ATOM_PPLIB_VQ_Budgeting_Record         entries[1];
++	UCHAR revid;
++	UCHAR numEntries;
++	ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries);
+ } ATOM_PPLIB_VQ_Budgeting_Table;
+ 
+ #pragma pack()
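The pptable.h conversions above all follow one recipe: replace the old one-element trailing arrays with true flexible array members and, where the element size is fixed and the count precedes the array, annotate them with __counted_by() so FORTIFY_SOURCE and UBSAN bounds checking can compare accesses against the runtime count. Generic sketch (the names are placeholders, not the pptable types):

	struct entry {
		int value;
	};

	struct entry_table {
		unsigned char num_entries;
		struct entry entries[] __counted_by(num_entries);
	};

	/* Allocation then sizes off the real count:
	 *	tbl = kmalloc(struct_size(tbl, entries, n), GFP_KERNEL);
	 *	tbl->num_entries = n;
	 */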
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 65333141b1c1b..5a2247018229c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -323,6 +323,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int smu_set_mall_enable(struct smu_context *smu)
++{
++	int ret = 0;
++
++	if (!smu->ppt_funcs->set_mall_enable)
++		return 0;
++
++	ret = smu->ppt_funcs->set_mall_enable(smu);
++
++	return ret;
++}
++
+ /**
+  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+  *
+@@ -1785,6 +1797,7 @@ static int smu_hw_init(void *handle)
+ 		smu_dpm_set_jpeg_enable(smu, true);
+ 		smu_dpm_set_vpe_enable(smu, true);
+ 		smu_dpm_set_umsch_mm_enable(smu, true);
++		smu_set_mall_enable(smu);
+ 		smu_set_gfx_cgpg(smu, true);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+index 1fa81575788c5..8667e8c9d7e7c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+@@ -1391,6 +1391,11 @@ struct pptable_funcs {
+ 	 */
+ 	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
+ 
++	/**
++	 * @set_mall_enable: Init MALL power gating control.
++	 */
++	int (*set_mall_enable)(struct smu_context *smu);
++
+ 	/**
+ 	 * @notify_rlc_state: Notify RLC power state to SMU.
+ 	 */
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+index c4dc5881d8df0..e7f5ef49049f9 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+@@ -106,8 +106,8 @@
+ #define PPSMC_MSG_DisableLSdma                  0x35 ///< Disable LSDMA
+ #define PPSMC_MSG_SetSoftMaxVpe                 0x36 ///<
+ #define PPSMC_MSG_SetSoftMinVpe                 0x37 ///<
+-#define PPSMC_MSG_AllocMALLCache                0x38 ///< Allocating MALL Cache
+-#define PPSMC_MSG_ReleaseMALLCache              0x39 ///< Releasing MALL Cache
++#define PPSMC_MSG_MALLPowerController           0x38 ///< Set MALL control
++#define PPSMC_MSG_MALLPowerState                0x39 ///< Enter/Exit MALL PG
+ #define PPSMC_Message_Count                     0x3A ///< Total number of PPSMC messages
+ /** @}*/
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+index af427cc7dbb84..4a7404856b960 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+@@ -272,7 +272,9 @@
+ 	__SMU_DUMMY_MAP(SetSoftMinVpe), \
+ 	__SMU_DUMMY_MAP(GetMetricsVersion), \
+ 	__SMU_DUMMY_MAP(EnableUCLKShadow), \
+-	__SMU_DUMMY_MAP(RmaDueToBadPageThreshold),
++	__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
++	__SMU_DUMMY_MAP(MALLPowerController), \
++	__SMU_DUMMY_MAP(MALLPowerState),
+ 
+ #undef __SMU_DUMMY_MAP
+ #define __SMU_DUMMY_MAP(type)	SMU_MSG_##type
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+index 63399c00cc28f..20f3861b5eeac 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+@@ -52,6 +52,19 @@
+ #define mmMP1_SMN_C2PMSG_90			0x029a
+ #define mmMP1_SMN_C2PMSG_90_BASE_IDX		    0
+ 
++/* MALLPowerController message arguments (Defines for the Cache mode control) */
++#define SMU_MALL_PMFW_CONTROL 0
++#define SMU_MALL_DRIVER_CONTROL 1
++
++/*
++ * MALLPowerState message arguments
++ * (Defines for the Allocate/Release Cache mode if in driver mode)
++ */
++#define SMU_MALL_EXIT_PG 0
++#define SMU_MALL_ENTER_PG 1
++
++#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON
++
+ #define FEATURE_MASK(feature) (1ULL << feature)
+ #define SMC_DPM_FEATURE ( \
+ 	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
+@@ -66,6 +79,12 @@
+ 	FEATURE_MASK(FEATURE_GFX_DPM_BIT)	| \
+ 	FEATURE_MASK(FEATURE_VPE_DPM_BIT))
+ 
++enum smu_mall_pg_config {
++	SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0,
++	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1,
++	SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2,
++};
++
+ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = {
+ 	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,				1),
+ 	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetPmfwVersion,			1),
+@@ -113,6 +132,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] =
+ 	MSG_MAP(PowerDownUmsch,                 PPSMC_MSG_PowerDownUmsch,			1),
+ 	MSG_MAP(SetSoftMaxVpe,                  PPSMC_MSG_SetSoftMaxVpe,			1),
+ 	MSG_MAP(SetSoftMinVpe,                  PPSMC_MSG_SetSoftMinVpe,			1),
++	MSG_MAP(MALLPowerController,            PPSMC_MSG_MALLPowerController,		1),
++	MSG_MAP(MALLPowerState,                 PPSMC_MSG_MALLPowerState,			1),
+ };
+ 
+ static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
+@@ -1417,6 +1438,57 @@ static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_cl
+ 	return 0;
+ }
+ 
++static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config)
++{
++	struct amdgpu_device *adev = smu->adev;
++	int ret = 0;
++
++	if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) {
++		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
++								SMU_MALL_PMFW_CONTROL, NULL);
++		if (ret) {
++			dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n");
++			return ret;
++		}
++	} else {
++		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController,
++								SMU_MALL_DRIVER_CONTROL, NULL);
++		if (ret) {
++			dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n");
++			return ret;
++		}
++
++		if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) {
++			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
++									SMU_MALL_EXIT_PG, NULL);
++			if (ret) {
++				dev_err(adev->dev, "EXIT MALL PG Failure\n");
++				return ret;
++			}
++		} else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) {
++			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState,
++									SMU_MALL_ENTER_PG, NULL);
++			if (ret) {
++				dev_err(adev->dev, "Enter MALL PG Failure\n");
++				return ret;
++			}
++		}
++	}
++
++	return ret;
++}
++
++static int smu_v14_0_common_set_mall_enable(struct smu_context *smu)
++{
++	enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT;
++	int ret = 0;
++
++	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
++		ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config);
++
++	return ret;
++}
++
+ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
+ 	.check_fw_status = smu_v14_0_check_fw_status,
+ 	.check_fw_version = smu_v14_0_check_fw_version,
+@@ -1448,6 +1520,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
+ 	.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
+ 	.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
+ 	.get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
++	.set_mall_enable = smu_v14_0_common_set_mall_enable,
+ };
+ 
+ static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 426bbee2d9f5e..5db52d6c5c35c 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* AYA NEO KUN */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++		  DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++		},
++		.driver_data = (void *)&lcd1600x2560_rightside_up,
+ 	}, {	/* Chuwi HiBook (CWI514) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
+index f48c4343f4690..3e6d4c6aa877e 100644
+--- a/drivers/gpu/drm/exynos/exynos_dp.c
++++ b/drivers/gpu/drm/exynos/exynos_dp.c
+@@ -285,7 +285,6 @@ struct platform_driver dp_driver = {
+ 	.remove_new	= exynos_dp_remove,
+ 	.driver		= {
+ 		.name	= "exynos-dp",
+-		.owner	= THIS_MODULE,
+ 		.pm	= pm_ptr(&exynos_dp_pm_ops),
+ 		.of_match_table = exynos_dp_match,
+ 	},
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 74832c2130921..0b570e194079a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -950,6 +950,13 @@ static void mtk_drm_remove(struct platform_device *pdev)
+ 		of_node_put(private->comp_node[i]);
+ }
+ 
++static void mtk_drm_shutdown(struct platform_device *pdev)
++{
++	struct mtk_drm_private *private = platform_get_drvdata(pdev);
++
++	drm_atomic_helper_shutdown(private->drm);
++}
++
+ static int mtk_drm_sys_prepare(struct device *dev)
+ {
+ 	struct mtk_drm_private *private = dev_get_drvdata(dev);
+@@ -981,6 +988,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
+ static struct platform_driver mtk_drm_platform_driver = {
+ 	.probe	= mtk_drm_probe,
+ 	.remove_new = mtk_drm_remove,
++	.shutdown = mtk_drm_shutdown,
+ 	.driver	= {
+ 		.name	= "mediatek-drm",
+ 		.pm     = &mtk_drm_pm_ops,
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index 3fec3acdaf284..27225d1fe8d2e 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -641,7 +641,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+ 	if (r)
+ 		goto error_unlock;
+ 
+-	if (bo_va->it.start)
++	if (bo_va->it.start && bo_va->bo)
+ 		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
+ 
+ error_unlock:
+diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+index e83c3e52251de..0250d5f00bf10 100644
+--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
++++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+@@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev)
+ 	drm_kms_helper_poll_fini(ddev);
+ }
+ 
++static void shmob_drm_shutdown(struct platform_device *pdev)
++{
++	struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
++
++	drm_atomic_helper_shutdown(&sdev->ddev);
++}
++
+ static int shmob_drm_probe(struct platform_device *pdev)
+ {
+ 	struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
+@@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
+ static struct platform_driver shmob_drm_platform_driver = {
+ 	.probe		= shmob_drm_probe,
+ 	.remove_new	= shmob_drm_remove,
++	.shutdown	= shmob_drm_shutdown,
+ 	.driver		= {
+ 		.name	= "shmob-drm",
+ 		.of_match_table = of_match_ptr(shmob_drm_of_table),
+diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
+index faddae3d6ac2e..6f1ac940cbae7 100644
+--- a/drivers/gpu/drm/vmwgfx/Kconfig
++++ b/drivers/gpu/drm/vmwgfx/Kconfig
+@@ -2,7 +2,7 @@
+ config DRM_VMWGFX
+ 	tristate "DRM driver for VMware Virtual GPU"
+ 	depends on DRM && PCI && MMU
+-	depends on X86 || ARM64
++	depends on (X86 && HYPERVISOR_GUEST) || ARM64
+ 	select DRM_TTM
+ 	select DRM_TTM_HELPER
+ 	select MAPPING_DIRTY_HELPERS
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 7dd83ec74f8a9..5302bfd527d86 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -974,6 +974,8 @@ static const char *keys[KEY_MAX + 1] = {
+ 	[KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable",
+ 	[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
+ 	[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
++	[KEY_ACCESSIBILITY] = "Accessibility",
++	[KEY_DO_NOT_DISTURB] = "DoNotDisturb",
+ 	[KEY_DICTATE] = "Dictate",
+ 	[KEY_MICMUTE] = "MicrophoneMute",
+ 	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 68b0f39deaa9a..8eb073dea3593 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -421,6 +421,8 @@
+ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG  0x29DF
+ #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
+ #define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82
++#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C
++#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116
+ #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN	0x2544
+ #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN	0x2706
+ #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index e03d300d2bac4..c9094a4f281e9 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+@@ -833,9 +837,18 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 		}
+ 
++		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl */
++			switch (usage->hid & 0xf) {
++			case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
++			default: goto ignore;
++			}
++			break;
++		}
++
+ 		if ((usage->hid & 0xf0) == 0xa0) {	/* SystemControl */
+ 			switch (usage->hid & 0xf) {
+ 			case 0x9: map_key_clear(KEY_MICMUTE); break;
++			case 0xa: map_key_clear(KEY_ACCESSIBILITY); break;
+ 			default: goto ignore;
+ 			}
+ 			break;
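Both blocks above decode a Generic Desktop usage ID by nibble: the high nibble selects the 0x9x or 0xAx group and the low nibble selects the entry within it, so usage 0x9B lands on KEY_DO_NOT_DISTURB and 0xAA on KEY_ACCESSIBILITY. A tiny standalone illustration of the mask arithmetic (ordinary userspace C, not HID-core code):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int usages[] = { 0x9b, 0xa9, 0xaa };

		for (unsigned int i = 0; i < 3; i++) {
			unsigned int hid = usages[i];

			/* high nibble = group, low nibble = key within it */
			printf("usage 0x%02x -> group 0x%x0, entry 0x%x\n",
			       hid, (hid & 0xf0) >> 4, hid & 0x0f);
		}
		return 0;
	}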
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 6fadaddb2b908..3a5af0909233a 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -209,6 +209,7 @@ static const struct xpad_device {
+ 	{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ 	{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+ 	{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
++	{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
+ 	{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
+ 	{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
+ 	{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 4e38229404b4b..b4723ea395eb9 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1476,16 +1476,47 @@ static void elantech_disconnect(struct psmouse *psmouse)
+ 	psmouse->private = NULL;
+ }
+ 
++/*
++ * Some hw_version 4 models fail to properly activate absolute mode on
++ * resume without going through a disable/enable cycle.
++ */
++static const struct dmi_system_id elantech_needs_reenable[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++	{
++		/* Lenovo N24 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "81AF"),
++		},
++	},
++#endif
++	{ }
++};
++
+ /*
+  * Put the touchpad back into absolute mode when reconnecting
+  */
+ static int elantech_reconnect(struct psmouse *psmouse)
+ {
++	int err;
++
+ 	psmouse_reset(psmouse);
+ 
+ 	if (elantech_detect(psmouse, 0))
+ 		return -1;
+ 
++	if (dmi_check_system(elantech_needs_reenable)) {
++		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE);
++		if (err)
++			psmouse_warn(psmouse, "failed to deactivate mouse on %s: %d\n",
++				     psmouse->ps2dev.serio->phys, err);
++
++		err = ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
++		if (err)
++			psmouse_warn(psmouse, "failed to reactivate mouse on %s: %d\n",
++				     psmouse->ps2dev.serio->phys, err);
++	}
++
+ 	if (elantech_set_absolute_mode(psmouse)) {
+ 		psmouse_err(psmouse,
+ 			    "failed to put touchpad back into absolute mode.\n");
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index dfc6c581873b7..5b50475ec4140 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -76,7 +76,7 @@ static inline void i8042_write_command(int val)
+ #define SERIO_QUIRK_PROBE_DEFER		BIT(5)
+ #define SERIO_QUIRK_RESET_ALWAYS	BIT(6)
+ #define SERIO_QUIRK_RESET_NEVER		BIT(7)
+-#define SERIO_QUIRK_DIECT		BIT(8)
++#define SERIO_QUIRK_DIRECT		BIT(8)
+ #define SERIO_QUIRK_DUMBKBD		BIT(9)
+ #define SERIO_QUIRK_NOLOOP		BIT(10)
+ #define SERIO_QUIRK_NOTIMEOUT		BIT(11)
+@@ -1332,6 +1332,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * The Ayaneo Kun is a handheld device where some of the buttons
++		 * are handled by an AT keyboard. The keyboard is usually
++		 * detected as raw, but sometimes, usually after a cold boot,
++		 * it is detected as translated. Make sure that the keyboard
++		 * is always in raw mode.
++		 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
++			DMI_MATCH(DMI_BOARD_NAME, "KUN"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_DIRECT)
++	},
+ 	{ }
+ };
+ 
+@@ -1655,7 +1669,7 @@ static void __init i8042_check_quirks(void)
+ 		if (quirks & SERIO_QUIRK_RESET_NEVER)
+ 			i8042_reset = I8042_RESET_NEVER;
+ 	}
+-	if (quirks & SERIO_QUIRK_DIECT)
++	if (quirks & SERIO_QUIRK_DIRECT)
+ 		i8042_direct = true;
+ 	if (quirks & SERIO_QUIRK_DUMBKBD)
+ 		i8042_dumbkbd = true;
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index d2bbb436a77df..4d13db13b9e57 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -1111,6 +1111,16 @@ static const struct of_device_id ads7846_dt_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(of, ads7846_dt_ids);
+ 
++static const struct spi_device_id ads7846_spi_ids[] = {
++	{ "tsc2046", 7846 },
++	{ "ads7843", 7843 },
++	{ "ads7845", 7845 },
++	{ "ads7846", 7846 },
++	{ "ads7873", 7873 },
++	{ },
++};
++MODULE_DEVICE_TABLE(spi, ads7846_spi_ids);
++
+ static const struct ads7846_platform_data *ads7846_get_props(struct device *dev)
+ {
+ 	struct ads7846_platform_data *pdata;
+@@ -1386,10 +1396,10 @@ static struct spi_driver ads7846_driver = {
+ 	},
+ 	.probe		= ads7846_probe,
+ 	.remove		= ads7846_remove,
++	.id_table	= ads7846_spi_ids,
+ };
+ 
+ module_spi_driver(ads7846_driver);
+ 
+ MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("spi:ads7846");
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index 62f562ad50263..050fa9ca4ec94 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -71,7 +71,6 @@ struct silead_ts_data {
+ 	struct regulator_bulk_data regulators[2];
+ 	char fw_name[64];
+ 	struct touchscreen_properties prop;
+-	u32 max_fingers;
+ 	u32 chip_id;
+ 	struct input_mt_pos pos[SILEAD_MAX_FINGERS];
+ 	int slots[SILEAD_MAX_FINGERS];
+@@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
+ 	touchscreen_parse_properties(data->input, true, &data->prop);
+ 	silead_apply_efi_fw_min_max(data);
+ 
+-	input_mt_init_slots(data->input, data->max_fingers,
++	input_mt_init_slots(data->input, SILEAD_MAX_FINGERS,
+ 			    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
+ 			    INPUT_MT_TRACK);
+ 
+@@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client)
+ 		return;
+ 	}
+ 
+-	if (buf[0] > data->max_fingers) {
++	if (buf[0] > SILEAD_MAX_FINGERS) {
+ 		dev_warn(dev, "More touches reported then supported %d > %d\n",
+-			 buf[0], data->max_fingers);
+-		buf[0] = data->max_fingers;
++			 buf[0], SILEAD_MAX_FINGERS);
++		buf[0] = SILEAD_MAX_FINGERS;
+ 	}
+ 
+ 	if (silead_ts_handle_pen_data(data, buf))
+@@ -315,7 +314,6 @@ static void silead_ts_read_data(struct i2c_client *client)
+ 
+ static int silead_ts_init(struct i2c_client *client)
+ {
+-	struct silead_ts_data *data = i2c_get_clientdata(client);
+ 	int error;
+ 
+ 	error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+@@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client)
+ 	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+ 
+ 	error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
+-					data->max_fingers);
++					  SILEAD_MAX_FINGERS);
+ 	if (error)
+ 		goto i2c_write_err;
+ 	usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+@@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client)
+ 	const char *str;
+ 	int error;
+ 
+-	error = device_property_read_u32(dev, "silead,max-fingers",
+-					 &data->max_fingers);
+-	if (error) {
+-		dev_dbg(dev, "Max fingers read error %d\n", error);
+-		data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
+-	}
+-
+ 	error = device_property_read_string(dev, "firmware-name", &str);
+ 	if (!error)
+ 		snprintf(data->fw_name, sizeof(data->fw_name),
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 79e6f3c1341fe..40c3fe26f76df 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
+ 	}
+ 
+ 	if (!mei_cl_is_connected(cl)) {
+-		cl_err(dev, cl, "is not connected");
++		cl_dbg(dev, cl, "is not connected");
+ 		rets = -ENODEV;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index ffc3e93292501..024169461cad0 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -295,7 +295,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+ 	}
+ 	usb_free_urb(urb);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 722bb724361c2..664baedfe7d13 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2478,6 +2478,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+ 
+ 	tx_buff = &tx_pool->tx_buff[bufidx];
++
++	/* Sanity checks on our free map to make sure it points to an index
++	 * that is not being occupied by another skb. If skb memory is
++	 * not freed then we see congestion control kick in and halt tx.
++	 */
++	if (unlikely(tx_buff->skb)) {
++		dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
++				     skb_is_gso(skb) ? "tso_pool" : "tx_pool",
++				     queue_num, bufidx);
++		dev_kfree_skb_any(tx_buff->skb);
++	}
++
+ 	tx_buff->skb = skb;
+ 	tx_buff->index = bufidx;
+ 	tx_buff->pool_index = queue_num;
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index cebc79a710ec2..6340e5e61a7da 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -6906,6 +6906,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
+ 	/* 9704 == 9728 - 20 and rounding to 8 */
+ 	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ 	device_set_node(&dev->dev, port_fwnode);
++	dev->dev_port = port->id;
+ 
+ 	port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ 	port->pcs_gmac.neg_mode = true;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+index a85ac039d779b..87d5776e3b88e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+ 	} else if (lvl == NIX_TXSCH_LVL_TL4) {
+ 		parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
+ 		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
+-		req->regval[0] = parent << 16;
++		req->regval[0] = (u64)parent << 16;
+ 		req->num_regs++;
+ 		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ 		req->regval[1] = dwrr_val;
+ 	} else if (lvl == NIX_TXSCH_LVL_TL3) {
+ 		parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
+ 		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
+-		req->regval[0] = parent << 16;
++		req->regval[0] = (u64)parent << 16;
+ 		req->num_regs++;
+ 		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ 		req->regval[1] = dwrr_val;
+@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+ 	} else if (lvl == NIX_TXSCH_LVL_TL2) {
+ 		parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
+ 		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
+-		req->regval[0] = parent << 16;
++		req->regval[0] = (u64)parent << 16;
+ 
+ 		req->num_regs++;
+ 		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
+-		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
++		req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
+ 
+ 		if (lvl == hw->txschq_link_cfg_lvl) {
+ 			req->num_regs++;
+@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
+ 
+ 		req->num_regs++;
+ 		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+-		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
++		req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
+ 
+ 		req->num_regs++;
+ 		req->reg[2] = NIX_AF_TL1X_CIR(schq);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+index 45a32e4b49d1c..e3aee6e362151 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+@@ -139,33 +139,34 @@
+ #define	NIX_LF_CINTX_ENA_W1C(a)		(NIX_LFBASE | 0xD50 | (a) << 12)
+ 
+ /* NIX AF transmit scheduler registers */
+-#define NIX_AF_SMQX_CFG(a)		(0x700 | (a) << 16)
+-#define NIX_AF_TL1X_SCHEDULE(a)		(0xC00 | (a) << 16)
+-#define NIX_AF_TL1X_CIR(a)		(0xC20 | (a) << 16)
+-#define NIX_AF_TL1X_TOPOLOGY(a)		(0xC80 | (a) << 16)
+-#define NIX_AF_TL2X_PARENT(a)		(0xE88 | (a) << 16)
+-#define NIX_AF_TL2X_SCHEDULE(a)		(0xE00 | (a) << 16)
+-#define NIX_AF_TL2X_TOPOLOGY(a)		(0xE80 | (a) << 16)
+-#define NIX_AF_TL2X_CIR(a)              (0xE20 | (a) << 16)
+-#define NIX_AF_TL2X_PIR(a)              (0xE30 | (a) << 16)
+-#define NIX_AF_TL3X_PARENT(a)		(0x1088 | (a) << 16)
+-#define NIX_AF_TL3X_SCHEDULE(a)		(0x1000 | (a) << 16)
+-#define NIX_AF_TL3X_SHAPE(a)		(0x1010 | (a) << 16)
+-#define NIX_AF_TL3X_CIR(a)		(0x1020 | (a) << 16)
+-#define NIX_AF_TL3X_PIR(a)		(0x1030 | (a) << 16)
+-#define NIX_AF_TL3X_TOPOLOGY(a)		(0x1080 | (a) << 16)
+-#define NIX_AF_TL4X_PARENT(a)		(0x1288 | (a) << 16)
+-#define NIX_AF_TL4X_SCHEDULE(a)		(0x1200 | (a) << 16)
+-#define NIX_AF_TL4X_SHAPE(a)		(0x1210 | (a) << 16)
+-#define NIX_AF_TL4X_CIR(a)		(0x1220 | (a) << 16)
+-#define NIX_AF_TL4X_PIR(a)		(0x1230 | (a) << 16)
+-#define NIX_AF_TL4X_TOPOLOGY(a)		(0x1280 | (a) << 16)
+-#define NIX_AF_MDQX_SCHEDULE(a)		(0x1400 | (a) << 16)
+-#define NIX_AF_MDQX_SHAPE(a)		(0x1410 | (a) << 16)
+-#define NIX_AF_MDQX_CIR(a)		(0x1420 | (a) << 16)
+-#define NIX_AF_MDQX_PIR(a)		(0x1430 | (a) << 16)
+-#define NIX_AF_MDQX_PARENT(a)		(0x1480 | (a) << 16)
+-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)	(0x1700 | (a) << 16 | (b) << 3)
++#define NIX_AF_SMQX_CFG(a)		(0x700 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SDP_LINK_CFG(a)	(0xB10 | (u64)(a) << 16)
++#define NIX_AF_TL1X_SCHEDULE(a)		(0xC00 | (u64)(a) << 16)
++#define NIX_AF_TL1X_CIR(a)		(0xC20 | (u64)(a) << 16)
++#define NIX_AF_TL1X_TOPOLOGY(a)		(0xC80 | (u64)(a) << 16)
++#define NIX_AF_TL2X_PARENT(a)		(0xE88 | (u64)(a) << 16)
++#define NIX_AF_TL2X_SCHEDULE(a)		(0xE00 | (u64)(a) << 16)
++#define NIX_AF_TL2X_TOPOLOGY(a)		(0xE80 | (u64)(a) << 16)
++#define NIX_AF_TL2X_CIR(a)		(0xE20 | (u64)(a) << 16)
++#define NIX_AF_TL2X_PIR(a)		(0xE30 | (u64)(a) << 16)
++#define NIX_AF_TL3X_PARENT(a)		(0x1088 | (u64)(a) << 16)
++#define NIX_AF_TL3X_SCHEDULE(a)		(0x1000 | (u64)(a) << 16)
++#define NIX_AF_TL3X_SHAPE(a)		(0x1010 | (u64)(a) << 16)
++#define NIX_AF_TL3X_CIR(a)		(0x1020 | (u64)(a) << 16)
++#define NIX_AF_TL3X_PIR(a)		(0x1030 | (u64)(a) << 16)
++#define NIX_AF_TL3X_TOPOLOGY(a)		(0x1080 | (u64)(a) << 16)
++#define NIX_AF_TL4X_PARENT(a)		(0x1288 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SCHEDULE(a)		(0x1200 | (u64)(a) << 16)
++#define NIX_AF_TL4X_SHAPE(a)		(0x1210 | (u64)(a) << 16)
++#define NIX_AF_TL4X_CIR(a)		(0x1220 | (u64)(a) << 16)
++#define NIX_AF_TL4X_PIR(a)		(0x1230 | (u64)(a) << 16)
++#define NIX_AF_TL4X_TOPOLOGY(a)		(0x1280 | (u64)(a) << 16)
++#define NIX_AF_MDQX_SCHEDULE(a)		(0x1400 | (u64)(a) << 16)
++#define NIX_AF_MDQX_SHAPE(a)		(0x1410 | (u64)(a) << 16)
++#define NIX_AF_MDQX_CIR(a)		(0x1420 | (u64)(a) << 16)
++#define NIX_AF_MDQX_PIR(a)		(0x1430 | (u64)(a) << 16)
++#define NIX_AF_MDQX_PARENT(a)		(0x1480 | (u64)(a) << 16)
++#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b)	(0x1700 | (u64)(a) << 16 | (b) << 3)
+ 
+ /* LMT LF registers */
+ #define LMT_LFBASE			BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
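The (u64) casts above exist because the scheduler queue index `a` arrives as a 32-bit int: `(a) << 16` is computed in 32 bits first, so a large index shifts into the sign bit and then sign-extends when the result is widened into the 64-bit register offset. Casting before shifting keeps all 64 bits clean. A self-contained demonstration (strictly, the un-cast shift is undefined behaviour in ISO C; the results described in the comments are what common compilers produce):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int a = 0x8000;			/* plausible large index */

		/* 32-bit shift: 0x8000 << 16 hits the sign bit, then the
		 * negative int sign-extends into the upper 32 bits.
		 */
		uint64_t bad  = 0x700 | a << 16;
		/* 64-bit shift: high bits stay zero. */
		uint64_t good = 0x700 | (uint64_t)a << 16;

		printf("bad  = 0x%016llx\n", (unsigned long long)bad);
		printf("good = 0x%016llx\n", (unsigned long long)good);
		return 0;
	}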
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index 04a49b9b545f3..0ca9f2ffd932d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -510,7 +510,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ 
+ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+ {
+-	struct dim_sample dim_sample;
++	struct dim_sample dim_sample = { 0 };
+ 	u64 rx_frames, rx_bytes;
+ 	u64 tx_frames, tx_bytes;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+index 6cddb4da85b71..4995a2d54d7d0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ 		num_regs++;
+ 
+ 		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+-
+ 	} else if (level == NIX_TXSCH_LVL_TL4) {
+ 		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
+ 	} else if (level == NIX_TXSCH_LVL_TL3) {
+@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
+ 		/* check if node is root */
+ 		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
+ 			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
+-			cfg->regval[num_regs] =  TXSCH_TL1_DFLT_RR_PRIO << 24 |
++			cfg->regval[num_regs] =  (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
+ 						 mtu_to_dwrr_weight(pfvf,
+ 								    pfvf->tx_max_pktlen);
+ 			num_regs++;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index a5469cf5cf670..befbca01bfe37 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1380,6 +1380,8 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)},	/* Telit FN912 series */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)},	/* Telit FN912 series */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
+ 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 52518a47554e7..6f16b5b33f0c0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ 					 void *_data)
+ {
+ 	struct wowlan_key_gtk_type_iter *data = _data;
++	__le32 *cipher = NULL;
++
++	if (key->keyidx == 4 || key->keyidx == 5)
++		cipher = &data->kek_kck_cmd->igtk_cipher;
++	if (key->keyidx == 6 || key->keyidx == 7)
++		cipher = &data->kek_kck_cmd->bigtk_cipher;
+ 
+ 	switch (key->cipher) {
+ 	default:
+@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ 		return;
+ 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
++		if (cipher)
++			*cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ 		return;
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
+-		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
++	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
++		if (cipher)
++			*cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ 		return;
+ 	case WLAN_CIPHER_SUITE_CCMP:
+ 		if (!sta)
+@@ -2182,7 +2191,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ 
+ out:
+ 	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+-				    WOWLAN_GET_STATUSES, 0) < 10) {
++				    WOWLAN_GET_STATUSES,
++				    IWL_FW_CMD_VER_UNKNOWN) < 10) {
+ 		mvmvif->seqno_valid = true;
+ 		/* +0x10 because the set API expects next-to-use, not last-used */
+ 		mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5f6b16d3fc8a3..81bc0878a61cc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -651,7 +651,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ 
+ 	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
+-				  IWL_FW_CMD_VER_UNKNOWN) == 3)
++				  IWL_FW_CMD_VER_UNKNOWN) >= 3)
+ 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+ 
+ 	if (fw_has_api(&mvm->fw->ucode_capa,
+@@ -1115,6 +1115,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ 	RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
+ }
+ 
++static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
++{
++	struct iwl_mvm *mvm = data;
++	struct iwl_mvm_sta *mvm_sta;
++	struct ieee80211_vif *vif;
++	int link_id;
++
++	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
++	vif = mvm_sta->vif;
++
++	if (!sta->valid_links)
++		return;
++
++	for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
++		struct iwl_mvm_link_sta *mvm_link_sta;
++
++		mvm_link_sta =
++			rcu_dereference_check(mvm_sta->link[link_id],
++					      lockdep_is_held(&mvm->mutex));
++		if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
++			/*
++			 * We have a link STA but the link is inactive in
++			 * mac80211. This will happen if we failed to
++			 * deactivate the link but mac80211 rolled back the
++			 * deactivation of the link.
++			 * Delete the stale data to avoid issues later on.
++			 */
++			iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
++						  link_id, false);
++		}
++	}
++}
++
+ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+ {
+ 	iwl_mvm_stop_device(mvm);
+@@ -1137,6 +1170,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+ 	 */
+ 	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+ 
++	/* cleanup stations as links may be gone after restart */
++	ieee80211_iterate_stations_atomic(mvm->hw,
++					  iwl_mvm_cleanup_sta_iterator, mvm);
++
+ 	mvm->p2p_device_vif = NULL;
+ 
+ 	iwl_mvm_reset_phy_ctxts(mvm);
+@@ -6127,7 +6164,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ 		.len[0] = sizeof(cmd),
+ 		.data[1] = data,
+ 		.len[1] = size,
+-		.flags = sync ? 0 : CMD_ASYNC,
++		.flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
+ 	};
+ 	int ret;
+ 
+@@ -6152,11 +6189,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ 	if (sync) {
+ 		lockdep_assert_held(&mvm->mutex);
+ 		ret = wait_event_timeout(mvm->rx_sync_waitq,
+-					 READ_ONCE(mvm->queue_sync_state) == 0 ||
+-					 iwl_mvm_is_radio_hw_killed(mvm),
++					 READ_ONCE(mvm->queue_sync_state) == 0,
+ 					 SYNC_RX_QUEUE_TIMEOUT);
+-		WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
+-			  "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
++		WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ 			  mvm->queue_sync_state,
+ 			  mvm->queue_sync_cookie);
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+index 36dc291d98dd6..dbe668db7ce37 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 	return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
+ }
+ 
+-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+-				      struct iwl_mvm_sta *mvm_sta,
+-				      struct iwl_mvm_link_sta *mvm_sta_link,
+-				      unsigned int link_id,
+-				      bool is_in_fw)
++void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
++			       struct iwl_mvm_sta *mvm_sta,
++			       struct iwl_mvm_link_sta *mvm_sta_link,
++			       unsigned int link_id,
++			       bool is_in_fw)
+ {
+ 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
+ 			 is_in_fw ? ERR_PTR(-EINVAL) : NULL);
+@@ -1012,7 +1012,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
+ 
+ 		cmd.modify.tid = cpu_to_le32(data->tid);
+ 
+-		ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
++		ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
++					   sizeof(cmd), &cmd);
+ 		data->sta_mask = new_sta_mask;
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index a93981cb9714f..7f5685a4838cb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1854,12 +1854,10 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+ 	bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
+ 	bool unified = iwl_mvm_has_unified_ucode(mvm);
+ 
+-	if (state) {
++	if (state)
+ 		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+-		wake_up(&mvm->rx_sync_waitq);
+-	} else {
++	else
+ 		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+-	}
+ 
+ 	iwl_mvm_set_rfkill_state(mvm);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 525d8efcc1475..aa5fa6c657c02 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1717,7 +1717,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
+ 				break;
+ 		}
+ 
+-		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
++		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
++		    !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
++			       "scan: invalid BSSID at index %u, index_b=%u\n",
++			       j, idex_b)) {
+ 			memcpy(&pp->bssid_array[idex_b++],
+ 			       scan_6ghz_params[j].bssid, ETH_ALEN);
+ 		}
+@@ -3251,10 +3254,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+ 
+ 	ret = iwl_mvm_send_cmd_pdu(mvm,
+ 				   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+-				   0, sizeof(cmd), &cmd);
++				   CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
+ 	if (!ret)
+ 		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ 
++	IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 491c449fd4316..908d0bc474da6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2836,7 +2836,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ 		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ 				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ 	};
+-	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
++	struct iwl_host_cmd hcmd = {
++		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
++		.flags = CMD_SEND_IN_RFKILL,
++		.len[0] = sizeof(cmd),
++		.data[0] = &cmd,
++	};
+ 	int ret;
+ 
+ 	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+@@ -2848,7 +2853,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ 		cmd.alloc.ssn = cpu_to_le16(ssn);
+ 		cmd.alloc.win_size = cpu_to_le16(buf_size);
+ 		baid = -EIO;
+-	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
++	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
+ 		cmd.remove_v1.baid = cpu_to_le32(baid);
+ 		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ 	} else {
+@@ -2857,8 +2862,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ 		cmd.remove.tid = cpu_to_le32(tid);
+ 	}
+ 
+-	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+-					  &cmd, &baid);
++	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+index b3450569864eb..7dd8f7f4b4492 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+@@ -638,6 +638,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 			   struct ieee80211_sta *sta);
+ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ 		       struct ieee80211_sta *sta);
++void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
++			       struct iwl_mvm_sta *mvm_sta,
++			       struct iwl_mvm_link_sta *mvm_sta_link,
++			       unsigned int link_id,
++			       bool is_in_fw);
+ int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
+ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
+ 				 struct ieee80211_vif *vif,
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d513fd27589df..36f30594b671f 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -998,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req)
+ 			clear_bit_unlock(0, &ctrl->discard_page_busy);
+ 		else
+ 			kfree(bvec_virt(&req->special_vec));
++		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 1f0ea1f32d22f..f6416f8553f03 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+ 	cmd.prop_get.offset = cpu_to_le32(off);
+ 
+ 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+-			NVME_QID_ANY, 0);
++			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
+ 
+ 	if (ret >= 0)
+ 		*val = le64_to_cpu(res.u64);
+@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+ 	cmd.prop_get.offset = cpu_to_le32(off);
+ 
+ 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+-			NVME_QID_ANY, 0);
++			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
+ 
+ 	if (ret >= 0)
+ 		*val = le64_to_cpu(res.u64);
+@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+ 	cmd.prop_set.value = cpu_to_le64(val);
+ 
+ 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
+-			NVME_QID_ANY, 0);
++			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
+ 	if (unlikely(ret))
+ 		dev_err(ctrl->device,
+ 			"Property Set error: %d, offset %#x\n",
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index d7bcc6d51e84e..3f2b0d41e4819 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -503,7 +503,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
+ enum nvme_ns_features {
+ 	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ 	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+-	NVME_NS_DEAC,		/* DEAC bit in Write Zeores supported */
++	NVME_NS_DEAC = 1 << 2,		/* DEAC bit in Write Zeores supported */
+ };
+ 
+ struct nvme_ns {
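The one-line change above fixes a subtle enum pitfall: without an explicit initializer, NVME_NS_DEAC follows `1 << 1` and silently becomes 3, which tests true against both of the other flags when AND-ed. Giving it `1 << 2` keeps every feature bit distinct. In miniature:

	#include <stdio.h>

	enum features_buggy {
		FEAT_A = 1 << 0,	/* 1 */
		FEAT_B = 1 << 1,	/* 2 */
		FEAT_C,			/* implicit: 3 == FEAT_A | FEAT_B (!) */
	};

	enum features_fixed {
		FIX_A = 1 << 0,
		FIX_B = 1 << 1,
		FIX_C = 1 << 2,		/* 4: a real single bit */
	};

	int main(void)
	{
		unsigned int flags = FEAT_C;

		/* An object flagged only with FEAT_C also "has" A and B. */
		printf("buggy: A=%d B=%d\n",
		       !!(flags & FEAT_A), !!(flags & FEAT_B));

		flags = FIX_C;
		printf("fixed: A=%d B=%d\n",
		       !!(flags & FIX_A), !!(flags & FIX_B));
		return 0;
	}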
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 06f0c587f3437..4ff460ba28263 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -957,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 	req->metadata_sg_cnt = 0;
+ 	req->transfer_len = 0;
+ 	req->metadata_len = 0;
++	req->cqe->result.u64 = 0;
+ 	req->cqe->status = 0;
+ 	req->cqe->sq_head = 0;
+ 	req->ns = NULL;
+diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
+index eb7785be0ca77..ee76491e8b12c 100644
+--- a/drivers/nvme/target/fabrics-cmd-auth.c
++++ b/drivers/nvme/target/fabrics-cmd-auth.c
+@@ -332,7 +332,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
+ 		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ 			 __func__, ctrl->cntlid, req->sq->qid,
+ 			 status, req->error_loc);
+-	req->cqe->result.u64 = 0;
+ 	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ 	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ 		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+@@ -515,8 +514,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
+ 	status = nvmet_copy_to_sgl(req, 0, d, al);
+ 	kfree(d);
+ done:
+-	req->cqe->result.u64 = 0;
+-
+ 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ 		nvmet_auth_sq_free(req->sq);
+ 	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
+index b23f4cf840bd5..f6714453b8bb3 100644
+--- a/drivers/nvme/target/fabrics-cmd.c
++++ b/drivers/nvme/target/fabrics-cmd.c
+@@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
+ 	if (status)
+ 		goto out;
+ 
+-	/* zero out initial completion result, assign values as needed */
+-	req->cqe->result.u32 = 0;
+-
+ 	if (c->recfmt != 0) {
+ 		pr_warn("invalid connect version (%d).\n",
+ 			le16_to_cpu(c->recfmt));
+@@ -304,9 +301,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
+ 	if (status)
+ 		goto out;
+ 
+-	/* zero out initial completion result, assign values as needed */
+-	req->cqe->result.u32 = 0;
+-
+ 	if (c->recfmt != 0) {
+ 		pr_warn("invalid connect version (%d).\n",
+ 			le16_to_cpu(c->recfmt));
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 174900072c18c..c94203ce65bb3 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/string.h>
+ #include <linux/slab.h>
+ 
++#include "of_private.h"
++
+ /**
+  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
+  * @dev: Device node of the device whose interrupt is to be mapped
+@@ -79,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent);
+ /*
+  * These interrupt controllers abuse interrupt-map for unspeakable
+  * reasons and rely on the core code to *ignore* it (the drivers do
+- * their own parsing of the property).
++ * their own parsing of the property). The PAsemi entry covers a
++ * nonsensical interrupt-map that is better left ignored.
+  *
+  * If you think of adding to the list for something *new*, think
+  * again. There is a high chance that you will be sent back to the
+@@ -93,9 +96,61 @@ static const char * const of_irq_imap_abusers[] = {
+ 	"fsl,ls1043a-extirq",
+ 	"fsl,ls1088a-extirq",
+ 	"renesas,rza1-irqc",
++	"pasemi,rootbus",
+ 	NULL,
+ };
+ 
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq)
++{
++	u32 intsize, addrsize;
++	struct device_node *np;
++
++	/* Get the interrupt parent */
++	if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
++		np = of_node_get(of_irq_dflt_pic);
++	else
++		np = of_find_node_by_phandle(be32_to_cpup(imap));
++	imap++;
++
++	/* Check if not found */
++	if (!np) {
++		pr_debug(" -> imap parent not found !\n");
++		return NULL;
++	}
++
++	/* Get #interrupt-cells and #address-cells of new parent */
++	if (of_property_read_u32(np, "#interrupt-cells",
++					&intsize)) {
++		pr_debug(" -> parent lacks #interrupt-cells!\n");
++		of_node_put(np);
++		return NULL;
++	}
++	if (of_property_read_u32(np, "#address-cells",
++					&addrsize))
++		addrsize = 0;
++
++	pr_debug(" -> intsize=%d, addrsize=%d\n",
++		intsize, addrsize);
++
++	/* Check for malformed properties */
++	if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)
++		|| (len < (addrsize + intsize))) {
++		of_node_put(np);
++		return NULL;
++	}
++
++	pr_debug(" -> imaplen=%d\n", len);
++
++	imap += addrsize + intsize;
++
++	out_irq->np = np;
++	for (int i = 0; i < intsize; i++)
++		out_irq->args[i] = be32_to_cpup(imap - intsize + i);
++	out_irq->args_count = intsize;
++
++	return imap;
++}
++
+ /**
+  * of_irq_parse_raw - Low level interrupt tree parsing
+  * @addr:	address specifier (start of "reg" property of the device) in be32 format
+@@ -112,12 +167,12 @@ static const char * const of_irq_imap_abusers[] = {
+  */
+ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ {
+-	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
++	struct device_node *ipar, *tnode, *old = NULL;
+ 	__be32 initial_match_array[MAX_PHANDLE_ARGS];
+ 	const __be32 *match_array = initial_match_array;
+-	const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
+-	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
+-	int imaplen, match, i, rc = -EINVAL;
++	const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
++	u32 intsize = 1, addrsize;
++	int i, rc = -EINVAL;
+ 
+ #ifdef DEBUG
+ 	of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+@@ -176,6 +231,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 	/* Now start the actual "proper" walk of the interrupt tree */
+ 	while (ipar != NULL) {
++		int imaplen, match;
++		const __be32 *imap, *oldimap, *imask;
++		struct device_node *newpar;
+ 		/*
+ 		 * Now check if cursor is an interrupt-controller and
+ 		 * if it is then we are done, unless there is an
+@@ -216,7 +274,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 		/* Parse interrupt-map */
+ 		match = 0;
+-		while (imaplen > (addrsize + intsize + 1) && !match) {
++		while (imaplen > (addrsize + intsize + 1)) {
+ 			/* Compare specifiers */
+ 			match = 1;
+ 			for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+@@ -224,74 +282,31 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 			pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
+ 
+-			/* Get the interrupt parent */
+-			if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
+-				newpar = of_node_get(of_irq_dflt_pic);
+-			else
+-				newpar = of_find_node_by_phandle(be32_to_cpup(imap));
+-			imap++;
+-			--imaplen;
+-
+-			/* Check if not found */
+-			if (newpar == NULL) {
+-				pr_debug(" -> imap parent not found !\n");
++			oldimap = imap;
++			imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq);
++			if (!imap)
+ 				goto fail;
+-			}
+-
+-			if (!of_device_is_available(newpar))
+-				match = 0;
+-
+-			/* Get #interrupt-cells and #address-cells of new
+-			 * parent
+-			 */
+-			if (of_property_read_u32(newpar, "#interrupt-cells",
+-						 &newintsize)) {
+-				pr_debug(" -> parent lacks #interrupt-cells!\n");
+-				goto fail;
+-			}
+-			if (of_property_read_u32(newpar, "#address-cells",
+-						 &newaddrsize))
+-				newaddrsize = 0;
+ 
+-			pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
+-			    newintsize, newaddrsize);
+-
+-			/* Check for malformed properties */
+-			if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS)
+-			    || (imaplen < (newaddrsize + newintsize))) {
+-				rc = -EFAULT;
+-				goto fail;
+-			}
+-
+-			imap += newaddrsize + newintsize;
+-			imaplen -= newaddrsize + newintsize;
++			match &= of_device_is_available(out_irq->np);
++			if (match)
++				break;
+ 
++			of_node_put(out_irq->np);
++			imaplen -= imap - oldimap;
+ 			pr_debug(" -> imaplen=%d\n", imaplen);
+ 		}
+-		if (!match) {
+-			if (intc) {
+-				/*
+-				 * The PASEMI Nemo is a known offender, so
+-				 * let's only warn for anyone else.
+-				 */
+-				WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+-				     "%pOF interrupt-map failed, using interrupt-controller\n",
+-				     ipar);
+-				return 0;
+-			}
+-
++		if (!match)
+ 			goto fail;
+-		}
+ 
+ 		/*
+ 		 * Successfully parsed an interrupt-map translation; copy new
+ 		 * interrupt specifier into the out_irq structure
+ 		 */
+-		match_array = imap - newaddrsize - newintsize;
+-		for (i = 0; i < newintsize; i++)
+-			out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+-		out_irq->args_count = intsize = newintsize;
+-		addrsize = newaddrsize;
++		match_array = oldimap + 1;
++
++		newpar = out_irq->np;
++		intsize = out_irq->args_count;
++		addrsize = (imap - match_array) - intsize;
+ 
+ 		if (ipar == newpar) {
+ 			pr_debug("%pOF interrupt-map entry to self\n", ipar);
+@@ -300,7 +315,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+ 	skiplevel:
+ 		/* Iterate again with new parent */
+-		out_irq->np = newpar;
+ 		pr_debug(" -> new parent: %pOF\n", newpar);
+ 		of_node_put(ipar);
+ 		ipar = newpar;
+@@ -310,7 +324,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
+ 
+  fail:
+ 	of_node_put(ipar);
+-	of_node_put(newpar);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index 485483524b7f1..b0609de49c7c4 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -158,6 +158,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
+ extern int of_bus_n_addr_cells(struct device_node *np);
+ extern int of_bus_n_size_cells(struct device_node *np);
+ 
++const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len,
++				       struct of_phandle_args *out_irq);
++
+ struct bus_dma_region;
+ #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
+ int of_dma_get_range(struct device_node *np,
+diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
+index e6dc857aac3fe..e06c7b2aac5c4 100644
+--- a/drivers/parport/parport_amiga.c
++++ b/drivers/parport/parport_amiga.c
+@@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev)
+ 	parport_put_port(port);
+ }
+ 
+-static struct platform_driver amiga_parallel_driver = {
++/*
++ * amiga_parallel_remove() lives in .exit.text. For drivers registered via
++ * module_platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver amiga_parallel_driver __refdata = {
+ 	.remove_new = __exit_p(amiga_parallel_remove),
+ 	.driver   = {
+ 		.name	= "amiga-parallel",
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index 3e44d2fb8bf81..6d3fdf3a688dd 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -634,7 +634,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
+ 	 * which may include counters that are not enabled yet.
+ 	 */
+ 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+-		  0, pmu->cmask, 0, 0, 0, 0);
++		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+ }
+ 
+ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
+diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
+index 3ef655591424c..abe7be602f846 100644
+--- a/drivers/platform/mellanox/nvsw-sn2201.c
++++ b/drivers/platform/mellanox/nvsw-sn2201.c
+@@ -1198,6 +1198,7 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
+ static int nvsw_sn2201_probe(struct platform_device *pdev)
+ {
+ 	struct nvsw_sn2201 *nvsw_sn2201;
++	int ret;
+ 
+ 	nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
+ 	if (!nvsw_sn2201)
+@@ -1205,8 +1206,10 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
+ 
+ 	nvsw_sn2201->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, nvsw_sn2201);
+-	platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
++	ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
+ 				      ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
++	if (ret)
++		return ret;
+ 
+ 	nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
+ 	nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
+diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
+index 1927be901108e..272d32a95e216 100644
+--- a/drivers/platform/x86/amd/hsmp.c
++++ b/drivers/platform/x86/amd/hsmp.c
+@@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void)
+ 	return ret;
+ }
+ 
++/*
++ * This check is only needed for backward compatibility with previous platforms.
++ * All new platforms are expected to support ACPI-based probing.
++ */
++static bool legacy_hsmp_support(void)
++{
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++		return false;
++
++	switch (boot_cpu_data.x86) {
++	case 0x19:
++		switch (boot_cpu_data.x86_model) {
++		case 0x00 ... 0x1F:
++		case 0x30 ... 0x3F:
++		case 0x90 ... 0x9F:
++		case 0xA0 ... 0xAF:
++			return true;
++		default:
++			return false;
++		}
++	case 0x1A:
++		switch (boot_cpu_data.x86_model) {
++		case 0x00 ... 0x1F:
++			return true;
++		default:
++			return false;
++		}
++	default:
++		return false;
++	}
++
++	return false;
++}
++
+ static int __init hsmp_plt_init(void)
+ {
+ 	int ret = -ENODEV;
+ 
+-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
+-		pr_err("HSMP is not supported on Family:%x model:%x\n",
+-		       boot_cpu_data.x86, boot_cpu_data.x86_model);
+-		return ret;
+-	}
+-
+ 	/*
+ 	 * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ 	 * if we have N SMN/DF interfaces that ideally means N sockets
+@@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void)
+ 		return ret;
+ 
+ 	if (!plat_dev.is_acpi_device) {
+-		ret = hsmp_plat_dev_register();
++		if (legacy_hsmp_support()) {
++			/* Not an ACPI device, but supports HSMP; register a plat_dev */
++			ret = hsmp_plat_dev_register();
++		} else {
++			/* Not ACPI and does not support HSMP */
++			pr_info("HSMP is not supported on Family:%x model:%x\n",
++				boot_cpu_data.x86, boot_cpu_data.x86_model);
++			ret = -ENODEV;
++		}
+ 		if (ret)
+ 			platform_driver_unregister(&amd_hsmp_driver);
+ 	}
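One detail worth knowing in legacy_hsmp_support() above: `case 0x00 ... 0x1F:` is the GCC/Clang case-range extension (inclusive on both ends), used throughout the kernel for contiguous family/model ranges; it is not ISO C. A reduced sketch of the same shape, which builds only with those compilers:

	/* GCC/Clang only: case ranges are a compiler extension. */
	static int model_supported(unsigned int family, unsigned int model)
	{
		switch (family) {
		case 0x19:
			switch (model) {
			case 0x00 ... 0x1F:	/* matches 0x00 through 0x1F */
			case 0x30 ... 0x3F:
				return 1;
			default:
				return 0;
			}
		case 0x1A:
			return model <= 0x1F;
		default:
			return 0;
		}
	}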
+diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
+index e714ee6298dda..78c48a1f9c68a 100644
+--- a/drivers/platform/x86/lg-laptop.c
++++ b/drivers/platform/x86/lg-laptop.c
+@@ -39,8 +39,6 @@ MODULE_LICENSE("GPL");
+ #define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
+ #define WMI_EVENT_GUID  WMI_EVENT_GUID0
+ 
+-#define WMAB_METHOD     "\\XINI.WMAB"
+-#define WMBB_METHOD     "\\XINI.WMBB"
+ #define SB_GGOV_METHOD  "\\_SB.GGOV"
+ #define GOV_TLED        0x2020008
+ #define WM_GET          1
+@@ -74,7 +72,7 @@ static u32 inited;
+ 
+ static int battery_limit_use_wmbb;
+ static struct led_classdev kbd_backlight;
+-static enum led_brightness get_kbd_backlight_level(void);
++static enum led_brightness get_kbd_backlight_level(struct device *dev);
+ 
+ static const struct key_entry wmi_keymap[] = {
+ 	{KE_KEY, 0x70, {KEY_F15} },	 /* LG control panel (F1) */
+@@ -84,7 +82,6 @@ static const struct key_entry wmi_keymap[] = {
+ 					  * this key both sends an event and
+ 					  * changes backlight level.
+ 					  */
+-	{KE_KEY, 0x80, {KEY_RFKILL} },
+ 	{KE_END, 0}
+ };
+ 
+@@ -128,11 +125,10 @@ static int ggov(u32 arg0)
+ 	return res;
+ }
+ 
+-static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmab(struct device *dev, u32 method, u32 arg1, u32 arg2)
+ {
+ 	union acpi_object args[3];
+ 	acpi_status status;
+-	acpi_handle handle;
+ 	struct acpi_object_list arg;
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 
+@@ -143,29 +139,22 @@ static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
+ 	args[2].type = ACPI_TYPE_INTEGER;
+ 	args[2].integer.value = arg2;
+ 
+-	status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
+-	if (ACPI_FAILURE(status)) {
+-		pr_err("Cannot get handle");
+-		return NULL;
+-	}
+-
+ 	arg.count = 3;
+ 	arg.pointer = args;
+ 
+-	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMAB", &arg, &buffer);
+ 	if (ACPI_FAILURE(status)) {
+-		acpi_handle_err(handle, "WMAB: call failed.\n");
++		dev_err(dev, "WMAB: call failed.\n");
+ 		return NULL;
+ 	}
+ 
+ 	return buffer.pointer;
+ }
+ 
+-static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
++static union acpi_object *lg_wmbb(struct device *dev, u32 method_id, u32 arg1, u32 arg2)
+ {
+ 	union acpi_object args[3];
+ 	acpi_status status;
+-	acpi_handle handle;
+ 	struct acpi_object_list arg;
+ 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ 	u8 buf[32];
+@@ -181,18 +170,12 @@ static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
+ 	args[2].buffer.length = 32;
+ 	args[2].buffer.pointer = buf;
+ 
+-	status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
+-	if (ACPI_FAILURE(status)) {
+-		pr_err("Cannot get handle");
+-		return NULL;
+-	}
+-
+ 	arg.count = 3;
+ 	arg.pointer = args;
+ 
+-	status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
++	status = acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB", &arg, &buffer);
+ 	if (ACPI_FAILURE(status)) {
+-		acpi_handle_err(handle, "WMAB: call failed.\n");
++		dev_err(dev, "WMBB: call failed.\n");
+ 		return NULL;
+ 	}
+ 
+@@ -223,7 +206,7 @@ static void wmi_notify(u32 value, void *context)
+ 
+ 		if (eventcode == 0x10000000) {
+ 			led_classdev_notify_brightness_hw_changed(
+-				&kbd_backlight, get_kbd_backlight_level());
++				&kbd_backlight, get_kbd_backlight_level(kbd_backlight.dev->parent));
+ 		} else {
+ 			key = sparse_keymap_entry_from_scancode(
+ 				wmi_input_dev, eventcode);
+@@ -272,14 +255,7 @@ static void wmi_input_setup(void)
+ 
+ static void acpi_notify(struct acpi_device *device, u32 event)
+ {
+-	struct key_entry *key;
+-
+ 	acpi_handle_debug(device->handle, "notify: %d\n", event);
+-	if (inited & INIT_SPARSE_KEYMAP) {
+-		key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
+-		if (key && key->type == KE_KEY)
+-			sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
+-	}
+ }
+ 
+ static ssize_t fan_mode_store(struct device *dev,
+@@ -295,7 +271,7 @@ static ssize_t fan_mode_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -306,9 +282,9 @@ static ssize_t fan_mode_store(struct device *dev,
+ 
+ 	m = r->integer.value;
+ 	kfree(r);
+-	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
++	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
+ 	kfree(r);
+-	r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ 	kfree(r);
+ 
+ 	return count;
+@@ -320,7 +296,7 @@ static ssize_t fan_mode_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -347,7 +323,7 @@ static ssize_t usb_charge_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
++	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -361,7 +337,7 @@ static ssize_t usb_charge_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
++	r = lg_wmbb(dev, WMBB_USB_CHARGE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -389,7 +365,7 @@ static ssize_t reader_mode_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_READER_MODE, WM_SET, value);
++	r = lg_wmab(dev, WM_READER_MODE, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -403,7 +379,7 @@ static ssize_t reader_mode_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_READER_MODE, WM_GET, 0);
++	r = lg_wmab(dev, WM_READER_MODE, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -431,7 +407,7 @@ static ssize_t fn_lock_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	r = lg_wmab(WM_FN_LOCK, WM_SET, value);
++	r = lg_wmab(dev, WM_FN_LOCK, WM_SET, value);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -445,7 +421,7 @@ static ssize_t fn_lock_show(struct device *dev,
+ 	unsigned int status;
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
++	r = lg_wmab(dev, WM_FN_LOCK, WM_GET, 0);
+ 	if (!r)
+ 		return -EIO;
+ 
+@@ -475,9 +451,9 @@ static ssize_t charge_control_end_threshold_store(struct device *dev,
+ 		union acpi_object *r;
+ 
+ 		if (battery_limit_use_wmbb)
+-			r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
++			r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_SET, value);
+ 		else
+-			r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
++			r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_SET, value);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -496,7 +472,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+ 	union acpi_object *r;
+ 
+ 	if (battery_limit_use_wmbb) {
+-		r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
++		r = lg_wmbb(&pf_device->dev, WMBB_BATT_LIMIT, WM_GET, 0);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -507,7 +483,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
+ 
+ 		status = r->buffer.pointer[0x10];
+ 	} else {
+-		r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
++		r = lg_wmab(&pf_device->dev, WM_BATT_LIMIT, WM_GET, 0);
+ 		if (!r)
+ 			return -EIO;
+ 
+@@ -586,7 +562,7 @@ static void tpad_led_set(struct led_classdev *cdev,
+ {
+ 	union acpi_object *r;
+ 
+-	r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
++	r = lg_wmab(cdev->dev->parent, WM_TLED, WM_SET, brightness > LED_OFF);
+ 	kfree(r);
+ }
+ 
+@@ -608,16 +584,16 @@ static void kbd_backlight_set(struct led_classdev *cdev,
+ 		val = 0;
+ 	if (brightness >= LED_FULL)
+ 		val = 0x24;
+-	r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
++	r = lg_wmab(cdev->dev->parent, WM_KEY_LIGHT, WM_SET, val);
+ 	kfree(r);
+ }
+ 
+-static enum led_brightness get_kbd_backlight_level(void)
++static enum led_brightness get_kbd_backlight_level(struct device *dev)
+ {
+ 	union acpi_object *r;
+ 	int val;
+ 
+-	r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
++	r = lg_wmab(dev, WM_KEY_LIGHT, WM_GET, 0);
+ 
+ 	if (!r)
+ 		return LED_OFF;
+@@ -645,7 +621,7 @@ static enum led_brightness get_kbd_backlight_level(void)
+ 
+ static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
+ {
+-	return get_kbd_backlight_level();
++	return get_kbd_backlight_level(cdev->dev->parent);
+ }
+ 
+ static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
+@@ -672,6 +648,11 @@ static struct platform_driver pf_driver = {
+ 
+ static int acpi_add(struct acpi_device *device)
+ {
++	struct platform_device_info pdev_info = {
++		.fwnode = acpi_fwnode_handle(device),
++		.name = PLATFORM_NAME,
++		.id = PLATFORM_DEVID_NONE,
++	};
+ 	int ret;
+ 	const char *product;
+ 	int year = 2017;
+@@ -683,9 +664,7 @@ static int acpi_add(struct acpi_device *device)
+ 	if (ret)
+ 		return ret;
+ 
+-	pf_device = platform_device_register_simple(PLATFORM_NAME,
+-						    PLATFORM_DEVID_NONE,
+-						    NULL, 0);
++	pf_device = platform_device_register_full(&pdev_info);
+ 	if (IS_ERR(pf_device)) {
+ 		ret = PTR_ERR(pf_device);
+ 		pf_device = NULL;
+@@ -776,7 +755,7 @@ static void acpi_remove(struct acpi_device *device)
+ }
+ 
+ static const struct acpi_device_id device_ids[] = {
+-	{"LGEX0815", 0},
++	{"LGEX0820", 0},
+ 	{"", 0}
+ };
+ MODULE_DEVICE_TABLE(acpi, device_ids);
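
The lg-laptop hunks above all serve one idea: thread the bound struct device into lg_wmab()/lg_wmbb() so the WMAB/WMBB methods are evaluated relative to ACPI_HANDLE(dev) instead of being looked up globally by absolute path. A minimal sketch of that shape, with error handling trimmed (illustrative only, not the driver's exact code):

static union acpi_object *eval_wmbb(struct device *dev,
				    struct acpi_object_list *args)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };

	/* "WMBB" resolves relative to this device's ACPI node */
	if (ACPI_FAILURE(acpi_evaluate_object(ACPI_HANDLE(dev), "WMBB",
					      args, &out)))
		return NULL;
	return out.pointer;
}
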
+diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
+index 4422863f47bbe..01feb6e6787f2 100644
+--- a/drivers/platform/x86/wireless-hotkey.c
++++ b/drivers/platform/x86/wireless-hotkey.c
+@@ -19,6 +19,7 @@ MODULE_AUTHOR("Alex Hung");
+ MODULE_ALIAS("acpi*:HPQ6001:*");
+ MODULE_ALIAS("acpi*:WSTADEF:*");
+ MODULE_ALIAS("acpi*:AMDI0051:*");
++MODULE_ALIAS("acpi*:LGEX0815:*");
+ 
+ struct wl_button {
+ 	struct input_dev *input_dev;
+@@ -29,6 +30,7 @@ static const struct acpi_device_id wl_ids[] = {
+ 	{"HPQ6001", 0},
+ 	{"WSTADEF", 0},
+ 	{"AMDI0051", 0},
++	{"LGEX0815", 0},
+ 	{"", 0},
+ };
+ 
+diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
+index e74a0f6a31572..4e80273dfb1ec 100644
+--- a/drivers/pnp/base.h
++++ b/drivers/pnp/base.h
+@@ -6,6 +6,7 @@
+ 
+ extern struct mutex pnp_lock;
+ extern const struct attribute_group *pnp_dev_groups[];
++extern const struct bus_type pnp_bus_type;
+ 
+ int pnp_register_protocol(struct pnp_protocol *protocol);
+ void pnp_unregister_protocol(struct pnp_protocol *protocol);
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index d53ee34d398f6..fbe29cabcbb83 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -1293,6 +1293,7 @@ sclp_init(void)
+ fail_unregister_reboot_notifier:
+ 	unregister_reboot_notifier(&sclp_reboot_notifier);
+ fail_init_state_uninitialized:
++	list_del(&sclp_state_change_event.list);
+ 	sclp_init_state = sclp_init_state_uninitialized;
+ 	free_page((unsigned long) sclp_read_sccb);
+ 	free_page((unsigned long) sclp_init_sccb);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index a226dc1b65d71..4eb0837298d4d 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state)
+ 	}
+ }
+ 
+-static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
+-					      struct scsi_sense_hdr *sense_hdr)
++static void alua_handle_state_transition(struct scsi_device *sdev)
+ {
+ 	struct alua_dh_data *h = sdev->handler_data;
+ 	struct alua_port_group *pg;
+ 
++	rcu_read_lock();
++	pg = rcu_dereference(h->pg);
++	if (pg)
++		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
++	rcu_read_unlock();
++	alua_check(sdev, false);
++}
++
++static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
++					      struct scsi_sense_hdr *sense_hdr)
++{
+ 	switch (sense_hdr->sense_key) {
+ 	case NOT_READY:
+ 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
+ 			/*
+ 			 * LUN Not Accessible - ALUA state transition
+ 			 */
+-			rcu_read_lock();
+-			pg = rcu_dereference(h->pg);
+-			if (pg)
+-				pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+-			rcu_read_unlock();
+-			alua_check(sdev, false);
++			alua_handle_state_transition(sdev);
+ 			return NEEDS_RETRY;
+ 		}
+ 		break;
+ 	case UNIT_ATTENTION:
++		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
++			/*
++			 * LUN Not Accessible - ALUA state transition
++			 */
++			alua_handle_state_transition(sdev);
++			return NEEDS_RETRY;
++		}
+ 		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
+ 			/*
+ 			 * Power On, Reset, or Bus Device Reset.
+@@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev)
+ 
+ 	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
+ 				      ALUA_FAILOVER_RETRIES, &sense_hdr);
+-	if (sense_hdr.sense_key == NOT_READY &&
++	if ((sense_hdr.sense_key == NOT_READY ||
++	     sense_hdr.sense_key == UNIT_ATTENTION) &&
+ 	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+ 		return SCSI_DH_RETRY;
+ 	else if (retval)
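
The scsi_dh_alua hunks fold the transition handling into alua_handle_state_transition() and, more importantly, honor ASC/ASCQ 04h/0Ah ("LUN not accessible, ALUA state transition") under UNIT ATTENTION as well as NOT READY. A standalone sketch of just that decision (sense-key constants per SPC; hypothetical demo, not driver code):

#include <stdio.h>

#define NOT_READY	0x02
#define UNIT_ATTENTION	0x06

static int alua_transition_retry(int key, int asc, int ascq)
{
	return (key == NOT_READY || key == UNIT_ATTENTION) &&
	       asc == 0x04 && ascq == 0x0a;
}

int main(void)
{
	printf("NOT_READY 04/0a      -> %d\n", alua_transition_retry(NOT_READY, 0x04, 0x0a));
	printf("UNIT_ATTENTION 04/0a -> %d\n", alua_transition_retry(UNIT_ATTENTION, 0x04, 0x0a));
	printf("UNIT_ATTENTION 29/00 -> %d\n", alua_transition_retry(UNIT_ATTENTION, 0x29, 0x00));
	return 0;
}
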
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index 3804aef165adb..164086c5824ec 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -145,6 +145,20 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
+ 		func, dev->parent ? "exp-attached" :
+ 		"direct-attached",
+ 		SAS_ADDR(dev->sas_addr), err);
++
++	/*
++	 * If the device probe failed, the expander phy attached address
++	 * needs to be reset so that the phy will not be treated as flutter
++	 * in the next revalidation
++	 */
++	if (dev->parent && !dev_is_expander(dev->dev_type)) {
++		struct sas_phy *phy = dev->phy;
++		struct domain_device *parent = dev->parent;
++		struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
++
++		memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
++	}
++
+ 	sas_unregister_dev(dev->port, dev);
+ }
+ 
+diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
+index 5058e01b65a27..98afdfe636003 100644
+--- a/drivers/scsi/qedf/qedf.h
++++ b/drivers/scsi/qedf/qedf.h
+@@ -363,6 +363,7 @@ struct qedf_ctx {
+ #define QEDF_IN_RECOVERY		5
+ #define QEDF_DBG_STOP_IO		6
+ #define QEDF_PROBING			8
++#define QEDF_STAG_IN_PROGRESS		9
+ 	unsigned long flags; /* Miscellaneous state flags */
+ 	int fipvlan_retries;
+ 	u8 num_queues;
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index a58353b7b4e8b..b97a8712d3f66 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
+ 	 */
+ 	if (resp == fc_lport_flogi_resp) {
+ 		qedf->flogi_cnt++;
++		qedf->flogi_pending++;
++
++		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++			qedf->flogi_pending = 0;
++		}
++
+ 		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ 			schedule_delayed_work(&qedf->stag_work, 2);
+ 			return NULL;
+ 		}
+-		qedf->flogi_pending++;
++
+ 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
+ 		    arg, timeout);
+ 	}
+@@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 	struct qedf_ctx *qedf;
+ 	struct qed_link_output if_link;
+ 
++	qedf = lport_priv(lport);
++
+ 	if (lport->vport) {
++		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ 		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
+ 		return;
+ 	}
+ 
+-	qedf = lport_priv(lport);
+-
+ 	qedf->flogi_pending = 0;
+ 	/* For host reset, essentially do a soft link up/down */
+ 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+@@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 	if (!if_link.link_up) {
+ 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ 			  "Physical link is not up.\n");
++		clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ 		return;
+ 	}
+ 	/* Flush and wait to make sure link down is processed */
+@@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
+ 		  "Queue link up work.\n");
+ 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ 	    0);
++	clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ }
+ 
+ /* Reset the host by gracefully logging out and then logging back in */
+@@ -3463,6 +3473,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
+ 	}
+ 
+ 	/* Start the Slowpath-process */
++	memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
+ 	slowpath_params.int_mode = QED_INT_MODE_MSIX;
+ 	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
+ 	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
+@@ -3721,6 +3732,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ {
+ 	struct qedf_ctx *qedf;
+ 	int rc;
++	int cnt = 0;
+ 
+ 	if (!pdev) {
+ 		QEDF_ERR(NULL, "pdev is NULL.\n");
+@@ -3738,6 +3750,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ 		return;
+ 	}
+ 
++stag_in_prog:
++	if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
++		cnt++;
++
++		if (cnt < 5) {
++			msleep(500);
++			goto stag_in_prog;
++		}
++	}
++
+ 	if (mode != QEDF_MODE_RECOVERY)
+ 		set_bit(QEDF_UNLOADING, &qedf->flags);
+ 
+@@ -3997,6 +4020,24 @@ void qedf_stag_change_work(struct work_struct *work)
+ 	struct qedf_ctx *qedf =
+ 	    container_of(work, struct qedf_ctx, stag_work.work);
+ 
++	if (!qedf) {
++		QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
++		return;
++	}
++
++	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx,
++			 "Already is in recovery, hence not calling software context reset.\n");
++		return;
++	}
++
++	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
++		QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
++		return;
++	}
++
++	set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
++
+ 	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ 			dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ 			qedf->dbg_ctx.host_no);
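
The qedf hunks are scattered across five functions, so here is the QEDF_STAG_IN_PROGRESS handshake they add, in one place (comment-only sketch, not driver code):

/*
 * stag worker:    if (test_bit(IN_RECOVERY) || test_bit(UNLOADING))
 *                         return;              // no reset while unstable
 *                 set_bit(STAG_IN_PROGRESS);
 *                 qedf_ctx_soft_reset();       // clears the bit on every
 *                                              // exit path, success or not
 *
 * __qedf_remove:  while (test_bit(STAG_IN_PROGRESS) && cnt++ < 5)
 *                         msleep(500);         // let a running reset finish
 *                 set_bit(UNLOADING);          // blocks further resets
 */
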
+diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
+index 1175f2e213b56..dc899277b3a44 100644
+--- a/drivers/scsi/sr.h
++++ b/drivers/scsi/sr.h
+@@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *);
+ int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *);
+ int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
+ int sr_reset(struct cdrom_device_info *);
+-int sr_select_speed(struct cdrom_device_info *cdi, int speed);
++int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed);
+ int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
+ 
+ int sr_is_xa(Scsi_CD *);
+diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
+index 5b0b35e60e61f..a0d2556a27bba 100644
+--- a/drivers/scsi/sr_ioctl.c
++++ b/drivers/scsi/sr_ioctl.c
+@@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi)
+ 	return 0;
+ }
+ 
+-int sr_select_speed(struct cdrom_device_info *cdi, int speed)
++int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
+ {
+ 	Scsi_CD *cd = cdi->handle;
+ 	struct packet_command cgc;
+ 
++	/* avoid exceeding the max speed or overflowing integer bounds */
++	speed = clamp(speed, 0UL, 0xffff / 177UL);
++
+ 	if (speed == 0)
+ 		speed = 0xffff;	/* set to max */
+ 	else
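
The sr_select_speed() change bounds the requested multiple before it is scaled: the MODE SELECT speed field is 16 bits and the driver multiplies by 177 (roughly the kB/s of 1x CD speed), so anything above 0xffff / 177 = 370 would wrap. Note that clamp() takes the value first and the bounds second, which is how the hunk above is written. A standalone demo of the arithmetic (hypothetical userspace code, not the driver):

#include <stdio.h>

#define CLAMP(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
	unsigned long reqs[] = { 0, 4, 370, 371, 100000 };

	for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		unsigned long speed = CLAMP(reqs[i], 0UL, 0xffff / 177UL);

		/* 0 means "max" to the caller; otherwise scale to kB/s */
		printf("request %6lu -> field 0x%04lx\n", reqs[i],
		       speed ? speed * 177 : 0xffffUL);
	}
	return 0;
}
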
+diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
+index be3998104bfbb..f7e8b5efa50e5 100644
+--- a/drivers/spi/spi-davinci.c
++++ b/drivers/spi/spi-davinci.c
+@@ -984,6 +984,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
+ 	return ret;
+ 
+ free_dma:
++	/* This bit needs to be cleared to disable dspi->clk */
++	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
++
+ 	if (dspi->dma_rx) {
+ 		dma_release_channel(dspi->dma_rx);
+ 		dma_release_channel(dspi->dma_tx);
+@@ -1013,6 +1016,9 @@ static void davinci_spi_remove(struct platform_device *pdev)
+ 
+ 	spi_bitbang_stop(&dspi->bitbang);
+ 
++	/* This bit needs to be cleared to disable dspi->clk */
++	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
++
+ 	if (dspi->dma_rx) {
+ 		dma_release_channel(dspi->dma_rx);
+ 		dma_release_channel(dspi->dma_tx);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 09b6c1b45f1a1..09c676e50fe0e 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1050,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ 	.rx_available = mx31_rx_available,
+ 	.reset = mx31_reset,
+ 	.fifo_size = 8,
+-	.has_dmamode = true,
++	.has_dmamode = false,
+ 	.dynamic_burst = false,
+ 	.has_targetmode = false,
+ 	.devtype = IMX35_CSPI,
+diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
+index 031b5795d1060..a8bb07b38ec64 100644
+--- a/drivers/spi/spi-mux.c
++++ b/drivers/spi/spi-mux.c
+@@ -156,6 +156,7 @@ static int spi_mux_probe(struct spi_device *spi)
+ 	/* supported modes are the same as our parent's */
+ 	ctlr->mode_bits = spi->controller->mode_bits;
+ 	ctlr->flags = spi->controller->flags;
++	ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask;
+ 	ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ 	ctlr->setup = spi_mux_setup;
+ 	ctlr->num_chipselect = mux_control_states(priv->mux);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 9304fd03bf764..fcc39523d6857 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -4152,7 +4152,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ 				return -EINVAL;
+ 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ 				xfer->tx_nbits != SPI_NBITS_DUAL &&
+-				xfer->tx_nbits != SPI_NBITS_QUAD)
++				xfer->tx_nbits != SPI_NBITS_QUAD &&
++				xfer->tx_nbits != SPI_NBITS_OCTAL)
+ 				return -EINVAL;
+ 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+@@ -4167,7 +4168,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ 				return -EINVAL;
+ 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ 				xfer->rx_nbits != SPI_NBITS_DUAL &&
+-				xfer->rx_nbits != SPI_NBITS_QUAD)
++				xfer->rx_nbits != SPI_NBITS_QUAD &&
++				xfer->rx_nbits != SPI_NBITS_OCTAL)
+ 				return -EINVAL;
+ 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
+index ecb5eb079408e..c5a3e25c55dab 100644
+--- a/drivers/tee/optee/ffa_abi.c
++++ b/drivers/tee/optee/ffa_abi.c
+@@ -660,7 +660,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ 					const struct ffa_ops *ops)
+ {
+ 	const struct ffa_msg_ops *msg_ops = ops->msg_ops;
+-	struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
++	struct ffa_send_direct_data data = {
++		.data0 = OPTEE_FFA_GET_API_VERSION,
++	};
+ 	int rc;
+ 
+ 	msg_ops->mode_32bit_set(ffa_dev);
+@@ -677,7 +679,9 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
+ 		return false;
+ 	}
+ 
+-	data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
++	data = (struct ffa_send_direct_data){
++		.data0 = OPTEE_FFA_GET_OS_VERSION,
++	};
+ 	rc = msg_ops->sync_send_receive(ffa_dev, &data);
+ 	if (rc) {
+ 		pr_err("Unexpected error %d\n", rc);
+@@ -698,7 +702,9 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ 				    unsigned int *rpc_param_count,
+ 				    unsigned int *max_notif_value)
+ {
+-	struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
++	struct ffa_send_direct_data data = {
++		.data0 = OPTEE_FFA_EXCHANGE_CAPABILITIES,
++	};
+ 	int rc;
+ 
+ 	rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
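
All three ffa_abi.c hunks make the same change: initialize struct ffa_send_direct_data by member name rather than by position. With a designated initializer the constant keeps landing in .data0 even if the structure grows or reorders members ahead of it, which a positional { OPTEE_FFA_GET_API_VERSION } cannot guarantee. A standalone illustration with a made-up struct (not the FF-A ABI):

#include <stdio.h>

/* imagine "flags" was added to the ABI struct after the code was written */
struct msg { unsigned long flags; unsigned long data0; };

int main(void)
{
	struct msg pos = { 0x1 };		/* positional: 0x1 lands in flags */
	struct msg des = { .data0 = 0x1 };	/* designated: 0x1 lands in data0 */

	printf("positional: flags=%#lx data0=%#lx\n", pos.flags, pos.data0);
	printf("designated: flags=%#lx data0=%#lx\n", des.flags, des.data0);
	return 0;
}
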
+diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
+index e75da0a70d1f8..bb1817bd4ff31 100644
+--- a/drivers/vfio/device_cdev.c
++++ b/drivers/vfio/device_cdev.c
+@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
+ 
+ 	filep->private_data = df;
+ 
++	/*
++	 * Use the pseudo fs inode on the device to link all mmaps
++	 * to the same address space, allowing us to unmap all vmas
++	 * associated with this device using unmap_mapping_range().
++	 */
++	filep->f_mapping = device->inode->i_mapping;
++
+ 	return 0;
+ 
+ err_put_registration:
+diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
+index 610a429c61912..ded364588d297 100644
+--- a/drivers/vfio/group.c
++++ b/drivers/vfio/group.c
+@@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
+ 	 */
+ 	filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+ 
++	/*
++	 * Use the pseudo fs inode on the device to link all mmaps
++	 * to the same address space, allowing us to unmap all vmas
++	 * associated with this device using unmap_mapping_range().
++	 */
++	filep->f_mapping = device->inode->i_mapping;
++
+ 	if (device->group->type == VFIO_NO_IOMMU)
+ 		dev_warn(device->dev, "vfio-noiommu device opened by user "
+ 			 "(%s:%d)\n", current->comm, task_pid_nr(current));
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index ea36d2139590f..680b15ca4fcea 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1607,100 +1607,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_write);
+ 
+-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
++static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
+ {
+-	struct vfio_pci_mmap_vma *mmap_vma, *tmp;
++	struct vfio_device *core_vdev = &vdev->vdev;
++	loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
++	loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
++	loff_t len = end - start;
+ 
+-	/*
+-	 * Lock ordering:
+-	 * vma_lock is nested under mmap_lock for vm_ops callback paths.
+-	 * The memory_lock semaphore is used by both code paths calling
+-	 * into this function to zap vmas and the vm_ops.fault callback
+-	 * to protect the memory enable state of the device.
+-	 *
+-	 * When zapping vmas we need to maintain the mmap_lock => vma_lock
+-	 * ordering, which requires using vma_lock to walk vma_list to
+-	 * acquire an mm, then dropping vma_lock to get the mmap_lock and
+-	 * reacquiring vma_lock.  This logic is derived from similar
+-	 * requirements in uverbs_user_mmap_disassociate().
+-	 *
+-	 * mmap_lock must always be the top-level lock when it is taken.
+-	 * Therefore we can only hold the memory_lock write lock when
+-	 * vma_list is empty, as we'd need to take mmap_lock to clear
+-	 * entries.  vma_list can only be guaranteed empty when holding
+-	 * vma_lock, thus memory_lock is nested under vma_lock.
+-	 *
+-	 * This enables the vm_ops.fault callback to acquire vma_lock,
+-	 * followed by memory_lock read lock, while already holding
+-	 * mmap_lock without risk of deadlock.
+-	 */
+-	while (1) {
+-		struct mm_struct *mm = NULL;
+-
+-		if (try) {
+-			if (!mutex_trylock(&vdev->vma_lock))
+-				return 0;
+-		} else {
+-			mutex_lock(&vdev->vma_lock);
+-		}
+-		while (!list_empty(&vdev->vma_list)) {
+-			mmap_vma = list_first_entry(&vdev->vma_list,
+-						    struct vfio_pci_mmap_vma,
+-						    vma_next);
+-			mm = mmap_vma->vma->vm_mm;
+-			if (mmget_not_zero(mm))
+-				break;
+-
+-			list_del(&mmap_vma->vma_next);
+-			kfree(mmap_vma);
+-			mm = NULL;
+-		}
+-		if (!mm)
+-			return 1;
+-		mutex_unlock(&vdev->vma_lock);
+-
+-		if (try) {
+-			if (!mmap_read_trylock(mm)) {
+-				mmput(mm);
+-				return 0;
+-			}
+-		} else {
+-			mmap_read_lock(mm);
+-		}
+-		if (try) {
+-			if (!mutex_trylock(&vdev->vma_lock)) {
+-				mmap_read_unlock(mm);
+-				mmput(mm);
+-				return 0;
+-			}
+-		} else {
+-			mutex_lock(&vdev->vma_lock);
+-		}
+-		list_for_each_entry_safe(mmap_vma, tmp,
+-					 &vdev->vma_list, vma_next) {
+-			struct vm_area_struct *vma = mmap_vma->vma;
+-
+-			if (vma->vm_mm != mm)
+-				continue;
+-
+-			list_del(&mmap_vma->vma_next);
+-			kfree(mmap_vma);
+-
+-			zap_vma_ptes(vma, vma->vm_start,
+-				     vma->vm_end - vma->vm_start);
+-		}
+-		mutex_unlock(&vdev->vma_lock);
+-		mmap_read_unlock(mm);
+-		mmput(mm);
+-	}
++	unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
+ }
+ 
+ void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
+ {
+-	vfio_pci_zap_and_vma_lock(vdev, false);
+ 	down_write(&vdev->memory_lock);
+-	mutex_unlock(&vdev->vma_lock);
++	vfio_pci_zap_bars(vdev);
+ }
+ 
+ u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
+@@ -1722,99 +1642,56 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
+ 	up_write(&vdev->memory_lock);
+ }
+ 
+-/* Caller holds vma_lock */
+-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+-			      struct vm_area_struct *vma)
+-{
+-	struct vfio_pci_mmap_vma *mmap_vma;
+-
+-	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
+-	if (!mmap_vma)
+-		return -ENOMEM;
+-
+-	mmap_vma->vma = vma;
+-	list_add(&mmap_vma->vma_next, &vdev->vma_list);
+-
+-	return 0;
+-}
+-
+-/*
+- * Zap mmaps on open so that we can fault them in on access and therefore
+- * our vma_list only tracks mappings accessed since last zap.
+- */
+-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+-{
+-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+-}
+-
+-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
++static unsigned long vma_to_pfn(struct vm_area_struct *vma)
+ {
+ 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
+-	struct vfio_pci_mmap_vma *mmap_vma;
++	int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
++	u64 pgoff;
+ 
+-	mutex_lock(&vdev->vma_lock);
+-	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+-		if (mmap_vma->vma == vma) {
+-			list_del(&mmap_vma->vma_next);
+-			kfree(mmap_vma);
+-			break;
+-		}
+-	}
+-	mutex_unlock(&vdev->vma_lock);
++	pgoff = vma->vm_pgoff &
++		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
++
++	return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
+ }
+ 
+ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ {
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
+-	struct vfio_pci_mmap_vma *mmap_vma;
+-	vm_fault_t ret = VM_FAULT_NOPAGE;
++	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
++	unsigned long addr = vma->vm_start;
++	vm_fault_t ret = VM_FAULT_SIGBUS;
++
++	pfn = vma_to_pfn(vma);
+ 
+-	mutex_lock(&vdev->vma_lock);
+ 	down_read(&vdev->memory_lock);
+ 
+-	/*
+-	 * Memory region cannot be accessed if the low power feature is engaged
+-	 * or memory access is disabled.
+-	 */
+-	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
+-		ret = VM_FAULT_SIGBUS;
+-		goto up_out;
+-	}
++	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
++		goto out_unlock;
++
++	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
++	if (ret & VM_FAULT_ERROR)
++		goto out_unlock;
+ 
+ 	/*
+-	 * We populate the whole vma on fault, so we need to test whether
+-	 * the vma has already been mapped, such as for concurrent faults
+-	 * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
+-	 * we ask it to fill the same range again.
++	 * Pre-fault the remainder of the vma, abort further insertions and
++	 * suppress the error if a fault is encountered during the pre-fault.
+ 	 */
+-	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+-		if (mmap_vma->vma == vma)
+-			goto up_out;
+-	}
+-
+-	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+-			       vma->vm_end - vma->vm_start,
+-			       vma->vm_page_prot)) {
+-		ret = VM_FAULT_SIGBUS;
+-		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+-		goto up_out;
+-	}
++	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
++		if (addr == vmf->address)
++			continue;
+ 
+-	if (__vfio_pci_add_vma(vdev, vma)) {
+-		ret = VM_FAULT_OOM;
+-		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
++		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
++			break;
+ 	}
+ 
+-up_out:
++out_unlock:
+ 	up_read(&vdev->memory_lock);
+-	mutex_unlock(&vdev->vma_lock);
++
+ 	return ret;
+ }
+ 
+ static const struct vm_operations_struct vfio_pci_mmap_ops = {
+-	.open = vfio_pci_mmap_open,
+-	.close = vfio_pci_mmap_close,
+ 	.fault = vfio_pci_mmap_fault,
+ };
+ 
+@@ -1877,11 +1754,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
+ 
+ 	vma->vm_private_data = vdev;
+ 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
++	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ 
+ 	/*
+-	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
+-	 * change vm_flags within the fault handler.  Set them now.
++	 * Set vm_flags now; they should not be changed in the fault handler.
++	 * We want the same flags and page protection (decrypted above) as
++	 * io_remap_pfn_range() would set.
+ 	 *
+ 	 * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64,
+ 	 * allowing KVM stage 2 device mapping attributes to use Normal-NC
+@@ -2199,8 +2077,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
+ 	mutex_init(&vdev->ioeventfds_lock);
+ 	INIT_LIST_HEAD(&vdev->dummy_resources_list);
+ 	INIT_LIST_HEAD(&vdev->ioeventfds_list);
+-	mutex_init(&vdev->vma_lock);
+-	INIT_LIST_HEAD(&vdev->vma_list);
+ 	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
+ 	init_rwsem(&vdev->memory_lock);
+ 	xa_init(&vdev->ctx);
+@@ -2216,7 +2092,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
+ 
+ 	mutex_destroy(&vdev->igate);
+ 	mutex_destroy(&vdev->ioeventfds_lock);
+-	mutex_destroy(&vdev->vma_lock);
+ 	kfree(vdev->region);
+ 	kfree(vdev->pm_save);
+ }
+@@ -2494,26 +2369,15 @@ static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
+ 	return ret;
+ }
+ 
+-/*
+- * We need to get memory_lock for each device, but devices can share mmap_lock,
+- * therefore we need to zap and hold the vma_lock for each device, and only then
+- * get each memory_lock.
+- */
+ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ 				      struct vfio_pci_group_info *groups,
+ 				      struct iommufd_ctx *iommufd_ctx)
+ {
+-	struct vfio_pci_core_device *cur_mem;
+-	struct vfio_pci_core_device *cur_vma;
+-	struct vfio_pci_core_device *cur;
++	struct vfio_pci_core_device *vdev;
+ 	struct pci_dev *pdev;
+-	bool is_mem = true;
+ 	int ret;
+ 
+ 	mutex_lock(&dev_set->lock);
+-	cur_mem = list_first_entry(&dev_set->device_list,
+-				   struct vfio_pci_core_device,
+-				   vdev.dev_set_list);
+ 
+ 	pdev = vfio_pci_dev_set_resettable(dev_set);
+ 	if (!pdev) {
+@@ -2530,7 +2394,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ 	if (ret)
+ 		goto err_unlock;
+ 
+-	list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
++	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
+ 		bool owned;
+ 
+ 		/*
+@@ -2554,38 +2418,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ 		 * Otherwise, reset is not allowed.
+ 		 */
+ 		if (iommufd_ctx) {
+-			int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
++			int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
+ 							    iommufd_ctx);
+ 
+ 			owned = (devid > 0 || devid == -ENOENT);
+ 		} else {
+-			owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
++			owned = vfio_dev_in_groups(&vdev->vdev, groups);
+ 		}
+ 
+ 		if (!owned) {
+ 			ret = -EINVAL;
+-			goto err_undo;
++			break;
+ 		}
+ 
+ 		/*
+-		 * Locking multiple devices is prone to deadlock, runaway and
+-		 * unwind if we hit contention.
++		 * Take the memory write lock for each device and zap BAR
++		 * mappings to prevent the user accessing the device while in
++		 * reset.  Locking multiple devices is prone to deadlock,
++		 * runaway and unwind if we hit contention.
+ 		 */
+-		if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
++		if (!down_write_trylock(&vdev->memory_lock)) {
+ 			ret = -EBUSY;
+-			goto err_undo;
++			break;
+ 		}
++
++		vfio_pci_zap_bars(vdev);
+ 	}
+-	cur_vma = NULL;
+ 
+-	list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
+-		if (!down_write_trylock(&cur_mem->memory_lock)) {
+-			ret = -EBUSY;
+-			goto err_undo;
+-		}
+-		mutex_unlock(&cur_mem->vma_lock);
++	if (!list_entry_is_head(vdev,
++				&dev_set->device_list, vdev.dev_set_list)) {
++		vdev = list_prev_entry(vdev, vdev.dev_set_list);
++		goto err_undo;
+ 	}
+-	cur_mem = NULL;
+ 
+ 	/*
+ 	 * The pci_reset_bus() will reset all the devices in the bus.
+@@ -2596,25 +2460,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ 	 * cause the PCI config space reset without restoring the original
+ 	 * state (saved locally in 'vdev->pm_save').
+ 	 */
+-	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+-		vfio_pci_set_power_state(cur, PCI_D0);
++	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
++		vfio_pci_set_power_state(vdev, PCI_D0);
+ 
+ 	ret = pci_reset_bus(pdev);
+ 
++	vdev = list_last_entry(&dev_set->device_list,
++			       struct vfio_pci_core_device, vdev.dev_set_list);
++
+ err_undo:
+-	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+-		if (cur == cur_mem)
+-			is_mem = false;
+-		if (cur == cur_vma)
+-			break;
+-		if (is_mem)
+-			up_write(&cur->memory_lock);
+-		else
+-			mutex_unlock(&cur->vma_lock);
+-	}
++	list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
++					 vdev.dev_set_list)
++		up_write(&vdev->memory_lock);
++
++	list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
++		pm_runtime_put(&vdev->pdev->dev);
+ 
+-	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+-		pm_runtime_put(&cur->pdev->dev);
+ err_unlock:
+ 	mutex_unlock(&dev_set->lock);
+ 	return ret;
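
The new fault path above leans on the mmap offset encoding: vfio packs the region index into the high bits of the offset (VFIO_PCI_OFFSET_SHIFT, 40 in current mainline) and the page offset within the BAR into the low bits, which is all vma_to_pfn() needs to compute the target PFN. A standalone demo of the bit-splitting only (values illustrative):

#include <stdio.h>

#define PAGE_SHIFT		12
#define VFIO_PCI_OFFSET_SHIFT	40	/* assumed mainline value */

int main(void)
{
	/* vm_pgoff for "BAR 2, 0x10 pages in", as userspace would mmap it */
	unsigned long long vm_pgoff =
		(2ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) | 0x10;
	unsigned long long index = vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	unsigned long long pgoff = vm_pgoff &
		((1ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);

	printf("region index=%llu, page offset within BAR=%#llx\n", index, pgoff);
	return 0;
}
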
+diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
+index e97d796a54fba..a5a62d9d963f7 100644
+--- a/drivers/vfio/vfio_main.c
++++ b/drivers/vfio/vfio_main.c
+@@ -22,8 +22,10 @@
+ #include <linux/list.h>
+ #include <linux/miscdevice.h>
+ #include <linux/module.h>
++#include <linux/mount.h>
+ #include <linux/mutex.h>
+ #include <linux/pci.h>
++#include <linux/pseudo_fs.h>
+ #include <linux/rwsem.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+@@ -43,9 +45,13 @@
+ #define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
+ #define DRIVER_DESC	"VFIO - User Level meta-driver"
+ 
++#define VFIO_MAGIC 0x5646494f /* "VFIO" */
++
+ static struct vfio {
+ 	struct class			*device_class;
+ 	struct ida			device_ida;
++	struct vfsmount			*vfs_mount;
++	int				fs_count;
+ } vfio;
+ 
+ #ifdef CONFIG_VFIO_NOIOMMU
+@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
+ 	if (device->ops->release)
+ 		device->ops->release(device);
+ 
++	iput(device->inode);
++	simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+ 	kvfree(device);
+ }
+ 
+@@ -228,6 +236,34 @@ struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+ 
++static int vfio_fs_init_fs_context(struct fs_context *fc)
++{
++	return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
++}
++
++static struct file_system_type vfio_fs_type = {
++	.name = "vfio",
++	.owner = THIS_MODULE,
++	.init_fs_context = vfio_fs_init_fs_context,
++	.kill_sb = kill_anon_super,
++};
++
++static struct inode *vfio_fs_inode_new(void)
++{
++	struct inode *inode;
++	int ret;
++
++	ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
++	if (ret)
++		return ERR_PTR(ret);
++
++	inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
++	if (IS_ERR(inode))
++		simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
++
++	return inode;
++}
++
+ /*
+  * Initialize a vfio_device so it can be registered to vfio core.
+  */
+@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
+ 	init_completion(&device->comp);
+ 	device->dev = dev;
+ 	device->ops = ops;
++	device->inode = vfio_fs_inode_new();
++	if (IS_ERR(device->inode)) {
++		ret = PTR_ERR(device->inode);
++		goto out_inode;
++	}
+ 
+ 	if (ops->init) {
+ 		ret = ops->init(device);
+@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
+ 	return 0;
+ 
+ out_uninit:
++	iput(device->inode);
++	simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
++out_inode:
+ 	vfio_release_device_set(device);
+ 	ida_free(&vfio.device_ida, device->index);
+ 	return ret;
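
The vfio_main.c infrastructure above exists for one reason: give every vfio_device an anonymous inode on a private "vfio" pseudo filesystem, and point each opened file's f_mapping at that inode's i_mapping (the device_cdev.c and group.c hunks). All mmaps of one device then share a single address_space, so revoking access reduces to one call per device, roughly (sketch of the idiom, mirroring vfio_pci_zap_bars() above):

	/* drop the PTEs of every mmap of this device in [start, start + len);
	 * the final argument is even_cows, so private copies go too */
	unmap_mapping_range(device->inode->i_mapping, start, len, true);

Subsequent accesses re-fault through vfio_pci_mmap_fault(), which can refuse them while memory access is disabled.
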
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 100020ca4658e..787ca2892d7a6 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -89,6 +89,16 @@ enum {
+ 	BTRFS_INODE_FREE_SPACE_INODE,
+ 	/* Set when there are no capabilities in XATTs for the inode. */
+ 	BTRFS_INODE_NO_CAP_XATTR,
++	/*
++	 * Set if an error happened when doing a COW write before submitting a
++	 * bio or during writeback. Used for both buffered writes and direct IO
++	 * writes. This is to signal a fast fsync that it has to wait for
++	 * ordered extents to complete and therefore not log extent maps that
++	 * point to unwritten extents (when an ordered extent completes and it
++	 * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
++	 * range).
++	 */
++	BTRFS_INODE_COW_WRITE_ERROR,
+ };
+ 
+ /* in memory btrfs inode */
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index f9d76072398da..97f6133b6eee8 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1875,6 +1875,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 */
+ 	if (full_sync || btrfs_is_zoned(fs_info)) {
+ 		ret = btrfs_wait_ordered_range(inode, start, len);
++		clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &BTRFS_I(inode)->runtime_flags);
+ 	} else {
+ 		/*
+ 		 * Get our ordered extents as soon as possible to avoid doing
+@@ -1884,6 +1885,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
+ 						      &ctx.ordered_extents);
+ 		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
++		if (ret)
++			goto out_release_extents;
++
++		/*
++		 * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
++		 * starting and waiting for writeback, because for buffered IO
++		 * it may have been set during the end IO callback
++		 * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
++		 * case an error happened and we need to wait for ordered
++		 * extents to complete so that any extent maps that point to
++		 * unwritten locations are dropped and we don't log them.
++		 */
++		if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR,
++				       &BTRFS_I(inode)->runtime_flags))
++			ret = btrfs_wait_ordered_range(inode, start, len);
+ 	}
+ 
+ 	if (ret)
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index c2a42bcde98e0..7dbf4162c75a5 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -382,6 +382,37 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
+ 	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
+ 	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
+ 
++	/*
++	 * If this is a COW write it means we created new extent maps for the
++	 * range and they point to unwritten locations if we got an error either
++	 * before submitting a bio or during IO.
++	 *
++	 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
++	 * are queuing its completion below. During completion, at
++	 * btrfs_finish_one_ordered(), we will drop the extent maps for the
++	 * unwritten extents.
++	 *
++	 * However because completion runs in a work queue we can end up having
++	 * a fast fsync running before that. In the case of direct IO, once we
++	 * unlock the inode the fsync might start, and we queue the completion
++	 * before unlocking the inode. In the case of buffered IO when writeback
++	 * finishes (end_bbio_data_write()) we queue the completion, so if the
++	 * writeback was triggered by a fast fsync, the fsync might start
++	 * logging before ordered extent completion runs in the work queue.
++	 *
++	 * The fast fsync will log file extent items based on the extent maps it
++	 * finds, so if by the time it collects extent maps the ordered extent
++	 * completion didn't happen yet, it will log file extent items that
++	 * point to unwritten extents, resulting in a corruption if a crash
++	 * happens and the log tree is replayed. Note that a fast fsync does not
++	 * wait for completion of ordered extents in order to reduce latency.
++	 *
++	 * Set a flag in the inode so that the next fast fsync will wait for
++	 * ordered extents to complete before starting to log.
++	 */
++	if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
++		set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
++
+ 	if (ret)
+ 		btrfs_queue_ordered_fn(ordered);
+ 	return ret;
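
The ordered-data.c comment above describes a two-sided handshake: IO completion sets BTRFS_INODE_COW_WRITE_ERROR when a COW write failed, and the fast fsync path (file.c hunk earlier) does test_and_clear_bit() after waiting for writeback, falling back to btrfs_wait_ordered_range() when the bit was set. A userspace analogue of just the atomic idiom (hypothetical demo, not btrfs code):

#include <stdatomic.h>
#include <stdio.h>

#define COW_WRITE_ERROR (1u << 0)

static atomic_uint runtime_flags;

static void ordered_extent_failed(void)	/* completion side */
{
	atomic_fetch_or(&runtime_flags, COW_WRITE_ERROR);	/* set_bit() */
}

static int fast_fsync_slow_path(void)		/* fsync side */
{
	/* test_and_clear_bit(): exactly one fsync observes the error */
	return atomic_fetch_and(&runtime_flags, ~COW_WRITE_ERROR) & COW_WRITE_ERROR;
}

int main(void)
{
	ordered_extent_failed();
	printf("fsync #1 must wait: %d\n", fast_fsync_slow_path() != 0);	/* 1 */
	printf("fsync #2 must wait: %d\n", fast_fsync_slow_path() != 0);	/* 0 */
	return 0;
}
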
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 4caa078d972a3..9af2afb417308 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1351,7 +1351,7 @@ static int flush_reservations(struct btrfs_fs_info *fs_info)
+ 
+ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ {
+-	struct btrfs_root *quota_root;
++	struct btrfs_root *quota_root = NULL;
+ 	struct btrfs_trans_handle *trans = NULL;
+ 	int ret = 0;
+ 
+@@ -1449,9 +1449,9 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ 			      quota_root->node, 0, 1);
+ 
+-	btrfs_put_root(quota_root);
+ 
+ out:
++	btrfs_put_root(quota_root);
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 	if (ret && trans)
+ 		btrfs_end_transaction(trans);
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 8c4fc98ca9ce7..aa7ddc09c55fa 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -441,7 +441,8 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
+ 	u32 item_size = btrfs_item_size(leaf, slot);
+ 	unsigned long end, ptr;
+ 	u64 offset, flags, count;
+-	int type, ret;
++	int type;
++	int ret = 0;
+ 
+ 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ 	flags = btrfs_extent_flags(leaf, ei);
+@@ -486,7 +487,11 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
+ 						  key->objectid, key->offset);
+ 			break;
+ 		case BTRFS_EXTENT_OWNER_REF_KEY:
+-			WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
++			if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) {
++				btrfs_err(fs_info,
++			  "found extent owner ref without simple quotas enabled");
++				ret = -EINVAL;
++			}
+ 			break;
+ 		default:
+ 			btrfs_err(fs_info, "invalid key type in iref");
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index afd6932f5e895..d7caa3732f074 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1688,20 +1688,24 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
+ 					    (i << fs_info->sectorsize_bits);
+ 			int err;
+ 
+-			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
+-					       fs_info, scrub_read_endio, stripe);
+-			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+-
+ 			io_stripe.is_scrub = true;
++			stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
++			/*
++			 * For RST cases, we need to manually split the bbio to
++			 * follow the RST boundary.
++			 */
+ 			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
+-					      &stripe_len, &bioc, &io_stripe,
+-					      &mirror);
++					      &stripe_len, &bioc, &io_stripe, &mirror);
+ 			btrfs_put_bioc(bioc);
+-			if (err) {
+-				btrfs_bio_end_io(bbio,
+-						 errno_to_blk_status(err));
+-				return;
++			if (err < 0) {
++				set_bit(i, &stripe->io_error_bitmap);
++				set_bit(i, &stripe->error_bitmap);
++				continue;
+ 			}
++
++			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
++					       fs_info, scrub_read_endio, stripe);
++			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+ 		}
+ 
+ 		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
+diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
+index f449f7340aad0..9fb06dc165202 100644
+--- a/fs/cachefiles/cache.c
++++ b/fs/cachefiles/cache.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <linux/statfs.h>
+ #include <linux/namei.h>
++#include <trace/events/fscache.h>
+ #include "internal.h"
+ 
+ /*
+@@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
+ }
+ 
+ /*
+- * Withdraw volumes.
++ * Withdraw fscache volumes.
++ */
++static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
++{
++	struct list_head *cur;
++	struct cachefiles_volume *volume;
++	struct fscache_volume *vcookie;
++
++	_enter("");
++retry:
++	spin_lock(&cache->object_list_lock);
++	list_for_each(cur, &cache->volumes) {
++		volume = list_entry(cur, struct cachefiles_volume, cache_link);
++
++		if (atomic_read(&volume->vcookie->n_accesses) == 0)
++			continue;
++
++		vcookie = fscache_try_get_volume(volume->vcookie,
++						 fscache_volume_get_withdraw);
++		if (vcookie) {
++			spin_unlock(&cache->object_list_lock);
++			fscache_withdraw_volume(vcookie);
++			fscache_put_volume(vcookie, fscache_volume_put_withdraw);
++			goto retry;
++		}
++	}
++	spin_unlock(&cache->object_list_lock);
++
++	_leave("");
++}
++
++/*
++ * Withdraw cachefiles volumes.
+  */
+ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ {
+ 	_enter("");
+ 
+ 	for (;;) {
++		struct fscache_volume *vcookie = NULL;
+ 		struct cachefiles_volume *volume = NULL;
+ 
+ 		spin_lock(&cache->object_list_lock);
+ 		if (!list_empty(&cache->volumes)) {
+ 			volume = list_first_entry(&cache->volumes,
+ 						  struct cachefiles_volume, cache_link);
++			vcookie = fscache_try_get_volume(volume->vcookie,
++							 fscache_volume_get_withdraw);
++			if (!vcookie) {
++				spin_unlock(&cache->object_list_lock);
++				cpu_relax();
++				continue;
++			}
+ 			list_del_init(&volume->cache_link);
+ 		}
+ 		spin_unlock(&cache->object_list_lock);
+@@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+ 			break;
+ 
+ 		cachefiles_withdraw_volume(volume);
++		fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+ 	}
+ 
+ 	_leave("");
+@@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
+ 	pr_info("File cache on %s unregistering\n", fscache->name);
+ 
+ 	fscache_withdraw_cache(fscache);
++	cachefiles_withdraw_fscache_volumes(cache);
+ 
+ 	/* we now have to destroy all the active objects pertaining to this
+ 	 * cache - which we do by passing them off to thread pool to be
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 7e4874f60de10..d0059d36cbd51 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -97,12 +97,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
+ }
+ 
+ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+-					 unsigned long arg)
++					 unsigned long id)
+ {
+ 	struct cachefiles_object *object = filp->private_data;
+ 	struct cachefiles_cache *cache = object->volume->cache;
+ 	struct cachefiles_req *req;
+-	unsigned long id;
++	XA_STATE(xas, &cache->reqs, id);
+ 
+ 	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
+ 		return -EINVAL;
+@@ -110,10 +110,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
+ 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ 		return -EOPNOTSUPP;
+ 
+-	id = arg;
+-	req = xa_erase(&cache->reqs, id);
+-	if (!req)
++	xa_lock(&cache->reqs);
++	req = xas_load(&xas);
++	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
++	    req->object != object) {
++		xa_unlock(&cache->reqs);
+ 		return -EINVAL;
++	}
++	xas_store(&xas, NULL);
++	xa_unlock(&cache->reqs);
+ 
+ 	trace_cachefiles_ondemand_cread(object, id);
+ 	complete(&req->done);
+@@ -142,6 +147,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	unsigned long id;
+ 	long size;
+ 	int ret;
++	XA_STATE(xas, &cache->reqs, 0);
+ 
+ 	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
+ 		return -EOPNOTSUPP;
+@@ -165,10 +171,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	if (ret)
+ 		return ret;
+ 
+-	req = xa_erase(&cache->reqs, id);
+-	if (!req)
++	xa_lock(&cache->reqs);
++	xas.xa_index = id;
++	req = xas_load(&xas);
++	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
++	    !req->object->ondemand->ondemand_id) {
++		xa_unlock(&cache->reqs);
+ 		return -EINVAL;
++	}
++	xas_store(&xas, NULL);
++	xa_unlock(&cache->reqs);
+ 
++	info = req->object->ondemand;
+ 	/* fail OPEN request if copen format is invalid */
+ 	ret = kstrtol(psize, 0, &size);
+ 	if (ret) {
+@@ -188,7 +202,6 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 		goto out;
+ 	}
+ 
+-	info = req->object->ondemand;
+ 	spin_lock(&info->lock);
+ 	/*
+ 	 * The anonymous fd was closed before copen ? Fail the request.
+@@ -228,6 +241,11 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
+ 	wake_up_all(&cache->daemon_pollwq);
+ 
+ out:
++	spin_lock(&info->lock);
++	/* Need to set object close to avoid reopen status continuing */
++	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
++		cachefiles_ondemand_set_object_close(req->object);
++	spin_unlock(&info->lock);
+ 	complete(&req->done);
+ 	return ret;
+ }
+@@ -362,6 +380,20 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
+ 	return NULL;
+ }
+ 
++static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
++						  struct xa_state *xas, int err)
++{
++	if (unlikely(!xas || !req))
++		return false;
++
++	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
++		return false;
++
++	req->error = err;
++	complete(&req->done);
++	return true;
++}
++
+ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ 					char __user *_buffer, size_t buflen)
+ {
+@@ -425,16 +457,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
+ out:
+ 	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+ 	/* Remove error request and CLOSE request has no reply */
+-	if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
+-		xas_reset(&xas);
+-		xas_lock(&xas);
+-		if (xas_load(&xas) == req) {
+-			req->error = ret;
+-			complete(&req->done);
+-			xas_store(&xas, NULL);
+-		}
+-		xas_unlock(&xas);
+-	}
++	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
++		cachefiles_ondemand_finish_req(req, &xas, ret);
+ 	cachefiles_req_put(req);
+ 	return ret ? ret : n;
+ }
+@@ -539,8 +563,18 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
+ 		goto out;
+ 
+ 	wake_up_all(&cache->daemon_pollwq);
+-	wait_for_completion(&req->done);
+-	ret = req->error;
++wait:
++	ret = wait_for_completion_killable(&req->done);
++	if (!ret) {
++		ret = req->error;
++	} else {
++		ret = -EINTR;
++		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
++			/* Someone will complete it soon. */
++			cpu_relax();
++			goto wait;
++		}
++	}
+ 	cachefiles_req_put(req);
+ 	return ret;
+ out:
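
cachefiles_ondemand_finish_req() above is a single-completer idiom: both the daemon-read error path and a killed waiter race to detach the request from the xarray, and xa_cmpxchg() guarantees that only the side that actually removed it writes req->error and completes it. A userspace analogue with one atomic slot standing in for the xarray entry (hypothetical demo):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct req { int error; };

static _Atomic(struct req *) slot;	/* stands in for the xarray entry */

static bool finish_req(struct req *req, int err)
{
	struct req *expected = req;

	/* mirrors: xa_cmpxchg(xa, index, req, NULL, 0) != req -> lost the race */
	if (!atomic_compare_exchange_strong(&slot, &expected, NULL))
		return false;
	req->error = err;		/* we own the completion now */
	return true;
}

int main(void)
{
	struct req r = { 0 };

	atomic_store(&slot, &r);
	printf("first caller wins:  %d\n", finish_req(&r, -5));	/* 1 */
	printf("second caller wins: %d\n", finish_req(&r, -4));	/* 0 */
	printf("error = %d\n", r.error);			/* -5, not -4 */
	return 0;
}
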
+diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
+index 89df0ba8ba5e7..781aac4ef274b 100644
+--- a/fs/cachefiles/volume.c
++++ b/fs/cachefiles/volume.c
+@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
+ 
+ void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
+ {
+-	fscache_withdraw_volume(volume->vcookie);
+ 	cachefiles_set_volume_xattr(volume);
+ 	__cachefiles_free_volume(volume);
+ }
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 66515fbc9dd70..4c144519aa709 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -3035,28 +3035,25 @@ EXPORT_SYMBOL(d_splice_alias);
+   
+ bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
+ {
+-	bool result;
++	bool subdir;
+ 	unsigned seq;
+ 
+ 	if (new_dentry == old_dentry)
+ 		return true;
+ 
+-	do {
+-		/* for restarting inner loop in case of seq retry */
+-		seq = read_seqbegin(&rename_lock);
+-		/*
+-		 * Need rcu_readlock to protect against the d_parent trashing
+-		 * due to d_move
+-		 */
+-		rcu_read_lock();
+-		if (d_ancestor(old_dentry, new_dentry))
+-			result = true;
+-		else
+-			result = false;
+-		rcu_read_unlock();
+-	} while (read_seqretry(&rename_lock, seq));
+-
+-	return result;
++	/* Access d_parent under rcu as d_move() may change it. */
++	rcu_read_lock();
++	seq = read_seqbegin(&rename_lock);
++	subdir = d_ancestor(old_dentry, new_dentry);
++	/* Try lockless once... */
++	if (read_seqretry(&rename_lock, seq)) {
++		/* ...else acquire lock for progress even on deep chains. */
++		read_seqlock_excl(&rename_lock);
++		subdir = d_ancestor(old_dentry, new_dentry);
++		read_sequnlock_excl(&rename_lock);
++	}
++	rcu_read_unlock();
++	return subdir;
+ }
+ EXPORT_SYMBOL(is_subdir);
+ 
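
The new is_subdir() takes one optimistic pass under the rename_lock sequence counter and only escalates to the exclusive reader lock when a concurrent rename was observed, so a stream of renames can no longer starve it the way the old retry loop could. The general shape of the pattern (kernel-style sketch, assuming a seqlock_t and an RCU-safe computation):

	unsigned seq;
	bool answer;

	rcu_read_lock();
	seq = read_seqbegin(&lock);
	answer = compute();			/* optimistic, may race */
	if (read_seqretry(&lock, seq)) {
		/* raced with a writer: lock once for guaranteed progress */
		read_seqlock_excl(&lock);
		answer = compute();		/* now stable */
		read_sequnlock_excl(&lock);
	}
	rcu_read_unlock();
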
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index e313c936351d5..6bd435a565f61 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -723,6 +723,8 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
+ 
+ 	err = z_erofs_do_map_blocks(inode, map, flags);
+ out:
++	if (err)
++		map->m_llen = 0;
+ 	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+ 	return err;
+ }
+diff --git a/fs/file.c b/fs/file.c
+index 3b683b9101d84..005841dd35977 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -481,12 +481,12 @@ struct files_struct init_files = {
+ 
+ static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
+ {
+-	unsigned int maxfd = fdt->max_fds;
++	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
+ 	unsigned int maxbit = maxfd / BITS_PER_LONG;
+ 	unsigned int bitbit = start / BITS_PER_LONG;
+ 
+ 	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
+-	if (bitbit > maxfd)
++	if (bitbit >= maxfd)
+ 		return maxfd;
+ 	if (bitbit > start)
+ 		start = bitbit;
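
The one-character fs/file.c fix matters because find_next_zero_bit() returns its size argument (maxbit here) when every bit is set, making bitbit exactly equal to maxfd; the old strict ">" then let the function scan a word beyond the open_fds table. A tiny standalone reproduction of the boundary case (hypothetical userspace demo, not kernel code):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))

/* returns nwords when no word has a zero bit, like find_next_zero_bit() */
static unsigned int next_zero_word(const unsigned long *bits,
				   unsigned int nwords, unsigned int start)
{
	for (unsigned int i = start; i < nwords; i++)
		if (~bits[i])
			return i;
	return nwords;
}

int main(void)
{
	unsigned long full_fds_bits[2] = { ~0UL, ~0UL };	/* every fd in use */
	unsigned int maxfd = 2 * BITS_PER_LONG;
	unsigned int bitbit = next_zero_word(full_fds_bits, 2, 0) * BITS_PER_LONG;

	/* bitbit == maxfd: ">=" reports "full", ">" would keep scanning */
	printf("bitbit=%u maxfd=%u -> %s\n", bitbit, maxfd,
	       bitbit >= maxfd ? "table full" : "scan past the end");
	return 0;
}
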
+diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
+index 9c9ff6b8c6f7e..858029b1c1735 100644
+--- a/fs/hfsplus/xattr.c
++++ b/fs/hfsplus/xattr.c
+@@ -698,7 +698,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 		return err;
+ 	}
+ 
+-	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
++	strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+ 			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
+ 	if (!strbuf) {
+ 		res = -ENOMEM;
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index 4ac6c8c403c26..248e615270ff7 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -241,6 +241,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ 	unsigned block_size = (1 << block_bits);
+ 	size_t poff = offset_in_folio(folio, *pos);
+ 	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
++	size_t orig_plen = plen;
+ 	unsigned first = poff >> block_bits;
+ 	unsigned last = (poff + plen - 1) >> block_bits;
+ 
+@@ -277,7 +278,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ 	 * handle both halves separately so that we properly zero data in the
+ 	 * page cache for blocks that are entirely outside of i_size.
+ 	 */
+-	if (orig_pos <= isize && orig_pos + length > isize) {
++	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
+ 		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
+ 
+ 		if (first <= end && last > end)
+diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
+index d2ce0849bb53d..e6066ddbb3acf 100644
+--- a/fs/netfs/buffered_write.c
++++ b/fs/netfs/buffered_write.c
+@@ -811,7 +811,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
+ 				break;
+ 			}
+ 
+-			if (!folio_try_get_rcu(folio)) {
++			if (!folio_try_get(folio)) {
+ 				xas_reset(xas);
+ 				continue;
+ 			}
+@@ -1028,7 +1028,7 @@ static ssize_t netfs_writepages_begin(struct address_space *mapping,
+ 		if (!folio)
+ 			break;
+ 
+-		if (!folio_try_get_rcu(folio)) {
++		if (!folio_try_get(folio)) {
+ 			xas_reset(xas);
+ 			continue;
+ 		}
+diff --git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c
+index cdf991bdd9def..cb75c07b5281a 100644
+--- a/fs/netfs/fscache_volume.c
++++ b/fs/netfs/fscache_volume.c
+@@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ 	return volume;
+ }
+ 
++struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
++					      enum fscache_volume_trace where)
++{
++	int ref;
++
++	if (!__refcount_inc_not_zero(&volume->ref, &ref))
++		return NULL;
++
++	trace_fscache_volume(volume->debug_id, ref + 1, where);
++	return volume;
++}
++EXPORT_SYMBOL(fscache_try_get_volume);
++
+ static void fscache_see_volume(struct fscache_volume *volume,
+ 			       enum fscache_volume_trace where)
+ {
+@@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
+ 			fscache_free_volume(volume);
+ 	}
+ }
++EXPORT_SYMBOL(fscache_put_volume);
+ 
+ /*
+  * Relinquish a volume representation cookie.
+diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
+index ec7045d24400d..edf9b2a180caa 100644
+--- a/fs/netfs/internal.h
++++ b/fs/netfs/internal.h
+@@ -326,8 +326,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;
+ 
+ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ 					  enum fscache_volume_trace where);
+-void fscache_put_volume(struct fscache_volume *volume,
+-			enum fscache_volume_trace where);
+ bool fscache_begin_volume_access(struct fscache_volume *volume,
+ 				 struct fscache_cookie *cookie,
+ 				 enum fscache_access_trace why);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index bdd6cb33a3708..375c08fdcf2f3 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1625,7 +1625,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ 	switch (error) {
+ 	case 1:
+ 		break;
+-	case 0:
++	case -ETIMEDOUT:
++		if (inode && (IS_ROOT(dentry) ||
++			      NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL))
++			error = 1;
++		break;
++	case -ESTALE:
++	case -ENOENT:
++		error = 0;
++		fallthrough;
++	default:
+ 		/*
+ 		 * We can't d_drop the root of a disconnected tree:
+ 		 * its d_hash is on the s_anon list and d_drop() would hide
+@@ -1680,18 +1689,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir,
+ 
+ 	dir_verifier = nfs_save_change_attribute(dir);
+ 	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
+-	if (ret < 0) {
+-		switch (ret) {
+-		case -ESTALE:
+-		case -ENOENT:
+-			ret = 0;
+-			break;
+-		case -ETIMEDOUT:
+-			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+-				ret = 1;
+-		}
++	if (ret < 0)
+ 		goto out;
+-	}
+ 
+ 	/* Request help from readdirplus */
+ 	nfs_lookup_advise_force_readdirplus(dir, flags);
+@@ -1735,7 +1734,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ 			 unsigned int flags)
+ {
+ 	struct inode *inode;
+-	int error;
++	int error = 0;
+ 
+ 	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
+ 	inode = d_inode(dentry);
+@@ -1780,7 +1779,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ out_bad:
+ 	if (flags & LOOKUP_RCU)
+ 		return -ECHILD;
+-	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
++	return nfs_lookup_revalidate_done(dir, dentry, inode, error);
+ }
+ 
+ static int
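
The dir.c change moves the per-error handling out of
nfs_lookup_revalidate_dentry() and into the switch in
nfs_lookup_revalidate_done(), where -ETIMEDOUT can keep a soft-revalidated
dentry alive and -ESTALE/-ENOENT deliberately fall through to the
invalid-dentry path. The shape of that switch as a standalone hedged
sketch (the soft_reval flag and helper are stand-ins, not the NFS API):

    #include <errno.h>
    #include <stdio.h>

    static int revalidate_done(int error, int soft_reval)
    {
        switch (error) {
        case 1:                  /* dentry still valid */
            break;
        case -ETIMEDOUT:         /* soft timeouts may keep the dentry */
            if (soft_reval)
                error = 1;
            break;
        case -ESTALE:
        case -ENOENT:
            error = 0;           /* map to "invalid", then drop below */
            /* fall through */
        default:
            /* invalid dentry: this is where it would be d_drop()'d */
            break;
        }
        return error;
    }

    int main(void)
    {
        printf("%d %d\n", revalidate_done(-ETIMEDOUT, 1),
               revalidate_done(-ESTALE, 0));  /* prints: 1 0 */
        return 0;
    }
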
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3a816c4a6d5e2..a691fa10b3e95 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6289,6 +6289,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
+ 	if (status == 0)
+ 		nfs_setsecurity(inode, fattr);
+ 
++	nfs_free_fattr(fattr);
+ 	return status;
+ }
+ #endif	/* CONFIG_NFS_V4_SECURITY_LABEL */
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 6efb5068c116e..040b6b79c75e5 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+ 					continue;
+ 			} else if (index == prev->wb_index + 1)
+ 				continue;
++			/*
++			 * We will submit more requests after these. Indicate
++			 * this to the underlying layers.
++			 */
++			desc->pg_moreio = 1;
+ 			nfs_pageio_complete(desc);
+ 			break;
+ 		}
+diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
+index 0e27a2e4e68b8..13818129d268f 100644
+--- a/fs/nfs/symlink.c
++++ b/fs/nfs/symlink.c
+@@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio)
+ error:
+ 	folio_set_error(folio);
+ 	folio_unlock(folio);
+-	return -EIO;
++	return error;
+ }
+ 
+ static const char *nfs_get_link(struct dentry *dentry,
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index d8ede68260007..612e3398b221e 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -1409,7 +1409,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	target_tcon = tlink_tcon(smb_file_target->tlink);
+ 
+ 	if (src_tcon->ses != target_tcon->ses) {
+-		cifs_dbg(VFS, "source and target of copy not on same server\n");
++		cifs_dbg(FYI, "source and target of copy not on same server\n");
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 9be37d0fe724e..4784fece4d994 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -2753,7 +2753,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
+ 				break;
+ 			}
+ 
+-			if (!folio_try_get_rcu(folio)) {
++			if (!folio_try_get(folio)) {
+ 				xas_reset(xas);
+ 				continue;
+ 			}
+@@ -2989,7 +2989,7 @@ static ssize_t cifs_writepages_begin(struct address_space *mapping,
+ 		if (!folio)
+ 			break;
+ 
+-		if (!folio_try_get_rcu(folio)) {
++		if (!folio_try_get(folio)) {
+ 			xas_reset(xas);
+ 			continue;
+ 		}
+diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
+index 202ff91281560..694d2b4a4ad99 100644
+--- a/fs/smb/common/smb2pdu.h
++++ b/fs/smb/common/smb2pdu.h
+@@ -917,6 +917,40 @@ struct smb2_query_directory_rsp {
+ 	__u8   Buffer[];
+ } __packed;
+ 
++/* DeviceType Flags */
++#define FILE_DEVICE_CD_ROM              0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
++#define FILE_DEVICE_DFS                 0x00000006
++#define FILE_DEVICE_DISK                0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
++#define FILE_DEVICE_FILE_SYSTEM         0x00000009
++#define FILE_DEVICE_NAMED_PIPE          0x00000011
++#define FILE_DEVICE_NETWORK             0x00000012
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL                0x00000015
++#define FILE_DEVICE_PARALLEL_PORT       0x00000016
++#define FILE_DEVICE_PRINTER             0x00000018
++#define FILE_DEVICE_SERIAL_PORT         0x0000001b
++#define FILE_DEVICE_STREAMS             0x0000001e
++#define FILE_DEVICE_TAPE                0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
++#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
++#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
++
++/* Device Characteristics */
++#define FILE_REMOVABLE_MEDIA			0x00000001
++#define FILE_READ_ONLY_DEVICE			0x00000002
++#define FILE_FLOPPY_DISKETTE			0x00000004
++#define FILE_WRITE_ONCE_MEDIA			0x00000008
++#define FILE_REMOTE_DEVICE			0x00000010
++#define FILE_DEVICE_IS_MOUNTED			0x00000020
++#define FILE_VIRTUAL_VOLUME			0x00000040
++#define FILE_DEVICE_SECURE_OPEN			0x00000100
++#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
++#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
++#define FILE_PORTABLE_DEVICE			0x00004000
++#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
++
+ /*
+  * Maximum number of iovs we need for a set-info request.
+  * The largest one is rename/hardlink
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7d26fdcebbf98..840c71c66b30b 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5323,8 +5323,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 
+ 		info = (struct filesystem_device_info *)rsp->Buffer;
+ 
+-		info->DeviceType = cpu_to_le32(stfs.f_type);
+-		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
++		info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
++		info->DeviceCharacteristics =
++			cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
++		if (!test_tree_conn_flag(work->tcon,
++					 KSMBD_TREE_CONN_FLAG_WRITABLE))
++			info->DeviceCharacteristics |=
++				cpu_to_le32(FILE_READ_ONLY_DEVICE);
+ 		rsp->OutputBufferLength = cpu_to_le32(8);
+ 		break;
+ 	}
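
ksmbd previously leaked the local statfs f_type as the SMB DeviceType and
hard-coded 0x00000020; with the named constants added to smb2pdu.h it now
always reports FILE_DEVICE_DISK and composes DeviceCharacteristics from
flags. A small sketch of that composition (endianness conversion omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define FILE_DEVICE_DISK        0x00000007
    #define FILE_READ_ONLY_DEVICE   0x00000002
    #define FILE_DEVICE_IS_MOUNTED  0x00000020

    int main(void)
    {
        int writable = 0;                  /* e.g. share exported read-only */
        uint32_t type = FILE_DEVICE_DISK;  /* fixed; no longer stfs.f_type */
        uint32_t chars = FILE_DEVICE_IS_MOUNTED;

        if (!writable)
            chars |= FILE_READ_ONLY_DEVICE;

        printf("DeviceType=%#x Characteristics=%#x\n",
               (unsigned)type, (unsigned)chars);
        return 0;
    }
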
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 98c6fd0b39b63..fdfb61ccf55ae 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -77,7 +77,7 @@ struct cdrom_device_ops {
+ 				      unsigned int clearing, int slot);
+ 	int (*tray_move) (struct cdrom_device_info *, int);
+ 	int (*lock_door) (struct cdrom_device_info *, int);
+-	int (*select_speed) (struct cdrom_device_info *, int);
++	int (*select_speed) (struct cdrom_device_info *, unsigned long);
+ 	int (*get_last_session) (struct cdrom_device_info *,
+ 				 struct cdrom_multisession *);
+ 	int (*get_mcn) (struct cdrom_device_info *,
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index bdf7f3eddf0a2..4c91a019972b6 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -19,6 +19,7 @@
+ enum fscache_cache_trace;
+ enum fscache_cookie_trace;
+ enum fscache_access_trace;
++enum fscache_volume_trace;
+ 
+ enum fscache_cache_state {
+ 	FSCACHE_CACHE_IS_NOT_PRESENT,	/* No cache is present for this name */
+@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
+ 
+ extern void fscache_io_error(struct fscache_cache *cache);
+ 
++extern struct fscache_volume *
++fscache_try_get_volume(struct fscache_volume *volume,
++		       enum fscache_volume_trace where);
++extern void fscache_put_volume(struct fscache_volume *volume,
++			       enum fscache_volume_trace where);
+ extern void fscache_end_volume_access(struct fscache_volume *volume,
+ 				      struct fscache_cookie *cookie,
+ 				      enum fscache_access_trace why);
+diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
+index d7c2d33baa7f8..fdd2a75adb037 100644
+--- a/include/linux/page_ref.h
++++ b/include/linux/page_ref.h
+@@ -263,54 +263,9 @@ static inline bool folio_try_get(struct folio *folio)
+ 	return folio_ref_add_unless(folio, 1, 0);
+ }
+ 
+-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+-{
+-#ifdef CONFIG_TINY_RCU
+-	/*
+-	 * The caller guarantees the folio will not be freed from interrupt
+-	 * context, so (on !SMP) we only need preemption to be disabled
+-	 * and TINY_RCU does that for us.
+-	 */
+-# ifdef CONFIG_PREEMPT_COUNT
+-	VM_BUG_ON(!in_atomic() && !irqs_disabled());
+-# endif
+-	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+-	folio_ref_add(folio, count);
+-#else
+-	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+-		/* Either the folio has been freed, or will be freed. */
+-		return false;
+-	}
+-#endif
+-	return true;
+-}
+-
+-/**
+- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+- * @folio: The folio.
+- *
+- * This is a version of folio_try_get() optimised for non-SMP kernels.
+- * If you are still holding the rcu_read_lock() after looking up the
+- * page and know that the page cannot have its refcount decreased to
+- * zero in interrupt context, you can use this instead of folio_try_get().
+- *
+- * Example users include get_user_pages_fast() (as pages are not unmapped
+- * from interrupt context) and the page cache lookups (as pages are not
+- * truncated from interrupt context).  We also know that pages are not
+- * frozen in interrupt context for the purposes of splitting or migration.
+- *
+- * You can also use this function if you're holding a lock that prevents
+- * pages being frozen & removed; eg the i_pages lock for the page cache
+- * or the mmap_lock or page table lock for page tables.  In this case,
+- * it will always succeed, and you could have used a plain folio_get(),
+- * but it's sometimes more convenient to have a common function called
+- * from both locked and RCU-protected contexts.
+- *
+- * Return: True if the reference count was successfully incremented.
+- */
+-static inline bool folio_try_get_rcu(struct folio *folio)
++static inline bool folio_ref_try_add(struct folio *folio, int count)
+ {
+-	return folio_ref_try_add_rcu(folio, 1);
++	return folio_ref_add_unless(folio, count, 0);
+ }
+ 
+ static inline int page_ref_freeze(struct page *page, int count)
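
The kernel-doc removed above spelled out why folio_try_get_rcu() existed;
with the TINY_RCU shortcut dropped, plain folio_try_get() is what every
lookup loop in this series (filemap, netfs, cifs, gup) now calls. A
self-contained sketch of that load / try-get / re-check loop, with a
one-slot table standing in for the xarray:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct folio { atomic_int ref; };

    static struct folio table_slot = { .ref = 1 };  /* stand-in slot */

    static struct folio *slot_load(void) { return &table_slot; }

    static bool folio_try_get(struct folio *f)
    {
        int old = atomic_load(&f->ref);

        do {
            if (old == 0)
                return false;            /* folio is being freed */
        } while (!atomic_compare_exchange_weak(&f->ref, &old, old + 1));
        return true;
    }

    static void folio_put(struct folio *f) { atomic_fetch_sub(&f->ref, 1); }

    static struct folio *find_get_folio(void)
    {
        struct folio *folio;

    repeat:
        folio = slot_load();
        if (!folio)
            return NULL;
        if (!folio_try_get(folio))       /* lost the race: retry lookup */
            goto repeat;
        if (folio != slot_load()) {      /* slot changed: drop and retry */
            folio_put(folio);
            goto repeat;
        }
        return folio;                    /* caller now holds a reference */
    }

    int main(void)
    {
        printf("folio at %p\n", (void *)find_get_folio());
        return 0;
    }
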
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index ddbe7c3ca4ce2..314892a6de8a0 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -435,8 +435,6 @@ struct pnp_protocol {
+ #define protocol_for_each_dev(protocol, dev)	\
+ 	list_for_each_entry(dev, &(protocol)->devices, protocol_list)
+ 
+-extern const struct bus_type pnp_bus_type;
+-
+ #if defined(CONFIG_PNP)
+ 
+ /* device management */
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 64a4deb18dd00..afe6631da1bc6 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -1089,12 +1089,13 @@ struct spi_transfer {
+ 	unsigned	dummy_data:1;
+ 	unsigned	cs_off:1;
+ 	unsigned	cs_change:1;
+-	unsigned	tx_nbits:3;
+-	unsigned	rx_nbits:3;
++	unsigned	tx_nbits:4;
++	unsigned	rx_nbits:4;
+ 	unsigned	timestamped:1;
+ #define	SPI_NBITS_SINGLE	0x01 /* 1-bit transfer */
+ #define	SPI_NBITS_DUAL		0x02 /* 2-bit transfer */
+ #define	SPI_NBITS_QUAD		0x04 /* 4-bit transfer */
++#define	SPI_NBITS_OCTAL	0x08 /* 8-bit transfer */
+ 	u8		bits_per_word;
+ 	struct spi_delay	delay;
+ 	struct spi_delay	cs_change_delay;
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index 8b1a298204091..000a6cab2d318 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -64,6 +64,7 @@ struct vfio_device {
+ 	struct completion comp;
+ 	struct iommufd_access *iommufd_access;
+ 	void (*put_kvm)(struct kvm *kvm);
++	struct inode *inode;
+ #if IS_ENABLED(CONFIG_IOMMUFD)
+ 	struct iommufd_device *iommufd_device;
+ 	u8 iommufd_attached:1;
+diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
+index a2c8b8bba7119..f87067438ed48 100644
+--- a/include/linux/vfio_pci_core.h
++++ b/include/linux/vfio_pci_core.h
+@@ -93,8 +93,6 @@ struct vfio_pci_core_device {
+ 	struct list_head		sriov_pfs_item;
+ 	struct vfio_pci_core_device	*sriov_pf_core_dev;
+ 	struct notifier_block	nb;
+-	struct mutex		vma_lock;
+-	struct list_head	vma_list;
+ 	struct rw_semaphore	memory_lock;
+ };
+ 
+diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
+index 6a9d063e9f472..534c3386e714f 100644
+--- a/include/net/bluetooth/hci_sync.h
++++ b/include/net/bluetooth/hci_sync.h
+@@ -38,6 +38,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 			     const void *param, u8 event, u32 timeout,
+ 			     struct sock *sk);
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++			const void *param, u32 timeout);
+ 
+ void hci_cmd_sync_init(struct hci_dev *hdev);
+ void hci_cmd_sync_clear(struct hci_dev *hdev);
+diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
+index d70c55f17df7c..94dbb23580f2f 100644
+--- a/include/sound/dmaengine_pcm.h
++++ b/include/sound/dmaengine_pcm.h
+@@ -36,6 +36,7 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
+ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
+ 	struct dma_chan *chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
+ 
+ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ 	dma_filter_fn filter_fn, void *filter_data);
+diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
+index a6190aa1b4060..f1a73aa83fbbf 100644
+--- a/include/trace/events/fscache.h
++++ b/include/trace/events/fscache.h
+@@ -35,12 +35,14 @@ enum fscache_volume_trace {
+ 	fscache_volume_get_cookie,
+ 	fscache_volume_get_create_work,
+ 	fscache_volume_get_hash_collision,
++	fscache_volume_get_withdraw,
+ 	fscache_volume_free,
+ 	fscache_volume_new_acquire,
+ 	fscache_volume_put_cookie,
+ 	fscache_volume_put_create_work,
+ 	fscache_volume_put_hash_collision,
+ 	fscache_volume_put_relinquish,
++	fscache_volume_put_withdraw,
+ 	fscache_volume_see_create_work,
+ 	fscache_volume_see_hash_wake,
+ 	fscache_volume_wait_create_work,
+@@ -120,12 +122,14 @@ enum fscache_access_trace {
+ 	EM(fscache_volume_get_cookie,		"GET cook ")		\
+ 	EM(fscache_volume_get_create_work,	"GET creat")		\
+ 	EM(fscache_volume_get_hash_collision,	"GET hcoll")		\
++	EM(fscache_volume_get_withdraw,		"GET withd")            \
+ 	EM(fscache_volume_free,			"FREE     ")		\
+ 	EM(fscache_volume_new_acquire,		"NEW acq  ")		\
+ 	EM(fscache_volume_put_cookie,		"PUT cook ")		\
+ 	EM(fscache_volume_put_create_work,	"PUT creat")		\
+ 	EM(fscache_volume_put_hash_collision,	"PUT hcoll")		\
+ 	EM(fscache_volume_put_relinquish,	"PUT relnq")		\
++	EM(fscache_volume_put_withdraw,		"PUT withd")            \
+ 	EM(fscache_volume_see_create_work,	"SEE creat")		\
+ 	EM(fscache_volume_see_hash_wake,	"SEE hwake")		\
+ 	E_(fscache_volume_wait_create_work,	"WAIT crea")
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index 03edf2ccdf6c8..a4206723f5033 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -618,6 +618,8 @@
+ #define KEY_CAMERA_ACCESS_ENABLE	0x24b	/* Enables programmatic access to camera devices. (HUTRR72) */
+ #define KEY_CAMERA_ACCESS_DISABLE	0x24c	/* Disables programmatic access to camera devices. (HUTRR72) */
+ #define KEY_CAMERA_ACCESS_TOGGLE	0x24d	/* Toggles the current state of the camera access control. (HUTRR72) */
++#define KEY_ACCESSIBILITY		0x24e	/* Toggles the system bound accessibility UI/command (HUTRR116) */
++#define KEY_DO_NOT_DISTURB		0x24f	/* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/
+ 
+ #define KEY_BRIGHTNESS_MIN		0x250	/* Set Brightness to Minimum */
+ #define KEY_BRIGHTNESS_MAX		0x251	/* Set Brightness to Maximum */
+diff --git a/io_uring/register.c b/io_uring/register.c
+index 99c37775f974c..1ae8491e35abb 100644
+--- a/io_uring/register.c
++++ b/io_uring/register.c
+@@ -355,8 +355,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+ 	}
+ 
+ 	if (sqd) {
++		mutex_unlock(&ctx->uring_lock);
+ 		mutex_unlock(&sqd->lock);
+ 		io_put_sq_data(sqd);
++		mutex_lock(&ctx->uring_lock);
+ 	}
+ 
+ 	if (copy_to_user(arg, new_count, sizeof(new_count)))
+@@ -381,8 +383,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+ 	return 0;
+ err:
+ 	if (sqd) {
++		mutex_unlock(&ctx->uring_lock);
+ 		mutex_unlock(&sqd->lock);
+ 		io_put_sq_data(sqd);
++		mutex_lock(&ctx->uring_lock);
+ 	}
+ 	return ret;
+ }
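
Both hunks in io_register_iowq_max_workers() make the same fix:
ctx->uring_lock is dropped before releasing sqd->lock and putting the sq
data, then re-taken, so the put can never block while uring_lock is held.
The ordering as a runnable pthread sketch (lock names are stand-ins):

    #include <pthread.h>

    static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sqd_lock   = PTHREAD_MUTEX_INITIALIZER;

    static void put_sq_data(void)
    {
        /* teardown that may sleep or take sqd-side locks again */
    }

    int main(void)
    {
        pthread_mutex_lock(&uring_lock);
        pthread_mutex_lock(&sqd_lock);
        /* ... update worker counts under both locks ... */
        pthread_mutex_unlock(&uring_lock);  /* drop first: put may block */
        pthread_mutex_unlock(&sqd_lock);
        put_sq_data();
        pthread_mutex_lock(&uring_lock);    /* re-take to finish the op */
        pthread_mutex_unlock(&uring_lock);
        return 0;
    }
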
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d2dbe099286b9..7634fc32ee05a 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -124,6 +124,7 @@ enum wq_internal_consts {
+ 	HIGHPRI_NICE_LEVEL	= MIN_NICE,
+ 
+ 	WQ_NAME_LEN		= 32,
++	WORKER_ID_LEN		= 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
+ };
+ 
+ /*
+@@ -2778,6 +2779,26 @@ static void worker_detach_from_pool(struct worker *worker)
+ 		complete(detach_completion);
+ }
+ 
++static int format_worker_id(char *buf, size_t size, struct worker *worker,
++			    struct worker_pool *pool)
++{
++	if (worker->rescue_wq)
++		return scnprintf(buf, size, "kworker/R-%s",
++				 worker->rescue_wq->name);
++
++	if (pool) {
++		if (pool->cpu >= 0)
++			return scnprintf(buf, size, "kworker/%d:%d%s",
++					 pool->cpu, worker->id,
++					 pool->attrs->nice < 0  ? "H" : "");
++		else
++			return scnprintf(buf, size, "kworker/u%d:%d",
++					 pool->id, worker->id);
++	} else {
++		return scnprintf(buf, size, "kworker/dying");
++	}
++}
++
+ /**
+  * create_worker - create a new workqueue worker
+  * @pool: pool the new worker will belong to
+@@ -2794,7 +2815,6 @@ static struct worker *create_worker(struct worker_pool *pool)
+ {
+ 	struct worker *worker;
+ 	int id;
+-	char id_buf[23];
+ 
+ 	/* ID is needed to determine kthread name */
+ 	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
+@@ -2813,17 +2833,14 @@ static struct worker *create_worker(struct worker_pool *pool)
+ 	worker->id = id;
+ 
+ 	if (!(pool->flags & POOL_BH)) {
+-		if (pool->cpu >= 0)
+-			snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
+-				 pool->attrs->nice < 0  ? "H" : "");
+-		else
+-			snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
++		char id_buf[WORKER_ID_LEN];
+ 
++		format_worker_id(id_buf, sizeof(id_buf), worker, pool);
+ 		worker->task = kthread_create_on_node(worker_thread, worker,
+-					pool->node, "kworker/%s", id_buf);
++						      pool->node, "%s", id_buf);
+ 		if (IS_ERR(worker->task)) {
+ 			if (PTR_ERR(worker->task) == -EINTR) {
+-				pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
++				pr_err("workqueue: Interrupted when creating a worker thread \"%s\"\n",
+ 				       id_buf);
+ 			} else {
+ 				pr_err_once("workqueue: Failed to create a worker thread: %pe",
+@@ -3386,7 +3403,6 @@ static int worker_thread(void *__worker)
+ 		raw_spin_unlock_irq(&pool->lock);
+ 		set_pf_worker(false);
+ 
+-		set_task_comm(worker->task, "kworker/dying");
+ 		ida_free(&pool->worker_ida, worker->id);
+ 		worker_detach_from_pool(worker);
+ 		WARN_ON_ONCE(!list_empty(&worker->entry));
+@@ -5430,6 +5446,7 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
+ static int init_rescuer(struct workqueue_struct *wq)
+ {
+ 	struct worker *rescuer;
++	char id_buf[WORKER_ID_LEN];
+ 	int ret;
+ 
+ 	if (!(wq->flags & WQ_MEM_RECLAIM))
+@@ -5443,7 +5460,9 @@ static int init_rescuer(struct workqueue_struct *wq)
+ 	}
+ 
+ 	rescuer->rescue_wq = wq;
+-	rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
++	format_worker_id(id_buf, sizeof(id_buf), rescuer, NULL);
++
++	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", id_buf);
+ 	if (IS_ERR(rescuer->task)) {
+ 		ret = PTR_ERR(rescuer->task);
+ 		pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
+@@ -6272,19 +6291,15 @@ void show_freezable_workqueues(void)
+ /* used to show worker information through /proc/PID/{comm,stat,status} */
+ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ {
+-	int off;
+-
+-	/* always show the actual comm */
+-	off = strscpy(buf, task->comm, size);
+-	if (off < 0)
+-		return;
+-
+ 	/* stabilize PF_WQ_WORKER and worker pool association */
+ 	mutex_lock(&wq_pool_attach_mutex);
+ 
+ 	if (task->flags & PF_WQ_WORKER) {
+ 		struct worker *worker = kthread_data(task);
+ 		struct worker_pool *pool = worker->pool;
++		int off;
++
++		off = format_worker_id(buf, size, worker, pool);
+ 
+ 		if (pool) {
+ 			raw_spin_lock_irq(&pool->lock);
+@@ -6303,6 +6318,8 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
+ 			}
+ 			raw_spin_unlock_irq(&pool->lock);
+ 		}
++	} else {
++		strscpy(buf, task->comm, size);
+ 	}
+ 
+ 	mutex_unlock(&wq_pool_attach_mutex);
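
WORKER_ID_LEN replaces the bare char id_buf[23], and format_worker_id()
becomes the single place that spells kworker names, feeding kthread
creation, the rescuer, and /proc/PID/comm alike. A userspace sketch of the
helper's shape (arguments flattened for illustration):

    #include <stdio.h>

    #define WQ_NAME_LEN   32
    #define WORKER_ID_LEN (10 + WQ_NAME_LEN)  /* "kworker/R-" + name */

    static int fmt_worker_id(char *buf, size_t size, const char *rescue_wq,
                             int cpu, int pool_id, int worker_id, int highpri)
    {
        if (rescue_wq)
            return snprintf(buf, size, "kworker/R-%s", rescue_wq);
        if (cpu >= 0)
            return snprintf(buf, size, "kworker/%d:%d%s", cpu, worker_id,
                            highpri ? "H" : "");
        return snprintf(buf, size, "kworker/u%d:%d", pool_id, worker_id);
    }

    int main(void)
    {
        char buf[WORKER_ID_LEN];

        fmt_worker_id(buf, sizeof(buf), NULL, 3, 0, 7, 1);
        puts(buf);   /* kworker/3:7H */
        fmt_worker_id(buf, sizeof(buf), "writeback", -1, 0, 0, 0);
        puts(buf);   /* kworker/R-writeback */
        return 0;
    }
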
+diff --git a/lib/Kconfig b/lib/Kconfig
+index 4557bb8a52565..c98e11c7330ec 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -539,13 +539,7 @@ config CPUMASK_OFFSTACK
+ 	  stack overflow.
+ 
+ config FORCE_NR_CPUS
+-       bool "Set number of CPUs at compile time"
+-       depends on SMP && EXPERT && !COMPILE_TEST
+-       help
+-         Say Yes if you have NR_CPUS set to an actual number of possible
+-         CPUs in your system, not to a default value. This forces the core
+-         code to rely on compile-time value and optimize kernel routines
+-         better.
++	def_bool !SMP
+ 
+ config CPU_RMAP
+ 	bool
+diff --git a/lib/closure.c b/lib/closure.c
+index c16540552d61b..99380d9b4aa94 100644
+--- a/lib/closure.c
++++ b/lib/closure.c
+@@ -17,12 +17,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
+ {
+ 	int r = flags & CLOSURE_REMAINING_MASK;
+ 
+-	BUG_ON(flags & CLOSURE_GUARD_MASK);
+-	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
++	if (WARN(flags & CLOSURE_GUARD_MASK,
++		 "closure has guard bits set: %x (%u)",
++		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
++		r &= ~CLOSURE_GUARD_MASK;
+ 
+ 	if (!r) {
+ 		smp_acquire__after_ctrl_dep();
+ 
++		WARN(flags & ~CLOSURE_DESTRUCTOR,
++		     "closure ref hit 0 with incorrect flags set: %x (%u)",
++		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
++
+ 		cl->closure_get_happened = false;
+ 
+ 		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
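
The closure change downgrades two BUG_ON()s to WARN()s that report the
corrupted flag bits and then mask them off, so a refcounting bug logs
loudly instead of killing the machine. The recover-after-warn shape in a
standalone sketch (the mask value is illustrative):

    #include <stdio.h>

    #define CLOSURE_GUARD_MASK 0xc0000000u

    static unsigned put_after_sub(unsigned flags)
    {
        unsigned r = flags;

        if (flags & CLOSURE_GUARD_MASK) {
            fprintf(stderr, "closure has guard bits set: %x\n",
                    flags & CLOSURE_GUARD_MASK);
            r &= ~CLOSURE_GUARD_MASK;   /* recover instead of crashing */
        }
        return r;
    }

    int main(void)
    {
        printf("%x\n", put_after_sub(0x80000003));  /* prints: 3 */
        return 0;
    }
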
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 41bf94f7dbd1b..196f70166537b 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1823,7 +1823,7 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
+ 	if (!folio || xa_is_value(folio))
+ 		goto out;
+ 
+-	if (!folio_try_get_rcu(folio))
++	if (!folio_try_get(folio))
+ 		goto repeat;
+ 
+ 	if (unlikely(folio != xas_reload(&xas))) {
+@@ -1977,7 +1977,7 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
+ 	if (!folio || xa_is_value(folio))
+ 		return folio;
+ 
+-	if (!folio_try_get_rcu(folio))
++	if (!folio_try_get(folio))
+ 		goto reset;
+ 
+ 	if (unlikely(folio != xas_reload(xas))) {
+@@ -2157,7 +2157,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
+ 		if (xa_is_value(folio))
+ 			goto update_start;
+ 
+-		if (!folio_try_get_rcu(folio))
++		if (!folio_try_get(folio))
+ 			goto retry;
+ 
+ 		if (unlikely(folio != xas_reload(&xas)))
+@@ -2289,7 +2289,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
+ 			break;
+ 		if (xa_is_sibling(folio))
+ 			break;
+-		if (!folio_try_get_rcu(folio))
++		if (!folio_try_get(folio))
+ 			goto retry;
+ 
+ 		if (unlikely(folio != xas_reload(&xas)))
+@@ -3449,7 +3449,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
+ 			continue;
+ 		if (folio_test_locked(folio))
+ 			continue;
+-		if (!folio_try_get_rcu(folio))
++		if (!folio_try_get(folio))
+ 			continue;
+ 		/* Has the page moved or been split? */
+ 		if (unlikely(folio != xas_reload(xas)))
+diff --git a/mm/gup.c b/mm/gup.c
+index 1611e73b1121b..ec8570d25a6c9 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -76,7 +76,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs)
+ 	folio = page_folio(page);
+ 	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
+ 		return NULL;
+-	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
++	if (unlikely(!folio_ref_try_add(folio, refs)))
+ 		return NULL;
+ 
+ 	/*
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 24f6b6a5c7721..2647458843c59 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -63,50 +63,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+ 
+-static int hci_scan_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 scan = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, scan);
+-
+-	/* Inquiry and Page scans */
+-	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+-	return 0;
+-}
+-
+-static int hci_auth_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 auth = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, auth);
+-
+-	/* Authentication */
+-	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+-	return 0;
+-}
+-
+-static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
+-{
+-	__u8 encrypt = opt;
+-
+-	BT_DBG("%s %x", req->hdev->name, encrypt);
+-
+-	/* Encryption */
+-	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+-	return 0;
+-}
+-
+-static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
+-{
+-	__le16 policy = cpu_to_le16(opt);
+-
+-	BT_DBG("%s %x", req->hdev->name, policy);
+-
+-	/* Default link policy */
+-	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+-	return 0;
+-}
+-
+ /* Get HCI device by index.
+  * Device is held on return. */
+ struct hci_dev *hci_dev_get(int index)
+@@ -728,6 +684,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ {
+ 	struct hci_dev *hdev;
+ 	struct hci_dev_req dr;
++	__le16 policy;
+ 	int err = 0;
+ 
+ 	if (copy_from_user(&dr, arg, sizeof(dr)))
+@@ -754,8 +711,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 
+ 	switch (cmd) {
+ 	case HCISETAUTH:
+-		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
++					    1, &dr.dev_opt, HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETENCRYPT:
+@@ -766,19 +723,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 
+ 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
+ 			/* Auth must be enabled first */
+-			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+-					   HCI_INIT_TIMEOUT, NULL);
++			err = __hci_cmd_sync_status(hdev,
++						    HCI_OP_WRITE_AUTH_ENABLE,
++						    1, &dr.dev_opt,
++						    HCI_CMD_TIMEOUT);
+ 			if (err)
+ 				break;
+ 		}
+ 
+-		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
++					    1, &dr.dev_opt,
++					    HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETSCAN:
+-		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
++					    1, &dr.dev_opt,
++					    HCI_CMD_TIMEOUT);
+ 
+ 		/* Ensure that the connectable and discoverable states
+ 		 * get correctly modified as this was a non-mgmt change.
+@@ -788,8 +749,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
+ 		break;
+ 
+ 	case HCISETLINKPOL:
+-		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+-				   HCI_INIT_TIMEOUT, NULL);
++		policy = cpu_to_le16(dr.dev_opt);
++
++		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
++					    2, &policy,
++					    HCI_CMD_TIMEOUT);
+ 		break;
+ 
+ 	case HCISETLINKMODE:
+@@ -2744,7 +2708,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 	list_del(&hdev->list);
+ 	write_unlock(&hci_dev_list_lock);
+ 
++	cancel_work_sync(&hdev->rx_work);
++	cancel_work_sync(&hdev->cmd_work);
++	cancel_work_sync(&hdev->tx_work);
+ 	cancel_work_sync(&hdev->power_on);
++	cancel_work_sync(&hdev->error_reset);
+ 
+ 	hci_cmd_sync_clear(hdev);
+ 
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 7bfa6b59ba87e..51f754b6e838b 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -280,6 +280,19 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ }
+ EXPORT_SYMBOL(__hci_cmd_sync_status);
+ 
++int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
++			const void *param, u32 timeout)
++{
++	int err;
++
++	hci_req_sync_lock(hdev);
++	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
++	hci_req_sync_unlock(hdev);
++
++	return err;
++}
++EXPORT_SYMBOL(hci_cmd_sync_status);
++
+ static void hci_cmd_sync_work(struct work_struct *work)
+ {
+ 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
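
hci_cmd_sync_status() is just the locked counterpart of
__hci_cmd_sync_status(), following the kernel convention that the
double-underscore variant expects the caller to hold the request lock. A
toy sketch of that pairing, with the HCI plumbing replaced by stubs:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

    static int __cmd_sync_status(int opcode)   /* caller holds req_lock */
    {
        return opcode ? 0 : -1;                /* stub for the real send */
    }

    static int cmd_sync_status(int opcode)     /* takes req_lock itself */
    {
        int err;

        pthread_mutex_lock(&req_lock);
        err = __cmd_sync_status(opcode);
        pthread_mutex_unlock(&req_lock);
        return err;
    }

    int main(void)
    {
        printf("%d\n", cmd_sync_status(0x0c1a));
        return 0;
    }
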
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 9394a158d1b1a..b8ff522589cd9 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6762,6 +6762,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ 
+ 	BT_DBG("chan %p, len %d", chan, skb->len);
+ 
++	l2cap_chan_lock(chan);
++
+ 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ 		goto drop;
+ 
+@@ -6778,6 +6780,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ 	}
+ 
+ drop:
++	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ free_skb:
+ 	kfree_skb(skb);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 8645461d45e81..1bc79887a7947 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1239,6 +1239,10 @@ static void l2cap_sock_kill(struct sock *sk)
+ 
+ 	BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+ 
++	/* Sock is dead, so set chan data to NULL, avoid other task use invalid
++	 * sock pointer.
++	 */
++	l2cap_pi(sk)->chan->data = NULL;
+ 	/* Kill poor orphan */
+ 
+ 	l2cap_chan_put(l2cap_pi(sk)->chan);
+@@ -1481,12 +1485,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ 
+ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ {
+-	struct sock *sk = chan->data;
+-	struct l2cap_pinfo *pi = l2cap_pi(sk);
++	struct sock *sk;
++	struct l2cap_pinfo *pi;
+ 	int err;
+ 
+-	lock_sock(sk);
++	sk = chan->data;
++	if (!sk)
++		return -ENXIO;
+ 
++	pi = l2cap_pi(sk);
++	lock_sock(sk);
+ 	if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
+ 		err = -ENOMEM;
+ 		goto done;
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 0601bad798221..ff7e734e335b0 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		return orig_dst->lwtstate->orig_output(net, sk, skb);
+ 	}
+ 
++	local_bh_disable();
+ 	dst = dst_cache_get(&ilwt->dst_cache);
++	local_bh_enable();
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 		}
+ 
+-		if (ilwt->connected)
++		if (ilwt->connected) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
+ 	}
+ 
+ 	skb_dst_set(skb, dst);
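
Both the ila and rpl hunks enforce the same rule: dst_cache is a per-CPU
structure, so dst_cache_get()/dst_cache_set() must run with softirqs
disabled; rpl_iptunnel had been using preempt_disable(), and ila_output()
nothing at all. The fixed control flow as a compile-and-run sketch, with
stubs standing in for the kernel helpers:

    #include <stdio.h>

    static void local_bh_disable(void) { }  /* stub: softirqs off */
    static void local_bh_enable(void)  { }  /* stub: softirqs on  */
    static void *dst_cache_get(void)   { return NULL; }  /* first: miss */
    static void dst_cache_set(void *dst) { (void)dst; }

    int main(void)
    {
        void *dst;

        local_bh_disable();         /* per-CPU cache: BH must be off */
        dst = dst_cache_get();
        local_bh_enable();

        if (!dst) {
            dst = &dst;             /* stand-in for the route lookup */
            local_bh_disable();
            dst_cache_set(dst);     /* store result, again with BH off */
            local_bh_enable();
        }
        printf("dst %p\n", dst);
        return 0;
    }
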
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index a013b92cbb860..2c83b7586422d 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	if (unlikely(err))
+ 		goto drop;
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+-	preempt_enable();
++	local_bh_enable();
+ 
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 		}
+ 
+-		preempt_disable();
++		local_bh_disable();
+ 		dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+-		preempt_enable();
++		local_bh_enable();
+ 	}
+ 
+ 	skb_dst_drop(skb);
+@@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb)
+ 		return err;
+ 	}
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+-	preempt_enable();
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
+-			preempt_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+-			preempt_enable();
+ 		}
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
++	local_bh_enable();
+ 
+ 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ 	if (unlikely(err))
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 51dc2d9dd6b84..d0feadfdb46e1 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2954,8 +2954,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
+ 	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+ 	       sizeof(int) * NUM_NL80211_BANDS);
+ 
+-	ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+-					  BSS_CHANGED_MCAST_RATE);
++	if (ieee80211_sdata_running(sdata))
++		ieee80211_link_info_change_notify(sdata, &sdata->deflink,
++						  BSS_CHANGED_MCAST_RATE);
+ 
+ 	return 0;
+ }
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 70c67c860e995..48bf62e92e02e 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1841,6 +1841,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ void ieee80211_configure_filter(struct ieee80211_local *local);
+ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+ 
++void ieee80211_handle_queued_frames(struct ieee80211_local *local);
++
+ u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
+ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
+ 			     u64 *cookie, gfp_t gfp);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 4eaea0a9975b4..0965ad11ec747 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -423,9 +423,9 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
+ 	       BSS_CHANGED_ERP_SLOT;
+ }
+ 
+-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++/* context: requires softirqs disabled */
++void ieee80211_handle_queued_frames(struct ieee80211_local *local)
+ {
+-	struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+ 	struct sk_buff *skb;
+ 
+ 	while ((skb = skb_dequeue(&local->skb_queue)) ||
+@@ -450,6 +450,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+ 	}
+ }
+ 
++static void ieee80211_tasklet_handler(struct tasklet_struct *t)
++{
++	struct ieee80211_local *local = from_tasklet(local, t, tasklet);
++
++	ieee80211_handle_queued_frames(local);
++}
++
+ static void ieee80211_restart_work(struct work_struct *work)
+ {
+ 	struct ieee80211_local *local =
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index cbc9b5e40cb35..6d4510221c98e 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ 	ifmsh->last_preq = jiffies;
+ 	ifmsh->next_perr = jiffies;
+ 	ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
++	ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ 	/* Allocate all mesh structures when creating the first mesh interface. */
+ 	if (!mesh_allocated)
+ 		ieee80211s_init();
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 3da1c5c450358..b5f2df61c7f67 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -358,7 +358,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
+ 	struct cfg80211_scan_request *req;
+ 	struct cfg80211_chan_def chandef;
+ 	u8 bands_used = 0;
+-	int i, ielen, n_chans;
++	int i, ielen;
++	u32 *n_chans;
+ 	u32 flags = 0;
+ 
+ 	req = rcu_dereference_protected(local->scan_req,
+@@ -368,34 +369,34 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
+ 		return false;
+ 
+ 	if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
++		local->hw_scan_req->req.n_channels = req->n_channels;
++
+ 		for (i = 0; i < req->n_channels; i++) {
+ 			local->hw_scan_req->req.channels[i] = req->channels[i];
+ 			bands_used |= BIT(req->channels[i]->band);
+ 		}
+-
+-		n_chans = req->n_channels;
+ 	} else {
+ 		do {
+ 			if (local->hw_scan_band == NUM_NL80211_BANDS)
+ 				return false;
+ 
+-			n_chans = 0;
++			n_chans = &local->hw_scan_req->req.n_channels;
++			*n_chans = 0;
+ 
+ 			for (i = 0; i < req->n_channels; i++) {
+ 				if (req->channels[i]->band !=
+ 				    local->hw_scan_band)
+ 					continue;
+-				local->hw_scan_req->req.channels[n_chans] =
++				local->hw_scan_req->req.channels[(*n_chans)++] =
+ 							req->channels[i];
+-				n_chans++;
++
+ 				bands_used |= BIT(req->channels[i]->band);
+ 			}
+ 
+ 			local->hw_scan_band++;
+-		} while (!n_chans);
++		} while (!*n_chans);
+ 	}
+ 
+-	local->hw_scan_req->req.n_channels = n_chans;
+ 	ieee80211_prepare_scan_chandef(&chandef);
+ 
+ 	if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+@@ -744,15 +745,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
+ 			local->hw_scan_ies_bufsize *= n_bands;
+ 		}
+ 
+-		local->hw_scan_req = kmalloc(
+-				sizeof(*local->hw_scan_req) +
+-				req->n_channels * sizeof(req->channels[0]) +
+-				local->hw_scan_ies_bufsize, GFP_KERNEL);
++		local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
++							 req.channels,
++							 req->n_channels) +
++					     local->hw_scan_ies_bufsize,
++					     GFP_KERNEL);
+ 		if (!local->hw_scan_req)
+ 			return -ENOMEM;
+ 
+ 		local->hw_scan_req->req.ssids = req->ssids;
+ 		local->hw_scan_req->req.n_ssids = req->n_ssids;
++		/* None of the channels are actually set
++		 * up but let UBSAN know the boundaries.
++		 */
++		local->hw_scan_req->req.n_channels = req->n_channels;
++
+ 		ies = (u8 *)local->hw_scan_req +
+ 			sizeof(*local->hw_scan_req) +
+ 			req->n_channels * sizeof(req->channels[0]);
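
The scan allocation switches from hand-written sizeof arithmetic to
struct_size(), the overflow-checked way to size a struct with a trailing
flexible array, and sets req.n_channels up front so UBSAN knows the array
bounds. A standalone sketch with a simplified stand-in for the kernel
macro:

    #include <stdio.h>
    #include <stdlib.h>

    struct scan_req {
        int n_channels;
        void *channels[];          /* flexible array member */
    };

    /* Simplified stand-in; the kernel version also checks for overflow. */
    #define struct_size(p, member, n) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

    int main(void)
    {
        size_t n = 4, ies_len = 64;
        struct scan_req *req =
            malloc(struct_size(req, channels, n) + ies_len);

        if (!req)
            return 1;
        req->n_channels = (int)n;  /* bounds set before channels are filled */
        printf("%zu bytes\n", struct_size(req, channels, n) + ies_len);
        free(req);
        return 0;
    }
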
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 0da5f6082d159..cd4573723999e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1567,6 +1567,10 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
+ 
+ void ieee80211_stop_device(struct ieee80211_local *local)
+ {
++	local_bh_disable();
++	ieee80211_handle_queued_frames(local);
++	local_bh_enable();
++
+ 	ieee80211_led_radio(local, false);
+ 	ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
+ 
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index 2a6f1ed763c9b..6fbed5bb5c3e0 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -34,8 +34,8 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
+ 	if (res)
+ 		goto err_tx;
+ 
+-	dev->stats.tx_packets++;
+-	dev->stats.tx_bytes += skb->len;
++	DEV_STATS_INC(dev, tx_packets);
++	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+ 
+ 	ieee802154_xmit_complete(&local->hw, skb, false);
+ 
+@@ -90,8 +90,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ 		if (ret)
+ 			goto err_wake_netif_queue;
+ 
+-		dev->stats.tx_packets++;
+-		dev->stats.tx_bytes += len;
++		DEV_STATS_INC(dev, tx_packets);
++		DEV_STATS_ADD(dev, tx_bytes, len);
+ 	} else {
+ 		local->tx_skb = skb;
+ 		queue_work(local->workqueue, &local->sync_tx_work);
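
In mac802154, dev->stats.tx_packets++ is a plain read-modify-write that
can race between the sync worker and the async path; the DEV_STATS_*
macros update the counters atomically instead. A rough model of the
difference using C11 atomics (the kernel macros operate on atomic_long_t
fields of the net_device):

    #include <stdatomic.h>
    #include <stdio.h>

    struct net_device_stats { atomic_long tx_packets, tx_bytes; };

    #define DEV_STATS_INC(s, f)    atomic_fetch_add(&(s)->f, 1)
    #define DEV_STATS_ADD(s, f, v) atomic_fetch_add(&(s)->f, (v))

    int main(void)
    {
        struct net_device_stats st = {0};

        DEV_STATS_INC(&st, tx_packets);
        DEV_STATS_ADD(&st, tx_bytes, 42);
        printf("%ld %ld\n", atomic_load(&st.tx_packets),
               atomic_load(&st.tx_bytes));
        return 0;
    }
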
+diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
+index 43897a5269b6a..755af47b88b91 100644
+--- a/net/wireless/rdev-ops.h
++++ b/net/wireless/rdev-ops.h
+@@ -2,7 +2,7 @@
+ /*
+  * Portions of this file
+  * Copyright(c) 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018, 2021-2023 Intel Corporation
++ * Copyright (C) 2018, 2021-2024 Intel Corporation
+  */
+ #ifndef __CFG80211_RDEV_OPS
+ #define __CFG80211_RDEV_OPS
+@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
+ 			    struct cfg80211_scan_request *request)
+ {
+ 	int ret;
++
++	if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
++		return -EINVAL;
++
+ 	trace_rdev_scan(&rdev->wiphy, request);
+ 	ret = rdev->ops->scan(&rdev->wiphy, request);
+ 	trace_rdev_return_int(&rdev->wiphy, ret);
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index ecea8c08e2701..fba5e98bf493a 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 	LIST_HEAD(coloc_ap_list);
+ 	bool need_scan_psc = true;
+ 	const struct ieee80211_sband_iftype_data *iftd;
++	size_t size, offs_ssids, offs_6ghz_params, offs_ies;
+ 
+ 	rdev_req->scan_6ghz = true;
+ 
+@@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 		spin_unlock_bh(&rdev->bss_lock);
+ 	}
+ 
+-	request = kzalloc(struct_size(request, channels, n_channels) +
+-			  sizeof(*request->scan_6ghz_params) * count +
+-			  sizeof(*request->ssids) * rdev_req->n_ssids,
+-			  GFP_KERNEL);
++	size = struct_size(request, channels, n_channels);
++	offs_ssids = size;
++	size += sizeof(*request->ssids) * rdev_req->n_ssids;
++	offs_6ghz_params = size;
++	size += sizeof(*request->scan_6ghz_params) * count;
++	offs_ies = size;
++	size += rdev_req->ie_len;
++
++	request = kzalloc(size, GFP_KERNEL);
+ 	if (!request) {
+ 		cfg80211_free_coloc_ap_list(&coloc_ap_list);
+ 		return -ENOMEM;
+@@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 
+ 	*request = *rdev_req;
+ 	request->n_channels = 0;
+-	request->scan_6ghz_params =
+-		(void *)&request->channels[n_channels];
++	request->n_6ghz_params = 0;
++	if (rdev_req->n_ssids) {
++		/*
++		 * Add the ssids from the parent scan request to the new
++		 * scan request, so the driver would be able to use them
++		 * in its probe requests to discover hidden APs on PSC
++		 * channels.
++		 */
++		request->ssids = (void *)request + offs_ssids;
++		memcpy(request->ssids, rdev_req->ssids,
++		       sizeof(*request->ssids) * request->n_ssids);
++	}
++	request->scan_6ghz_params = (void *)request + offs_6ghz_params;
++
++	if (rdev_req->ie_len) {
++		void *ie = (void *)request + offs_ies;
++
++		memcpy(ie, rdev_req->ie, rdev_req->ie_len);
++		request->ie = ie;
++	}
+ 
+ 	/*
+ 	 * PSC channels should not be scanned in case of direct scan with 1 SSID
+@@ -978,17 +1002,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 
+ 	if (request->n_channels) {
+ 		struct cfg80211_scan_request *old = rdev->int_scan_req;
+-		rdev->int_scan_req = request;
+ 
+-		/*
+-		 * Add the ssids from the parent scan request to the new scan
+-		 * request, so the driver would be able to use them in its
+-		 * probe requests to discover hidden APs on PSC channels.
+-		 */
+-		request->ssids = (void *)&request->channels[request->n_channels];
+-		request->n_ssids = rdev_req->n_ssids;
+-		memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
+-		       request->n_ssids);
++		rdev->int_scan_req = request;
+ 
+ 		/*
+ 		 * If this scan follows a previous scan, save the scan start
+@@ -3396,10 +3411,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ 	wiphy = &rdev->wiphy;
+ 
+ 	/* Determine number of channels, needed to allocate creq */
+-	if (wreq && wreq->num_channels)
++	if (wreq && wreq->num_channels) {
++		/* Passed from userspace so should be checked */
++		if (unlikely(wreq->num_channels > IW_MAX_FREQUENCIES))
++			return -EINVAL;
+ 		n_channels = wreq->num_channels;
+-	else
++	} else {
+ 		n_channels = ieee80211_get_num_supported_channels(wiphy);
++	}
+ 
+ 	creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
+ 		       n_channels * sizeof(void *),
+@@ -3473,8 +3492,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
+ 			memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
+ 			creq->ssids[0].ssid_len = wreq->essid_len;
+ 		}
+-		if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE)
++		if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
++			creq->ssids = NULL;
+ 			creq->n_ssids = 0;
++		}
+ 	}
+ 
+ 	for (i = 0; i < NUM_NL80211_BANDS; i++)
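
cfg80211_scan_6ghz() now computes an explicit offset for each region of
its single allocation: the channels[] array, then the parent request's
SSIDs, then the 6 GHz params, and (new in this fix) a copy of the IEs.
The same carve-one-blob layout in a standalone sketch (field types are
illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct request {
        int *ssids;
        long *params;
        unsigned char *ie;
    };

    int main(void)
    {
        size_t n_ssids = 2, n_params = 3, ie_len = 8;
        size_t size = sizeof(struct request);
        size_t offs_ssids, offs_params, offs_ies;

        offs_ssids  = size; size += sizeof(int) * n_ssids;
        offs_params = size; size += sizeof(long) * n_params;
        offs_ies    = size; size += ie_len;

        struct request *req = calloc(1, size);
        if (!req)
            return 1;

        req->ssids  = (int *)((char *)req + offs_ssids);
        req->params = (long *)((char *)req + offs_params);
        req->ie     = (unsigned char *)req + offs_ies;

        memcpy(req->ie, "probe-ie", ie_len);
        printf("%zu bytes, ie at offset %zu\n", size, offs_ies);
        free(req);
        return 0;
    }
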
+diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
+index a290de36307ba..7866167552177 100644
+--- a/scripts/kconfig/expr.c
++++ b/scripts/kconfig/expr.c
+@@ -396,35 +396,6 @@ static struct expr *expr_eliminate_yn(struct expr *e)
+ 	return e;
+ }
+ 
+-/*
+- * bool FOO!=n => FOO
+- */
+-struct expr *expr_trans_bool(struct expr *e)
+-{
+-	if (!e)
+-		return NULL;
+-	switch (e->type) {
+-	case E_AND:
+-	case E_OR:
+-	case E_NOT:
+-		e->left.expr = expr_trans_bool(e->left.expr);
+-		e->right.expr = expr_trans_bool(e->right.expr);
+-		break;
+-	case E_UNEQUAL:
+-		// FOO!=n -> FOO
+-		if (e->left.sym->type == S_TRISTATE) {
+-			if (e->right.sym == &symbol_no) {
+-				e->type = E_SYMBOL;
+-				e->right.sym = NULL;
+-			}
+-		}
+-		break;
+-	default:
+-		;
+-	}
+-	return e;
+-}
+-
+ /*
+  * e1 || e2 -> ?
+  */
+diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
+index 0158f5eac4542..f015883519b39 100644
+--- a/scripts/kconfig/expr.h
++++ b/scripts/kconfig/expr.h
+@@ -288,7 +288,6 @@ void expr_free(struct expr *e);
+ void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
+ int expr_eq(struct expr *e1, struct expr *e2);
+ tristate expr_calc_value(struct expr *e);
+-struct expr *expr_trans_bool(struct expr *e);
+ struct expr *expr_eliminate_dups(struct expr *e);
+ struct expr *expr_transform(struct expr *e);
+ int expr_contains_symbol(struct expr *dep, struct symbol *sym);
+diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
+index 9709aca3a30fe..9e52c7360e55b 100644
+--- a/scripts/kconfig/gconf.c
++++ b/scripts/kconfig/gconf.c
+@@ -1478,7 +1478,6 @@ int main(int ac, char *av[])
+ 
+ 	conf_parse(name);
+ 	fixup_rootmenu(&rootmenu);
+-	conf_read(NULL);
+ 
+ 	/* Load the interface and connect signals */
+ 	init_main_window(glade_file);
+@@ -1486,6 +1485,8 @@ int main(int ac, char *av[])
+ 	init_left_tree();
+ 	init_right_tree();
+ 
++	conf_read(NULL);
++
+ 	switch (view_mode) {
+ 	case SINGLE_VIEW:
+ 		display_tree_part();
+diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
+index 3b822cd110f47..8b48a80e7e168 100644
+--- a/scripts/kconfig/menu.c
++++ b/scripts/kconfig/menu.c
+@@ -379,8 +379,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice)
+ 				dep = expr_transform(dep);
+ 				dep = expr_alloc_and(expr_copy(basedep), dep);
+ 				dep = expr_eliminate_dups(dep);
+-				if (menu->sym && menu->sym->type != S_TRISTATE)
+-					dep = expr_trans_bool(dep);
+ 				prop->visible.expr = dep;
+ 
+ 				/*
+diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
+index 494ec0c207fad..e299e8634751f 100644
+--- a/sound/core/pcm_dmaengine.c
++++ b/sound/core/pcm_dmaengine.c
+@@ -349,6 +349,16 @@ int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
+ }
+ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ 
++int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
++{
++	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++
++	dmaengine_synchronize(prtd->dma_chan);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_sync_stop);
++
+ /**
+  * snd_dmaengine_pcm_close - Close a dmaengine based PCM substream
+  * @substream: PCM substream
+@@ -358,6 +368,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
++
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status == DMA_PAUSED)
++		dmaengine_terminate_async(prtd->dma_chan);
+ 
+ 	dmaengine_synchronize(prtd->dma_chan);
+ 	kfree(prtd);
+@@ -378,6 +394,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close);
+ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
++
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status == DMA_PAUSED)
++		dmaengine_terminate_async(prtd->dma_chan);
+ 
+ 	dmaengine_synchronize(prtd->dma_chan);
+ 	dma_release_channel(prtd->dma_chan);
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 0b76e76823d28..353ecd960a1f5 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1775,6 +1775,8 @@ static int snd_pcm_pre_resume(struct snd_pcm_substream *substream,
+ 			      snd_pcm_state_t state)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
++	if (runtime->state != SNDRV_PCM_STATE_SUSPENDED)
++		return -EBADFD;
+ 	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
+ 		return -ENOSYS;
+ 	runtime->trigger_master = substream;
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index f806636242ee9..b6869fe019f29 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -160,6 +160,7 @@ config SND_HDA_SCODEC_CS35L56_I2C
+ 	depends on ACPI || COMPILE_TEST
+ 	depends on SND_SOC
+ 	select FW_CS_DSP
++	imply SERIAL_MULTI_INSTANTIATE
+ 	select SND_HDA_GENERIC
+ 	select SND_SOC_CS35L56_SHARED
+ 	select SND_HDA_SCODEC_CS35L56
+@@ -176,6 +177,7 @@ config SND_HDA_SCODEC_CS35L56_SPI
+ 	depends on ACPI || COMPILE_TEST
+ 	depends on SND_SOC
+ 	select FW_CS_DSP
++	imply SERIAL_MULTI_INSTANTIATE
+ 	select SND_HDA_GENERIC
+ 	select SND_SOC_CS35L56_SHARED
+ 	select SND_HDA_SCODEC_CS35L56
+diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
+index 4f5e581cdd5ff..16cf62f06d2ca 100644
+--- a/sound/pci/hda/cs35l41_hda_property.c
++++ b/sound/pci/hda/cs35l41_hda_property.c
+@@ -118,6 +118,10 @@ static const struct cs35l41_config cs35l41_config_table[] = {
+ 	{ "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ 	{ "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ 	{ "17AA38B7", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
++	{ "17AA38C7", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 },
++	{ "17AA38C8", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 },
++	{ "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
++	{ "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
+ 	{}
+ };
+ 
+@@ -509,6 +513,10 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
+ 	{ "CSC3551", "17AA38B5", generic_dsd_config },
+ 	{ "CSC3551", "17AA38B6", generic_dsd_config },
+ 	{ "CSC3551", "17AA38B7", generic_dsd_config },
++	{ "CSC3551", "17AA38C7", generic_dsd_config },
++	{ "CSC3551", "17AA38C8", generic_dsd_config },
++	{ "CSC3551", "17AA38F9", generic_dsd_config },
++	{ "CSC3551", "17AA38FA", generic_dsd_config },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index 6b77c38a0e155..e134ede6c5aa5 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -735,6 +735,8 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
+ 	if (comps[cs35l56->index].dev == dev)
+ 		memset(&comps[cs35l56->index], 0, sizeof(*comps));
+ 
++	cs35l56->codec = NULL;
++
+ 	dev_dbg(cs35l56->base.dev, "Unbound\n");
+ }
+ 
+@@ -840,6 +842,9 @@ static int cs35l56_hda_system_resume(struct device *dev)
+ 
+ 	cs35l56->suspended = false;
+ 
++	if (!cs35l56->codec)
++		return 0;
++
+ 	ret = cs35l56_is_fw_reload_needed(&cs35l56->base);
+ 	dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret);
+ 	if (ret > 0) {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 98f580e273e48..8a52ed9aa465c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -585,10 +585,14 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
++	case 0x10ec0257:
+ 	case 0x19e58326:
+ 	case 0x10ec0283:
++	case 0x10ec0285:
+ 	case 0x10ec0286:
++	case 0x10ec0287:
+ 	case 0x10ec0288:
++	case 0x10ec0295:
+ 	case 0x10ec0298:
+ 		alc_headset_mic_no_shutup(codec);
+ 		break;
+@@ -10035,6 +10039,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+@@ -10538,10 +10543,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x38be, "Yoga S980-14.5 proX YC Dual", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38bf, "Yoga S980-14.5 proX LX Dual", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38c3, "Y980 DUAL", ALC287_FIXUP_TAS2781_I2C),
++	SND_PCI_QUIRK(0x17aa, 0x38c7, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
++	SND_PCI_QUIRK(0x17aa, 0x38c8, "Thinkbook 13x Gen 4", ALC287_FIXUP_CS35L41_I2C_4),
+ 	SND_PCI_QUIRK(0x17aa, 0x38cb, "Y790 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38cd, "Y790 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x38d2, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
+ 	SND_PCI_QUIRK(0x17aa, 0x38d7, "Lenovo Yoga 9 14IMH9", ALC287_FIXUP_YOGA9_14IMH9_BASS_SPK_PIN),
++	SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 1760b5d42460a..4e3a8ce690a45 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -283,6 +283,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+ 		}
+ 	},
++        {
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "M5602RA"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index fd02b621da52c..d29878af2a80d 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -214,6 +214,10 @@ static const struct reg_sequence cs35l56_asp1_defaults[] = {
+ 	REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL5,	0x00020100),
+ 	REG_SEQ0(CS35L56_ASP1_DATA_CONTROL1,	0x00000018),
+ 	REG_SEQ0(CS35L56_ASP1_DATA_CONTROL5,	0x00000018),
++	REG_SEQ0(CS35L56_ASP1TX1_INPUT,		0x00000000),
++	REG_SEQ0(CS35L56_ASP1TX2_INPUT,		0x00000000),
++	REG_SEQ0(CS35L56_ASP1TX3_INPUT,		0x00000000),
++	REG_SEQ0(CS35L56_ASP1TX4_INPUT,		0x00000000),
+ };
+ 
+ /*
+diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
+index 17bd6b5160772..8b2328d5d0c74 100644
+--- a/sound/soc/codecs/es8326.c
++++ b/sound/soc/codecs/es8326.c
+@@ -865,12 +865,16 @@ static void es8326_jack_detect_handler(struct work_struct *work)
+ 			 * set auto-check mode, then restart jack_detect_work after 400ms.
+ 			 * Don't report jack status.
+ 			 */
+-			regmap_write(es8326->regmap, ES8326_INT_SOURCE,
+-					(ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
++			regmap_write(es8326->regmap, ES8326_INT_SOURCE, 0x00);
+ 			regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
++			regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x00);
+ 			es8326_enable_micbias(es8326->component);
+ 			usleep_range(50000, 70000);
+ 			regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
++			regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x10, 0x10);
++			usleep_range(50000, 70000);
++			regmap_write(es8326->regmap, ES8326_INT_SOURCE,
++					(ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
+ 			regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
+ 			regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
+ 			queue_delayed_work(system_wq, &es8326->jack_detect_work,
+diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c
+index 65d584c1886e8..543a3fa1f5d3c 100644
+--- a/sound/soc/codecs/rt722-sdca-sdw.c
++++ b/sound/soc/codecs/rt722-sdca-sdw.c
+@@ -68,6 +68,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re
+ 	case 0x200007f:
+ 	case 0x2000082 ... 0x200008e:
+ 	case 0x2000090 ... 0x2000094:
++	case 0x3110000:
+ 	case 0x5300000 ... 0x5300002:
+ 	case 0x5400002:
+ 	case 0x5600000 ... 0x5600007:
+@@ -125,6 +126,7 @@ static bool rt722_sdca_mbq_volatile_register(struct device *dev, unsigned int re
+ 	case 0x2000067:
+ 	case 0x2000084:
+ 	case 0x2000086:
++	case 0x3110000:
+ 		return true;
+ 	default:
+ 		return false;
+@@ -350,7 +352,7 @@ static int rt722_sdca_interrupt_callback(struct sdw_slave *slave,
+ 
+ 	if (status->sdca_cascade && !rt722->disable_irq)
+ 		mod_delayed_work(system_power_efficient_wq,
+-			&rt722->jack_detect_work, msecs_to_jiffies(30));
++			&rt722->jack_detect_work, msecs_to_jiffies(280));
+ 
+ 	mutex_unlock(&rt722->disable_irq_lock);
+ 
+diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
+index 42b42903ae9de..691d16ce95a0f 100644
+--- a/sound/soc/intel/avs/topology.c
++++ b/sound/soc/intel/avs/topology.c
+@@ -1545,8 +1545,8 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
+ {
+ 	struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev);
+ 	size_t len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
+-	char buf[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ 	int ssp_port, tdm_slot;
++	char *buf;
+ 
+ 	/* See parse_link_formatted_string() for dynamic naming when(s). */
+ 	if (!avs_mach_singular_ssp(mach))
+@@ -1557,13 +1557,24 @@ static int avs_route_load(struct snd_soc_component *comp, int index,
+ 		return 0;
+ 	tdm_slot = avs_mach_ssp_tdm(mach, ssp_port);
+ 
++	buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
+ 	avs_ssp_sprint(buf, len, route->source, ssp_port, tdm_slot);
+-	strscpy((char *)route->source, buf, len);
++	route->source = buf;
++
++	buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
+ 	avs_ssp_sprint(buf, len, route->sink, ssp_port, tdm_slot);
+-	strscpy((char *)route->sink, buf, len);
++	route->sink = buf;
++
+ 	if (route->control) {
++		buf = devm_kzalloc(comp->card->dev, len, GFP_KERNEL);
++		if (!buf)
++			return -ENOMEM;
+ 		avs_ssp_sprint(buf, len, route->control, ssp_port, tdm_slot);
+-		strscpy((char *)route->control, buf, len);
++		route->control = buf;
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index b41a1147f1c34..a64d1989e28a5 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -610,6 +610,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 CESIUM"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_JD_NOT_INV |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index 092ca09f36319..7fa75b55c65e2 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -318,6 +318,12 @@ static int dmaengine_copy(struct snd_soc_component *component,
+ 	return 0;
+ }
+ 
++static int dmaengine_pcm_sync_stop(struct snd_soc_component *component,
++				   struct snd_pcm_substream *substream)
++{
++	return snd_dmaengine_pcm_sync_stop(substream);
++}
++
+ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ 	.name		= SND_DMAENGINE_PCM_DRV_NAME,
+ 	.probe_order	= SND_SOC_COMP_ORDER_LATE,
+@@ -327,6 +333,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ 	.trigger	= dmaengine_pcm_trigger,
+ 	.pointer	= dmaengine_pcm_pointer,
+ 	.pcm_construct	= dmaengine_pcm_new,
++	.sync_stop	= dmaengine_pcm_sync_stop,
+ };
+ 
+ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+@@ -339,6 +346,7 @@ static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+ 	.pointer	= dmaengine_pcm_pointer,
+ 	.copy		= dmaengine_copy,
+ 	.pcm_construct	= dmaengine_pcm_new,
++	.sync_stop	= dmaengine_pcm_sync_stop,
+ };
+ 
+ static const char * const dmaengine_pcm_dma_channel_names[] = {
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index ba4890991f0d7..ce22613bf9690 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1060,15 +1060,28 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
+ 			break;
+ 		}
+ 
+-		route->source = elem->source;
+-		route->sink = elem->sink;
++		route->source = devm_kmemdup(tplg->dev, elem->source,
++					     min(strlen(elem->source),
++						 SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++					     GFP_KERNEL);
++		route->sink = devm_kmemdup(tplg->dev, elem->sink,
++					   min(strlen(elem->sink), SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++					   GFP_KERNEL);
++		if (!route->source || !route->sink) {
++			ret = -ENOMEM;
++			break;
++		}
+ 
+-		/* set to NULL atm for tplg users */
+-		route->connected = NULL;
+-		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
+-			route->control = NULL;
+-		else
+-			route->control = elem->control;
++		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) != 0) {
++			route->control = devm_kmemdup(tplg->dev, elem->control,
++						      min(strlen(elem->control),
++							  SNDRV_CTL_ELEM_ID_NAME_MAXLEN),
++						      GFP_KERNEL);
++			if (!route->control) {
++				ret = -ENOMEM;
++				break;
++			}
++		}
+ 
+ 		/* add route dobj to dobj_list */
+ 		route->dobj.type = SND_SOC_DOBJ_GRAPH;
+diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
+index d7b446f3f973e..8b5fbbc777bd4 100644
+--- a/sound/soc/sof/intel/hda-pcm.c
++++ b/sound/soc/sof/intel/hda-pcm.c
+@@ -254,6 +254,12 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
+ 	snd_pcm_hw_constraint_integer(substream->runtime,
+ 				      SNDRV_PCM_HW_PARAM_PERIODS);
+ 
++	/* Limit the maximum number of periods to not exceed the BDL entries count */
++	if (runtime->hw.periods_max > HDA_DSP_MAX_BDL_ENTRIES)
++		snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
++					     runtime->hw.periods_min,
++					     HDA_DSP_MAX_BDL_ENTRIES);
++
+ 	/* Only S16 and S32 supported by HDA hardware when used without DSP */
+ 	if (sdev->dspless_mode_selected)
+ 		snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT,
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index e693dcb475e4d..d1a7d867f6a3a 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -485,7 +485,7 @@ sof_prepare_widgets_in_path(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget
+ 			if (ret < 0) {
+ 				/* unprepare the source widget */
+ 				if (widget_ops[widget->id].ipc_unprepare &&
+-				    swidget && swidget->prepared) {
++				    swidget && swidget->prepared && swidget->use_count == 0) {
+ 					widget_ops[widget->id].ipc_unprepare(swidget);
+ 					swidget->prepared = false;
+ 				}
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index 1e760c3155213..2b1ed91a736c9 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -1472,10 +1472,11 @@ static int davinci_mcasp_hw_rule_min_periodsize(
+ {
+ 	struct snd_interval *period_size = hw_param_interval(params,
+ 						SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
++	u8 numevt = *((u8 *)rule->private);
+ 	struct snd_interval frames;
+ 
+ 	snd_interval_any(&frames);
+-	frames.min = 64;
++	frames.min = numevt;
+ 	frames.integer = 1;
+ 
+ 	return snd_interval_refine(period_size, &frames);
+@@ -1490,6 +1491,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ 	u32 max_channels = 0;
+ 	int i, dir, ret;
+ 	int tdm_slots = mcasp->tdm_slots;
++	u8 *numevt;
+ 
+ 	/* Do not allow more then one stream per direction */
+ 	if (mcasp->substreams[substream->stream])
+@@ -1589,9 +1591,12 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ 			return ret;
+ 	}
+ 
++	numevt = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
++			 &mcasp->txnumevt :
++			 &mcasp->rxnumevt;
+ 	snd_pcm_hw_rule_add(substream->runtime, 0,
+ 			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+-			    davinci_mcasp_hw_rule_min_periodsize, NULL,
++			    davinci_mcasp_hw_rule_min_periodsize, numevt,
+ 			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ 
+ 	return 0;
+diff --git a/sound/soc/ti/omap-hdmi.c b/sound/soc/ti/omap-hdmi.c
+index 4513b527ab970..ad8925b6481ca 100644
+--- a/sound/soc/ti/omap-hdmi.c
++++ b/sound/soc/ti/omap-hdmi.c
+@@ -354,11 +354,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
+ 	if (!card)
+ 		return -ENOMEM;
+ 
+-	card->name = devm_kasprintf(dev, GFP_KERNEL,
+-				    "HDMI %s", dev_name(ad->dssdev));
+-	if (!card->name)
+-		return -ENOMEM;
+-
++	card->name = "HDMI";
+ 	card->owner = THIS_MODULE;
+ 	card->dai_link =
+ 		devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
+diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
+index c519cc89c97f4..0a56e22240fc8 100644
+--- a/tools/power/cpupower/utils/helpers/amd.c
++++ b/tools/power/cpupower/utils/helpers/amd.c
+@@ -41,6 +41,16 @@ union core_pstate {
+ 		unsigned res1:31;
+ 		unsigned en:1;
+ 	} pstatedef;
++	/* since fam 1Ah: */
++	struct {
++		unsigned fid:12;
++		unsigned res1:2;
++		unsigned vid:8;
++		unsigned iddval:8;
++		unsigned idddiv:2;
++		unsigned res2:31;
++		unsigned en:1;
++	} pstatedef2;
+ 	unsigned long long val;
+ };
+ 
+@@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate)
+ {
+ 	int t;
+ 
++	/* Fam 1Ah onward do not use did */
++	if (cpupower_cpu_info.family >= 0x1A)
++		return 0;
++
+ 	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
+ 		t = pstate.pstatedef.did;
+ 	else if (cpupower_cpu_info.family == 0x12)
+@@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate)
+ static int get_cof(union core_pstate pstate)
+ {
+ 	int t;
+-	int fid, did, cof;
++	int fid, did, cof = 0;
+ 
+ 	did = get_did(pstate);
+ 	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
+-		fid = pstate.pstatedef.fid;
+-		cof = 200 * fid / did;
++		if (cpupower_cpu_info.family >= 0x1A) {
++			fid = pstate.pstatedef2.fid;
++			if (fid > 0x0f)
++				cof = (fid * 5);
++		} else {
++			fid = pstate.pstatedef.fid;
++			cof = 200 * fid / did;
++		}
+ 	} else {
+ 		t = 0x10;
+ 		fid = pstate.pstate.fid;
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
+index 01f241ea2c67b..dec9fd7ebba7f 100644
+--- a/tools/testing/selftests/bpf/config
++++ b/tools/testing/selftests/bpf/config
+@@ -53,9 +53,12 @@ CONFIG_MPLS=y
+ CONFIG_MPLS_IPTUNNEL=y
+ CONFIG_MPLS_ROUTING=y
+ CONFIG_MPTCP=y
++CONFIG_NET_ACT_SKBMOD=y
++CONFIG_NET_CLS=y
+ CONFIG_NET_CLS_ACT=y
+ CONFIG_NET_CLS_BPF=y
+ CONFIG_NET_CLS_FLOWER=y
++CONFIG_NET_CLS_MATCHALL=y
+ CONFIG_NET_FOU=y
+ CONFIG_NET_FOU_IP_TUNNELS=y
+ CONFIG_NET_IPGRE=y
+diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c
+index bc98411446855..1af9ec1149aab 100644
+--- a/tools/testing/selftests/bpf/prog_tests/tc_links.c
++++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c
+@@ -9,6 +9,8 @@
+ #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+ 
+ #include "test_tc_link.skel.h"
++
++#include "netlink_helpers.h"
+ #include "tc_helpers.h"
+ 
+ void serial_test_tc_links_basic(void)
+@@ -1787,6 +1789,65 @@ void serial_test_tc_links_ingress(void)
+ 	test_tc_links_ingress(BPF_TCX_INGRESS, false, false);
+ }
+ 
++struct qdisc_req {
++	struct nlmsghdr  n;
++	struct tcmsg     t;
++	char             buf[1024];
++};
++
++static int qdisc_replace(int ifindex, const char *kind, bool block)
++{
++	struct rtnl_handle rth = { .fd = -1 };
++	struct qdisc_req req;
++	int err;
++
++	err = rtnl_open(&rth, 0);
++	if (!ASSERT_OK(err, "open_rtnetlink"))
++		return err;
++
++	memset(&req, 0, sizeof(req));
++	req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
++	req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST;
++	req.n.nlmsg_type = RTM_NEWQDISC;
++	req.t.tcm_family = AF_UNSPEC;
++	req.t.tcm_ifindex = ifindex;
++	req.t.tcm_parent = 0xfffffff1;
++
++	addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1);
++	if (block)
++		addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1);
++
++	err = rtnl_talk(&rth, &req.n, NULL);
++	ASSERT_OK(err, "talk_rtnetlink");
++	rtnl_close(&rth);
++	return err;
++}
++
++void serial_test_tc_links_dev_chain0(void)
++{
++	int err, ifindex;
++
++	ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth");
++	ifindex = if_nametoindex("foo");
++	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
++	err = qdisc_replace(ifindex, "ingress", true);
++	if (!ASSERT_OK(err, "attaching ingress"))
++		goto cleanup;
++	ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block");
++	err = qdisc_replace(ifindex, "clsact", false);
++	if (!ASSERT_OK(err, "attaching clsact"))
++		goto cleanup;
++	/* Heuristic: kern_sync_rcu() alone does not work; a wait-time of ~5s
++	 * triggered the issue without the fix reliably 100% of the time.
++	 */
++	sleep(5);
++	ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter");
++cleanup:
++	ASSERT_OK(system("ip link del dev foo"), "del veth");
++	ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed");
++	ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed");
++}
++
+ static void test_tc_links_dev_mixed(int target)
+ {
+ 	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
+diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
+index b171fd53b004e..632ab44737ec3 100644
+--- a/tools/testing/selftests/cachestat/test_cachestat.c
++++ b/tools/testing/selftests/cachestat/test_cachestat.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ 
+ #include <stdio.h>
+ #include <stdbool.h>
+diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
+index 759f86e7d263e..2862aae58b79a 100644
+--- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
++++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ 
+ #include <inttypes.h>
+ #include <unistd.h>
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index a392d0917b4e5..994fa3468f170 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
+-CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
++CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES)
+ LDLIBS := -lpthread -lrt
+ 
+ LOCAL_HDRS := \
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index 5e0e539a323d5..8b120718768ec 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -531,7 +531,7 @@ class ovsactions(nla):
+             for flat_act in parse_flat_map:
+                 if parse_starts_block(actstr, flat_act[0], False):
+                     actstr = actstr[len(flat_act[0]):]
+-                    self["attrs"].append([flat_act[1]])
++                    self["attrs"].append([flat_act[1], True])
+                     actstr = actstr[strspn(actstr, ", ") :]
+                     parsed = True
+ 
+diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
+index 9024754530b23..5790ab446527f 100644
+--- a/tools/testing/selftests/openat2/openat2_test.c
++++ b/tools/testing/selftests/openat2/openat2_test.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #define _GNU_SOURCE
++#define __SANE_USERSPACE_TYPES__ // Use ll64
+ #include <fcntl.h>
+ #include <sched.h>
+ #include <sys/stat.h>
+diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c
+index e40dc5be2f668..d12ff955de0d8 100644
+--- a/tools/testing/selftests/timens/exec.c
++++ b/tools/testing/selftests/timens/exec.c
+@@ -30,7 +30,7 @@ int main(int argc, char *argv[])
+ 
+ 		for (i = 0; i < 2; i++) {
+ 			_gettime(CLOCK_MONOTONIC, &tst, i);
+-			if (abs(tst.tv_sec - now.tv_sec) > 5)
++			if (labs(tst.tv_sec - now.tv_sec) > 5)
+ 				return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
+ 		}
+ 		return 0;
+@@ -50,7 +50,7 @@ int main(int argc, char *argv[])
+ 
+ 	for (i = 0; i < 2; i++) {
+ 		_gettime(CLOCK_MONOTONIC, &tst, i);
+-		if (abs(tst.tv_sec - now.tv_sec) > 5)
++		if (labs(tst.tv_sec - now.tv_sec) > 5)
+ 			return pr_fail("%ld %ld\n",
+ 					now.tv_sec, tst.tv_sec);
+ 	}
+@@ -70,7 +70,7 @@ int main(int argc, char *argv[])
+ 		/* Check that a child process is in the new timens. */
+ 		for (i = 0; i < 2; i++) {
+ 			_gettime(CLOCK_MONOTONIC, &tst, i);
+-			if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
++			if (labs(tst.tv_sec - now.tv_sec - OFFSET) > 5)
+ 				return pr_fail("%ld %ld\n",
+ 						now.tv_sec + OFFSET, tst.tv_sec);
+ 		}
+diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
+index 5e7f0051bd7be..5b939f59dfa4d 100644
+--- a/tools/testing/selftests/timens/timer.c
++++ b/tools/testing/selftests/timens/timer.c
+@@ -56,7 +56,7 @@ int run_test(int clockid, struct timespec now)
+ 			return pr_perror("timerfd_gettime");
+ 
+ 		elapsed = new_value.it_value.tv_sec;
+-		if (abs(elapsed - 3600) > 60) {
++		if (llabs(elapsed - 3600) > 60) {
+ 			ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+ 					      clockid, elapsed);
+ 			return 1;
+diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
+index 9edd43d6b2c13..a4196bbd6e33f 100644
+--- a/tools/testing/selftests/timens/timerfd.c
++++ b/tools/testing/selftests/timens/timerfd.c
+@@ -61,7 +61,7 @@ int run_test(int clockid, struct timespec now)
+ 			return pr_perror("timerfd_gettime(%d)", clockid);
+ 
+ 		elapsed = new_value.it_value.tv_sec;
+-		if (abs(elapsed - 3600) > 60) {
++		if (llabs(elapsed - 3600) > 60) {
+ 			ksft_test_result_fail("clockid: %d elapsed: %lld\n",
+ 					      clockid, elapsed);
+ 			return 1;
+diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
+index beb7614941fb1..5b8907bf451dd 100644
+--- a/tools/testing/selftests/timens/vfork_exec.c
++++ b/tools/testing/selftests/timens/vfork_exec.c
+@@ -32,7 +32,7 @@ static void *tcheck(void *_args)
+ 
+ 	for (i = 0; i < 2; i++) {
+ 		_gettime(CLOCK_MONOTONIC, &tst, i);
+-		if (abs(tst.tv_sec - now->tv_sec) > 5) {
++		if (labs(tst.tv_sec - now->tv_sec) > 5) {
+ 			pr_fail("%s: in-thread: unexpected value: %ld (%ld)\n",
+ 				args->tst_name, tst.tv_sec, now->tv_sec);
+ 			return (void *)1UL;
+@@ -64,7 +64,7 @@ static int check(char *tst_name, struct timespec *now)
+ 
+ 	for (i = 0; i < 2; i++) {
+ 		_gettime(CLOCK_MONOTONIC, &tst, i);
+-		if (abs(tst.tv_sec - now->tv_sec) > 5)
++		if (labs(tst.tv_sec - now->tv_sec) > 5)
+ 			return pr_fail("%s: unexpected value: %ld (%ld)\n",
+ 					tst_name, tst.tv_sec, now->tv_sec);
+ 	}
+diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
+index 413f75620a35b..4ae417372e9eb 100644
+--- a/tools/testing/selftests/vDSO/parse_vdso.c
++++ b/tools/testing/selftests/vDSO/parse_vdso.c
+@@ -55,14 +55,20 @@ static struct vdso_info
+ 	ELF(Verdef) *verdef;
+ } vdso_info;
+ 
+-/* Straight from the ELF specification. */
+-static unsigned long elf_hash(const unsigned char *name)
++/*
++ * Straight from the ELF specification...and then tweaked slightly, in order to
++ * avoid a few clang warnings.
++ */
++static unsigned long elf_hash(const char *name)
+ {
+ 	unsigned long h = 0, g;
+-	while (*name)
++	const unsigned char *uch_name = (const unsigned char *)name;
++
++	while (*uch_name)
+ 	{
+-		h = (h << 4) + *name++;
+-		if (g = h & 0xf0000000)
++		h = (h << 4) + *uch_name++;
++		g = h & 0xf0000000;
++		if (g)
+ 			h ^= g >> 24;
+ 		h &= ~g;
+ 	}
+diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+index 8a44ff973ee17..27f6fdf119691 100644
+--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
++++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+@@ -18,7 +18,7 @@
+ 
+ #include "parse_vdso.h"
+ 
+-/* We need a libc functions... */
++/* We need some libc functions... */
+ int strcmp(const char *a, const char *b)
+ {
+ 	/* This implementation is buggy: it never returns -1. */
+@@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b)
+ 	return 0;
+ }
+ 
++/*
++ * The clang build needs this, although gcc does not.
++ * Stolen from lib/string.c.
++ */
++void *memcpy(void *dest, const void *src, size_t count)
++{
++	char *tmp = dest;
++	const char *s = src;
++
++	while (count--)
++		*tmp++ = *s++;
++	return dest;
++}
++
+ /* ...and two syscalls.  This is x86-specific. */
+ static inline long x86_syscall3(long nr, long a0, long a1, long a2)
+ {
+@@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n)
+ 	}
+ }
+ 
+-__attribute__((externally_visible)) void c_main(void **stack)
++void c_main(void **stack)
+ {
+ 	/* Parse the stack */
+ 	long argc = (long)*stack;


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-27 13:46 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-27 13:46 UTC (permalink / raw
  To: gentoo-commits

commit:     c78fc9fdb4c29f7899905015e09ce855621591a8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 27 13:46:07 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul 27 13:46:07 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c78fc9fd

Linux patch 6.9.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1011_linux-6.9.12.patch | 896 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 900 insertions(+)

diff --git a/0000_README b/0000_README
index dd499825..0267313c 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-6.9.11.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.9.11
 
+Patch:  1011_linux-6.9.12.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.9.12
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1011_linux-6.9.12.patch b/1011_linux-6.9.12.patch
new file mode 100644
index 00000000..b2251cee
--- /dev/null
+++ b/1011_linux-6.9.12.patch
@@ -0,0 +1,896 @@
+diff --git a/Makefile b/Makefile
+index 46457f645921e..d9243ad053f9a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 9
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 4e29adea570a0..1ddd314137017 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -685,6 +685,7 @@ dwc_0: usb@8a00000 {
+ 				clocks = <&xo>;
+ 				clock-names = "ref";
+ 				tx-fifo-resize;
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index e5b89753aa5c1..cf04879c6b75b 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -647,6 +647,7 @@ dwc_0: usb@8a00000 {
+ 				interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&qusb_phy_0>, <&ssphy_0>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+@@ -696,6 +697,7 @@ dwc_1: usb@8c00000 {
+ 				interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ 				phys = <&qusb_phy_1>, <&ssphy_1>;
+ 				phy-names = "usb2-phy", "usb3-phy";
++				snps,parkmode-disable-ss-quirk;
+ 				snps,is-utmi-l1-suspend;
+ 				snps,hird-threshold = /bits/ 8 <0x0>;
+ 				snps,dis_u2_susphy_quirk;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 1601e46549e77..db668416657c6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -3061,6 +3061,7 @@ usb3_dwc3: usb@6a00000 {
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
+ 				snps,is-utmi-l1-suspend;
++				snps,parkmode-disable-ss-quirk;
+ 				tx-fifo-resize;
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index 4dfe2d09ac285..240a29ed35262 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -2154,6 +2154,7 @@ usb3_dwc3: usb@a800000 {
+ 				interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&qusb2phy>, <&usb3phy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				snps,has-lpm-erratum;
+diff --git a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+index 6e9dd0312adc5..a9ab924d2ed12 100644
+--- a/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
++++ b/arch/arm64/boot/dts/qcom/qrb2210-rb1.dts
+@@ -59,6 +59,17 @@ hdmi_con: endpoint {
+ 		};
+ 	};
+ 
++	i2c2_gpio: i2c {
++		compatible = "i2c-gpio";
++
++		sda-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
++		scl-gpios = <&tlmm 7 GPIO_ACTIVE_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		status = "disabled";
++	};
++
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+@@ -199,7 +210,7 @@ &gpi_dma0 {
+ 	status = "okay";
+ };
+ 
+-&i2c2 {
++&i2c2_gpio {
+ 	clock-frequency = <400000>;
+ 	status = "okay";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+index 696d6d43c56b3..54d17eca97519 100644
+--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
++++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+@@ -60,6 +60,17 @@ hdmi_con: endpoint {
+ 		};
+ 	};
+ 
++	i2c2_gpio: i2c {
++		compatible = "i2c-gpio";
++
++		sda-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
++		scl-gpios = <&tlmm 7 GPIO_ACTIVE_HIGH>;
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		status = "disabled";
++	};
++
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+@@ -190,7 +201,7 @@ zap-shader {
+ 	};
+ };
+ 
+-&i2c2 {
++&i2c2_gpio {
+ 	clock-frequency = <400000>;
+ 	status = "okay";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 2b481e20ae38f..cc93b5675d5d7 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3063,6 +3063,7 @@ usb_1_dwc3: usb@a600000 {
+ 				iommus = <&apps_smmu 0x540 0>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				maximum-speed = "super-speed";
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 41f51d3261110..e0d3eeb6f639e 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -4131,6 +4131,7 @@ usb_1_dwc3: usb@a600000 {
+ 				iommus = <&apps_smmu 0xe0 0x0>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 				maximum-speed = "super-speed";
+diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+index f5921b80ef943..5f6884b2367d9 100644
+--- a/arch/arm64/boot/dts/qcom/sdm630.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi
+@@ -1302,6 +1302,7 @@ usb3_dwc3: usb@a800000 {
+ 				interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 
+ 				phys = <&qusb2phy0>, <&usb3_qmpphy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 2f20be99ee7e1..9310aa2383068 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4086,6 +4086,7 @@ usb_1_dwc3: usb@a600000 {
+ 				iommus = <&apps_smmu 0x740 0>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 			};
+@@ -4141,6 +4142,7 @@ usb_2_dwc3: usb@a800000 {
+ 				iommus = <&apps_smmu 0x760 0>;
+ 				snps,dis_u2_susphy_quirk;
+ 				snps,dis_enblslpm_quirk;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 9ed062150aaf2..b4ce5a322107e 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -1656,6 +1656,7 @@ usb_dwc3: usb@4e00000 {
+ 				snps,has-lpm-erratum;
+ 				snps,hird-threshold = /bits/ 8 <0x10>;
+ 				snps,usb3_lpm_capable;
++				snps,parkmode-disable-ss-quirk;
+ 
+ 				usb-role-switch;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 0be053555602c..d2b98f400d4f6 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -1859,6 +1859,7 @@ usb_1_dwc3: usb@a600000 {
+ 				snps,dis_enblslpm_quirk;
+ 				snps,has-lpm-erratum;
+ 				snps,hird-threshold = /bits/ 8 <0x10>;
++				snps,parkmode-disable-ss-quirk;
+ 				phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
+ 				phy-names = "usb2-phy", "usb3-phy";
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index c5442396dcec0..a40130ae3eb0c 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -657,7 +657,7 @@ &pcie6a {
+ };
+ 
+ &pcie6a_phy {
+-	vdda-phy-supply = <&vreg_l3j_0p8>;
++	vdda-phy-supply = <&vreg_l1d_0p8>;
+ 	vdda-pll-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+@@ -824,12 +824,15 @@ &uart21 {
+ 
+ &usb_1_ss0_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss0_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l1j_0p8>;
++
+ 	status = "okay";
+ };
+ 
+@@ -844,12 +847,15 @@ &usb_1_ss0_dwc3 {
+ 
+ &usb_1_ss1_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss1_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l2d_0p9>;
++
+ 	status = "okay";
+ };
+ 
+@@ -864,12 +870,15 @@ &usb_1_ss1_dwc3 {
+ 
+ &usb_1_ss2_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss2_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l2d_0p9>;
++
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+index 49e19a64455b8..2dccb98b4437e 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+@@ -468,7 +468,7 @@ &pcie6a {
+ };
+ 
+ &pcie6a_phy {
+-	vdda-phy-supply = <&vreg_l3j_0p8>;
++	vdda-phy-supply = <&vreg_l1d_0p8>;
+ 	vdda-pll-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+@@ -520,12 +520,15 @@ &uart21 {
+ 
+ &usb_1_ss0_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss0_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l1j_0p8>;
++
+ 	status = "okay";
+ };
+ 
+@@ -540,12 +543,15 @@ &usb_1_ss0_dwc3 {
+ 
+ &usb_1_ss1_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss1_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l2d_0p9>;
++
+ 	status = "okay";
+ };
+ 
+@@ -560,12 +566,15 @@ &usb_1_ss1_dwc3 {
+ 
+ &usb_1_ss2_hsphy {
+ 	vdd-supply = <&vreg_l2e_0p8>;
+-	vdda12-supply = <&vreg_l3e_1p2>;
++	vdda12-supply = <&vreg_l2j_1p2>;
+ 
+ 	status = "okay";
+ };
+ 
+ &usb_1_ss2_qmpphy {
++	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-pll-supply = <&vreg_l2d_0p9>;
++
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 0c66b32e0f9f1..063a6ea378aad 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -432,12 +432,13 @@ static void do_exception(struct pt_regs *regs, int access)
+ 			handle_fault_error_nolock(regs, 0);
+ 		else
+ 			do_sigsegv(regs, SEGV_MAPERR);
+-	} else if (fault & VM_FAULT_SIGBUS) {
++	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)) {
+ 		if (!user_mode(regs))
+ 			handle_fault_error_nolock(regs, 0);
+ 		else
+ 			do_sigbus(regs);
+ 	} else {
++		pr_emerg("Unexpected fault flags: %08x\n", fault);
+ 		BUG();
+ 	}
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 8f231766756a2..3fac29233414d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -2017,7 +2017,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
+ 				      struct amdgpu_irq_src *source,
+ 				      struct amdgpu_iv_entry *entry)
+ {
+-	uint32_t instance;
++	int instance;
+ 
+ 	DRM_DEBUG("IH: SDMA trap\n");
+ 	instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 9f0495e8df4d0..feeeac715c18f 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1177,6 +1177,11 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+ 	struct sk_buff *skb;
+ 	int err, depth;
+ 
++	if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
++		err = -EINVAL;
++		goto err;
++	}
++
+ 	if (q->flags & IFF_VNET_HDR)
+ 		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 92da8c03d960c..f0f3fa1fc6269 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2451,6 +2451,9 @@ static int tun_xdp_one(struct tun_struct *tun,
+ 	bool skb_xdp = false;
+ 	struct page *page;
+ 
++	if (unlikely(datasize < ETH_HLEN))
++		return -EINVAL;
++
+ 	xdp_prog = rcu_dereference(tun->xdp_prog);
+ 	if (xdp_prog) {
+ 		if (gso->gso_type) {
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index ec8cd7c7bbfc1..0e38bb145e8f5 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -150,6 +150,9 @@ struct f_midi2 {
+ 
+ #define func_to_midi2(f)	container_of(f, struct f_midi2, func)
+ 
++/* convert from MIDI protocol number (1 or 2) to SNDRV_UMP_EP_INFO_PROTO_* */
++#define to_ump_protocol(v)	(((v) & 3) << 8)
++
+ /* get EP name string */
+ static const char *ump_ep_name(const struct f_midi2_ep *ep)
+ {
+@@ -564,8 +567,7 @@ static void reply_ump_stream_ep_config(struct f_midi2_ep *ep)
+ 		.status = UMP_STREAM_MSG_STATUS_STREAM_CFG,
+ 	};
+ 
+-	if ((ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK) ==
+-	    SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++	if (ep->info.protocol == 2)
+ 		rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 >> 8;
+ 	else
+ 		rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 >> 8;
+@@ -627,13 +629,13 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
+ 		return;
+ 	case UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST:
+ 		if (*data & UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) {
+-			ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI2;
++			ep->info.protocol = 2;
+ 			DBG(midi2, "Switching Protocol to MIDI2\n");
+ 		} else {
+-			ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI1;
++			ep->info.protocol = 1;
+ 			DBG(midi2, "Switching Protocol to MIDI1\n");
+ 		}
+-		snd_ump_switch_protocol(ep->ump, ep->info.protocol);
++		snd_ump_switch_protocol(ep->ump, to_ump_protocol(ep->info.protocol));
+ 		reply_ump_stream_ep_config(ep);
+ 		return;
+ 	case UMP_STREAM_MSG_STATUS_FB_DISCOVERY:
+@@ -1065,7 +1067,8 @@ static void f_midi2_midi1_ep_out_complete(struct usb_ep *usb_ep,
+ 		group = midi2->out_cable_mapping[cable].group;
+ 		bytes = midi1_packet_bytes[*buf & 0x0f];
+ 		for (c = 0; c < bytes; c++) {
+-			snd_ump_convert_to_ump(cvt, group, ep->info.protocol,
++			snd_ump_convert_to_ump(cvt, group,
++					       to_ump_protocol(ep->info.protocol),
+ 					       buf[c + 1]);
+ 			if (cvt->ump_bytes) {
+ 				snd_ump_receive(ep->ump, cvt->ump,
+@@ -1375,7 +1378,7 @@ static void assign_block_descriptors(struct f_midi2 *midi2,
+ 			desc->nNumGroupTrm = b->num_groups;
+ 			desc->iBlockItem = ep->blks[blk].string_id;
+ 
+-			if (ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
++			if (ep->info.protocol == 2)
+ 				desc->bMIDIProtocol = USB_MS_MIDI_PROTO_2_0;
+ 			else
+ 				desc->bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_128;
+@@ -1552,7 +1555,7 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
+ 		if (midi2->info.static_block)
+ 			ump->info.flags |= SNDRV_UMP_EP_INFO_STATIC_BLOCKS;
+ 		ump->info.protocol_caps = (ep->info.protocol_caps & 3) << 8;
+-		ump->info.protocol = (ep->info.protocol & 3) << 8;
++		ump->info.protocol = to_ump_protocol(ep->info.protocol);
+ 		ump->info.version = 0x0101;
+ 		ump->info.family_id = ep->info.family;
+ 		ump->info.model_id = ep->info.model;
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index 9987055293b35..2999ed5d83f5e 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -797,7 +797,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 		       size_t buf_size)
+ {
+ 	struct jfs_ea_list *ealist;
+-	struct jfs_ea *ea;
++	struct jfs_ea *ea, *ealist_end;
+ 	struct ea_buffer ea_buf;
+ 	int xattr_size;
+ 	ssize_t size;
+@@ -817,9 +817,16 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 		goto not_found;
+ 
+ 	ealist = (struct jfs_ea_list *) ea_buf.xattr;
++	ealist_end = END_EALIST(ealist);
+ 
+ 	/* Find the named attribute */
+-	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
++	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++		if (unlikely(ea + 1 > ealist_end) ||
++		    unlikely(NEXT_EA(ea) > ealist_end)) {
++			size = -EUCLEAN;
++			goto release;
++		}
++
+ 		if ((namelen == ea->namelen) &&
+ 		    memcmp(name, ea->name, namelen) == 0) {
+ 			/* Found it */
+@@ -834,6 +841,7 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
+ 			memcpy(data, value, size);
+ 			goto release;
+ 		}
++	}
+       not_found:
+ 	size = -ENODATA;
+       release:
+@@ -861,7 +869,7 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ 	ssize_t size = 0;
+ 	int xattr_size;
+ 	struct jfs_ea_list *ealist;
+-	struct jfs_ea *ea;
++	struct jfs_ea *ea, *ealist_end;
+ 	struct ea_buffer ea_buf;
+ 
+ 	down_read(&JFS_IP(inode)->xattr_sem);
+@@ -876,9 +884,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
+ 		goto release;
+ 
+ 	ealist = (struct jfs_ea_list *) ea_buf.xattr;
++	ealist_end = END_EALIST(ealist);
+ 
+ 	/* compute required size of list */
+-	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
++	for (ea = FIRST_EA(ealist); ea < ealist_end; ea = NEXT_EA(ea)) {
++		if (unlikely(ea + 1 > ealist_end) ||
++		    unlikely(NEXT_EA(ea) > ealist_end)) {
++			size = -EUCLEAN;
++			goto release;
++		}
++
+ 		if (can_list(ea))
+ 			size += name_size(ea) + 1;
+ 	}
+diff --git a/fs/locks.c b/fs/locks.c
+index bdd94c32256f5..9afb16e0683ff 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2570,8 +2570,9 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by releasing the
+-	 * lock that was just acquired. There is no need to do that when we're
++	 * Detect close/fcntl races and recover by zapping all POSIX locks
++	 * associated with this file and our files_struct, just like on
++	 * filp_flush(). There is no need to do that when we're
+ 	 * unlocking though, or for OFD locks.
+ 	 */
+ 	if (!error && file_lock->c.flc_type != F_UNLCK &&
+@@ -2586,9 +2587,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = files_lookup_fd_locked(files, fd);
+ 		spin_unlock(&files->file_lock);
+ 		if (f != filp) {
+-			file_lock->c.flc_type = F_UNLCK;
+-			error = do_lock_file_wait(filp, cmd, file_lock);
+-			WARN_ON_ONCE(error);
++			locks_remove_posix(filp, files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 4085fe30bf481..c14ab9d5cfc70 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -724,7 +724,8 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+ 
+ 	if (!rsize || rsize > bytes ||
+ 	    rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
+-	    le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
++	    le16_to_cpu(rt->total) > ne ||
++			ff > ts - sizeof(__le32) || lf > ts - sizeof(__le32) ||
+ 	    (ff && ff < sizeof(struct RESTART_TABLE)) ||
+ 	    (lf && lf < sizeof(struct RESTART_TABLE))) {
+ 		return false;
+@@ -754,6 +755,9 @@ static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
+ 			return false;
+ 
+ 		off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
++
++		if (off > ts - sizeof(__le32))
++			return false;
+ 	}
+ 
+ 	return true;
+@@ -3722,6 +3726,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ 
+ 	u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
+ 	struct ATTR_NAME_ENTRY *attr_names = NULL;
++	u32 attr_names_bytes = 0;
++	u32 oatbl_bytes = 0;
+ 	struct RESTART_TABLE *dptbl = NULL;
+ 	struct RESTART_TABLE *trtbl = NULL;
+ 	const struct RESTART_TABLE *rt;
+@@ -3736,6 +3742,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ 	struct NTFS_RESTART *rst = NULL;
+ 	struct lcb *lcb = NULL;
+ 	struct OPEN_ATTR_ENRTY *oe;
++	struct ATTR_NAME_ENTRY *ane;
+ 	struct TRANSACTION_ENTRY *tr;
+ 	struct DIR_PAGE_ENTRY *dp;
+ 	u32 i, bytes_per_attr_entry;
+@@ -4314,17 +4321,40 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ 	lcb = NULL;
+ 
+ check_attribute_names2:
+-	if (rst->attr_names_len && oatbl) {
+-		struct ATTR_NAME_ENTRY *ane = attr_names;
+-		while (ane->off) {
++	if (attr_names && oatbl) {
++		off = 0;
++		for (;;) {
++			/* Check we can use attribute name entry 'ane'. */
++			static_assert(sizeof(*ane) == 4);
++			if (off + sizeof(*ane) > attr_names_bytes) {
++				/* just ignore the rest. */
++				break;
++			}
++
++			ane = Add2Ptr(attr_names, off);
++			t16 = le16_to_cpu(ane->off);
++			if (!t16) {
++				/* this is the only valid exit. */
++				break;
++			}
++
++			/* Check we can use open attribute entry 'oe'. */
++			if (t16 + sizeof(*oe) > oatbl_bytes) {
++				/* just ignore the rest. */
++				break;
++			}
++
+ 			/* TODO: Clear table on exit! */
+-			oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
++			oe = Add2Ptr(oatbl, t16);
+ 			t16 = le16_to_cpu(ane->name_bytes);
++			off += t16 + sizeof(*ane);
++			if (off > attr_names_bytes) {
++				/* just ignore the rest. */
++				break;
++			}
+ 			oe->name_len = t16 / sizeof(short);
+ 			oe->ptr = ane->name;
+ 			oe->is_attr_name = 2;
+-			ane = Add2Ptr(ane,
+-				      sizeof(struct ATTR_NAME_ENTRY) + t16);
+ 		}
+ 	}
+ 
+diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
+index d620d4c53c6fa..f0beb173dbba2 100644
+--- a/fs/ocfs2/dir.c
++++ b/fs/ocfs2/dir.c
+@@ -294,13 +294,16 @@ static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len,
+  * bh passed here can be an inode block or a dir data block, depending
+  * on the inode inline data flag.
+  */
+-static int ocfs2_check_dir_entry(struct inode * dir,
+-				 struct ocfs2_dir_entry * de,
+-				 struct buffer_head * bh,
++static int ocfs2_check_dir_entry(struct inode *dir,
++				 struct ocfs2_dir_entry *de,
++				 struct buffer_head *bh,
++				 char *buf,
++				 unsigned int size,
+ 				 unsigned long offset)
+ {
+ 	const char *error_msg = NULL;
+ 	const int rlen = le16_to_cpu(de->rec_len);
++	const unsigned long next_offset = ((char *) de - buf) + rlen;
+ 
+ 	if (unlikely(rlen < OCFS2_DIR_REC_LEN(1)))
+ 		error_msg = "rec_len is smaller than minimal";
+@@ -308,9 +311,11 @@ static int ocfs2_check_dir_entry(struct inode * dir,
+ 		error_msg = "rec_len % 4 != 0";
+ 	else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len)))
+ 		error_msg = "rec_len is too small for name_len";
+-	else if (unlikely(
+-		 ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))
+-		error_msg = "directory entry across blocks";
++	else if (unlikely(next_offset > size))
++		error_msg = "directory entry overrun";
++	else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) &&
++		 next_offset != size)
++		error_msg = "directory entry too close to end";
+ 
+ 	if (unlikely(error_msg != NULL))
+ 		mlog(ML_ERROR, "bad entry in directory #%llu: %s - "
+@@ -352,16 +357,17 @@ static inline int ocfs2_search_dirblock(struct buffer_head *bh,
+ 	de_buf = first_de;
+ 	dlimit = de_buf + bytes;
+ 
+-	while (de_buf < dlimit) {
++	while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) {
+ 		/* this code is executed quadratically often */
+ 		/* do minimal checking `by hand' */
+ 
+ 		de = (struct ocfs2_dir_entry *) de_buf;
+ 
+-		if (de_buf + namelen <= dlimit &&
++		if (de->name + namelen <= dlimit &&
+ 		    ocfs2_match(namelen, name, de)) {
+ 			/* found a match - just to be sure, do a full check */
+-			if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++			if (!ocfs2_check_dir_entry(dir, de, bh, first_de,
++						   bytes, offset)) {
+ 				ret = -1;
+ 				goto bail;
+ 			}
+@@ -1138,7 +1144,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
+ 	pde = NULL;
+ 	de = (struct ocfs2_dir_entry *) first_de;
+ 	while (i < bytes) {
+-		if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
++		if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) {
+ 			status = -EIO;
+ 			mlog_errno(status);
+ 			goto bail;
+@@ -1635,7 +1641,8 @@ int __ocfs2_add_entry(handle_t *handle,
+ 		/* These checks should've already been passed by the
+ 		 * prepare function, but I guess we can leave them
+ 		 * here anyway. */
+-		if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start,
++					   size, offset)) {
+ 			retval = -ENOENT;
+ 			goto bail;
+ 		}
+@@ -1774,7 +1781,8 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
+ 		}
+ 
+ 		de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
+-		if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) {
++		if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data,
++					   i_size_read(inode), ctx->pos)) {
+ 			/* On error, skip the f_pos to the end. */
+ 			ctx->pos = i_size_read(inode);
+ 			break;
+@@ -1867,7 +1875,8 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ 		while (ctx->pos < i_size_read(inode)
+ 		       && offset < sb->s_blocksize) {
+ 			de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
+-			if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
++			if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data,
++						   sb->s_blocksize, offset)) {
+ 				/* On error, skip the f_pos to the
+ 				   next block. */
+ 				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
+@@ -3339,7 +3348,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ 	struct super_block *sb = dir->i_sb;
+ 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ 	struct ocfs2_dir_entry *de, *last_de = NULL;
+-	char *de_buf, *limit;
++	char *first_de, *de_buf, *limit;
+ 	unsigned long offset = 0;
+ 	unsigned int rec_len, new_rec_len, free_space;
+ 
+@@ -3352,14 +3361,16 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
+ 	else
+ 		free_space = dir->i_sb->s_blocksize - i_size_read(dir);
+ 
+-	de_buf = di->id2.i_data.id_data;
++	first_de = di->id2.i_data.id_data;
++	de_buf = first_de;
+ 	limit = de_buf + i_size_read(dir);
+ 	rec_len = OCFS2_DIR_REC_LEN(namelen);
+ 
+ 	while (de_buf < limit) {
+ 		de = (struct ocfs2_dir_entry *)de_buf;
+ 
+-		if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de,
++					   i_size_read(dir), offset)) {
+ 			ret = -ENOENT;
+ 			goto out;
+ 		}
+@@ -3441,7 +3452,8 @@ static int ocfs2_find_dir_space_el(struct inode *dir, const char *name,
+ 			/* move to next block */
+ 			de = (struct ocfs2_dir_entry *) bh->b_data;
+ 		}
+-		if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
++		if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize,
++					   offset)) {
+ 			status = -ENOENT;
+ 			goto bail;
+ 		}
+diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
+index e299e8634751f..62489677f3947 100644
+--- a/sound/core/pcm_dmaengine.c
++++ b/sound/core/pcm_dmaengine.c
+@@ -352,8 +352,12 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_open_request_chan);
+ int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream)
+ {
+ 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
++	struct dma_tx_state state;
++	enum dma_status status;
+ 
+-	dmaengine_synchronize(prtd->dma_chan);
++	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
++	if (status != DMA_PAUSED)
++		dmaengine_synchronize(prtd->dma_chan);
+ 
+ 	return 0;
+ }
+diff --git a/sound/core/seq/seq_ump_client.c b/sound/core/seq/seq_ump_client.c
+index c627d72f7fe20..9cdfbeae3ed59 100644
+--- a/sound/core/seq/seq_ump_client.c
++++ b/sound/core/seq/seq_ump_client.c
+@@ -28,6 +28,7 @@ struct seq_ump_group {
+ 	int group;			/* group index (0-based) */
+ 	unsigned int dir_bits;		/* directions */
+ 	bool active;			/* activeness */
++	bool valid;			/* valid group (referred by blocks) */
+ 	char name[64];			/* seq port name */
+ };
+ 
+@@ -210,6 +211,13 @@ static void fill_port_info(struct snd_seq_port_info *port,
+ 		sprintf(port->name, "Group %d", group->group + 1);
+ }
+ 
++/* skip non-existing group for static blocks */
++static bool skip_group(struct seq_ump_client *client, struct seq_ump_group *group)
++{
++	return !group->valid &&
++		(client->ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS);
++}
++
+ /* create a new sequencer port per UMP group */
+ static int seq_ump_group_init(struct seq_ump_client *client, int group_index)
+ {
+@@ -217,6 +225,9 @@ static int seq_ump_group_init(struct seq_ump_client *client, int group_index)
+ 	struct snd_seq_port_info *port __free(kfree) = NULL;
+ 	struct snd_seq_port_callback pcallbacks;
+ 
++	if (skip_group(client, group))
++		return 0;
++
+ 	port = kzalloc(sizeof(*port), GFP_KERNEL);
+ 	if (!port)
+ 		return -ENOMEM;
+@@ -250,6 +261,9 @@ static void update_port_infos(struct seq_ump_client *client)
+ 		return;
+ 
+ 	for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++) {
++		if (skip_group(client, &client->groups[i]))
++			continue;
++
+ 		old->addr.client = client->seq_client;
+ 		old->addr.port = i;
+ 		err = snd_seq_kernel_client_ctl(client->seq_client,
+@@ -284,6 +298,7 @@ static void update_group_attrs(struct seq_ump_client *client)
+ 		group->dir_bits = 0;
+ 		group->active = 0;
+ 		group->group = i;
++		group->valid = false;
+ 	}
+ 
+ 	list_for_each_entry(fb, &client->ump->block_list, list) {
+@@ -291,6 +306,7 @@ static void update_group_attrs(struct seq_ump_client *client)
+ 			break;
+ 		group = &client->groups[fb->info.first_group];
+ 		for (i = 0; i < fb->info.num_groups; i++, group++) {
++			group->valid = true;
+ 			if (fb->info.active)
+ 				group->active = 1;
+ 			switch (fb->info.direction) {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8a52ed9aa465c..3d91ed8c445c2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10333,6 +10333,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++	SND_PCI_QUIRK(0x10ec, 0x119e, "Positivo SU C1400", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -10347,6 +10348,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP),
++	SND_PCI_QUIRK(0x144d, 0xc1a4, "Samsung Galaxy Book Pro 360 (NT935QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ 	SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
+@@ -10488,6 +10490,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ 	SND_PCI_QUIRK(0x17aa, 0x231e, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318),
+ 	SND_PCI_QUIRK(0x17aa, 0x231f, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318),
++	SND_PCI_QUIRK(0x17aa, 0x2326, "Hera2", ALC287_FIXUP_TAS2781_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [gentoo-commits] proj/linux-patches:6.9 commit in: /
@ 2024-07-27 22:47 Mike Pagano
  0 siblings, 0 replies; 18+ messages in thread
From: Mike Pagano @ 2024-07-27 22:47 UTC (permalink / raw
  To: gentoo-commits

commit:     0877aff178386e9c1abb95779c332e460df7e666
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 27 22:47:30 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul 27 22:47:30 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0877aff1

Update cpu optimization patch. USE=experimental

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 88 ++++++++++++++++-----------
 1 file changed, 51 insertions(+), 37 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 75c48bf1..b5382da3 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,4 +1,4 @@
-From 71dd30c3e2ab2852b0290ae1f34ce1c7f8655040 Mon Sep 17 00:00:00 2001
+From 86977b5357d9212d57841bc325e80f43081bb333 Mon Sep 17 00:00:00 2001
 From: graysky <therealgraysky@proton.me>
 Date: Wed, 21 Feb 2024 08:38:13 -0500
 
@@ -32,8 +32,9 @@ CPU-specific microarchitectures include:
 • AMD Family 15h (Excavator)
 • AMD Family 17h (Zen)
 • AMD Family 17h (Zen 2)
-• AMD Family 19h (Zen 3)†
-• AMD Family 19h (Zen 4)§
+• AMD Family 19h (Zen 3)**
+• AMD Family 19h (Zen 4)‡
+• AMD Family 1Ah (Zen 5)§
 • Intel Silvermont low-power processors
 • Intel Goldmont low-power processors (Apollo Lake and Denverton)
 • Intel Goldmont Plus low-power processors (Gemini Lake)
@@ -50,18 +51,20 @@ CPU-specific microarchitectures include:
 • Intel Xeon (Cascade Lake)
 • Intel Xeon (Cooper Lake)*
 • Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
-• Intel 4th Gen 10nm++ Xeon (Sapphire Rapids)‡
-• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
-• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
-• Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§
-• Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§
-• Intel 5th Gen 10nm++ Xeon (Emerald Rapids)§
+• Intel 4th Gen 10nm++ Xeon (Sapphire Rapids)†
+• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)†
+• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)†
+• Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)‡
+• Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)‡
+• Intel 5th Gen 10nm++ Xeon (Emerald Rapids)‡
 
 Notes: If not otherwise noted, gcc >=9.1 is required for support.
        *Requires gcc >=10.1 or clang >=10.0
-       †Required gcc >=10.3 or clang >=12.0
-       ‡Required gcc >=11.1 or clang >=12.0
-       §Required gcc >=13.0 or clang >=15.0.5
+      **Requires gcc >=10.3 or clang >=12.0
+       †Requires gcc >=11.1 or clang >=12.0
+       ‡Requires gcc >=13.0 or clang >=15.0.5
+       §Requires gcc >=14.1 or clang >=19.0?
+
 
 It also offers to compile passing the 'native' option which, "selects the CPU
 to generate code for at compilation time by determining the processor type of
@@ -101,13 +104,13 @@ REFERENCES
 4.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 5.  http://www.linuxforge.net/docs/linux/linux-gcc.php
 ---
- arch/x86/Kconfig.cpu            | 424 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  44 +++-
- arch/x86/include/asm/vermagic.h |  74 ++++++
- 3 files changed, 526 insertions(+), 16 deletions(-)
+ arch/x86/Kconfig.cpu            | 432 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  45 +++-
+ arch/x86/include/asm/vermagic.h |  76 ++++++
+ 3 files changed, 537 insertions(+), 16 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 2a7279d80460a..6924a0f5f1c26 100644
+index 2a7279d80460a..55941c31ade35 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
@@ -128,7 +131,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  	depends on X86_32
  	help
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -173,12 +173,106 @@ config MK7
+@@ -173,12 +173,114 @@ config MK7
  	  flags to GCC.
  
  config MK8
@@ -232,11 +235,19 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	  Select this for AMD Family 19h Zen 4 processors.
 +
 +	  Enables -march=znver4
++
++config MZEN5
++	bool "AMD Zen 5"
++	depends on (CC_IS_GCC && GCC_VERSION >= 140100) || (CC_IS_CLANG && CLANG_VERSION >= 180000)
++	help
++	  Select this for AMD Family 1Ah Zen 5 processors.
++
++	  Enables -march=znver5
 +
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -270,7 +364,7 @@ config MPSC
+@@ -270,7 +372,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
  config MCORE2
@@ -245,7 +256,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +372,8 @@ config MCORE2
+@@ -278,6 +380,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -254,7 +265,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +383,212 @@ config MATOM
+@@ -287,6 +391,212 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
  
@@ -467,7 +478,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +596,50 @@ config GENERIC_CPU
+@@ -294,6 +604,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
@@ -518,14 +529,14 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  endchoice
  
  config X86_GENERIC
-@@ -318,9 +664,17 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,9 +672,17 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
 +	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 \
 +	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
-+	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT \
 +	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
 +	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
 +	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
@@ -538,7 +549,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  
  config X86_F00F_BUG
  	def_bool y
-@@ -332,15 +686,27 @@ config X86_INVD_BUG
+@@ -332,15 +694,27 @@ config X86_INVD_BUG
  
  config X86_ALIGNMENT_16
  	def_bool y
@@ -561,7 +572,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
 +	|| MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX \
 +	|| MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
-+	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM \
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \
 +	|| MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
 +	|| MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
@@ -569,7 +580,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
-@@ -356,11 +722,22 @@ config X86_USE_PPRO_CHECKSUM
+@@ -356,11 +730,22 @@ config X86_USE_PPRO_CHECKSUM
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
@@ -586,7 +597,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \
 +	|| MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 \
 +	|| MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \
-+	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
++	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM \
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \
 +	|| MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
 +	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
@@ -594,7 +605,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  
  config X86_HAVE_PAE
  	def_bool y
-@@ -368,18 +745,37 @@ config X86_HAVE_PAE
+@@ -368,18 +753,37 @@ config X86_HAVE_PAE
  
  config X86_CMPXCHG64
  	def_bool y
@@ -602,7 +613,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
 +	|| M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 \
 +	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN \
-+	|| MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
++	|| MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
 +	|| MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
 +	|| MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
 +	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
@@ -615,7 +626,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
 +	|| MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 \
 +	|| MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR \
-+	|| MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
++	|| MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
 +	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD)
@@ -627,7 +638,7 @@ index 2a7279d80460a..6924a0f5f1c26 100644
 +	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \
 +	|| MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8 ||  MK8SSE3 \
 +	|| MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \
-+	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \
++	|| MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT \
 +	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
 +	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
+	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
@@ -636,10 +647,10 @@ index 2a7279d80460a..6924a0f5f1c26 100644
  	default "4"
  
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index da8f3caf27815..c873d10df15d0 100644
+index 5ab93fcdd691d..ac203b599befd 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -152,8 +152,48 @@ else
+@@ -156,8 +156,49 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
@@ -658,6 +669,7 @@ index da8f3caf27815..c873d10df15d0 100644
 +        cflags-$(CONFIG_MZEN2) 	+= -march=znver2
 +        cflags-$(CONFIG_MZEN3) 	+= -march=znver3
 +        cflags-$(CONFIG_MZEN4) 	+= -march=znver4
++        cflags-$(CONFIG_MZEN5) 	+= -march=znver5
 +        cflags-$(CONFIG_MNATIVE_INTEL) += -march=native
 +        cflags-$(CONFIG_MNATIVE_AMD) 	+= -march=native
 +        cflags-$(CONFIG_MATOM) 	+= -march=bonnell
@@ -691,7 +703,7 @@ index da8f3caf27815..c873d10df15d0 100644
          KBUILD_CFLAGS += $(cflags-y)
  
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec37..02c1386eb653e 100644
+index 75884d2cdec37..7acca9b5a9d56 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
 @@ -17,6 +17,54 @@
@@ -749,7 +761,7 @@ index 75884d2cdec37..02c1386eb653e 100644
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +83,32 @@
+@@ -35,6 +83,34 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -779,9 +791,11 @@ index 75884d2cdec37..02c1386eb653e 100644
 +#define MODULE_PROC_FAMILY "ZEN3 "
 +#elif defined CONFIG_MZEN4
 +#define MODULE_PROC_FAMILY "ZEN4 "
++#elif defined CONFIG_MZEN5
++#define MODULE_PROC_FAMILY "ZEN5 "
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 -- 
-2.43.2
+2.45.0
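
A note on the kbuild idiom the Makefile hunk above extends: cflags-$(CONFIG_FOO)
expands to cflags-y only when the corresponding Kconfig symbol is set to y, so exactly
one -march flag from the processor choice block survives into KBUILD_CFLAGS. Below is a
minimal standalone sketch of the pattern, a toy Makefile with CONFIG_MZEN5 hard-coded;
in the real build these values come from the generated .config, not from the Makefile.

# Toy Makefile demonstrating the cflags-$(CONFIG_...) accumulation pattern.
CONFIG_MZEN5       := y
CONFIG_MNATIVE_AMD :=

cflags-$(CONFIG_MZEN5)       += -march=znver5
# With CONFIG_MNATIVE_AMD empty, this appends to the unused variable "cflags-":
cflags-$(CONFIG_MNATIVE_AMD) += -march=native

KBUILD_CFLAGS += $(cflags-y)

all:
	@echo 'KBUILD_CFLAGS = $(KBUILD_CFLAGS)'

Running make here prints "KBUILD_CFLAGS = -march=znver5", mirroring how selecting
MZEN5=y against the patched arch/x86/Kconfig.cpu becomes -march=znver5 at compile time.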
 


^ permalink raw reply related	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2024-07-27 22:47 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-07-18 12:14 [gentoo-commits] proj/linux-patches:6.9 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2024-07-27 22:47 Mike Pagano
2024-07-27 13:46 Mike Pagano
2024-07-25 12:08 Mike Pagano
2024-07-11 11:47 Mike Pagano
2024-07-05 10:48 Mike Pagano
2024-07-05 10:47 Mike Pagano
2024-07-03 23:05 Mike Pagano
2024-06-27 12:31 Mike Pagano
2024-06-21 14:06 Mike Pagano
2024-06-16 14:31 Mike Pagano
2024-06-12 10:18 Mike Pagano
2024-05-30 12:02 Mike Pagano
2024-05-30 11:57 Mike Pagano
2024-05-25 15:19 Mike Pagano
2024-05-23 12:41 Mike Pagano
2024-05-17 11:32 Mike Pagano
2024-05-05 18:02 Mike Pagano

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox