public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-01-30 12:46 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-01-30 12:46 UTC (permalink / raw)
  To: gentoo-commits

commit:     04319f1ae252cdd4189fa553e4ccf4676ea67c15
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 30 12:45:31 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 30 12:45:31 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=04319f1a

Updated CPU Optimization patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
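
In practice this is consumed like any other linux-patches entry; a minimal sketch follows (the kernel source path and menu location are assumptions, not part of the commit). Note the corrected compiler gate below means the Zen 5 option now requires GCC > 14.0 or Clang >= 19.1:

    # Apply the updated patch to an unpacked 6.13 tree, then pick a family.
    cd /usr/src/linux
    patch -p1 < 5010_enable-cpu-optimizations-universal.patch
    make menuconfig    # new entries appear in the Processor family choice
    # e.g. CONFIG_MICELAKE_SERVER=y maps to -march=icelake-server (and
    # -Ctarget-cpu=icelake-server for Rust) per the Makefile hunks below.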

 5010_enable-cpu-optimizations-universal.patch | 64 ++++++++++++++++-----------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 0758b0ba..5011aaa6 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -116,13 +116,13 @@ REFERENCES
 4.  http://www.linuxforge.net/docs/linux/linux-gcc.php
 
 ---
- arch/x86/Kconfig.cpu            | 359 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  87 +++++++-
- arch/x86/include/asm/vermagic.h |  70 +++++++
- 3 files changed, 499 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 367 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  89 +++++++-
+ arch/x86/include/asm/vermagic.h |  72 +++++++
+ 3 files changed, 511 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 2a7279d80460..abfadddd1b23 100644
+index ce5ed2c2db0c..6d89f21aba52 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -155,9 +155,8 @@ config MPENTIUM4
@@ -252,7 +252,7 @@ index 2a7279d80460..abfadddd1b23 100644
 +
 +config MZEN5
 +	bool "AMD Zen 5"
-+	depends on (CC_IS_GCC && GCC_VERSION > 140000) || (CC_IS_CLANG && CLANG_VERSION >= 191000)
++	depends on (CC_IS_GCC && GCC_VERSION > 140000) || (CC_IS_CLANG && CLANG_VERSION >= 190100)
 +	help
 +	  Select this for AMD Family 19h Zen 5 processors.
 +
@@ -280,7 +280,7 @@ index 2a7279d80460..abfadddd1b23 100644
  	help
 
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,14 +388,191 @@ config MCORE2
+@@ -278,14 +388,199 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
 
@@ -388,14 +388,22 @@ index 2a7279d80460..abfadddd1b23 100644
 +
 +	  Enables -march=cannonlake
 +
-+config MICELAKE
++config MICELAKE_CLIENT
 +	bool "Intel Ice Lake"
 +	help
 +
-+	  Select this for 10th Gen Core processors in the Ice Lake family.
++	  Select this for 10th Gen Core client processors in the Ice Lake family.
 +
 +	  Enables -march=icelake-client
 +
++config MICELAKE_SERVER
++	bool "Intel Ice Lake Server"
++	help
++
++	  Select this for 10th Gen Core server processors in the Ice Lake family.
++
++	  Enables -march=icelake-server
++
 +config MCASCADELAKE
 +	bool "Intel Cascade Lake"
 +	help
@@ -478,7 +486,7 @@ index 2a7279d80460..abfadddd1b23 100644
 
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -294,6 +581,26 @@ config GENERIC_CPU
+@@ -294,6 +589,26 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
 
@@ -505,7 +513,7 @@ index 2a7279d80460..abfadddd1b23 100644
  endchoice
 
  config X86_GENERIC
-@@ -308,6 +615,30 @@ config X86_GENERIC
+@@ -308,6 +623,30 @@ config X86_GENERIC
  	  This is really intended for distributors who need more
  	  generic optimizations.
 
@@ -536,34 +544,34 @@ index 2a7279d80460..abfadddd1b23 100644
  #
  # Define implied options from the CPU selection here
  config X86_INTERNODE_CACHE_SHIFT
-@@ -318,7 +649,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +657,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
-@@ -336,11 +667,11 @@ config X86_ALIGNMENT_16
+@@ -336,11 +675,11 @@ config X86_ALIGNMENT_16
 
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
 
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MZEN5 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE_CLIENT || MICELAKE_SERVER || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
 
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index cd75e78a06c1..396d1db12bca 100644
+index 3419ffa2a350..aafb069de612 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -181,15 +181,96 @@ else
+@@ -152,15 +152,98 @@ else
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
          cflags-$(CONFIG_MCORE2)		+= -march=core2
@@ -605,7 +613,8 @@ index cd75e78a06c1..396d1db12bca 100644
 +        cflags-$(CONFIG_MSKYLAKE) 	+= -march=skylake
 +        cflags-$(CONFIG_MSKYLAKEX) 	+= -march=skylake-avx512
 +        cflags-$(CONFIG_MCANNONLAKE) 	+= -march=cannonlake
-+        cflags-$(CONFIG_MICELAKE) 	+= -march=icelake-client
++        cflags-$(CONFIG_MICELAKE_CLIENT) 	+= -march=icelake-client
++        cflags-$(CONFIG_MICELAKE_SERVER) 	+= -march=icelake-server
 +        cflags-$(CONFIG_MCASCADELAKE) 	+= -march=cascadelake
 +        cflags-$(CONFIG_MCOOPERLAKE) 	+= -march=cooperlake
 +        cflags-$(CONFIG_MTIGERLAKE) 	+= -march=tigerlake
@@ -650,7 +659,8 @@ index cd75e78a06c1..396d1db12bca 100644
 +        rustflags-$(CONFIG_MSKYLAKE) 	+= -Ctarget-cpu=skylake
 +        rustflags-$(CONFIG_MSKYLAKEX) 	+= -Ctarget-cpu=skylake-avx512
 +        rustflags-$(CONFIG_MCANNONLAKE) 	+= -Ctarget-cpu=cannonlake
-+        rustflags-$(CONFIG_MICELAKE) 	+= -Ctarget-cpu=icelake-client
++        rustflags-$(CONFIG_MICELAKE_CLIENT) 	+= -Ctarget-cpu=icelake-client
++        rustflags-$(CONFIG_MICELAKE_SERVER) 	+= -Ctarget-cpu=icelake-server
 +        rustflags-$(CONFIG_MCASCADELAKE) 	+= -Ctarget-cpu=cascadelake
 +        rustflags-$(CONFIG_MCOOPERLAKE) 	+= -Ctarget-cpu=cooperlake
 +        rustflags-$(CONFIG_MTIGERLAKE) 	+= -Ctarget-cpu=tigerlake
@@ -664,10 +674,10 @@ index cd75e78a06c1..396d1db12bca 100644
 
          KBUILD_CFLAGS += -mno-red-zone
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..f4e29563473d 100644
+index 75884d2cdec3..2fdae271f47f 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,54 @@
+@@ -17,6 +17,56 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -699,8 +709,10 @@ index 75884d2cdec3..f4e29563473d 100644
 +#define MODULE_PROC_FAMILY "SKYLAKEX "
 +#elif defined CONFIG_MCANNONLAKE
 +#define MODULE_PROC_FAMILY "CANNONLAKE "
-+#elif defined CONFIG_MICELAKE
-+#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MICELAKE_CLIENT
++#define MODULE_PROC_FAMILY "ICELAKE_CLIENT "
++#elif defined CONFIG_MICELAKE_SERVER
++#define MODULE_PROC_FAMILY "ICELAKE_SERVER "
 +#elif defined CONFIG_MCASCADELAKE
 +#define MODULE_PROC_FAMILY "CASCADELAKE "
 +#elif defined CONFIG_MCOOPERLAKE
@@ -722,7 +734,7 @@ index 75884d2cdec3..f4e29563473d 100644
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +83,28 @@
+@@ -35,6 +85,28 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -752,5 +764,5 @@ index 75884d2cdec3..f4e29563473d 100644
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 --
-2.46.2
+2.47.1
 



* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-01 23:05 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-01 23:05 UTC (permalink / raw)
  To: gentoo-commits

commit:     f97a8476b1d19d17e90c9f24fb286c3a91eb9dba
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  1 23:05:35 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  1 23:05:35 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f97a8476

Linux patch 6.13.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
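
A quick sanity check after applying the bump (a sketch; assumes GNU patch and a pristine 6.13 tree):

    # 1000_linux-6.13.1.patch flips SUBLEVEL from 0 to 1 in the top-level Makefile.
    patch -p1 < 1000_linux-6.13.1.patch
    make kernelversion    # should print 6.13.1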

 0000_README             |   4 +
 1000_linux-6.13.1.patch | 964 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 968 insertions(+)

diff --git a/0000_README b/0000_README
index f5c88dfc..7b1940a4 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.13.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.1
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1000_linux-6.13.1.patch b/1000_linux-6.13.1.patch
new file mode 100644
index 00000000..be62d35a
--- /dev/null
+++ b/1000_linux-6.13.1.patch
@@ -0,0 +1,964 @@
+diff --git a/Makefile b/Makefile
+index b9464c88ac7230..7bc322bc7ad80c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index da203045df9bec..72b6a119412fa7 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -107,8 +107,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
+ 		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->bin_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -118,8 +120,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
+ 		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->render_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -129,8 +133,10 @@ v3d_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
+ 		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->csd_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+@@ -167,8 +173,10 @@ v3d_hub_irq(int irq, void *arg)
+ 
+ 		v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
+ 		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+-		dma_fence_signal(&fence->base);
++
+ 		v3d->tfu_job = NULL;
++		dma_fence_signal(&fence->base);
++
+ 		status = IRQ_HANDLED;
+ 	}
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 1f47fda809b9a0..d1d479ca50a214 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -506,7 +506,6 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+ 
+ #define I2C_VENDOR_ID_GOODIX		0x27c6
+-#define I2C_DEVICE_ID_GOODIX_01E0	0x01e0
+ #define I2C_DEVICE_ID_GOODIX_01E8	0x01e8
+ #define I2C_DEVICE_ID_GOODIX_01E9	0x01e9
+ #define I2C_DEVICE_ID_GOODIX_01F0	0x01f0
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 785743036647ca..65023bfe30ed28 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1460,8 +1460,7 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ {
+ 	if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
+ 	    (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
+-	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
+-		 hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
++	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+ 		if (rdesc[607] == 0x15) {
+ 			rdesc[607] = 0x25;
+ 			dev_info(
+@@ -2085,10 +2084,7 @@ static const struct hid_device_id mt_devices[] = {
+ 		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ 	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E9) },
+-	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+-	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E0) },
++		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 
+ 	/* GoodTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 9843b52bd017a0..34428349fa3118 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -1370,17 +1370,6 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
+ 	if (!name)
+ 		return -ENOMEM;
+ 
+-	if (!read_only) {
+-		led->trigger.name = name;
+-		error = devm_led_trigger_register(dev, &led->trigger);
+-		if (error) {
+-			hid_err(wacom->hdev,
+-				"failed to register LED trigger %s: %d\n",
+-				led->cdev.name, error);
+-			return error;
+-		}
+-	}
+-
+ 	led->group = group;
+ 	led->id = id;
+ 	led->wacom = wacom;
+@@ -1397,6 +1386,19 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
+ 		led->cdev.brightness_set = wacom_led_readonly_brightness_set;
+ 	}
+ 
++	if (!read_only) {
++		led->trigger.name = name;
++		if (id == wacom->led.groups[group].select)
++			led->trigger.brightness = wacom_leds_brightness_get(led);
++		error = devm_led_trigger_register(dev, &led->trigger);
++		if (error) {
++			hid_err(wacom->hdev,
++				"failed to register LED trigger %s: %d\n",
++				led->cdev.name, error);
++			return error;
++		}
++	}
++
+ 	error = devm_led_classdev_register(dev, &led->cdev);
+ 	if (error) {
+ 		hid_err(wacom->hdev,
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index ff9bc87f2f7092..8fe2a51df649ed 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -150,6 +150,7 @@ static const struct xpad_device {
+ 	{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ 	{ 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
+ 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
++	{ 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ 	{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+ 	{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE },
+@@ -305,6 +306,7 @@ static const struct xpad_device {
+ 	{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ 	{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ 	{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
++	{ 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -373,16 +375,19 @@ static const struct xpad_device {
+ 	{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
+ 	{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ 	{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
+-	{ 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
++	{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 },
++	{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
+ 	{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ 	{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
++	{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
++	{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
+ 	{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ 	{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ 	{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+@@ -514,6 +519,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
+ 	XPAD_XBOX360_VENDOR(0x17ef),		/* Lenovo */
+ 	XPAD_XBOX360_VENDOR(0x1949),		/* Amazon controllers */
++	XPAD_XBOX360_VENDOR(0x1a86),		/* QH Electronics */
+ 	XPAD_XBOX360_VENDOR(0x1bad),		/* Harmonix Rock Band guitar and drums */
+ 	XPAD_XBOX360_VENDOR(0x20d6),		/* PowerA controllers */
+ 	XPAD_XBOXONE_VENDOR(0x20d6),		/* PowerA controllers */
+@@ -530,6 +536,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOX360_VENDOR(0x2f24),		/* GameSir controllers */
+ 	XPAD_XBOX360_VENDOR(0x31e3),		/* Wooting Keyboards */
+ 	XPAD_XBOX360_VENDOR(0x3285),		/* Nacon GC-100 */
++	XPAD_XBOXONE_VENDOR(0x3285),		/* Nacon Evol-X */
+ 	XPAD_XBOX360_VENDOR(0x3537),		/* GameSir Controllers */
+ 	XPAD_XBOXONE_VENDOR(0x3537),		/* GameSir Controllers */
+ 	{ }
+diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
+index ec94fcfa4cdebe..adf0f311996c9b 100644
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
+ 	  0, 46, 45, 32, 18,  5,  4, 95,  0, 57, 47, 33, 20, 19,  6,183,
+ 	  0, 49, 48, 35, 34, 21,  7,184,  0,  0, 50, 36, 22,  8,  9,185,
+ 	  0, 51, 37, 23, 24, 11, 10,  0,  0, 52, 53, 38, 39, 25, 12,  0,
+-	  0, 89, 40,  0, 26, 13,  0,  0, 58, 54, 28, 27,  0, 43,  0, 85,
++	  0, 89, 40,  0, 26, 13,  0,193, 58, 54, 28, 27,  0, 43,  0, 85,
+ 	  0, 86, 91, 90, 92,  0, 14, 94,  0, 79,124, 75, 71,121,  0,  0,
+ 	 82, 83, 80, 76, 77, 72,  1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
+ 
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+index f95898f68d68a5..4ce0c05c512910 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
+@@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+@@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+@@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+@@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+@@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+@@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+@@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
++{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
++	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+ 	.driver_info = (unsigned long)&rtl8192cu_fops},
+ {USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index d0b55c1fa908a5..b3c588b102d900 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -171,6 +171,12 @@ do {								\
+ 		dev_warn(&(dev)->device, fmt, ##__VA_ARGS__);	\
+ } while (0)
+ 
++#define storvsc_log_ratelimited(dev, level, fmt, ...)				\
++do {										\
++	if (do_logging(level))							\
++		dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__);	\
++} while (0)
++
+ struct vmscsi_request {
+ 	u16 length;
+ 	u8 srb_status;
+@@ -1177,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
+ 		int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+ 			STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
+ 
+-		storvsc_log(device, loglevel,
++		storvsc_log_ratelimited(device, loglevel,
+ 			"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ 			scsi_cmd_to_rq(request->cmd)->tag,
+ 			stor_pkt->vm_srb.cdb[0],
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index bc143a86c2ddf0..53d9fc41acc522 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1420,10 +1420,6 @@ void gserial_disconnect(struct gserial *gser)
+ 	/* REVISIT as above: how best to track this? */
+ 	port->port_line_coding = gser->port_line_coding;
+ 
+-	/* disable endpoints, aborting down any active I/O */
+-	usb_ep_disable(gser->out);
+-	usb_ep_disable(gser->in);
+-
+ 	port->port_usb = NULL;
+ 	gser->ioport = NULL;
+ 	if (port->port.count > 0) {
+@@ -1435,6 +1431,10 @@ void gserial_disconnect(struct gserial *gser)
+ 	spin_unlock(&port->port_lock);
+ 	spin_unlock_irqrestore(&serial_port_lock, flags);
+ 
++	/* disable endpoints, aborting down any active I/O */
++	usb_ep_disable(gser->out);
++	usb_ep_disable(gser->in);
++
+ 	/* finally, free any unused/unusable I/O buffers */
+ 	spin_lock_irqsave(&port->port_lock, flags);
+ 	if (port->port.count == 0)
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index a317bdbd00ad5c..72fe83a6c97801 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -503,7 +503,7 @@ static void qt2_process_read_urb(struct urb *urb)
+ 
+ 				newport = *(ch + 3);
+ 
+-				if (newport > serial->num_ports) {
++				if (newport >= serial->num_ports) {
+ 					dev_err(&port->dev,
+ 						"%s - port change to invalid port: %i\n",
+ 						__func__, newport);
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index e53757d1d0958a..3bf1043cd7957c 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -388,6 +388,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ {
+ 	unsigned int done = 0;
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -467,6 +472,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ {
+ 	unsigned int done = 0;
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 1e73cf87ff88b1..c9bb3be21d2ba7 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
+ 		error = filemap_fdatawait(inode->i_mapping);
+ 		if (error)
+ 			goto out;
++		truncate_inode_pages(inode->i_mapping, 0);
+ 		if (new_flags & GFS2_DIF_JDATA)
+ 			gfs2_ordered_del_inode(ip);
+ 	}
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 748ac59231547c..279442b1fe9684 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -245,9 +245,16 @@ const struct inode_operations simple_dir_inode_operations = {
+ };
+ EXPORT_SYMBOL(simple_dir_inode_operations);
+ 
+-/* 0 is '.', 1 is '..', so always start with offset 2 or more */
++/* simple_offset_add() never assigns these to a dentry */
+ enum {
+-	DIR_OFFSET_MIN	= 2,
++	DIR_OFFSET_FIRST	= 2,		/* Find first real entry */
++	DIR_OFFSET_EOD		= S32_MAX,
++};
++
++/* simple_offset_add() allocation range */
++enum {
++	DIR_OFFSET_MIN		= DIR_OFFSET_FIRST + 1,
++	DIR_OFFSET_MAX		= DIR_OFFSET_EOD - 1,
+ };
+ 
+ static void offset_set(struct dentry *dentry, long offset)
+@@ -291,9 +298,10 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
+ 		return -EBUSY;
+ 
+ 	ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
+-				 LONG_MAX, &octx->next_offset, GFP_KERNEL);
+-	if (ret < 0)
+-		return ret;
++				 DIR_OFFSET_MAX, &octx->next_offset,
++				 GFP_KERNEL);
++	if (unlikely(ret < 0))
++		return ret == -EBUSY ? -ENOSPC : ret;
+ 
+ 	offset_set(dentry, offset);
+ 	return 0;
+@@ -329,38 +337,6 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
+ 	offset_set(dentry, 0);
+ }
+ 
+-/**
+- * simple_offset_empty - Check if a dentry can be unlinked
+- * @dentry: dentry to be tested
+- *
+- * Returns 0 if @dentry is a non-empty directory; otherwise returns 1.
+- */
+-int simple_offset_empty(struct dentry *dentry)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	struct offset_ctx *octx;
+-	struct dentry *child;
+-	unsigned long index;
+-	int ret = 1;
+-
+-	if (!inode || !S_ISDIR(inode->i_mode))
+-		return ret;
+-
+-	index = DIR_OFFSET_MIN;
+-	octx = inode->i_op->get_offset_ctx(inode);
+-	mt_for_each(&octx->mt, child, index, LONG_MAX) {
+-		spin_lock(&child->d_lock);
+-		if (simple_positive(child)) {
+-			spin_unlock(&child->d_lock);
+-			ret = 0;
+-			break;
+-		}
+-		spin_unlock(&child->d_lock);
+-	}
+-
+-	return ret;
+-}
+-
+ /**
+  * simple_offset_rename - handle directory offsets for rename
+  * @old_dir: parent directory of source entry
+@@ -454,14 +430,6 @@ void simple_offset_destroy(struct offset_ctx *octx)
+ 	mtree_destroy(&octx->mt);
+ }
+ 
+-static int offset_dir_open(struct inode *inode, struct file *file)
+-{
+-	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+-
+-	file->private_data = (void *)ctx->next_offset;
+-	return 0;
+-}
+-
+ /**
+  * offset_dir_llseek - Advance the read position of a directory descriptor
+  * @file: an open directory whose position is to be updated
+@@ -475,9 +443,6 @@ static int offset_dir_open(struct inode *inode, struct file *file)
+  */
+ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ {
+-	struct inode *inode = file->f_inode;
+-	struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+-
+ 	switch (whence) {
+ 	case SEEK_CUR:
+ 		offset += file->f_pos;
+@@ -490,62 +455,89 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+ 		return -EINVAL;
+ 	}
+ 
+-	/* In this case, ->private_data is protected by f_pos_lock */
+-	if (!offset)
+-		file->private_data = (void *)ctx->next_offset;
+ 	return vfs_setpos(file, offset, LONG_MAX);
+ }
+ 
+-static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset)
++static struct dentry *find_positive_dentry(struct dentry *parent,
++					   struct dentry *dentry,
++					   bool next)
+ {
+-	MA_STATE(mas, &octx->mt, offset, offset);
++	struct dentry *found = NULL;
++
++	spin_lock(&parent->d_lock);
++	if (next)
++		dentry = d_next_sibling(dentry);
++	else if (!dentry)
++		dentry = d_first_child(parent);
++	hlist_for_each_entry_from(dentry, d_sib) {
++		if (!simple_positive(dentry))
++			continue;
++		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++		if (simple_positive(dentry))
++			found = dget_dlock(dentry);
++		spin_unlock(&dentry->d_lock);
++		if (likely(found))
++			break;
++	}
++	spin_unlock(&parent->d_lock);
++	return found;
++}
++
++static noinline_for_stack struct dentry *
++offset_dir_lookup(struct dentry *parent, loff_t offset)
++{
++	struct inode *inode = d_inode(parent);
++	struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
+ 	struct dentry *child, *found = NULL;
+ 
+-	rcu_read_lock();
+-	child = mas_find(&mas, LONG_MAX);
+-	if (!child)
+-		goto out;
+-	spin_lock(&child->d_lock);
+-	if (simple_positive(child))
+-		found = dget_dlock(child);
+-	spin_unlock(&child->d_lock);
+-out:
+-	rcu_read_unlock();
++	MA_STATE(mas, &octx->mt, offset, offset);
++
++	if (offset == DIR_OFFSET_FIRST)
++		found = find_positive_dentry(parent, NULL, false);
++	else {
++		rcu_read_lock();
++		child = mas_find(&mas, DIR_OFFSET_MAX);
++		found = find_positive_dentry(parent, child, false);
++		rcu_read_unlock();
++	}
+ 	return found;
+ }
+ 
+ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+ {
+ 	struct inode *inode = d_inode(dentry);
+-	long offset = dentry2offset(dentry);
+ 
+-	return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
+-			  inode->i_ino, fs_umode_to_dtype(inode->i_mode));
++	return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
++			inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ }
+ 
+-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
++static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
+ {
+-	struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
++	struct dentry *dir = file->f_path.dentry;
+ 	struct dentry *dentry;
+ 
++	dentry = offset_dir_lookup(dir, ctx->pos);
++	if (!dentry)
++		goto out_eod;
+ 	while (true) {
+-		dentry = offset_find_next(octx, ctx->pos);
+-		if (!dentry)
+-			return;
+-
+-		if (dentry2offset(dentry) >= last_index) {
+-			dput(dentry);
+-			return;
+-		}
++		struct dentry *next;
+ 
+-		if (!offset_dir_emit(ctx, dentry)) {
+-			dput(dentry);
+-			return;
+-		}
++		ctx->pos = dentry2offset(dentry);
++		if (!offset_dir_emit(ctx, dentry))
++			break;
+ 
+-		ctx->pos = dentry2offset(dentry) + 1;
++		next = find_positive_dentry(dir, dentry, true);
+ 		dput(dentry);
++
++		if (!next)
++			goto out_eod;
++		dentry = next;
+ 	}
++	dput(dentry);
++	return;
++
++out_eod:
++	ctx->pos = DIR_OFFSET_EOD;
+ }
+ 
+ /**
+@@ -565,6 +557,8 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
+  *
+  * On return, @ctx->pos contains an offset that will read the next entry
+  * in this directory when offset_readdir() is called again with @ctx.
++ * Caller places this value in the d_off field of the last entry in the
++ * user's buffer.
+  *
+  * Return values:
+  *   %0 - Complete
+@@ -572,19 +566,17 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
+ static int offset_readdir(struct file *file, struct dir_context *ctx)
+ {
+ 	struct dentry *dir = file->f_path.dentry;
+-	long last_index = (long)file->private_data;
+ 
+ 	lockdep_assert_held(&d_inode(dir)->i_rwsem);
+ 
+ 	if (!dir_emit_dots(file, ctx))
+ 		return 0;
+-
+-	offset_iterate_dir(d_inode(dir), ctx, last_index);
++	if (ctx->pos != DIR_OFFSET_EOD)
++		offset_iterate_dir(file, ctx);
+ 	return 0;
+ }
+ 
+ const struct file_operations simple_offset_dir_operations = {
+-	.open		= offset_dir_open,
+ 	.llseek		= offset_dir_llseek,
+ 	.iterate_shared	= offset_readdir,
+ 	.read		= generic_read_dir,
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index a55f0044d30bde..b935c1a62d10cf 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -176,27 +176,27 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 			    struct kvec *out_iov, int *out_buftype, struct dentry *dentry)
+ {
+ 
+-	struct reparse_data_buffer *rbuf;
++	struct smb2_query_info_rsp *qi_rsp = NULL;
+ 	struct smb2_compound_vars *vars = NULL;
+-	struct kvec *rsp_iov, *iov;
+-	struct smb_rqst *rqst;
+-	int rc;
+-	__le16 *utf16_path = NULL;
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+-	struct cifs_fid fid;
++	struct cifs_open_info_data *idata;
+ 	struct cifs_ses *ses = tcon->ses;
++	struct reparse_data_buffer *rbuf;
+ 	struct TCP_Server_Info *server;
+-	int num_rqst = 0, i;
+ 	int resp_buftype[MAX_COMPOUND];
+-	struct smb2_query_info_rsp *qi_rsp = NULL;
+-	struct cifs_open_info_data *idata;
++	int retries = 0, cur_sleep = 1;
++	__u8 delete_pending[8] = {1,};
++	struct kvec *rsp_iov, *iov;
+ 	struct inode *inode = NULL;
+-	int flags = 0;
+-	__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
++	__le16 *utf16_path = NULL;
++	struct smb_rqst *rqst;
+ 	unsigned int size[2];
+-	void *data[2];
++	struct cifs_fid fid;
++	int num_rqst = 0, i;
+ 	unsigned int len;
+-	int retries = 0, cur_sleep = 1;
++	int tmp_rc, rc;
++	int flags = 0;
++	void *data[2];
+ 
+ replay_again:
+ 	/* reinitialize for possible replay */
+@@ -637,7 +637,14 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		tcon->need_reconnect = true;
+ 	}
+ 
++	tmp_rc = rc;
+ 	for (i = 0; i < num_cmds; i++) {
++		char *buf = rsp_iov[i + i].iov_base;
++
++		if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER)
++			rc = server->ops->map_error(buf, false);
++		else
++			rc = tmp_rc;
+ 		switch (cmds[i]) {
+ 		case SMB2_OP_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
+@@ -803,6 +810,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	SMB2_close_free(&rqst[num_rqst]);
++	rc = tmp_rc;
+ 
+ 	num_cmds += 2;
+ 	if (out_iov && out_buftype) {
+@@ -858,22 +866,52 @@ static int parse_create_response(struct cifs_open_info_data *data,
+ 	return rc;
+ }
+ 
++/* Check only if SMB2_OP_QUERY_WSL_EA command failed in the compound chain */
++static bool ea_unsupported(int *cmds, int num_cmds,
++			   struct kvec *out_iov, int *out_buftype)
++{
++	int i;
++
++	if (cmds[num_cmds - 1] != SMB2_OP_QUERY_WSL_EA)
++		return false;
++
++	for (i = 1; i < num_cmds - 1; i++) {
++		struct smb2_hdr *hdr = out_iov[i].iov_base;
++
++		if (out_buftype[i] == CIFS_NO_BUFFER || !hdr ||
++		    hdr->Status != STATUS_SUCCESS)
++			return false;
++	}
++	return true;
++}
++
++static inline void free_rsp_iov(struct kvec *iovs, int *buftype, int count)
++{
++	int i;
++
++	for (i = 0; i < count; i++) {
++		free_rsp_buf(buftype[i], iovs[i].iov_base);
++		memset(&iovs[i], 0, sizeof(*iovs));
++		buftype[i] = CIFS_NO_BUFFER;
++	}
++}
++
+ int smb2_query_path_info(const unsigned int xid,
+ 			 struct cifs_tcon *tcon,
+ 			 struct cifs_sb_info *cifs_sb,
+ 			 const char *full_path,
+ 			 struct cifs_open_info_data *data)
+ {
++	struct kvec in_iov[3], out_iov[5] = {};
++	struct cached_fid *cfid = NULL;
+ 	struct cifs_open_parms oparms;
+-	__u32 create_options = 0;
+ 	struct cifsFileInfo *cfile;
+-	struct cached_fid *cfid = NULL;
++	__u32 create_options = 0;
++	int out_buftype[5] = {};
+ 	struct smb2_hdr *hdr;
+-	struct kvec in_iov[3], out_iov[3] = {};
+-	int out_buftype[3] = {};
++	int num_cmds = 0;
+ 	int cmds[3];
+ 	bool islink;
+-	int i, num_cmds = 0;
+ 	int rc, rc2;
+ 
+ 	data->adjust_tz = false;
+@@ -943,14 +981,14 @@ int smb2_query_path_info(const unsigned int xid,
+ 		if (rc || !data->reparse_point)
+ 			goto out;
+ 
+-		if (!tcon->posix_extensions)
+-			cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ 		/*
+ 		 * Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
+ 		 * response.
+ 		 */
+ 		if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK)
+ 			cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
++		if (!tcon->posix_extensions)
++			cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ 
+ 		oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ 				     FILE_READ_ATTRIBUTES |
+@@ -958,9 +996,18 @@ int smb2_query_path_info(const unsigned int xid,
+ 				     FILE_OPEN, create_options |
+ 				     OPEN_REPARSE_POINT, ACL_NO_MODE);
+ 		cifs_get_readable_path(tcon, full_path, &cfile);
++		free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
+ 		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ 				      &oparms, in_iov, cmds, num_cmds,
+-				      cfile, NULL, NULL, NULL);
++				      cfile, out_iov, out_buftype, NULL);
++		if (rc && ea_unsupported(cmds, num_cmds,
++					 out_iov, out_buftype)) {
++			if (data->reparse.tag != IO_REPARSE_TAG_LX_BLK &&
++			    data->reparse.tag != IO_REPARSE_TAG_LX_CHR)
++				rc = 0;
++			else
++				rc = -EOPNOTSUPP;
++		}
+ 		break;
+ 	case -EREMOTE:
+ 		break;
+@@ -978,8 +1025,7 @@ int smb2_query_path_info(const unsigned int xid,
+ 	}
+ 
+ out:
+-	for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
+-		free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
++	free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
+ 	return rc;
+ }
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7e29433c5ecce2..f7efc6866ebc9a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3468,7 +3468,6 @@ struct offset_ctx {
+ void simple_offset_init(struct offset_ctx *octx);
+ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+-int simple_offset_empty(struct dentry *dentry);
+ int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 			 struct inode *new_dir, struct dentry *new_dentry);
+ int simple_offset_rename_exchange(struct inode *old_dir,
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 69937d0c94f951..cc58defd88d427 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -931,6 +931,13 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
+ 	int i, ret, off, nr;
+ 	unsigned int nbufs;
+ 
++	/*
++	 * Accounting state is shared between the two rings; that only works if
++	 * both rings are accounted towards the same counters.
++	 */
++	if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
++		return -EINVAL;
++
+ 	/* if offsets are given, must have nr specified too */
+ 	if (!arg->nr && (arg->dst_off || arg->src_off))
+ 		return -EINVAL;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 4f476411a9a2da..440922a7d8f17d 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -4375,6 +4375,20 @@ static void filemap_cachestat(struct address_space *mapping,
+ 	rcu_read_unlock();
+ }
+ 
++/*
++ * See mincore: reveal pagecache information only for files
++ * that the calling process has write access to, or could (if
++ * tried) open for writing.
++ */
++static inline bool can_do_cachestat(struct file *f)
++{
++	if (f->f_mode & FMODE_WRITE)
++		return true;
++	if (inode_owner_or_capable(file_mnt_idmap(f), file_inode(f)))
++		return true;
++	return file_permission(f, MAY_WRITE) == 0;
++}
++
+ /*
+  * The cachestat(2) system call.
+  *
+@@ -4430,6 +4444,9 @@ SYSCALL_DEFINE4(cachestat, unsigned int, fd,
+ 	if (is_file_hugepages(fd_file(f)))
+ 		return -EOPNOTSUPP;
+ 
++	if (!can_do_cachestat(fd_file(f)))
++		return -EPERM;
++
+ 	if (flags != 0)
+ 		return -EINVAL;
+ 
+diff --git a/mm/shmem.c b/mm/shmem.c
+index fdb5afa1cfe9a6..e10d6e09246205 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3821,7 +3821,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
+ 
+ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+-	if (!simple_offset_empty(dentry))
++	if (!simple_empty(dentry))
+ 		return -ENOTEMPTY;
+ 
+ 	drop_nlink(d_inode(dentry));
+@@ -3878,7 +3878,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
+ 		return simple_offset_rename_exchange(old_dir, old_dentry,
+ 						     new_dir, new_dentry);
+ 
+-	if (!simple_offset_empty(new_dentry))
++	if (!simple_empty(new_dentry))
+ 		return -ENOTEMPTY;
+ 
+ 	if (flags & RENAME_WHITEOUT) {
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index f80bc05d4c5a50..516038a4416380 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -91,6 +91,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct ets_sched *q = qdisc_priv(sch);
+ 
++	if (arg == 0 || arg > q->nbands)
++		return NULL;
+ 	return &q->classes[arg - 1];
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8ba0aff8be2ec2..7968d6a2f592ac 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2239,6 +2239,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */
+ 		   QUIRK_FLAG_GET_SAMPLE_RATE),
++	DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ 		   QUIRK_FLAG_FIXED_RATE),
+ 	DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
+index 175004ce44b216..51a95239fe0631 100644
+--- a/tools/power/cpupower/Makefile
++++ b/tools/power/cpupower/Makefile
+@@ -87,11 +87,19 @@ INSTALL_SCRIPT = ${INSTALL} -m 644
+ # to something more interesting, like "arm-linux-".  If you want
+ # to compile vs uClibc, that can be done here as well.
+ CROSS ?= #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
++ifneq ($(CROSS), )
++CC = $(CROSS)gcc
++LD = $(CROSS)gcc
++AR = $(CROSS)ar
++STRIP = $(CROSS)strip
++RANLIB = $(CROSS)ranlib
++else
+ CC ?= $(CROSS)gcc
+ LD ?= $(CROSS)gcc
+ AR ?= $(CROSS)ar
+ STRIP ?= $(CROSS)strip
+ RANLIB ?= $(CROSS)ranlib
++endif
+ HOSTCC = gcc
+ MKDIR = mkdir
+ 



* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-05 17:10 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-05 17:10 UTC (permalink / raw)
  To: gentoo-commits

commit:     b8288cc8724600c44ed8e281e969623aef421989
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb  5 17:09:43 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb  5 17:09:43 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b8288cc8

x86/insn_decoder_test: allow longer symbol-names

Bug: https://bugs.gentoo.org/949240
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
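
The overflow is easy to demonstrate against a build that contains mangled Rust symbols (an illustrative one-liner; the vmlinux path is assumed):

    # Count disassembly lines longer than the old 256-byte BUFSIZE limit;
    # mangled Rust symbol names routinely exceed it.
    objdump -d vmlinux | awk 'length($0) > 256 { n++ } END { print n+0, "overlong lines" }'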

 0000_README                                        |  4 ++
 ...sn-decoder-test-allow-longer-symbol-names.patch | 49 ++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/0000_README b/0000_README
index 7b1940a4..59bf41fa 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1730_parisc-Disable-prctl.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 Desc:   prctl: Temporarily disable prctl(PR_SET_MDWE) on parisc
 
+Patch:  1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
+From:   https://gitlab.com/cki-project/kernel-ark/-/commit/8d4a52c3921d278f27241fc0c6949d8fdc13a7f5
+Desc:   x86/insn_decoder_test: allow longer symbol-names
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch b/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
new file mode 100644
index 00000000..70c706ba
--- /dev/null
+++ b/1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
@@ -0,0 +1,49 @@
+From 8d4a52c3921d278f27241fc0c6949d8fdc13a7f5 Mon Sep 17 00:00:00 2001
+From: David Rheinsberg <david@readahead.eu>
+Date: Tue, 24 Jan 2023 12:04:59 +0100
+Subject: [PATCH] x86/insn_decoder_test: allow longer symbol-names
+
+Increase the allowed line-length of the insn-decoder-test to 4k to allow
+for symbol-names longer than 256 characters.
+
+The insn-decoder-test takes objdump output as input, which may contain
+symbol-names as instruction arguments. With rust-code entering the
+kernel, those symbol-names will include mangled-symbols which might
+exceed the current line-length-limit of the tool.
+
+By bumping the line-length-limit of the tool to 4k, we get a reasonable
+buffer for all objdump outputs I have seen so far. Unfortunately, ELF
+symbol-names are not restricted in length, so technically this might
+still end up failing if we encounter longer names in the future.
+
+My compile-failure looks like this:
+
+    arch/x86/tools/insn_decoder_test: error: malformed line 1152000:
+    tBb_+0xf2>
+
+..which overflowed by 10 characters reading this line:
+
+    ffffffff81458193:   74 3d                   je     ffffffff814581d2 <_RNvXse_NtNtNtCshGpAVYOtgW1_4core4iter8adapters7flattenINtB5_13FlattenCompatINtNtB7_3map3MapNtNtNtBb_3str4iter5CharsNtB1v_17CharEscapeDefaultENtNtBb_4char13EscapeDefaultENtNtBb_3fmt5Debug3fmtBb_+0xf2>
+
+Signed-off-by: David Rheinsberg <david@readahead.eu>
+Signed-off-by: Scott Weaver <scweaver@redhat.com>
+---
+ arch/x86/tools/insn_decoder_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
+index 472540aeabc23..366e07546344b 100644
+--- a/arch/x86/tools/insn_decoder_test.c
++++ b/arch/x86/tools/insn_decoder_test.c
+@@ -106,7 +106,7 @@ static void parse_args(int argc, char **argv)
+ 	}
+ }
+ 
+-#define BUFSIZE 256
++#define BUFSIZE 4096
+ 
+ int main(int argc, char **argv)
+ {
+-- 
+GitLab
+



* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-08 11:25 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-08 11:25 UTC (permalink / raw)
  To: gentoo-commits

commit:     1b8a169189f3183fc612e9e200cc3b7d85dd0505
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  8 11:25:02 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  8 11:25:02 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1b8a1691

Linux patch 6.13.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1001_linux-6.13.2.patch | 25138 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 25142 insertions(+)

diff --git a/0000_README b/0000_README
index 59bf41fa..a507da31 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.13.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.13.1
 
+Patch:  1001_linux-6.13.2.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.2
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1001_linux-6.13.2.patch b/1001_linux-6.13.2.patch
new file mode 100644
index 00000000..8d46a722
--- /dev/null
+++ b/1001_linux-6.13.2.patch
@@ -0,0 +1,25138 @@
+diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+index 58f8a01f29c7aa..4536bb2f971f3b 100644
+--- a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
++++ b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+@@ -168,7 +168,8 @@ examples:
+             reg = <0xaf54000 0x104>,
+                   <0xaf54200 0x0c0>,
+                   <0xaf55000 0x770>,
+-                  <0xaf56000 0x09c>;
++                  <0xaf56000 0x09c>,
++                  <0xaf57000 0x09c>;
+ 
+             interrupt-parent = <&mdss0>;
+             interrupts = <12>;
+diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+index e850a8894758df..bb40bb9e036ee0 100644
+--- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
++++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml
+@@ -27,7 +27,7 @@ properties:
+     description: |
+       For multicolor LED support this property should be defined as either
+       LED_COLOR_ID_RGB or LED_COLOR_ID_MULTI which can be found in
+-      include/linux/leds/common.h.
++      include/dt-bindings/leds/common.h.
+     enum: [ 8, 9 ]
+ 
+ required:
+diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+index bb81307dc11b89..4fc78efaa5504a 100644
+--- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
++++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml
+@@ -50,15 +50,15 @@ properties:
+     minimum: 0
+     maximum: 1
+ 
+-  rohm,charger-sense-resistor-ohms:
+-    minimum: 10000000
+-    maximum: 50000000
++  rohm,charger-sense-resistor-micro-ohms:
++    minimum: 10000
++    maximum: 50000
+     description: |
+-      BD71827 and BD71828 have SAR ADC for measuring charging currents.
+-      External sense resistor (RSENSE in data sheet) should be used. If
+-      something other but 30MOhm resistor is used the resistance value
+-      should be given here in Ohms.
+-    default: 30000000
++      BD71815 has SAR ADC for measuring charging currents. External sense
++      resistor (RSENSE in data sheet) should be used. If something other
++      but a 30 mOhm resistor is used the resistance value should be given
++      here in micro Ohms.
++    default: 30000
+ 
+   regulators:
+     $ref: /schemas/regulator/rohm,bd71815-regulator.yaml
+@@ -67,7 +67,7 @@ properties:
+ 
+   gpio-reserved-ranges:
+     description: |
+-      Usage of BD71828 GPIO pins can be changed via OTP. This property can be
++      Usage of BD71815 GPIO pins can be changed via OTP. This property can be
+       used to mark the pins which should not be configured for GPIO. Please see
+       the ../gpio/gpio.txt for more information.
+ 
+@@ -113,7 +113,7 @@ examples:
+             gpio-controller;
+             #gpio-cells = <2>;
+ 
+-            rohm,charger-sense-resistor-ohms = <10000000>;
++            rohm,charger-sense-resistor-micro-ohms = <10000>;
+ 
+             regulators {
+                 buck1: buck1 {
+diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+index 58ae298cd2fcf4..23884b8184a9df 100644
+--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
++++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+@@ -25,7 +25,7 @@ properties:
+   "#address-cells":
+     const: 1
+     description: |
+-      The cell is the slot ID if a function subnode is used.
++      The cell is the SDIO function number if a function subnode is used.
+ 
+   "#size-cells":
+     const: 0
+diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+index cd4aa27218a1b6..fa6743bb269d44 100644
+--- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml
+@@ -35,10 +35,6 @@ properties:
+         $ref: regulator.yaml#
+         unevaluatedProperties: false
+ 
+-        properties:
+-          regulator-compatible:
+-            pattern: "^vbuck[1-4]$"
+-
+     additionalProperties: false
+ 
+ required:
+@@ -56,7 +52,6 @@ examples:
+ 
+       regulators {
+         vbuck1 {
+-          regulator-compatible = "vbuck1";
+           regulator-min-microvolt = <300000>;
+           regulator-max-microvolt = <1193750>;
+           regulator-enable-ramp-delay = <256>;
+@@ -64,7 +59,6 @@ examples:
+         };
+ 
+         vbuck3 {
+-          regulator-compatible = "vbuck3";
+           regulator-min-microvolt = <300000>;
+           regulator-max-microvolt = <1193750>;
+           regulator-enable-ramp-delay = <256>;
+diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+index bba40158dd5c5a..8e50b900d51c27 100644
+--- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
++++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+@@ -272,7 +272,7 @@ The available attributes are:
+       echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
+ 
+     Async mode without interrupts (caller must poll) can be enabled by
+-    writing 'async' to it::
++    writing 'async' to it (please see Caveat)::
+ 
+       echo async > /sys/bus/dsa/drivers/crypto/sync_mode
+ 
+@@ -283,6 +283,13 @@ The available attributes are:
+ 
+     The default mode is 'sync'.
+ 
++    Caveat: since the only mechanism that iaa_crypto currently implements
++    for async polling without interrupts is via the 'sync' mode as
++    described earlier, writing 'async' to
++    '/sys/bus/dsa/drivers/crypto/sync_mode' will internally enable the
++    'sync' mode. This is to ensure correct iaa_crypto behavior until true
++    async polling without interrupts is enabled in iaa_crypto.
++
+ .. _iaa_default_config:
+ 
+ IAA Default Configuration
+diff --git a/Makefile b/Makefile
+index 7bc322bc7ad80c..9de0dc460a8368 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+index 98477792aa005a..14d17510310680 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-yosemite4.dts
+@@ -284,12 +284,12 @@ &i2c10 {
+ &i2c11 {
+ 	status = "okay";
+ 	power-sensor@10 {
+-		compatible = "adi, adm1272";
++		compatible = "adi,adm1272";
+ 		reg = <0x10>;
+ 	};
+ 
+ 	power-sensor@12 {
+-		compatible = "adi, adm1272";
++		compatible = "adi,adm1272";
+ 		reg = <0x12>;
+ 	};
+ 
+@@ -461,22 +461,20 @@ adc@1f {
+ 			};
+ 
+ 			pwm@20{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x20>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			gpio@22{
+ 				compatible = "ti,tca6424";
+ 				reg = <0x22>;
++				gpio-controller;
++				#gpio-cells = <2>;
+ 			};
+ 
+ 			pwm@23{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x23>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			adc@33 {
+@@ -511,22 +509,20 @@ adc@1f {
+ 			};
+ 
+ 			pwm@20{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x20>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			gpio@22{
+ 				compatible = "ti,tca6424";
+ 				reg = <0x22>;
++				gpio-controller;
++				#gpio-cells = <2>;
+ 			};
+ 
+ 			pwm@23{
+-				compatible = "max31790";
++				compatible = "maxim,max31790";
+ 				reg = <0x23>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 			};
+ 
+ 			adc@33 {
+diff --git a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
+index 6b6e77596ffa86..b108265e9bde42 100644
+--- a/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
++++ b/arch/arm/boot/dts/intel/socfpga/socfpga_arria10.dtsi
+@@ -440,7 +440,7 @@ gmac0: ethernet@ff800000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+@@ -460,7 +460,7 @@ gmac1: ethernet@ff802000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+@@ -480,7 +480,7 @@ gmac2: ethernet@ff804000 {
+ 			clocks = <&l4_mp_clk>, <&peri_emac_ptp_clk>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
+-			reset-names = "stmmaceth", "ahb";
++			reset-names = "stmmaceth", "stmmaceth-ocp";
+ 			snps,axi-config = <&socfpga_axi_setup>;
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm/boot/dts/mediatek/mt7623.dtsi b/arch/arm/boot/dts/mediatek/mt7623.dtsi
+index 814586abc2979e..fd7a89cc337d69 100644
+--- a/arch/arm/boot/dts/mediatek/mt7623.dtsi
++++ b/arch/arm/boot/dts/mediatek/mt7623.dtsi
+@@ -308,7 +308,7 @@ pwrap: pwrap@1000d000 {
+ 		clock-names = "spi", "wrap";
+ 	};
+ 
+-	cir: cir@10013000 {
++	cir: ir-receiver@10013000 {
+ 		compatible = "mediatek,mt7623-cir";
+ 		reg = <0 0x10013000 0 0x1000>;
+ 		interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
+index 15239834d886ed..35a933eec5738f 100644
+--- a/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/microchip/at91-sama5d27_wlsom1_ek.dts
+@@ -197,6 +197,7 @@ qspi1_flash: flash@0 {
+ 
+ &sdmmc0 {
+ 	bus-width = <4>;
++	no-1-8-v;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+index b6684bf67d3e6e..7be21578154976 100644
+--- a/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
++++ b/arch/arm/boot/dts/microchip/at91-sama5d29_curiosity.dts
+@@ -514,6 +514,7 @@ kernel@200000 {
+ 
+ &sdmmc0 {
+ 	bus-width = <4>;
++	no-1-8-v;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_sdmmc0_default>;
+ 	disable-wp;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+index 028961eb71089c..91ca23a66bf3c2 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+@@ -135,6 +135,7 @@ vgen6_reg: vldo4 {
+ 	lm75a: temperature-sensor@48 {
+ 		compatible = "national,lm75a";
+ 		reg = <0x48>;
++		vs-supply = <&vgen4_reg>;
+ 	};
+ 
+ 	/* NXP SE97BTP with temperature sensor + eeprom, TQMa7x 02xx */
+diff --git a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+index 5edbc790d1d273..6236ce2a69684f 100644
+--- a/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp13xx-dhcor-som.dtsi
+@@ -85,8 +85,8 @@ regulators {
+ 
+ 			vddcpu: buck1 { /* VDD_CPU_1V2 */
+ 				regulator-name = "vddcpu";
+-				regulator-min-microvolt = <1250000>;
+-				regulator-max-microvolt = <1250000>;
++				regulator-min-microvolt = <1350000>;
++				regulator-max-microvolt = <1350000>;
+ 				regulator-always-on;
+ 				regulator-initial-mode = <0>;
+ 				regulator-over-current-protection;
+@@ -201,17 +201,17 @@ eeprom0: eeprom@50 {
+ 		pagesize = <64>;
+ 	};
+ 
+-	eeprom0wl: eeprom@58 {
+-		compatible = "st,24256e-wl";	/* ST M24256E WL page of 0x50 */
+-		pagesize = <64>;
+-		reg = <0x58>;
+-	};
+-
+ 	rv3032: rtc@51 {
+ 		compatible = "microcrystal,rv3032";
+ 		reg = <0x51>;
+ 		interrupts-extended = <&gpioi 0 IRQ_TYPE_EDGE_FALLING>;
+ 	};
++
++	eeprom0wl: eeprom@58 {
++		compatible = "st,24256e-wl";	/* ST M24256E WL page of 0x50 */
++		pagesize = <64>;
++		reg = <0x58>;
++	};
+ };
+ 
+ &iwdg2 {
+diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
+index b28dc90926bdac..e7e3ce8066ece3 100644
+--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
+@@ -129,7 +129,7 @@ ipcc: mailbox@4c001000 {
+ 			reg = <0x4c001000 0x400>;
+ 			st,proc-id = <0>;
+ 			interrupts-extended =
+-				<&exti 61 1>,
++				<&exti 61 IRQ_TYPE_LEVEL_HIGH>,
+ 				<&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "rx", "tx";
+ 			clocks = <&rcc IPCC>;
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
+index bb4f8a0b937f37..abe2dfe706364b 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-drc02.dtsi
+@@ -6,18 +6,6 @@
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/pwm/pwm.h>
+ 
+-/ {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-};
+-
+ &adc {
+ 	status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+index 171d7c7658fa86..0fb4e55843b9d2 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -7,16 +7,6 @@
+ #include <dt-bindings/pwm/pwm.h>
+ 
+ / {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	clk_ext_audio_codec: clock-codec {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
+index b5bc53accd6b2f..01c693cc03446c 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-picoitx.dtsi
+@@ -7,16 +7,6 @@
+ #include <dt-bindings/pwm/pwm.h>
+ 
+ / {
+-	aliases {
+-		serial0 = &uart4;
+-		serial1 = &usart3;
+-		serial2 = &uart8;
+-	};
+-
+-	chosen {
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	led {
+ 		compatible = "gpio-leds";
+ 
+diff --git a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+index 74a11ccc5333f8..142d4a8731f8d4 100644
+--- a/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/st/stm32mp15xx-dhcom-som.dtsi
+@@ -14,6 +14,13 @@ aliases {
+ 		ethernet1 = &ksz8851;
+ 		rtc0 = &hwrtc;
+ 		rtc1 = &rtc;
++		serial0 = &uart4;
++		serial1 = &uart8;
++		serial2 = &usart3;
++	};
++
++	chosen {
++		stdout-path = "serial0:115200n8";
+ 	};
+ 
+ 	memory@c0000000 {
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index b9b995f8a36e14..05a1547642b60f 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -598,7 +598,21 @@ static int at91_suspend_finish(unsigned long val)
+ 	return 0;
+ }
+ 
+-static void at91_pm_switch_ba_to_vbat(void)
++/**
++ * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
++ * to automatic/hardware mode.
++ *
++ * The Backup Unit Power Switch can be managed either by software or hardware.
++ * Enabling hardware mode allows the automatic transition of power between
++ * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
++ * availability of these power sources.
++ *
++ * If the Backup Unit Power Switch is already in automatic mode, no action is
++ * required. If it is in software-controlled mode, it is switched to automatic
++ * mode to enhance safety and eliminate the need for toggling between power
++ * sources.
++ */
++static void at91_pm_switch_ba_to_auto(void)
+ {
+ 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
+ 	unsigned int val;
+@@ -609,24 +623,19 @@ static void at91_pm_switch_ba_to_vbat(void)
+ 
+ 	val = readl(soc_pm.data.sfrbu + offset);
+ 
+-	/* Already on VBAT. */
+-	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
++	/* Already on auto/hardware. */
++	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
+ 		return;
+ 
+-	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
+-	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
++	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
++	val |= soc_pm.sfrbu_regs.pswbu.key;
+ 	writel(val, soc_pm.data.sfrbu + offset);
+-
+-	/* Wait for update. */
+-	val = readl(soc_pm.data.sfrbu + offset);
+-	while (val & soc_pm.sfrbu_regs.pswbu.state)
+-		val = readl(soc_pm.data.sfrbu + offset);
+ }
+ 
+ static void at91_pm_suspend(suspend_state_t state)
+ {
+ 	if (soc_pm.data.mode == AT91_PM_BACKUP) {
+-		at91_pm_switch_ba_to_vbat();
++		at91_pm_switch_ba_to_auto();
+ 
+ 		cpu_suspend(0, at91_suspend_finish);
+ 
+diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
+index 3312ef93355da7..a5bf5554800fe1 100644
+--- a/arch/arm/mach-omap1/board-nokia770.c
++++ b/arch/arm/mach-omap1/board-nokia770.c
+@@ -289,7 +289,7 @@ static struct gpiod_lookup_table nokia770_irq_gpio_table = {
+ 		GPIO_LOOKUP("gpio-0-15", 15, "ads7846_irq",
+ 			    GPIO_ACTIVE_HIGH),
+ 		/* GPIO used for retu IRQ */
+-		GPIO_LOOKUP("gpio-48-63", 15, "retu_irq",
++		GPIO_LOOKUP("gpio-48-63", 14, "retu_irq",
+ 			    GPIO_ACTIVE_HIGH),
+ 		/* GPIO used for tahvo IRQ */
+ 		GPIO_LOOKUP("gpio-32-47", 8, "tahvo_irq",
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 688488de8cd288..56766fdb0b1e52 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -925,7 +925,7 @@ xcvr: xcvr@42680000 {
+ 				reg-names = "ram", "regs", "rxfifo", "txfifo";
+ 				interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+-				clocks = <&clk IMX93_CLK_BUS_WAKEUP>,
++				clocks = <&clk IMX93_CLK_SPDIF_IPG>,
+ 					 <&clk IMX93_CLK_SPDIF_GATE>,
+ 					 <&clk IMX93_CLK_DUMMY>,
+ 					 <&clk IMX93_CLK_AUD_XCVR_GATE>;
+diff --git a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
+index b1ea7dcaed17dc..47234d0858dd21 100644
+--- a/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
++++ b/arch/arm64/boot/dts/marvell/cn9131-cf-solidwan.dts
+@@ -435,7 +435,7 @@ &cp1_eth1 {
+ 	managed = "in-band-status";
+ 	phy-mode = "sgmii";
+ 	phy = <&cp1_phy0>;
+-	phys = <&cp0_comphy3 1>;
++	phys = <&cp1_comphy3 1>;
+ 	status = "okay";
+ };
+ 
+@@ -444,7 +444,7 @@ &cp1_eth2 {
+ 	managed = "in-band-status";
+ 	phy-mode = "sgmii";
+ 	phy = <&cp1_phy1>;
+-	phys = <&cp0_comphy5 2>;
++	phys = <&cp1_comphy5 2>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+index c9649b81527687..73561c7a3ad26a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7988a.dtsi
+@@ -162,6 +162,7 @@ i2c@11003000 {
+ 			reg = <0 0x11003000 0 0x1000>,
+ 			      <0 0x10217080 0 0x80>;
+ 			interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+@@ -175,6 +176,7 @@ i2c@11004000 {
+ 			reg = <0 0x11004000 0 0x1000>,
+ 			      <0 0x10217100 0 0x80>;
+ 			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+@@ -188,6 +190,7 @@ i2c@11005000 {
+ 			reg = <0 0x11005000 0 0x1000>,
+ 			      <0 0x10217180 0 0x80>;
+ 			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
++			clock-div = <1>;
+ 			clocks = <&infracfg CLK_INFRA_I2C_BCK>,
+ 				 <&infracfg CLK_INFRA_66M_AP_DMA_BCK>;
+ 			clock-names = "main", "dma";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+index eee64461421f83..b5d4b5baf4785f 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+@@ -931,7 +931,7 @@ pmic: pmic {
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 
+-		clock: mt6397clock {
++		clock: clocks {
+ 			compatible = "mediatek,mt6397-clk";
+ 			#clock-cells = <1>;
+ 		};
+@@ -942,11 +942,10 @@ pio6397: pinctrl {
+ 			#gpio-cells = <2>;
+ 		};
+ 
+-		regulator: mt6397regulator {
++		regulators {
+ 			compatible = "mediatek,mt6397-regulator";
+ 
+ 			mt6397_vpca15_reg: buck_vpca15 {
+-				regulator-compatible = "buck_vpca15";
+ 				regulator-name = "vpca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -956,7 +955,6 @@ mt6397_vpca15_reg: buck_vpca15 {
+ 			};
+ 
+ 			mt6397_vpca7_reg: buck_vpca7 {
+-				regulator-compatible = "buck_vpca7";
+ 				regulator-name = "vpca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -966,7 +964,6 @@ mt6397_vpca7_reg: buck_vpca7 {
+ 			};
+ 
+ 			mt6397_vsramca15_reg: buck_vsramca15 {
+-				regulator-compatible = "buck_vsramca15";
+ 				regulator-name = "vsramca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -975,7 +972,6 @@ mt6397_vsramca15_reg: buck_vsramca15 {
+ 			};
+ 
+ 			mt6397_vsramca7_reg: buck_vsramca7 {
+-				regulator-compatible = "buck_vsramca7";
+ 				regulator-name = "vsramca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -984,7 +980,6 @@ mt6397_vsramca7_reg: buck_vsramca7 {
+ 			};
+ 
+ 			mt6397_vcore_reg: buck_vcore {
+-				regulator-compatible = "buck_vcore";
+ 				regulator-name = "vcore";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -993,7 +988,6 @@ mt6397_vcore_reg: buck_vcore {
+ 			};
+ 
+ 			mt6397_vgpu_reg: buck_vgpu {
+-				regulator-compatible = "buck_vgpu";
+ 				regulator-name = "vgpu";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -1002,7 +996,6 @@ mt6397_vgpu_reg: buck_vgpu {
+ 			};
+ 
+ 			mt6397_vdrm_reg: buck_vdrm {
+-				regulator-compatible = "buck_vdrm";
+ 				regulator-name = "vdrm";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <1400000>;
+@@ -1011,7 +1004,6 @@ mt6397_vdrm_reg: buck_vdrm {
+ 			};
+ 
+ 			mt6397_vio18_reg: buck_vio18 {
+-				regulator-compatible = "buck_vio18";
+ 				regulator-name = "vio18";
+ 				regulator-min-microvolt = <1620000>;
+ 				regulator-max-microvolt = <1980000>;
+@@ -1020,18 +1012,15 @@ mt6397_vio18_reg: buck_vio18 {
+ 			};
+ 
+ 			mt6397_vtcxo_reg: ldo_vtcxo {
+-				regulator-compatible = "ldo_vtcxo";
+ 				regulator-name = "vtcxo";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_va28_reg: ldo_va28 {
+-				regulator-compatible = "ldo_va28";
+ 				regulator-name = "va28";
+ 			};
+ 
+ 			mt6397_vcama_reg: ldo_vcama {
+-				regulator-compatible = "ldo_vcama";
+ 				regulator-name = "vcama";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1039,18 +1028,15 @@ mt6397_vcama_reg: ldo_vcama {
+ 			};
+ 
+ 			mt6397_vio28_reg: ldo_vio28 {
+-				regulator-compatible = "ldo_vio28";
+ 				regulator-name = "vio28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vusb_reg: ldo_vusb {
+-				regulator-compatible = "ldo_vusb";
+ 				regulator-name = "vusb";
+ 			};
+ 
+ 			mt6397_vmc_reg: ldo_vmc {
+-				regulator-compatible = "ldo_vmc";
+ 				regulator-name = "vmc";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1058,7 +1044,6 @@ mt6397_vmc_reg: ldo_vmc {
+ 			};
+ 
+ 			mt6397_vmch_reg: ldo_vmch {
+-				regulator-compatible = "ldo_vmch";
+ 				regulator-name = "vmch";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1066,7 +1051,6 @@ mt6397_vmch_reg: ldo_vmch {
+ 			};
+ 
+ 			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+-				regulator-compatible = "ldo_vemc3v3";
+ 				regulator-name = "vemc_3v3";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1074,7 +1058,6 @@ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ 			};
+ 
+ 			mt6397_vgp1_reg: ldo_vgp1 {
+-				regulator-compatible = "ldo_vgp1";
+ 				regulator-name = "vcamd";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1082,7 +1065,6 @@ mt6397_vgp1_reg: ldo_vgp1 {
+ 			};
+ 
+ 			mt6397_vgp2_reg: ldo_vgp2 {
+-				regulator-compatible = "ldo_vgp2";
+ 				regulator-name = "vcamio";
+ 				regulator-min-microvolt = <3300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1090,7 +1072,6 @@ mt6397_vgp2_reg: ldo_vgp2 {
+ 			};
+ 
+ 			mt6397_vgp3_reg: ldo_vgp3 {
+-				regulator-compatible = "ldo_vgp3";
+ 				regulator-name = "vcamaf";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <1800000>;
+@@ -1098,7 +1079,6 @@ mt6397_vgp3_reg: ldo_vgp3 {
+ 			};
+ 
+ 			mt6397_vgp4_reg: ldo_vgp4 {
+-				regulator-compatible = "ldo_vgp4";
+ 				regulator-name = "vgp4";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1106,7 +1086,6 @@ mt6397_vgp4_reg: ldo_vgp4 {
+ 			};
+ 
+ 			mt6397_vgp5_reg: ldo_vgp5 {
+-				regulator-compatible = "ldo_vgp5";
+ 				regulator-name = "vgp5";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3000000>;
+@@ -1114,7 +1093,6 @@ mt6397_vgp5_reg: ldo_vgp5 {
+ 			};
+ 
+ 			mt6397_vgp6_reg: ldo_vgp6 {
+-				regulator-compatible = "ldo_vgp6";
+ 				regulator-name = "vgp6";
+ 				regulator-min-microvolt = <3300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1123,7 +1101,6 @@ mt6397_vgp6_reg: ldo_vgp6 {
+ 			};
+ 
+ 			mt6397_vibr_reg: ldo_vibr {
+-				regulator-compatible = "ldo_vibr";
+ 				regulator-name = "vibr";
+ 				regulator-min-microvolt = <1300000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -1131,7 +1108,7 @@ mt6397_vibr_reg: ldo_vibr {
+ 			};
+ 		};
+ 
+-		rtc: mt6397rtc {
++		rtc: rtc {
+ 			compatible = "mediatek,mt6397-rtc";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index bb4671c18e3bd4..9fffed0ef4bff4 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -307,11 +307,10 @@ pmic: pmic {
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 
+-		mt6397regulator: mt6397regulator {
++		regulators {
+ 			compatible = "mediatek,mt6397-regulator";
+ 
+ 			mt6397_vpca15_reg: buck_vpca15 {
+-				regulator-compatible = "buck_vpca15";
+ 				regulator-name = "vpca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -320,7 +319,6 @@ mt6397_vpca15_reg: buck_vpca15 {
+ 			};
+ 
+ 			mt6397_vpca7_reg: buck_vpca7 {
+-				regulator-compatible = "buck_vpca7";
+ 				regulator-name = "vpca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -329,7 +327,6 @@ mt6397_vpca7_reg: buck_vpca7 {
+ 			};
+ 
+ 			mt6397_vsramca15_reg: buck_vsramca15 {
+-				regulator-compatible = "buck_vsramca15";
+ 				regulator-name = "vsramca15";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -338,7 +335,6 @@ mt6397_vsramca15_reg: buck_vsramca15 {
+ 			};
+ 
+ 			mt6397_vsramca7_reg: buck_vsramca7 {
+-				regulator-compatible = "buck_vsramca7";
+ 				regulator-name = "vsramca7";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -347,7 +343,6 @@ mt6397_vsramca7_reg: buck_vsramca7 {
+ 			};
+ 
+ 			mt6397_vcore_reg: buck_vcore {
+-				regulator-compatible = "buck_vcore";
+ 				regulator-name = "vcore";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -356,7 +351,6 @@ mt6397_vcore_reg: buck_vcore {
+ 			};
+ 
+ 			mt6397_vgpu_reg: buck_vgpu {
+-				regulator-compatible = "buck_vgpu";
+ 				regulator-name = "vgpu";
+ 				regulator-min-microvolt = < 700000>;
+ 				regulator-max-microvolt = <1350000>;
+@@ -365,7 +359,6 @@ mt6397_vgpu_reg: buck_vgpu {
+ 			};
+ 
+ 			mt6397_vdrm_reg: buck_vdrm {
+-				regulator-compatible = "buck_vdrm";
+ 				regulator-name = "vdrm";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <1400000>;
+@@ -374,7 +367,6 @@ mt6397_vdrm_reg: buck_vdrm {
+ 			};
+ 
+ 			mt6397_vio18_reg: buck_vio18 {
+-				regulator-compatible = "buck_vio18";
+ 				regulator-name = "vio18";
+ 				regulator-min-microvolt = <1620000>;
+ 				regulator-max-microvolt = <1980000>;
+@@ -383,19 +375,16 @@ mt6397_vio18_reg: buck_vio18 {
+ 			};
+ 
+ 			mt6397_vtcxo_reg: ldo_vtcxo {
+-				regulator-compatible = "ldo_vtcxo";
+ 				regulator-name = "vtcxo";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_va28_reg: ldo_va28 {
+-				regulator-compatible = "ldo_va28";
+ 				regulator-name = "va28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vcama_reg: ldo_vcama {
+-				regulator-compatible = "ldo_vcama";
+ 				regulator-name = "vcama";
+ 				regulator-min-microvolt = <1500000>;
+ 				regulator-max-microvolt = <2800000>;
+@@ -403,18 +392,15 @@ mt6397_vcama_reg: ldo_vcama {
+ 			};
+ 
+ 			mt6397_vio28_reg: ldo_vio28 {
+-				regulator-compatible = "ldo_vio28";
+ 				regulator-name = "vio28";
+ 				regulator-always-on;
+ 			};
+ 
+ 			mt6397_vusb_reg: ldo_vusb {
+-				regulator-compatible = "ldo_vusb";
+ 				regulator-name = "vusb";
+ 			};
+ 
+ 			mt6397_vmc_reg: ldo_vmc {
+-				regulator-compatible = "ldo_vmc";
+ 				regulator-name = "vmc";
+ 				regulator-min-microvolt = <1800000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -422,7 +408,6 @@ mt6397_vmc_reg: ldo_vmc {
+ 			};
+ 
+ 			mt6397_vmch_reg: ldo_vmch {
+-				regulator-compatible = "ldo_vmch";
+ 				regulator-name = "vmch";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -430,7 +415,6 @@ mt6397_vmch_reg: ldo_vmch {
+ 			};
+ 
+ 			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+-				regulator-compatible = "ldo_vemc3v3";
+ 				regulator-name = "vemc_3v3";
+ 				regulator-min-microvolt = <3000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -438,7 +422,6 @@ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ 			};
+ 
+ 			mt6397_vgp1_reg: ldo_vgp1 {
+-				regulator-compatible = "ldo_vgp1";
+ 				regulator-name = "vcamd";
+ 				regulator-min-microvolt = <1220000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -446,7 +429,6 @@ mt6397_vgp1_reg: ldo_vgp1 {
+ 			};
+ 
+ 			mt6397_vgp2_reg: ldo_vgp2 {
+-				regulator-compatible = "ldo_vgp2";
+ 				regulator-name = "vcamio";
+ 				regulator-min-microvolt = <1000000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -454,7 +436,6 @@ mt6397_vgp2_reg: ldo_vgp2 {
+ 			};
+ 
+ 			mt6397_vgp3_reg: ldo_vgp3 {
+-				regulator-compatible = "ldo_vgp3";
+ 				regulator-name = "vcamaf";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -462,7 +443,6 @@ mt6397_vgp3_reg: ldo_vgp3 {
+ 			};
+ 
+ 			mt6397_vgp4_reg: ldo_vgp4 {
+-				regulator-compatible = "ldo_vgp4";
+ 				regulator-name = "vgp4";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -470,7 +450,6 @@ mt6397_vgp4_reg: ldo_vgp4 {
+ 			};
+ 
+ 			mt6397_vgp5_reg: ldo_vgp5 {
+-				regulator-compatible = "ldo_vgp5";
+ 				regulator-name = "vgp5";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3000000>;
+@@ -478,7 +457,6 @@ mt6397_vgp5_reg: ldo_vgp5 {
+ 			};
+ 
+ 			mt6397_vgp6_reg: ldo_vgp6 {
+-				regulator-compatible = "ldo_vgp6";
+ 				regulator-name = "vgp6";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3300000>;
+@@ -486,7 +464,6 @@ mt6397_vgp6_reg: ldo_vgp6 {
+ 			};
+ 
+ 			mt6397_vibr_reg: ldo_vibr {
+-				regulator-compatible = "ldo_vibr";
+ 				regulator-name = "vibr";
+ 				regulator-min-microvolt = <1300000>;
+ 				regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+index 65860b33c01fe8..3935d83a047e08 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+@@ -26,6 +26,10 @@ &touchscreen {
+ 	hid-descr-addr = <0x0001>;
+ };
+ 
++&mt6358codec {
++	mediatek,dmic-mode = <1>; /* one-wire */
++};
++
+ &qca_wifi {
+ 	qcom,ath10k-calibration-variant = "GO_DAMU";
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+index e8241587949b2b..561770fcf69e66 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts
+@@ -12,3 +12,18 @@ / {
+ 	chassis-type = "laptop";
+ 	compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183";
+ };
++
++&i2c0 {
++	touchscreen@40 {
++		compatible = "hid-over-i2c";
++		reg = <0x40>;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&touchscreen_pins>;
++
++		interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
++
++		post-power-on-delay-ms = <70>;
++		hid-descr-addr = <0x0001>;
++	};
++};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
+index 76d33540166f90..c942e461a177ef 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi
+@@ -6,6 +6,21 @@
+ /dts-v1/;
+ #include "mt8183-kukui-jacuzzi.dtsi"
+ 
++&i2c0 {
++	touchscreen@40 {
++		compatible = "hid-over-i2c";
++		reg = <0x40>;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&touchscreen_pins>;
++
++		interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
++
++		post-power-on-delay-ms = <70>;
++		hid-descr-addr = <0x0001>;
++	};
++};
++
+ &i2c2 {
+ 	trackpad@2c {
+ 		compatible = "hid-over-i2c";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+index 49e053b932e76c..80888bd4ad823d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+@@ -39,8 +39,6 @@ pp1800_mipibrdg: pp1800-mipibrdg {
+ 	pp3300_panel: pp3300-panel {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "pp3300_panel";
+-		regulator-min-microvolt = <3300000>;
+-		regulator-max-microvolt = <3300000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pp3300_panel_pins>;
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 1afeeb1155f578..9af6349dbfcf10 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1024,7 +1024,8 @@ pwrap: pwrap@1000d000 {
+ 		};
+ 
+ 		keyboard: keyboard@10010000 {
+-			compatible = "mediatek,mt6779-keypad";
++			compatible = "mediatek,mt8183-keypad",
++				     "mediatek,mt6779-keypad";
+ 			reg = <0 0x10010000 0 0x1000>;
+ 			interrupts = <GIC_SPI 186 IRQ_TYPE_EDGE_FALLING>;
+ 			clocks = <&clk26m>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+index d3c3c2a40adcdf..b91f88ffae0e8b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+@@ -1577,6 +1577,8 @@ ssusb0: usb@11201000 {
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
++			wakeup-source;
++			mediatek,syscon-wakeup = <&pericfg 0x420 2>;
+ 			status = "disabled";
+ 
+ 			usb_host0: usb@11200000 {
+@@ -1590,8 +1592,6 @@ usb_host0: usb@11200000 {
+ 					 <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_XHCI>;
+ 				clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
+ 				interrupts = <GIC_SPI 294 IRQ_TYPE_LEVEL_HIGH 0>;
+-				mediatek,syscon-wakeup = <&pericfg 0x420 2>;
+-				wakeup-source;
+ 				status = "disabled";
+ 			};
+ 		};
+@@ -1643,6 +1643,8 @@ ssusb1: usb@11281000 {
+ 			#address-cells = <2>;
+ 			#size-cells = <2>;
+ 			ranges;
++			wakeup-source;
++			mediatek,syscon-wakeup = <&pericfg 0x424 2>;
+ 			status = "disabled";
+ 
+ 			usb_host1: usb@11280000 {
+@@ -1656,8 +1658,6 @@ usb_host1: usb@11280000 {
+ 					 <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_XHCI>;
+ 				clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck","xhci_ck";
+ 				interrupts = <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH 0>;
+-				mediatek,syscon-wakeup = <&pericfg 0x424 2>;
+-				wakeup-source;
+ 				status = "disabled";
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+index faccc7f16259a4..23ec3ff6cad9b8 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+@@ -2488,7 +2488,7 @@ jpeg_decoder: jpeg-decoder@1a040000 {
+ 		};
+ 
+ 		ovl0: ovl@1c000000 {
+-			compatible = "mediatek,mt8188-disp-ovl", "mediatek,mt8183-disp-ovl";
++			compatible = "mediatek,mt8188-disp-ovl", "mediatek,mt8195-disp-ovl";
+ 			reg = <0 0x1c000000 0 0x1000>;
+ 			clocks = <&vdosys0 CLK_VDO0_DISP_OVL0>;
+ 			interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 8dda8b63765bad..dd0d07fbe61a84 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -1418,7 +1418,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1428,7 +1427,6 @@ mt6315_6_vbuck1: vbuck1 {
+ 			};
+ 
+ 			mt6315_6_vbuck3: vbuck3 {
+-				regulator-compatible = "vbuck3";
+ 				regulator-name = "Vlcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1445,7 +1443,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <800000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+index 2c7b2223ee76b1..5056e07399e23a 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+@@ -1285,7 +1285,6 @@ mt6315@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -1303,7 +1302,6 @@ mt6315@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <400000>;
+ 				regulator-max-microvolt = <1193750>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+index 31d424b8fc7ced..bfb75296795c39 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+@@ -137,7 +137,6 @@ charger {
+ 			richtek,vinovp-microvolt = <14500000>;
+ 
+ 			otg_vbus_regulator: usb-otg-vbus-regulator {
+-				regulator-compatible = "usb-otg-vbus";
+ 				regulator-name = "usb-otg-vbus";
+ 				regulator-min-microvolt = <4425000>;
+ 				regulator-max-microvolt = <5825000>;
+@@ -149,7 +148,6 @@ regulator {
+ 			LDO_VIN3-supply = <&mt6360_buck2>;
+ 
+ 			mt6360_buck1: buck1 {
+-				regulator-compatible = "BUCK1";
+ 				regulator-name = "mt6360,buck1";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1300000>;
+@@ -160,7 +158,6 @@ MT6360_OPMODE_LP
+ 			};
+ 
+ 			mt6360_buck2: buck2 {
+-				regulator-compatible = "BUCK2";
+ 				regulator-name = "mt6360,buck2";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1300000>;
+@@ -171,7 +168,6 @@ MT6360_OPMODE_LP
+ 			};
+ 
+ 			mt6360_ldo1: ldo1 {
+-				regulator-compatible = "LDO1";
+ 				regulator-name = "mt6360,ldo1";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -180,7 +176,6 @@ mt6360_ldo1: ldo1 {
+ 			};
+ 
+ 			mt6360_ldo2: ldo2 {
+-				regulator-compatible = "LDO2";
+ 				regulator-name = "mt6360,ldo2";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -189,7 +184,6 @@ mt6360_ldo2: ldo2 {
+ 			};
+ 
+ 			mt6360_ldo3: ldo3 {
+-				regulator-compatible = "LDO3";
+ 				regulator-name = "mt6360,ldo3";
+ 				regulator-min-microvolt = <1200000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -198,7 +192,6 @@ mt6360_ldo3: ldo3 {
+ 			};
+ 
+ 			mt6360_ldo5: ldo5 {
+-				regulator-compatible = "LDO5";
+ 				regulator-name = "mt6360,ldo5";
+ 				regulator-min-microvolt = <2700000>;
+ 				regulator-max-microvolt = <3600000>;
+@@ -207,7 +200,6 @@ mt6360_ldo5: ldo5 {
+ 			};
+ 
+ 			mt6360_ldo6: ldo6 {
+-				regulator-compatible = "LDO6";
+ 				regulator-name = "mt6360,ldo6";
+ 				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <2100000>;
+@@ -216,7 +208,6 @@ mt6360_ldo6: ldo6 {
+ 			};
+ 
+ 			mt6360_ldo7: ldo7 {
+-				regulator-compatible = "LDO7";
+ 				regulator-name = "mt6360,ldo7";
+ 				regulator-min-microvolt = <500000>;
+ 				regulator-max-microvolt = <2100000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index ade685ed2190b7..f013dbad9dc4ea 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -1611,9 +1611,6 @@ pcie1: pcie@112f8000 {
+ 			phy-names = "pcie-phy";
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P1>;
+ 
+-			resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P1_SWRST>;
+-			reset-names = "mac";
+-
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 7>;
+ 			interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+@@ -3138,7 +3135,7 @@ larb20: larb@1b010000 {
+ 		};
+ 
+ 		ovl0: ovl@1c000000 {
+-			compatible = "mediatek,mt8195-disp-ovl", "mediatek,mt8183-disp-ovl";
++			compatible = "mediatek,mt8195-disp-ovl";
+ 			reg = <0 0x1c000000 0 0x1000>;
+ 			interrupts = <GIC_SPI 636 IRQ_TYPE_LEVEL_HIGH 0>;
+ 			power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+index 9c91fe8ea0f969..2bf8c9d02b6ee7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+@@ -449,7 +449,8 @@ pwrap: pwrap@1000d000 {
+ 		};
+ 
+ 		keypad: keypad@10010000 {
+-			compatible = "mediatek,mt6779-keypad";
++			compatible = "mediatek,mt8365-keypad",
++				     "mediatek,mt6779-keypad";
+ 			reg = <0 0x10010000 0 0x1000>;
+ 			wakeup-source;
+ 			interrupts = <GIC_SPI 124 IRQ_TYPE_EDGE_FALLING>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+index 5f16fb82058056..5950194c9ccb25 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+@@ -835,7 +835,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -852,7 +851,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <546000>;
+ 				regulator-max-microvolt = <787000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+index 14ec970c4e491f..41dc34837b02e7 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+@@ -812,7 +812,6 @@ mt6315_6: pmic@6 {
+ 
+ 		regulators {
+ 			mt6315_6_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vbcpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+@@ -829,7 +828,6 @@ mt6315_7: pmic@7 {
+ 
+ 		regulators {
+ 			mt6315_7_vbuck1: vbuck1 {
+-				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <300000>;
+ 				regulator-max-microvolt = <1193750>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+index d0b03dc4d3f43a..e30623ebac0e1b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+@@ -144,10 +144,10 @@ reserved-memory {
+ 		#size-cells = <2>;
+ 		ranges;
+ 
+-		/* 128 KiB reserved for ARM Trusted Firmware (BL31) */
++		/* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+ 		bl31_secmon_reserved: secmon@43000000 {
+ 			no-map;
+-			reg = <0 0x43000000 0 0x20000>;
++			reg = <0 0x43000000 0 0x30000>;
+ 		};
+ 	};
+ 
+@@ -206,7 +206,7 @@ watchdog@10007000 {
+ 			compatible = "mediatek,mt8516-wdt",
+ 				     "mediatek,mt6589-wdt";
+ 			reg = <0 0x10007000 0 0x1000>;
+-			interrupts = <GIC_SPI 198 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
+ 			#reset-cells = <1>;
+ 		};
+ 
+@@ -268,7 +268,7 @@ gic: interrupt-controller@10310000 {
+ 			interrupt-parent = <&gic>;
+ 			interrupt-controller;
+ 			reg = <0 0x10310000 0 0x1000>,
+-			      <0 0x10320000 0 0x1000>,
++			      <0 0x1032f000 0 0x2000>,
+ 			      <0 0x10340000 0 0x2000>,
+ 			      <0 0x10360000 0 0x2000>;
+ 			interrupts = <GIC_PPI 9
+@@ -344,6 +344,7 @@ i2c0: i2c@11009000 {
+ 			reg = <0 0x11009000 0 0x90>,
+ 			      <0 0x11000180 0 0x80>;
+ 			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C0>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+@@ -358,6 +359,7 @@ i2c1: i2c@1100a000 {
+ 			reg = <0 0x1100a000 0 0x90>,
+ 			      <0 0x11000200 0 0x80>;
+ 			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C1>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+@@ -372,6 +374,7 @@ i2c2: i2c@1100b000 {
+ 			reg = <0 0x1100b000 0 0x90>,
+ 			      <0 0x11000280 0 0x80>;
+ 			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
++			clock-div = <2>;
+ 			clocks = <&topckgen CLK_TOP_I2C2>,
+ 				 <&topckgen CLK_TOP_APDMA>;
+ 			clock-names = "main", "dma";
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index ec8dfb3d1c6d69..a356db5fcc5f3c 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -47,7 +47,6 @@ key-volume-down {
+ };
+ 
+ &i2c0 {
+-	clock-div = <2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c0_pins_a>;
+ 	status = "okay";
+@@ -156,7 +155,6 @@ cam-pwdn-hog {
+ };
+ 
+ &i2c2 {
+-	clock-div = <2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c2_pins_a>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 984c85eab41afd..570331baa09ee3 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -3900,7 +3900,7 @@ spi@c260000 {
+ 			assigned-clock-parents = <&bpmp TEGRA234_CLK_PLLP_OUT0>;
+ 			resets = <&bpmp TEGRA234_RESET_SPI2>;
+ 			reset-names = "spi";
+-			dmas = <&gpcdma 19>, <&gpcdma 19>;
++			dmas = <&gpcdma 16>, <&gpcdma 16>;
+ 			dma-names = "rx", "tx";
+ 			dma-coherent;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 5e558bcc9d8789..8f35c9af18782a 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -125,7 +125,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8939.dtsi b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+index 7a6f1eeaa3fc43..7cd5660de1b33d 100644
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index 1acb0f15951199..b5cbdd620bb9e7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 			clock-output-names = "sleep_clk";
+ 		};
+ 	};
+@@ -437,6 +437,15 @@ usb3: usb@f92f8800 {
+ 			#size-cells = <1>;
+ 			ranges;
+ 
++			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "pwr_event",
++					  "qusb2_phy",
++					  "hs_phy_irq",
++					  "ss_phy_irq";
++
+ 			clocks = <&gcc GCC_USB30_MASTER_CLK>,
+ 				 <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ 				 <&gcc GCC_USB30_SLEEP_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+index f8e9d90afab000..dbad8f57f2fa34 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
++++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+@@ -64,7 +64,7 @@ led@0 {
+ 		};
+ 
+ 		led@1 {
+-			reg = <0>;
++			reg = <1>;
+ 			chan-name = "button-backlight1";
+ 			led-cur = /bits/ 8 <0x32>;
+ 			max-cur = /bits/ 8 <0xc8>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index b379623c1b8a08..4719e1fc70d2cb 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -3065,9 +3065,14 @@ usb3: usb@6af8800 {
+ 			#size-cells = <1>;
+ 			ranges;
+ 
+-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
++			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "hs_phy_irq", "ss_phy_irq";
++			interrupt-names = "pwr_event",
++					  "qusb2_phy",
++					  "hs_phy_irq",
++					  "ss_phy_irq";
+ 
+ 			clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
+ 				 <&gcc GCC_USB30_MASTER_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+index 4667e47a74bc5b..75930f95769663 100644
+--- a/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
++++ b/arch/arm64/boot/dts/qcom/qcm6490-shift-otter.dts
+@@ -942,8 +942,6 @@ &usb_1_hsphy {
+ 
+ 	qcom,squelch-detector-bp = <(-2090)>;
+ 
+-	orientation-switch;
+-
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index 215ba146207afd..2862474f33b0ea 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -28,7 +28,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
+index f6960e2d466a26..e6ac529e6b7216 100644
+--- a/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs8550-aim300.dtsi
+@@ -367,7 +367,7 @@ &pm8550b_eusb2_repeater {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &ufs_mem_hc {
+diff --git a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+index e65305f8136c88..c73eda75faf820 100644
+--- a/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
++++ b/arch/arm64/boot/dts/qcom/qdu1000-idp.dts
+@@ -31,7 +31,7 @@ xo_board: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+index a9540e92d3e6fc..d8d4cff7d5abed 100644
+--- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
++++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts
+@@ -545,7 +545,7 @@ can@0 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/qru1000-idp.dts b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+index 1c781d9e24cf4d..52ce51e56e2fdc 100644
+--- a/arch/arm64/boot/dts/qcom/qru1000-idp.dts
++++ b/arch/arm64/boot/dts/qcom/qru1000-idp.dts
+@@ -31,7 +31,7 @@ xo_board: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+index 3fc62e12368969..db03e04ad9d56f 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p-ride.dtsi
+@@ -608,7 +608,7 @@ &serdes1 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32764>;
++	clock-frequency = <32000>;
+ };
+ 
+ &spi16 {
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index 9da62d7c4d27f4..b4726c0bbb5b2c 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -44,6 +44,8 @@ cpu0: cpu@0 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x0>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd0>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			next-level-cache = <&l2_0>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -66,6 +68,8 @@ cpu1: cpu@100 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x100>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd1>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			next-level-cache = <&l2_1>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -83,6 +87,8 @@ cpu2: cpu@200 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x200>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd2>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			next-level-cache = <&l2_2>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -100,6 +106,8 @@ cpu3: cpu@300 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x300>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd3>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			next-level-cache = <&l2_3>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -117,6 +125,8 @@ cpu4: cpu@10000 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x10000>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd4>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+ 			next-level-cache = <&l2_4>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -140,6 +150,8 @@ cpu5: cpu@10100 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x10100>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd5>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+ 			next-level-cache = <&l2_5>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -157,6 +169,8 @@ cpu6: cpu@10200 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x10200>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd6>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+ 			next-level-cache = <&l2_6>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -174,6 +188,8 @@ cpu7: cpu@10300 {
+ 			compatible = "qcom,kryo";
+ 			reg = <0x0 0x10300>;
+ 			enable-method = "psci";
++			power-domains = <&cpu_pd7>;
++			power-domain-names = "psci";
+ 			qcom,freq-domain = <&cpufreq_hw 1>;
+ 			next-level-cache = <&l2_7>;
+ 			capacity-dmips-mhz = <1024>;
+@@ -854,8 +870,8 @@ ipcc: mailbox@408000 {
+ 			#mbox-cells = <2>;
+ 		};
+ 
+-		gpi_dma2: qcom,gpi-dma@800000  {
+-			compatible = "qcom,sm6350-gpi-dma";
++		gpi_dma2: dma-controller@800000  {
++			compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0x0 0x00800000 0x0 0x60000>;
+ 			#dma-cells = <3>;
+ 			interrupts = <GIC_SPI 588 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1345,8 +1361,8 @@ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ 
+ 		};
+ 
+-		gpi_dma0: qcom,gpi-dma@900000  {
+-			compatible = "qcom,sm6350-gpi-dma";
++		gpi_dma0: dma-controller@900000  {
++			compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0x0 0x00900000 0x0 0x60000>;
+ 			#dma-cells = <3>;
+ 			interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1770,8 +1786,8 @@ &clk_virt SLAVE_QUP_CORE_0 QCOM_ICC_TAG_ALWAYS>,
+ 			};
+ 		};
+ 
+-		gpi_dma1: qcom,gpi-dma@a00000  {
+-			compatible = "qcom,sm6350-gpi-dma";
++		gpi_dma1: dma-controller@a00000  {
++			compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0x0 0x00a00000 0x0 0x60000>;
+ 			#dma-cells = <3>;
+ 			interrupts = <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+@@ -2225,8 +2241,8 @@ &config_noc SLAVE_QUP_1 QCOM_ICC_TAG_ALWAYS>,
+ 			};
+ 		};
+ 
+-		gpi_dma3: qcom,gpi-dma@b00000  {
+-			compatible = "qcom,sm6350-gpi-dma";
++		gpi_dma3: dma-controller@b00000  {
++			compatible = "qcom,sa8775p-gpi-dma", "qcom,sm6350-gpi-dma";
+ 			reg = <0x0 0x00b00000 0x0 0x58000>;
+ 			#dma-cells = <3>;
+ 			interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+index ac8d4589e3fb74..f7300ffbb4519a 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+@@ -12,11 +12,11 @@
+ 
+ / {
+ 	thermal-zones {
+-		5v-choke-thermal {
++		choke-5v-thermal {
+ 			thermal-sensors = <&pm6150_adc_tm 1>;
+ 
+ 			trips {
+-				5v-choke-crit {
++				choke-5v-crit {
+ 					temperature = <125000>;
+ 					hysteresis = <1000>;
+ 					type = "critical";
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+index 00229b1515e605..ff8996b4de4e1e 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi
+@@ -78,6 +78,7 @@ panel: panel@0 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&lcd_rst>;
+ 		avdd-supply = <&ppvar_lcd>;
++		avee-supply = <&ppvar_lcd>;
+ 		pp1800-supply = <&v1p8_disp>;
+ 		pp3300-supply = <&pp3300_dx_edp>;
+ 		backlight = <&backlight>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index 76fe314d2ad50d..e7773d215f34eb 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -580,55 +580,55 @@ psci {
+ 		compatible = "arm,psci-1.0";
+ 		method = "smc";
+ 
+-		cpu_pd0: cpu0 {
++		cpu_pd0: power-domain-cpu0 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd1: cpu1 {
++		cpu_pd1: power-domain-cpu1 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd2: cpu2 {
++		cpu_pd2: power-domain-cpu2 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd3: cpu3 {
++		cpu_pd3: power-domain-cpu3 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd4: cpu4 {
++		cpu_pd4: power-domain-cpu4 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd5: cpu5 {
++		cpu_pd5: power-domain-cpu5 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&little_cpu_sleep_0 &little_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd6: cpu6 {
++		cpu_pd6: power-domain-cpu6 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ 		};
+ 
+-		cpu_pd7: cpu7 {
++		cpu_pd7: power-domain-cpu7 {
+ 			#power-domain-cells = <0>;
+ 			power-domains = <&cluster_pd>;
+ 			domain-idle-states = <&big_cpu_sleep_0 &big_cpu_sleep_1>;
+ 		};
+ 
+-		cluster_pd: cpu-cluster0 {
++		cluster_pd: power-domain-cluster {
+ 			#power-domain-cells = <0>;
+ 			domain-idle-states = <&cluster_sleep_pc
+ 					      &cluster_sleep_cx_ret
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 55db1c83ef5517..d12e0a63fd0874 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -83,7 +83,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index ef06d1ac084d32..c6a95db0d2a2e1 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -2743,7 +2743,7 @@ usb_2_qmpphy1: phy@88f1000 {
+ 
+ 		remoteproc_adsp: remoteproc@3000000 {
+ 			compatible = "qcom,sc8280xp-adsp-pas";
+-			reg = <0 0x03000000 0 0x100>;
++			reg = <0 0x03000000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -3900,26 +3900,26 @@ camss: camss@ac5a000 {
+ 				    "vfe3",
+ 				    "csid3";
+ 
+-			interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 640 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 641 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 758 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 759 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 760 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 761 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 762 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 764 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csid1_lite",
+ 					  "vfe_lite1",
+ 					  "csiphy3",
+@@ -5254,7 +5254,7 @@ cpufreq_hw: cpufreq@18591000 {
+ 
+ 		remoteproc_nsp0: remoteproc@1b300000 {
+ 			compatible = "qcom,sc8280xp-nsp0-pas";
+-			reg = <0 0x1b300000 0 0x100>;
++			reg = <0 0x1b300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_nsp0_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -5385,7 +5385,7 @@ compute-cb@14 {
+ 
+ 		remoteproc_nsp1: remoteproc@21300000 {
+ 			compatible = "qcom,sc8280xp-nsp1-pas";
+-			reg = <0 0x21300000 0 0x100>;
++			reg = <0 0x21300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+index 0a87df806cafc8..59970082da4520 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c-navigation-mezzanine.dtso
+@@ -79,45 +79,3 @@ ov8856_ep: endpoint {
+ 		};
+ 	};
+ };
+-
+-&cci_i2c1 {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+-	camera@60 {
+-		compatible = "ovti,ov7251";
+-
+-		/* I2C address as per ov7251.txt linux documentation */
+-		reg = <0x60>;
+-
+-		/* CAM3_RST_N */
+-		enable-gpios = <&tlmm 21 GPIO_ACTIVE_HIGH>;
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&cam3_default>;
+-
+-		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+-		clock-names = "xclk";
+-		clock-frequency = <24000000>;
+-
+-		/*
+-		 * The &vreg_s4a_1p8 trace always powered on.
+-		 *
+-		 * The 2.8V vdda-supply regulator is enabled when the
+-		 * vreg_s4a_1p8 trace is pulled high.
+-		 * It too is represented by a fixed regulator.
+-		 *
+-		 * No 1.2V vddd-supply regulator is used.
+-		 */
+-		vdddo-supply = <&vreg_lvs1a_1p8>;
+-		vdda-supply = <&cam3_avdd_2v8>;
+-
+-		status = "disabled";
+-
+-		port {
+-			ov7251_ep: endpoint {
+-				data-lanes = <0 1>;
+-/*				remote-endpoint = <&csiphy3_ep>; */
+-			};
+-		};
+-	};
+-};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 1ed794638a7cee..cb9fae39334c8d 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4326,16 +4326,16 @@ camss: camss@acb3000 {
+ 				"vfe1",
+ 				"vfe_lite";
+ 
+-			interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				<GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				<GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csid0",
+ 				"csid1",
+ 				"csid2",
+diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+index 5f7e59ecf1ca62..68d7dbe037b6ac 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+@@ -34,7 +34,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm4450.dtsi b/arch/arm64/boot/dts/qcom/sm4450.dtsi
+index a0de5fe16faae5..27453771aa68a1 100644
+--- a/arch/arm64/boot/dts/qcom/sm4450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm4450.dtsi
+@@ -29,7 +29,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 17d528d639343b..f3f207dcac84fa 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -28,7 +28,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			clock-output-names = "sleep_clk";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index e0b1c54e98c0e8..7c929168ed0805 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -29,7 +29,7 @@ xo_board_clk: xo-board-clk {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+index 2ee2561b57b1d6..52b16a4fdc4321 100644
+--- a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
++++ b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts
+@@ -32,7 +32,7 @@ / {
+ 	chassis-type = "handset";
+ 
+ 	/* required for bootloader to select correct board */
+-	qcom,msm-id = <434 0x10000>, <459 0x10000>;
++	qcom,msm-id = <459 0x10000>;
+ 	qcom,board-id = <8 32>;
+ 
+ 	aliases {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+index b039773c44653a..a1323a8b8e6bfb 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
++++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts
+@@ -376,8 +376,8 @@ da7280@4a {
+ 		pinctrl-0 = <&da7280_intr_default>;
+ 
+ 		dlg,actuator-type = "LRA";
+-		dlg,dlg,const-op-mode = <1>;
+-		dlg,dlg,periodic-op-mode = <1>;
++		dlg,const-op-mode = <1>;
++		dlg,periodic-op-mode = <1>;
+ 		dlg,nom-microvolt = <2000000>;
+ 		dlg,abs-max-microvolt = <2000000>;
+ 		dlg,imax-microamp = <129000>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 48318ed1ce98ab..9d317ae7dc98ae 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -84,7 +84,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32768>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+@@ -4481,20 +4481,20 @@ camss: camss@ac6a000 {
+ 				    "vfe_lite0",
+ 				    "vfe_lite1";
+ 
+-			interrupts = <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 359 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>,
++				     <GIC_SPI 360 IRQ_TYPE_EDGE_RISING>;
+ 			interrupt-names = "csiphy0",
+ 					  "csiphy1",
+ 					  "csiphy2",
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 877905dfd861ed..15b7f15b3836dc 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -42,7 +42,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 53147aa6f7e4ac..7a0b901799bc32 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -43,7 +43,7 @@ xo_board: xo-board {
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+ 			#clock-cells = <0>;
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+index 01c92160260572..29bc1ddfc7b25f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-hdk.dts
+@@ -1172,7 +1172,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+index ab447fc252f7dd..5648ab60ba4c4b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+@@ -825,7 +825,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+index 6052dd922ec55c..3a6cb279130489 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+@@ -1005,7 +1005,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+index 3c5d8d26704fd9..e8383faac576a2 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-samsung-q5q.dts
+@@ -565,7 +565,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+index 85d487ef80a0be..d90dc7b37c4a74 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
++++ b/arch/arm64/boot/dts/qcom/sm8550-sony-xperia-yodo-pdx234.dts
+@@ -722,7 +722,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &tlmm {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+index f00bdff4280af2..d0912735b54e50 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-hdk.dts
+@@ -1113,7 +1113,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+index 0db2cb03f252d1..76ef43c10f77d8 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+@@ -730,7 +730,7 @@ &sdhc_2 {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &swr0 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+index c5e8c3c2df91a4..71033fba21b56b 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
++++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+@@ -1041,7 +1041,7 @@ &remoteproc_mpss {
+ };
+ 
+ &sleep_clk {
+-	clock-frequency = <32000>;
++	clock-frequency = <32764>;
+ };
+ 
+ &spi4 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index 25e47505adcb79..1e2d6ba0b8c127 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -5622,7 +5622,7 @@ compute-cb@8 {
+ 
+ 					/* note: secure cb9 in downstream */
+ 
+-					compute-cb@10 {
++					compute-cb@12 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <12>;
+ 
+@@ -5632,7 +5632,7 @@ compute-cb@10 {
+ 						dma-coherent;
+ 					};
+ 
+-					compute-cb@11 {
++					compute-cb@13 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <13>;
+ 
+@@ -5642,7 +5642,7 @@ compute-cb@11 {
+ 						dma-coherent;
+ 					};
+ 
+-					compute-cb@12 {
++					compute-cb@14 {
+ 						compatible = "qcom,fastrpc-compute-cb";
+ 						reg = <14>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+index 6835fdeef3aec1..8761874dc2f064 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+@@ -706,14 +706,14 @@ &qupv3_2 {
+ 
+ &remoteproc_adsp {
+ 	firmware-name = "qcom/x1e80100/microsoft/Romulus/qcadsp8380.mbn",
+-			"qcom/x1e80100/microsoft/Romulus/adsp_dtb.mbn";
++			"qcom/x1e80100/microsoft/Romulus/adsp_dtbs.elf";
+ 
+ 	status = "okay";
+ };
+ 
+ &remoteproc_cdsp {
+ 	firmware-name = "qcom/x1e80100/microsoft/Romulus/qccdsp8380.mbn",
+-			"qcom/x1e80100/microsoft/Romulus/cdsp_dtb.mbn";
++			"qcom/x1e80100/microsoft/Romulus/cdsp_dtbs.elf";
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 7e4f46ad8eddad..4dfea255c83f70 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -38,7 +38,7 @@ xo_board: xo-board {
+ 
+ 		sleep_clk: sleep-clk {
+ 			compatible = "fixed-clock";
+-			clock-frequency = <32000>;
++			clock-frequency = <32764>;
+ 			#clock-cells = <0>;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+index 2ed01d391554b5..55c72c8a073508 100644
+--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+@@ -43,11 +43,6 @@ aliases {
+ #endif
+ 	};
+ 
+-	chosen {
+-		bootargs = "ignore_loglevel";
+-		stdout-path = "serial0:115200n8";
+-	};
+-
+ 	memory@48000000 {
+ 		device_type = "memory";
+ 		/* First 128MB is reserved for secure area. */
+diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+index 4509151344c430..33b9873b225a82 100644
+--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
++++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+@@ -12,10 +12,15 @@
+ / {
+ 	aliases {
+ 		i2c0 = &i2c0;
+-		serial0 = &scif0;
++		serial3 = &scif0;
+ 		mmc1 = &sdhi1;
+ 	};
+ 
++	chosen {
++		bootargs = "ignore_loglevel";
++		stdout-path = "serial3:115200n8";
++	};
++
+ 	keys {
+ 		compatible = "gpio-keys";
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
+index bd6419a5c20a22..8311af4c8689f6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-s0.dts
+@@ -74,6 +74,23 @@ vcc_io: regulator-3v3-vcc-io {
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
++	/*
++	 * HW revision prior to v1.2 must pull GPIO4_D6 low to access sdmmc.
++	 * This is modeled as an always-on active low fixed regulator.
++	 */
++	vcc_sd: regulator-3v3-vcc-sd {
++		compatible = "regulator-fixed";
++		gpios = <&gpio4 RK_PD6 GPIO_ACTIVE_LOW>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&sdmmc_2030>;
++		regulator-name = "vcc_sd";
++		regulator-always-on;
++		regulator-boot-on;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vcc_io>;
++	};
++
+ 	vcc5v0_sys: regulator-5v0-vcc-sys {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc5v0_sys";
+@@ -181,6 +198,12 @@ pwr_led: pwr-led {
+ 		};
+ 	};
+ 
++	sdmmc {
++		sdmmc_2030: sdmmc-2030 {
++			rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
++		};
++	};
++
+ 	wifi {
+ 		wifi_reg_on: wifi-reg-on {
+ 			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+@@ -233,7 +256,7 @@ &sdmmc {
+ 	cap-mmc-highspeed;
+ 	cap-sd-highspeed;
+ 	disable-wp;
+-	vmmc-supply = <&vcc_io>;
++	vmmc-supply = <&vcc_sd>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
+index e8243c90854277..c6e41373124104 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-wolfvision-pf5.dts
+@@ -53,7 +53,7 @@ hdmi_tx_5v: regulator-hdmi-tx-5v {
+ 
+ 	pdm_codec: pdm-codec {
+ 		compatible = "dmic-codec";
+-		num-channels = <1>;
++		num-channels = <2>;
+ 		#sound-dai-cells = <0>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
+index 05ae9bdcfbbdeb..7125790bbed226 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
+@@ -10,6 +10,15 @@ chosen {
+ 		stdout-path = "serial2:1500000n8";
+ 	};
+ 
++	/* Unnamed gated oscillator: 100MHz,3.3V,3225 */
++	pcie30_port0_refclk: pcie30_port1_refclk: pcie-oscillator {
++		compatible = "gated-fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <100000000>;
++		clock-output-names = "pcie30_refclk";
++		vdd-supply = <&vcc3v3_pi6c_05>;
++	};
++
+ 	vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc3v3_pcie2x1l0";
+@@ -19,26 +28,26 @@ vcc3v3_pcie2x1l0: regulator-vcc3v3-pcie2x1l0 {
+ 		vin-supply = <&vcc_3v3_s3>;
+ 	};
+ 
+-	vcc3v3_pcie3x2: regulator-vcc3v3-pcie3x2 {
++	vcc3v3_bkey: regulator-vcc3v3-bkey {
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpios = <&gpio2 RK_PC4 GPIO_ACTIVE_HIGH>; /* PCIE_4G_PWEN */
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&pcie3x2_vcc3v3_en>;
+-		regulator-name = "vcc3v3_pcie3x2";
++		pinctrl-0 = <&pcie_4g_pwen>;
++		regulator-name = "vcc3v3_bkey";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		startup-delay-us = <5000>;
+ 		vin-supply = <&vcc5v0_sys>;
+ 	};
+ 
+-	vcc3v3_pcie3x4: regulator-vcc3v3-pcie3x4 {
++	vcc3v3_pcie30: vcc3v3_pi6c_05: regulator-vcc3v3-pi6c-05 {
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpios = <&gpio2 RK_PC5 GPIO_ACTIVE_HIGH>; /* PCIE30x4_PWREN_H */
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&pcie3x4_vcc3v3_en>;
+-		regulator-name = "vcc3v3_pcie3x4";
++		pinctrl-0 = <&pcie30x4_pwren_h>;
++		regulator-name = "vcc3v3_pcie30";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		startup-delay-us = <5000>;
+@@ -98,24 +107,52 @@ &pcie2x1l0 {
+ };
+ 
+ &pcie30phy {
++	data-lanes = <1 1 2 2>;
++	/* separate clock lines from the clock generator to phy and devices */
++	rockchip,rx-common-refclk-mode = <0 0 0 0>;
+ 	status = "okay";
+ };
+ 
+-/* B-Key and E-Key */
++/* M-Key */
+ &pcie3x2 {
++	/*
++	 * The board has a "pcie_refclk" oscillator that needs enabling,
++	 * so add it to the list of clocks.
++	 */
++	clocks = <&cru ACLK_PCIE_2L_MSTR>, <&cru ACLK_PCIE_2L_SLV>,
++		 <&cru ACLK_PCIE_2L_DBI>, <&cru PCLK_PCIE_2L>,
++		 <&cru CLK_PCIE_AUX1>, <&cru CLK_PCIE2L_PIPE>,
++		 <&pcie30_port1_refclk>;
++	clock-names = "aclk_mst", "aclk_slv",
++		      "aclk_dbi", "pclk",
++		      "aux", "pipe",
++		      "ref";
++	num-lanes = <2>;
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&pcie3x2_rst>;
+-	reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTn_M1_L */
+-	vpcie3v3-supply = <&vcc3v3_pcie3x2>;
++	pinctrl-0 = <&pcie30x2_perstn_m1_l>;
++	reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* PCIE30X2_PERSTn_M1_L */
++	vpcie3v3-supply = <&vcc3v3_pcie30>;
+ 	status = "okay";
+ };
+ 
+-/* M-Key */
++/* B-Key and E-Key */
+ &pcie3x4 {
++	/*
++	 * The board has a "pcie_refclk" oscillator that needs enabling,
++	 * so add it to the list of clocks.
++	 */
++	clocks = <&cru ACLK_PCIE_4L_MSTR>, <&cru ACLK_PCIE_4L_SLV>,
++		 <&cru ACLK_PCIE_4L_DBI>, <&cru PCLK_PCIE_4L>,
++		 <&cru CLK_PCIE_AUX0>, <&cru CLK_PCIE4L_PIPE>,
++		 <&pcie30_port0_refclk>;
++	clock-names = "aclk_mst", "aclk_slv",
++		      "aclk_dbi", "pclk",
++		      "aux", "pipe",
++		      "ref";
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&pcie3x4_rst>;
+-	reset-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* PCIE30X2_PERSTn_M1_L */
+-	vpcie3v3-supply = <&vcc3v3_pcie3x4>;
++	pinctrl-0 = <&pcie30x4_perstn_m1_l>;
++	reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; /* PCIE30X4_PERSTn_M1_L */
++	vpcie3v3-supply = <&vcc3v3_bkey>;
+ 	status = "okay";
+ };
+ 
+@@ -127,20 +164,20 @@ pcie2_0_rst: pcie2-0-rst {
+ 	};
+ 
+ 	pcie3 {
+-		pcie3x2_rst: pcie3x2-rst {
+-			rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
++		pcie30x2_perstn_m1_l: pcie30x2-perstn-m1-l {
++			rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 
+-		pcie3x2_vcc3v3_en: pcie3x2-vcc3v3-en {
+-			rockchip,pins = <2 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
++		pcie_4g_pwen: pcie-4g-pwen {
++			rockchip,pins = <2 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
+ 		};
+ 
+-		pcie3x4_rst: pcie3x4-rst {
+-			rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
++		pcie30x4_perstn_m1_l: pcie30x4-perstn-m1-l {
++			rockchip,pins = <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 
+-		pcie3x4_vcc3v3_en: pcie3x4-vcc3v3-en {
+-			rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
++		pcie30x4_pwren_h: pcie30x4-pwren-h {
++			rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_down>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
+index f71360f14f233c..2b3b48a9a8842e 100644
+--- a/arch/arm64/boot/dts/ti/Makefile
++++ b/arch/arm64/boot/dts/ti/Makefile
+@@ -42,10 +42,6 @@ dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-csi2-imx219.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am62x-sk-hdmi-audio.dtbo
+ 
+ # Boards with AM64x SoC
+-k3-am642-hummingboard-t-pcie-dtbs := \
+-	k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-pcie.dtbo
+-k3-am642-hummingboard-t-usb3-dtbs := \
+-	k3-am642-hummingboard-t.dtb k3-am642-hummingboard-t-usb3.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac.dtbo
+ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-icssg1-dualemac-mii.dtbo
+@@ -230,7 +226,7 @@ dtb- += k3-am625-beagleplay-csi2-ov5640.dtb \
+ 	k3-am642-tqma64xxl-mbax4xxl-wlan.dtb \
+ 	k3-am68-sk-base-board-csi2-dual-imx219.dtb \
+ 	k3-am69-sk-csi2-dual-imx219.dtb \
+-	k3-j7200-evm-pcie1-ep.dtbo \
++	k3-j7200-evm-pcie1-ep.dtb \
+ 	k3-j721e-common-proc-board-infotainment.dtb \
+ 	k3-j721e-evm-pcie0-ep.dtb \
+ 	k3-j721e-sk-csi2-dual-imx219.dtb \
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 7cd727d10a5f27..7d355aa73ea211 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -23,7 +23,6 @@ gic500: interrupt-controller@1800000 {
+ 		interrupt-controller;
+ 		reg = <0x00 0x01800000 0x00 0x10000>,	/* GICD */
+ 		      <0x00 0x01880000 0x00 0xc0000>,	/* GICR */
+-		      <0x00 0x01880000 0x00 0xc0000>,   /* GICR */
+ 		      <0x01 0x00000000 0x00 0x2000>,    /* GICC */
+ 		      <0x01 0x00010000 0x00 0x1000>,    /* GICH */
+ 		      <0x01 0x00020000 0x00 0x2000>;    /* GICV */
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+index a93e2cd7b8c74a..a1daba7b1fad5d 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi
+@@ -18,7 +18,6 @@ gic500: interrupt-controller@1800000 {
+ 		compatible = "arm,gic-v3";
+ 		reg = <0x00 0x01800000 0x00 0x10000>,	/* GICD */
+ 		      <0x00 0x01880000 0x00 0xc0000>,	/* GICR */
+-		      <0x00 0x01880000 0x00 0xc0000>,   /* GICR */
+ 		      <0x01 0x00000000 0x00 0x2000>,    /* GICC */
+ 		      <0x01 0x00010000 0x00 0x1000>,    /* GICH */
+ 		      <0x01 0x00020000 0x00 0x2000>;    /* GICV */
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
+new file mode 100644
+index 00000000000000..023b2a6aaa5668
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dts
+@@ -0,0 +1,47 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
++ *
++ * DTS for SolidRun AM642 HummingBoard-T,
++ * running on Cortex A53, with PCI-E.
++ *
++ */
++
++#include "k3-am642-hummingboard-t.dts"
++
++#include "k3-serdes.h"
++
++/ {
++	model = "SolidRun AM642 HummingBoard-T with PCI-E";
++};
++
++&pcie0_rc {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pcie0_default_pins>;
++	reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>;
++	phys = <&serdes0_link>;
++	phy-names = "pcie-phy";
++	num-lanes = <1>;
++	status = "okay";
++};
++
++&serdes0 {
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	serdes0_link: phy@0 {
++		reg = <0>;
++		cdns,num-lanes = <1>;
++		cdns,phy-type = <PHY_TYPE_PCIE>;
++		#phy-cells = <0>;
++		resets = <&serdes_wiz0 1>;
++	};
++};
++
++&serdes_ln_ctrl {
++	idle-states = <AM64_SERDES0_LANE0_PCIE0>;
++};
++
++&serdes_mux {
++	idle-state = <1>;
++};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso
+deleted file mode 100644
+index bd9a5caf20da5b..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-pcie.dtso
++++ /dev/null
+@@ -1,45 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
+- *
+- * Overlay for SolidRun AM642 HummingBoard-T to enable PCI-E.
+- */
+-
+-/dts-v1/;
+-/plugin/;
+-
+-#include <dt-bindings/gpio/gpio.h>
+-#include <dt-bindings/phy/phy.h>
+-
+-#include "k3-serdes.h"
+-
+-&pcie0_rc {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&pcie0_default_pins>;
+-	reset-gpios = <&main_gpio1 15 GPIO_ACTIVE_HIGH>;
+-	phys = <&serdes0_link>;
+-	phy-names = "pcie-phy";
+-	num-lanes = <1>;
+-	status = "okay";
+-};
+-
+-&serdes0 {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+-	serdes0_link: phy@0 {
+-		reg = <0>;
+-		cdns,num-lanes = <1>;
+-		cdns,phy-type = <PHY_TYPE_PCIE>;
+-		#phy-cells = <0>;
+-		resets = <&serdes_wiz0 1>;
+-	};
+-};
+-
+-&serdes_ln_ctrl {
+-	idle-states = <AM64_SERDES0_LANE0_PCIE0>;
+-};
+-
+-&serdes_mux {
+-	idle-state = <1>;
+-};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
+new file mode 100644
+index 00000000000000..ee9bd618f37010
+--- /dev/null
++++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dts
+@@ -0,0 +1,47 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
++ *
++ * DTS for SolidRun AM642 HummingBoard-T,
++ * running on Cortex A53, with USB-3.1 Gen 1.
++ *
++ */
++
++#include "k3-am642-hummingboard-t.dts"
++
++#include "k3-serdes.h"
++
++/ {
++	model = "SolidRun AM642 HummingBoard-T with USB-3.1 Gen 1";
++};
++
++&serdes0 {
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	serdes0_link: phy@0 {
++		reg = <0>;
++		cdns,num-lanes = <1>;
++		cdns,phy-type = <PHY_TYPE_USB3>;
++		#phy-cells = <0>;
++		resets = <&serdes_wiz0 1>;
++	};
++};
++
++&serdes_ln_ctrl {
++	idle-states = <AM64_SERDES0_LANE0_USB>;
++};
++
++&serdes_mux {
++	idle-state = <0>;
++};
++
++&usbss0 {
++	/delete-property/ ti,usb2-only;
++};
++
++&usb0 {
++	maximum-speed = "super-speed";
++	phys = <&serdes0_link>;
++	phy-names = "cdns3,usb3-phy";
++};
+diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso
+deleted file mode 100644
+index ffcc3bd3c7bc5d..00000000000000
+--- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t-usb3.dtso
++++ /dev/null
+@@ -1,44 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2023 Josua Mayer <josua@solid-run.com>
+- *
+- * Overlay for SolidRun AM642 HummingBoard-T to enable USB-3.1.
+- */
+-
+-/dts-v1/;
+-/plugin/;
+-
+-#include <dt-bindings/phy/phy.h>
+-
+-#include "k3-serdes.h"
+-
+-&serdes0 {
+-	#address-cells = <1>;
+-	#size-cells = <0>;
+-
+-	serdes0_link: phy@0 {
+-		reg = <0>;
+-		cdns,num-lanes = <1>;
+-		cdns,phy-type = <PHY_TYPE_USB3>;
+-		#phy-cells = <0>;
+-		resets = <&serdes_wiz0 1>;
+-	};
+-};
+-
+-&serdes_ln_ctrl {
+-	idle-states = <AM64_SERDES0_LANE0_USB>;
+-};
+-
+-&serdes_mux {
+-	idle-state = <0>;
+-};
+-
+-&usbss0 {
+-	/delete-property/ ti,usb2-only;
+-};
+-
+-&usb0 {
+-	maximum-speed = "super-speed";
+-	phys = <&serdes0_link>;
+-	phy-names = "cdns3,usb3-phy";
+-};
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index c62831e6158633..c6d6a31a8f48c7 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -1352,7 +1352,6 @@ CONFIG_SM_DISPCC_6115=m
+ CONFIG_SM_DISPCC_8250=y
+ CONFIG_SM_DISPCC_8450=m
+ CONFIG_SM_DISPCC_8550=m
+-CONFIG_SM_DISPCC_8650=m
+ CONFIG_SM_GCC_4450=y
+ CONFIG_SM_GCC_6115=y
+ CONFIG_SM_GCC_8350=y
+diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
+index bf6cf5579cf459..9c58fb81f7fd67 100644
+--- a/arch/hexagon/include/asm/cmpxchg.h
++++ b/arch/hexagon/include/asm/cmpxchg.h
+@@ -56,7 +56,7 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
+ 	__typeof__(ptr) __ptr = (ptr);				\
+ 	__typeof__(*(ptr)) __old = (old);			\
+ 	__typeof__(*(ptr)) __new = (new);			\
+-	__typeof__(*(ptr)) __oldval = 0;			\
++	__typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0;	\
+ 								\
+ 	asm volatile(						\
+ 		"1:	%0 = memw_locked(%1);\n"		\
+diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
+index 75e062722d285b..040a958de1dfc5 100644
+--- a/arch/hexagon/kernel/traps.c
++++ b/arch/hexagon/kernel/traps.c
+@@ -195,8 +195,10 @@ int die(const char *str, struct pt_regs *regs, long err)
+ 	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
+ 
+ 	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
+-	    NOTIFY_STOP)
++	    NOTIFY_STOP) {
++		spin_unlock_irq(&die.lock);
+ 		return 1;
++	}
+ 
+ 	print_modules();
+ 	show_regs(regs);
+diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
+index d78330916bd18a..13b2462f3d8c9d 100644
+--- a/arch/loongarch/include/asm/hw_breakpoint.h
++++ b/arch/loongarch/include/asm/hw_breakpoint.h
+@@ -38,8 +38,8 @@ struct arch_hw_breakpoint {
+  * Limits.
+  * Changing these will require modifications to the register accessors.
+  */
+-#define LOONGARCH_MAX_BRP		8
+-#define LOONGARCH_MAX_WRP		8
++#define LOONGARCH_MAX_BRP		14
++#define LOONGARCH_MAX_WRP		14
+ 
+ /* Virtual debug register bases. */
+ #define CSR_CFG_ADDR	0
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 64ad277e096edd..aaa4ad6b85944a 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -959,6 +959,36 @@
+ #define LOONGARCH_CSR_DB7CTRL		0x34a	/* data breakpoint 7 control */
+ #define LOONGARCH_CSR_DB7ASID		0x34b	/* data breakpoint 7 asid */
+ 
++#define LOONGARCH_CSR_DB8ADDR		0x350	/* data breakpoint 8 address */
++#define LOONGARCH_CSR_DB8MASK		0x351	/* data breakpoint 8 mask */
++#define LOONGARCH_CSR_DB8CTRL		0x352	/* data breakpoint 8 control */
++#define LOONGARCH_CSR_DB8ASID		0x353	/* data breakpoint 8 asid */
++
++#define LOONGARCH_CSR_DB9ADDR		0x358	/* data breakpoint 9 address */
++#define LOONGARCH_CSR_DB9MASK		0x359	/* data breakpoint 9 mask */
++#define LOONGARCH_CSR_DB9CTRL		0x35a	/* data breakpoint 9 control */
++#define LOONGARCH_CSR_DB9ASID		0x35b	/* data breakpoint 9 asid */
++
++#define LOONGARCH_CSR_DB10ADDR		0x360	/* data breakpoint 10 address */
++#define LOONGARCH_CSR_DB10MASK		0x361	/* data breakpoint 10 mask */
++#define LOONGARCH_CSR_DB10CTRL		0x362	/* data breakpoint 10 control */
++#define LOONGARCH_CSR_DB10ASID		0x363	/* data breakpoint 10 asid */
++
++#define LOONGARCH_CSR_DB11ADDR		0x368	/* data breakpoint 11 address */
++#define LOONGARCH_CSR_DB11MASK		0x369	/* data breakpoint 11 mask */
++#define LOONGARCH_CSR_DB11CTRL		0x36a	/* data breakpoint 11 control */
++#define LOONGARCH_CSR_DB11ASID		0x36b	/* data breakpoint 11 asid */
++
++#define LOONGARCH_CSR_DB12ADDR		0x370	/* data breakpoint 12 address */
++#define LOONGARCH_CSR_DB12MASK		0x371	/* data breakpoint 12 mask */
++#define LOONGARCH_CSR_DB12CTRL		0x372	/* data breakpoint 12 control */
++#define LOONGARCH_CSR_DB12ASID		0x373	/* data breakpoint 12 asid */
++
++#define LOONGARCH_CSR_DB13ADDR		0x378	/* data breakpoint 13 address */
++#define LOONGARCH_CSR_DB13MASK		0x379	/* data breakpoint 13 mask */
++#define LOONGARCH_CSR_DB13CTRL		0x37a	/* data breakpoint 13 control */
++#define LOONGARCH_CSR_DB13ASID		0x37b	/* data breakpoint 13 asid */
++
+ #define LOONGARCH_CSR_FWPC		0x380	/* instruction breakpoint config */
+ #define LOONGARCH_CSR_FWPS		0x381	/* instruction breakpoint status */
+ 
+@@ -1002,6 +1032,36 @@
+ #define LOONGARCH_CSR_IB7CTRL		0x3ca	/* inst breakpoint 7 control */
+ #define LOONGARCH_CSR_IB7ASID		0x3cb	/* inst breakpoint 7 asid */
+ 
++#define LOONGARCH_CSR_IB8ADDR		0x3d0	/* inst breakpoint 8 address */
++#define LOONGARCH_CSR_IB8MASK		0x3d1	/* inst breakpoint 8 mask */
++#define LOONGARCH_CSR_IB8CTRL		0x3d2	/* inst breakpoint 8 control */
++#define LOONGARCH_CSR_IB8ASID		0x3d3	/* inst breakpoint 8 asid */
++
++#define LOONGARCH_CSR_IB9ADDR		0x3d8	/* inst breakpoint 9 address */
++#define LOONGARCH_CSR_IB9MASK		0x3d9	/* inst breakpoint 9 mask */
++#define LOONGARCH_CSR_IB9CTRL		0x3da	/* inst breakpoint 9 control */
++#define LOONGARCH_CSR_IB9ASID		0x3db	/* inst breakpoint 9 asid */
++
++#define LOONGARCH_CSR_IB10ADDR		0x3e0	/* inst breakpoint 10 address */
++#define LOONGARCH_CSR_IB10MASK		0x3e1	/* inst breakpoint 10 mask */
++#define LOONGARCH_CSR_IB10CTRL		0x3e2	/* inst breakpoint 10 control */
++#define LOONGARCH_CSR_IB10ASID		0x3e3	/* inst breakpoint 10 asid */
++
++#define LOONGARCH_CSR_IB11ADDR		0x3e8	/* inst breakpoint 11 address */
++#define LOONGARCH_CSR_IB11MASK		0x3e9	/* inst breakpoint 11 mask */
++#define LOONGARCH_CSR_IB11CTRL		0x3ea	/* inst breakpoint 11 control */
++#define LOONGARCH_CSR_IB11ASID		0x3eb	/* inst breakpoint 11 asid */
++
++#define LOONGARCH_CSR_IB12ADDR		0x3f0	/* inst breakpoint 12 address */
++#define LOONGARCH_CSR_IB12MASK		0x3f1	/* inst breakpoint 12 mask */
++#define LOONGARCH_CSR_IB12CTRL		0x3f2	/* inst breakpoint 12 control */
++#define LOONGARCH_CSR_IB12ASID		0x3f3	/* inst breakpoint 12 asid */
++
++#define LOONGARCH_CSR_IB13ADDR		0x3f8	/* inst breakpoint 13 address */
++#define LOONGARCH_CSR_IB13MASK		0x3f9	/* inst breakpoint 13 mask */
++#define LOONGARCH_CSR_IB13CTRL		0x3fa	/* inst breakpoint 13 control */
++#define LOONGARCH_CSR_IB13ASID		0x3fb	/* inst breakpoint 13 asid */
++
+ #define LOONGARCH_CSR_DEBUG		0x500	/* debug config */
+ #define LOONGARCH_CSR_DERA		0x501	/* debug era */
+ #define LOONGARCH_CSR_DESAVE		0x502	/* debug save */
+diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
+index a6e4b605bfa8d6..c35f9bf3803349 100644
+--- a/arch/loongarch/kernel/hw_breakpoint.c
++++ b/arch/loongarch/kernel/hw_breakpoint.c
+@@ -51,7 +51,13 @@ int hw_breakpoint_slots(int type)
+ 	READ_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
+ 	READ_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
+ 	READ_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
+-	READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
++	READ_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 10, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 11, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 12, REG, T, VAL);		\
++	READ_WB_REG_CASE(OFF, 13, REG, T, VAL);
+ 
+ #define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL)	\
+ 	WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL);		\
+@@ -61,7 +67,13 @@ int hw_breakpoint_slots(int type)
+ 	WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
+ 	WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
+ 	WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
+-	WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
++	WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
++	WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL);	\
++	WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);
+ 
+ static u64 read_wb_reg(int reg, int n, int t)
+ {
+diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c
+index 0909729dc2e153..5bbdb9fd76e5d0 100644
+--- a/arch/loongarch/power/platform.c
++++ b/arch/loongarch/power/platform.c
+@@ -17,7 +17,7 @@ void enable_gpe_wakeup(void)
+ 	if (acpi_gbl_reduced_hardware)
+ 	       return;
+ 
+-	acpi_enable_all_wakeup_gpes();
++	acpi_hw_enable_all_wakeup_gpes();
+ }
+ 
+ void enable_pci_wakeup(void)
+diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
+index 18a3028ac3b6de..dad2e7980f245b 100644
+--- a/arch/powerpc/include/asm/hugetlb.h
++++ b/arch/powerpc/include/asm/hugetlb.h
+@@ -15,6 +15,15 @@
+ 
+ extern bool hugetlb_disabled;
+ 
++static inline bool hugepages_supported(void)
++{
++	if (hugetlb_disabled)
++		return false;
++
++	return HPAGE_SHIFT != 0;
++}
++#define hugepages_supported hugepages_supported
++
+ void __init hugetlbpage_init_defaultsize(void);
+ 
+ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 76381e14e800c7..0ebae6e4c19dd7 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -687,7 +687,7 @@ void iommu_table_clear(struct iommu_table *tbl)
+ void iommu_table_reserve_pages(struct iommu_table *tbl,
+ 		unsigned long res_start, unsigned long res_end)
+ {
+-	int i;
++	unsigned long i;
+ 
+ 	WARN_ON_ONCE(res_end < res_start);
+ 	/*
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 534cd159e9ab4c..ae6f7a235d8b24 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -1650,7 +1650,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 		iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn,
+ 					    dynamic_addr, dynamic_len, page_shift, NULL,
+ 					    &iommu_table_lpar_multi_ops);
+-		iommu_init_table(newtbl, pci->phb->node, start, end);
++		iommu_init_table(newtbl, pci->phb->node,
++				 start >> page_shift, end >> page_shift);
+ 
+ 		pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl;
+ 
+@@ -2065,7 +2066,9 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu
+ 							    offset, 1UL << window_shift,
+ 							    IOMMU_PAGE_SHIFT_4K, NULL,
+ 							    &iommu_table_lpar_multi_ops);
+-				iommu_init_table(tbl, pci->phb->node, start, end);
++				iommu_init_table(tbl, pci->phb->node,
++						 start >> IOMMU_PAGE_SHIFT_4K,
++						 end >> IOMMU_PAGE_SHIFT_4K);
+ 
+ 				table_group->tables[0] = tbl;
+ 
+@@ -2136,7 +2139,7 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu
+ 	/* New table for using DDW instead of the default DMA window */
+ 	iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr,
+ 				    1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
+-	iommu_init_table(tbl, pci->phb->node, start, end);
++	iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift);
+ 
+ 	pci->table_group->tables[num] = tbl;
+ 	set_iommu_table_base(&pdev->dev, tbl);
+@@ -2205,6 +2208,9 @@ static long spapr_tce_unset_window(struct iommu_table_group *table_group, int nu
+ 	const char *win_name;
+ 	int ret = -ENODEV;
+ 
++	if (!tbl) /* The table was never created OR window was never opened */
++		return 0;
++
+ 	mutex_lock(&dma_win_init_mutex);
+ 
+ 	if ((num == 0) && is_default_window_table(table_group, tbl))
+diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
+index 821818886fab06..39f0577f580def 100644
+--- a/arch/riscv/kernel/vector.c
++++ b/arch/riscv/kernel/vector.c
+@@ -309,7 +309,7 @@ static int __init riscv_v_sysctl_init(void)
+ static int __init riscv_v_sysctl_init(void) { return 0; }
+ #endif /* ! CONFIG_SYSCTL */
+ 
+-static int riscv_v_init(void)
++static int __init riscv_v_init(void)
+ {
+ 	return riscv_v_sysctl_init();
+ }
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 0077969170e8b4..83b1d7bbd8880c 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -72,6 +72,7 @@ config S390
+ 	select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
+ 	select ARCH_ENABLE_MEMORY_HOTREMOVE
+ 	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
++	select ARCH_HAS_CPU_FINALIZE_INIT
+ 	select ARCH_HAS_CURRENT_STACK_POINTER
+ 	select ARCH_HAS_DEBUG_VIRTUAL
+ 	select ARCH_HAS_DEBUG_VM_PGTABLE
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 7fd57398221ea3..9b772093278704 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -22,7 +22,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
+ ifndef CONFIG_AS_IS_LLVM
+ KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
+ endif
+-KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack
++KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
+ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+ KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 3fa28db2fe59f4..41f0159339dbb2 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -13,6 +13,7 @@
+ #include "decompressor.h"
+ #include "boot.h"
+ 
++#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+ struct ctlreg __bootdata_preserved(s390_invalid_asce);
+ 
+ #ifdef CONFIG_PROC_FS
+@@ -236,11 +237,12 @@ static pte_t *boot_pte_alloc(void)
+ 	return pte;
+ }
+ 
+-static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
++static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size,
++					  enum populate_mode mode)
+ {
+ 	switch (mode) {
+ 	case POPULATE_NONE:
+-		return -1;
++		return INVALID_PHYS_ADDR;
+ 	case POPULATE_DIRECT:
+ 		return addr;
+ 	case POPULATE_LOWCORE:
+@@ -258,33 +260,55 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
+ 		return addr;
+ #endif
+ 	default:
+-		return -1;
++		return INVALID_PHYS_ADDR;
+ 	}
+ }
+ 
+-static bool large_allowed(enum populate_mode mode)
++static bool large_page_mapping_allowed(enum populate_mode mode)
+ {
+-	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
++	switch (mode) {
++	case POPULATE_DIRECT:
++	case POPULATE_IDENTITY:
++	case POPULATE_KERNEL:
++#ifdef CONFIG_KASAN
++	case POPULATE_KASAN_MAP_SHADOW:
++#endif
++		return true;
++	default:
++		return false;
++	}
+ }
+ 
+-static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
+-			  enum populate_mode mode)
++static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end,
++					  enum populate_mode mode)
+ {
+-	unsigned long size = end - addr;
++	unsigned long pa, size = end - addr;
++
++	if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
++	    !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
++		return INVALID_PHYS_ADDR;
+ 
+-	return machine.has_edat2 && large_allowed(mode) &&
+-	       IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
+-	       IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
++	pa = resolve_pa_may_alloc(addr, size, mode);
++	if (!IS_ALIGNED(pa, PUD_SIZE))
++		return INVALID_PHYS_ADDR;
++
++	return pa;
+ }
+ 
+-static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
+-			  enum populate_mode mode)
++static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end,
++					  enum populate_mode mode)
+ {
+-	unsigned long size = end - addr;
++	unsigned long pa, size = end - addr;
++
++	if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
++	    !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
++		return INVALID_PHYS_ADDR;
++
++	pa = resolve_pa_may_alloc(addr, size, mode);
++	if (!IS_ALIGNED(pa, PMD_SIZE))
++		return INVALID_PHYS_ADDR;
+ 
+-	return machine.has_edat1 && large_allowed(mode) &&
+-	       IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
+-	       IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
++	return pa;
+ }
+ 
+ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
+@@ -298,7 +322,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
+ 		if (pte_none(*pte)) {
+ 			if (kasan_pte_populate_zero_shadow(pte, mode))
+ 				continue;
+-			entry = __pte(_pa(addr, PAGE_SIZE, mode));
++			entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
+ 			entry = set_pte_bit(entry, PAGE_KERNEL);
+ 			if (!machine.has_nx)
+ 				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
+@@ -313,7 +337,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
+ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
+ 				 enum populate_mode mode)
+ {
+-	unsigned long next, pages = 0;
++	unsigned long pa, next, pages = 0;
+ 	pmd_t *pmd, entry;
+ 	pte_t *pte;
+ 
+@@ -323,8 +347,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
+ 		if (pmd_none(*pmd)) {
+ 			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
+ 				continue;
+-			if (can_large_pmd(pmd, addr, next, mode)) {
+-				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
++			pa = try_get_large_pmd_pa(pmd, addr, next, mode);
++			if (pa != INVALID_PHYS_ADDR) {
++				entry = __pmd(pa);
+ 				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
+ 				if (!machine.has_nx)
+ 					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+@@ -346,7 +371,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
+ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
+ 				 enum populate_mode mode)
+ {
+-	unsigned long next, pages = 0;
++	unsigned long pa, next, pages = 0;
+ 	pud_t *pud, entry;
+ 	pmd_t *pmd;
+ 
+@@ -356,8 +381,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
+ 		if (pud_none(*pud)) {
+ 			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
+ 				continue;
+-			if (can_large_pud(pud, addr, next, mode)) {
+-				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
++			pa = try_get_large_pud_pa(pud, addr, next, mode);
++			if (pa != INVALID_PHYS_ADDR) {
++				entry = __pud(pa);
+ 				entry = set_pud_bit(entry, REGION3_KERNEL);
+ 				if (!machine.has_nx)
+ 					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
+diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
+index eb00fa1771da07..ad17d91ad2e661 100644
+--- a/arch/s390/include/asm/sclp.h
++++ b/arch/s390/include/asm/sclp.h
+@@ -137,6 +137,7 @@ void sclp_early_printk(const char *s);
+ void __sclp_early_printk(const char *s, unsigned int len);
+ void sclp_emergency_printk(const char *s);
+ 
++int sclp_init(void);
+ int sclp_early_get_memsize(unsigned long *mem);
+ int sclp_early_get_hsa_size(unsigned long *hsa_size);
+ int _sclp_get_core_info(struct sclp_core_info *info);
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index b0bc68da6a116f..33205dd410e470 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -981,7 +981,7 @@ static int cfdiag_push_sample(struct perf_event *event,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = cpuhw->usedss;
+ 		raw.frag.data = cpuhw->stop;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
+index fa732545426611..10725f5a6f0fd1 100644
+--- a/arch/s390/kernel/perf_pai_crypto.c
++++ b/arch/s390/kernel/perf_pai_crypto.c
+@@ -478,7 +478,7 @@ static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = rawsize;
+ 		raw.frag.data = cpump->save;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
+index 7f462bef1fc075..a8f0bad99cf04f 100644
+--- a/arch/s390/kernel/perf_pai_ext.c
++++ b/arch/s390/kernel/perf_pai_ext.c
+@@ -503,7 +503,7 @@ static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+ 		raw.frag.size = rawsize;
+ 		raw.frag.data = cpump->save;
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	overflow = perf_event_overflow(event, &data, &regs);
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index a3fea683b22706..99f165726ca9ef 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -1006,3 +1006,8 @@ void __init setup_arch(char **cmdline_p)
+ 	/* Add system specific data to the random pool */
+ 	setup_randomness();
+ }
++
++void __init arch_cpu_finalize_init(void)
++{
++	sclp_init();
++}
+diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
+index 24eccaa293371b..bdcf2a3b6c41b3 100644
+--- a/arch/s390/purgatory/Makefile
++++ b/arch/s390/purgatory/Makefile
+@@ -13,7 +13,7 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
+ $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
+ 	$(call if_changed_rule,as_o_S)
+ 
+-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
++KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
+ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
+ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
+ KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index e91970b01d6243..c3a2f6f57770ab 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -1118,7 +1118,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
+ 				.data = ibs_data.data,
+ 			},
+ 		};
+-		perf_sample_save_raw_data(&data, &raw);
++		perf_sample_save_raw_data(&data, event, &raw);
+ 	}
+ 
+ 	if (perf_ibs == &perf_ibs_op)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index e159e44a6a1b61..5aa50dfe01042a 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1735,7 +1735,7 @@ struct kvm_x86_ops {
+ 	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
+ 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
+ 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+-	void (*hwapic_isr_update)(int isr);
++	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index b5a8f0891135b1..f1fac08fdef28c 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -495,15 +495,6 @@ static int x86_cluster_flags(void)
+ }
+ #endif
+ 
+-static int x86_die_flags(void)
+-{
+-	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU) ||
+-	    cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+-		return x86_sched_itmt_flags();
+-
+-	return 0;
+-}
+-
+ /*
+  * Set if a package/die has multiple NUMA nodes inside.
+  * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
+@@ -539,7 +530,7 @@ static void __init build_sched_topology(void)
+ 	 */
+ 	if (!x86_has_numa_in_package) {
+ 		x86_topology[i++] = (struct sched_domain_topology_level){
+-			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG)
++			cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG)
+ 		};
+ 	}
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 3c83951c619ead..39ae2f5f9866c3 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -763,7 +763,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ 	 * just set SVI.
+ 	 */
+ 	if (unlikely(apic->apicv_active))
+-		kvm_x86_call(hwapic_isr_update)(vec);
++		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
+ 	else {
+ 		++apic->isr_count;
+ 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+@@ -808,7 +808,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+ 	 * and must be left alone.
+ 	 */
+ 	if (unlikely(apic->apicv_active))
+-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
++		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
+ 	else {
+ 		--apic->isr_count;
+ 		BUG_ON(apic->isr_count < 0);
+@@ -2806,7 +2806,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 	if (apic->apicv_active) {
+ 		kvm_x86_call(apicv_post_state_restore)(vcpu);
+ 		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
+-		kvm_x86_call(hwapic_isr_update)(-1);
++		kvm_x86_call(hwapic_isr_update)(vcpu, -1);
+ 	}
+ 
+ 	vcpu->arch.apic_arb_prio = 0;
+@@ -3121,9 +3121,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	kvm_apic_update_apicv(vcpu);
+ 	if (apic->apicv_active) {
+ 		kvm_x86_call(apicv_post_state_restore)(vcpu);
+-		kvm_x86_call(hwapic_irr_update)(vcpu,
+-						apic_find_highest_irr(apic));
+-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
++		kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
++		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+ 	}
+ 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 	if (ioapic_in_kernel(vcpu->kvm))
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 893366e5373224..22cb11ab87090d 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6862,7 +6862,7 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+ 	read_unlock(&vcpu->kvm->mmu_lock);
+ }
+ 
+-void vmx_hwapic_isr_update(int max_isr)
++void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+ {
+ 	u16 status;
+ 	u8 old;
+diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
+index a55981c5216e63..48dc76bf0ec03a 100644
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -48,7 +48,7 @@ void vmx_migrate_timers(struct kvm_vcpu *vcpu);
+ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+ void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
+ void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
+-void vmx_hwapic_isr_update(int max_isr);
++void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
+ int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
+ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ 			   int trig_mode, int vector);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 2a4bd661169207..e73d8ed34235e0 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -118,17 +118,18 @@ static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
+ 
+ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
+ {
+-	unsigned short nr_vecs = bip->bip_max_vcnt - 1;
+-	struct bio_vec *copy = &bip->bip_vec[1];
+-	size_t bytes = bip->bip_iter.bi_size;
+-	struct iov_iter iter;
++	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
++	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
++	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
++	size_t bytes = bounce_bvec->bv_len;
++	struct iov_iter orig_iter;
+ 	int ret;
+ 
+-	iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
+-	ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
++	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
++	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
+ 	WARN_ON_ONCE(ret != bytes);
+ 
+-	bio_integrity_unpin_bvec(copy, nr_vecs, true);
++	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
+ }
+ 
+ /**
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 666efe8fa20206..32fb28a6372cdf 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -629,8 +629,14 @@ static void __submit_bio(struct bio *bio)
+ 		blk_mq_submit_bio(bio);
+ 	} else if (likely(bio_queue_enter(bio) == 0)) {
+ 		struct gendisk *disk = bio->bi_bdev->bd_disk;
+-
+-		disk->fops->submit_bio(bio);
++	
++		if ((bio->bi_opf & REQ_POLLED) &&
++		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
++			bio->bi_status = BLK_STS_NOTSUPP;
++			bio_endio(bio);
++		} else {
++			disk->fops->submit_bio(bio);
++		}
+ 		blk_queue_exit(disk->queue);
+ 	}
+ 
+@@ -805,12 +811,6 @@ void submit_bio_noacct(struct bio *bio)
+ 		}
+ 	}
+ 
+-	if (!(q->limits.features & BLK_FEAT_POLL) &&
+-			(bio->bi_opf & REQ_POLLED)) {
+-		bio_clear_polled(bio);
+-		goto not_supported;
+-	}
+-
+ 	switch (bio_op(bio)) {
+ 	case REQ_OP_READ:
+ 		break;
+@@ -935,7 +935,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ 		return 0;
+ 
+ 	q = bdev_get_queue(bdev);
+-	if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
++	if (cookie == BLK_QC_T_NONE)
+ 		return 0;
+ 
+ 	blk_flush_plug(current->plug, false);
+@@ -956,7 +956,8 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ 	} else {
+ 		struct gendisk *disk = q->disk;
+ 
+-		if (disk && disk->fops->poll_bio)
++		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
++		    disk->fops->poll_bio)
+ 			ret = disk->fops->poll_bio(bio, iob, flags);
+ 	}
+ 	blk_queue_exit(q);
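The block/blk-core.c hunks above move the REQ_POLLED capability check out of
submit_bio_noacct() and into the actual submission paths: a polled bio on a
queue without poll support now completes with BLK_STS_NOTSUPP instead of being
silently downgraded to a plain submission. A minimal sketch of the polling
side after this change, where `done` is a hypothetical completion flag and not
part of the patch:

	/* sketch: wait for a previously submitted REQ_POLLED bio;
	 * bio_poll() now bails out only on BLK_QC_T_NONE and leaves
	 * the BLK_FEAT_POLL check to the mq/poll_bio paths */
	while (!READ_ONCE(done))
		bio_poll(bio, NULL, 0);
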
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index b180cac61a9ddb..013469faa5e7c4 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -218,9 +218,7 @@ static ssize_t flag_store(struct device *dev, const char *page, size_t count,
+ 	else
+ 		lim.integrity.flags |= flag;
+ 
+-	blk_mq_freeze_queue(q);
+-	err = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
++	err = queue_limits_commit_update_frozen(q, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 8ac19d4ae3c0ae..23968c02be0d62 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3092,14 +3092,21 @@ void blk_mq_submit_bio(struct bio *bio)
+ 	}
+ 
+ 	/*
+-	 * Device reconfiguration may change logical block size, so alignment
+-	 * check has to be done with queue usage counter held
++	 * Device reconfiguration may change logical block size or reduce the
++	 * number of poll queues, so the checks for alignment and poll support
++	 * have to be done with queue usage counter held.
+ 	 */
+ 	if (unlikely(bio_unaligned(bio, q))) {
+ 		bio_io_error(bio);
+ 		goto queue_exit;
+ 	}
+ 
++	if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
++		bio->bi_status = BLK_STS_NOTSUPP;
++		bio_endio(bio);
++		goto queue_exit;
++	}
++
+ 	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ 	if (!bio)
+ 		goto queue_exit;
+@@ -4317,12 +4324,6 @@ void blk_mq_release(struct request_queue *q)
+ 	blk_mq_sysfs_deinit(q);
+ }
+ 
+-static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
+-{
+-	return set->nr_maps > HCTX_TYPE_POLL &&
+-		set->map[HCTX_TYPE_POLL].nr_queues;
+-}
+-
+ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ 		struct queue_limits *lim, void *queuedata)
+ {
+@@ -4333,7 +4334,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ 	if (!lim)
+ 		lim = &default_lim;
+ 	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+-	if (blk_mq_can_poll(set))
++	if (set->nr_maps > HCTX_TYPE_POLL)
+ 		lim->features |= BLK_FEAT_POLL;
+ 
+ 	q = blk_alloc_queue(lim, set->numa_node);
+@@ -5021,8 +5022,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ fallback:
+ 	blk_mq_update_queue_map(set);
+ 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+-		struct queue_limits lim;
+-
+ 		blk_mq_realloc_hw_ctxs(set, q);
+ 
+ 		if (q->nr_hw_queues != set->nr_hw_queues) {
+@@ -5036,13 +5035,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ 			set->nr_hw_queues = prev_nr_hw_queues;
+ 			goto fallback;
+ 		}
+-		lim = queue_limits_start_update(q);
+-		if (blk_mq_can_poll(set))
+-			lim.features |= BLK_FEAT_POLL;
+-		else
+-			lim.features &= ~BLK_FEAT_POLL;
+-		if (queue_limits_commit_update(q, &lim) < 0)
+-			pr_warn("updating the poll flag failed\n");
+ 		blk_mq_map_swqueue(q);
+ 	}
+ 
+@@ -5102,9 +5094,9 @@ static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+ int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
+ 		struct io_comp_batch *iob, unsigned int flags)
+ {
+-	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
+-
+-	return blk_hctx_poll(q, hctx, iob, flags);
++	if (!blk_mq_can_poll(q))
++		return 0;
++	return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
+ }
+ 
+ int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 89a20fffa4b1ce..a80d3b3105f9ed 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -451,4 +451,10 @@ do {								\
+ #define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
+ 	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\
+ 
++static inline bool blk_mq_can_poll(struct request_queue *q)
++{
++	return (q->limits.features & BLK_FEAT_POLL) &&
++		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
++}
++
+ #endif
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 8f09e33f41f68a..64f2e67238d77b 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -443,6 +443,30 @@ int queue_limits_commit_update(struct request_queue *q,
+ }
+ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
+ 
++/**
++ * queue_limits_commit_update_frozen - commit an atomic update of queue limits
++ * @q:		queue to update
++ * @lim:	limits to apply
++ *
++ * Apply the limits in @lim that were obtained from queue_limits_start_update()
++ * and updated with the new values by the caller to @q.  Freezes the queue
++ * before the update and unfreezes it after.
++ *
++ * Returns 0 if successful, else a negative error code.
++ */
++int queue_limits_commit_update_frozen(struct request_queue *q,
++		struct queue_limits *lim)
++{
++	int ret;
++
++	blk_mq_freeze_queue(q);
++	ret = queue_limits_commit_update(q, lim);
++	blk_mq_unfreeze_queue(q);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
++
+ /**
+  * queue_limits_set - apply queue limits to queue
+  * @q:		queue to update
+@@ -584,7 +608,7 @@ static bool blk_stack_atomic_writes_head(struct queue_limits *t,
+ }
+ 
+ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
+-				struct queue_limits *b)
++				struct queue_limits *b, sector_t start)
+ {
+ 	if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
+ 		goto unsupported;
+@@ -592,6 +616,9 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
+ 	if (!b->atomic_write_unit_min)
+ 		goto unsupported;
+ 
++	if (!blk_atomic_write_start_sect_aligned(start, b))
++		goto unsupported;
++
+ 	/*
+ 	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
+ 	 * device, so check for compliance.
+@@ -774,7 +801,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 		t->zone_write_granularity = 0;
+ 		t->max_zone_append_sectors = 0;
+ 	}
+-	blk_stack_atomic_writes_limits(t, b);
++	blk_stack_atomic_writes_limits(t, b, start);
+ 
+ 	return ret;
+ }
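The new queue_limits_commit_update_frozen() helper replaces the
freeze/commit/unfreeze sequence that blk-integrity.c, blk-zoned.c and
virtio_blk.c open-coded before this patch. A minimal usage sketch, with `q`
and `new_sectors` as placeholder values:

	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);
	lim.max_user_sectors = new_sectors;
	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
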
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 767598e719ab0e..e828be777206bb 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -24,6 +24,8 @@ struct queue_sysfs_entry {
+ 	struct attribute attr;
+ 	ssize_t (*show)(struct gendisk *disk, char *page);
+ 	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
++	int (*store_limit)(struct gendisk *disk, const char *page,
++			size_t count, struct queue_limits *lim);
+ 	void (*load_module)(struct gendisk *disk, const char *page, size_t count);
+ };
+ 
+@@ -153,13 +155,11 @@ QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
+ QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
+ QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)
+ 
+-static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
+-		const char *page, size_t count)
++static int queue_max_discard_sectors_store(struct gendisk *disk,
++		const char *page, size_t count, struct queue_limits *lim)
+ {
+ 	unsigned long max_discard_bytes;
+-	struct queue_limits lim;
+ 	ssize_t ret;
+-	int err;
+ 
+ 	ret = queue_var_store(&max_discard_bytes, page, count);
+ 	if (ret < 0)
+@@ -171,38 +171,28 @@ static ssize_t queue_max_discard_sectors_store(struct gendisk *disk,
+ 	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
+ 		return -EINVAL;
+ 
+-	lim = queue_limits_start_update(disk->queue);
+-	lim.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
+-	err = queue_limits_commit_update(disk->queue, &lim);
+-	if (err)
+-		return err;
+-	return ret;
++	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
++	return 0;
+ }
+ 
+-static ssize_t
+-queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count)
++static int
++queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
++		struct queue_limits *lim)
+ {
+ 	unsigned long max_sectors_kb;
+-	struct queue_limits lim;
+ 	ssize_t ret;
+-	int err;
+ 
+ 	ret = queue_var_store(&max_sectors_kb, page, count);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	lim = queue_limits_start_update(disk->queue);
+-	lim.max_user_sectors = max_sectors_kb << 1;
+-	err = queue_limits_commit_update(disk->queue, &lim);
+-	if (err)
+-		return err;
+-	return ret;
++	lim->max_user_sectors = max_sectors_kb << 1;
++	return 0;
+ }
+ 
+ static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
+-		size_t count, blk_features_t feature)
++		size_t count, struct queue_limits *lim, blk_features_t feature)
+ {
+-	struct queue_limits lim;
+ 	unsigned long val;
+ 	ssize_t ret;
+ 
+@@ -210,15 +200,11 @@ static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	lim = queue_limits_start_update(disk->queue);
+ 	if (val)
+-		lim.features |= feature;
++		lim->features |= feature;
+ 	else
+-		lim.features &= ~feature;
+-	ret = queue_limits_commit_update(disk->queue, &lim);
+-	if (ret)
+-		return ret;
+-	return count;
++		lim->features &= ~feature;
++	return 0;
+ }
+ 
+ #define QUEUE_SYSFS_FEATURE(_name, _feature)				\
+@@ -227,10 +213,10 @@ static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+ 	return sysfs_emit(page, "%u\n",					\
+ 		!!(disk->queue->limits.features & _feature));		\
+ }									\
+-static ssize_t queue_##_name##_store(struct gendisk *disk,		\
+-		const char *page, size_t count)				\
++static int queue_##_name##_store(struct gendisk *disk,			\
++		const char *page, size_t count, struct queue_limits *lim) \
+ {									\
+-	return queue_feature_store(disk, page, count, _feature);	\
++	return queue_feature_store(disk, page, count, lim, _feature);	\
+ }
+ 
+ QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
+@@ -245,10 +231,17 @@ static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
+ 		!!(disk->queue->limits.features & _feature));		\
+ }
+ 
+-QUEUE_SYSFS_FEATURE_SHOW(poll, BLK_FEAT_POLL);
+ QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
+ QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);
+ 
++static ssize_t queue_poll_show(struct gendisk *disk, char *page)
++{
++	if (queue_is_mq(disk->queue))
++		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
++	return sysfs_emit(page, "%u\n",
++		!!(disk->queue->limits.features & BLK_FEAT_POLL));
++}
++
+ static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
+ {
+ 	if (blk_queue_is_zoned(disk->queue))
+@@ -266,10 +259,9 @@ static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
+ 	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
+ }
+ 
+-static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
+-					       const char *page, size_t count)
++static int queue_iostats_passthrough_store(struct gendisk *disk,
++		const char *page, size_t count, struct queue_limits *lim)
+ {
+-	struct queue_limits lim;
+ 	unsigned long ios;
+ 	ssize_t ret;
+ 
+@@ -277,18 +269,13 @@ static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	lim = queue_limits_start_update(disk->queue);
+ 	if (ios)
+-		lim.flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
++		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
+ 	else
+-		lim.flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
+-
+-	ret = queue_limits_commit_update(disk->queue, &lim);
+-	if (ret)
+-		return ret;
+-
+-	return count;
++		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
++	return 0;
+ }
++
+ static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
+ {
+ 	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
+@@ -391,12 +378,10 @@ static ssize_t queue_wc_show(struct gendisk *disk, char *page)
+ 	return sysfs_emit(page, "write through\n");
+ }
+ 
+-static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
+-			      size_t count)
++static int queue_wc_store(struct gendisk *disk, const char *page,
++		size_t count, struct queue_limits *lim)
+ {
+-	struct queue_limits lim;
+ 	bool disable;
+-	int err;
+ 
+ 	if (!strncmp(page, "write back", 10)) {
+ 		disable = false;
+@@ -407,15 +392,11 @@ static ssize_t queue_wc_store(struct gendisk *disk, const char *page,
+ 		return -EINVAL;
+ 	}
+ 
+-	lim = queue_limits_start_update(disk->queue);
+ 	if (disable)
+-		lim.flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
++		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
+ 	else
+-		lim.flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
+-	err = queue_limits_commit_update(disk->queue, &lim);
+-	if (err)
+-		return err;
+-	return count;
++		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
++	return 0;
+ }
+ 
+ #define QUEUE_RO_ENTRY(_prefix, _name)			\
+@@ -431,6 +412,13 @@ static struct queue_sysfs_entry _prefix##_entry = {	\
+ 	.store	= _prefix##_store,			\
+ };
+ 
++#define QUEUE_LIM_RW_ENTRY(_prefix, _name)			\
++static struct queue_sysfs_entry _prefix##_entry = {	\
++	.attr		= { .name = _name, .mode = 0644 },	\
++	.show		= _prefix##_show,			\
++	.store_limit	= _prefix##_store,			\
++}
++
+ #define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name)		\
+ static struct queue_sysfs_entry _prefix##_entry = {		\
+ 	.attr		= { .name = _name, .mode = 0644 },	\
+@@ -441,7 +429,7 @@ static struct queue_sysfs_entry _prefix##_entry = {		\
+ 
+ QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+ QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
+-QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
++QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
+ QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+ QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
+ QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+@@ -457,7 +445,7 @@ QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
+ QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+ QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+ QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
+-QUEUE_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
++QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
+ QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+ 
+ QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
+@@ -477,11 +465,11 @@ QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+ QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
+ 
+ QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
+-QUEUE_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
++QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
+ QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
+ QUEUE_RW_ENTRY(queue_poll, "io_poll");
+ QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
+-QUEUE_RW_ENTRY(queue_wc, "write_cache");
++QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
+ QUEUE_RO_ENTRY(queue_fua, "fua");
+ QUEUE_RO_ENTRY(queue_dax, "dax");
+ QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
+@@ -494,10 +482,10 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
+ 	.show = queue_logical_block_size_show,
+ };
+ 
+-QUEUE_RW_ENTRY(queue_rotational, "rotational");
+-QUEUE_RW_ENTRY(queue_iostats, "iostats");
+-QUEUE_RW_ENTRY(queue_add_random, "add_random");
+-QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
++QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
++QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
++QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
++QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");
+ 
+ #ifdef CONFIG_BLK_WBT
+ static ssize_t queue_var_store64(s64 *var, const char *page)
+@@ -695,7 +683,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 	struct request_queue *q = disk->queue;
+ 	ssize_t res;
+ 
+-	if (!entry->store)
++	if (!entry->store_limit && !entry->store)
+ 		return -EIO;
+ 
+ 	/*
+@@ -706,11 +694,26 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 	if (entry->load_module)
+ 		entry->load_module(disk, page, length);
+ 
+-	blk_mq_freeze_queue(q);
++	if (entry->store_limit) {
++		struct queue_limits lim = queue_limits_start_update(q);
++
++		res = entry->store_limit(disk, page, length, &lim);
++		if (res < 0) {
++			queue_limits_cancel_update(q);
++			return res;
++		}
++
++		res = queue_limits_commit_update_frozen(q, &lim);
++		if (res)
++			return res;
++		return length;
++	}
++
+ 	mutex_lock(&q->sysfs_lock);
++	blk_mq_freeze_queue(q);
+ 	res = entry->store(disk, page, length);
+-	mutex_unlock(&q->sysfs_lock);
+ 	blk_mq_unfreeze_queue(q);
++	mutex_unlock(&q->sysfs_lock);
+ 	return res;
+ }
+ 
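With the ->store_limit() callback added above, queue_attr_store() itself
starts the limits update, cancels it on error, and commits it under a frozen
queue, so handlers only edit the limits structure they are handed. A hedged
sketch of an attribute written against this interface (the `queue_example`
name and its assumed show counterpart are illustrative, not part of the
patch):

	static int queue_example_store(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim)
	{
		unsigned long val;
		ssize_t ret;

		ret = queue_var_store(&val, page, count);
		if (ret < 0)
			return ret;

		/* only touch *lim; the caller commits it frozen */
		lim->max_user_sectors = val;
		return 0;
	}
	QUEUE_LIM_RW_ENTRY(queue_example, "example");
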
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 84da1eadff642d..c964c6b667809c 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1446,7 +1446,6 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	unsigned int nr_seq_zones, nr_conv_zones;
+ 	unsigned int pool_size;
+ 	struct queue_limits lim;
+-	int ret;
+ 
+ 	disk->nr_zones = args->nr_zones;
+ 	disk->zone_capacity = args->zone_capacity;
+@@ -1497,11 +1496,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
+ 	}
+ 
+ commit:
+-	blk_mq_freeze_queue(q);
+-	ret = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
+-
+-	return ret;
++	return queue_limits_commit_update_frozen(q, &lim);
+ }
+ 
+ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
+diff --git a/block/genhd.c b/block/genhd.c
+index 79230c109fca03..8a63be374220c8 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -798,7 +798,7 @@ static ssize_t disk_badblocks_store(struct device *dev,
+ }
+ 
+ #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+-void blk_request_module(dev_t devt)
++static bool blk_probe_dev(dev_t devt)
+ {
+ 	unsigned int major = MAJOR(devt);
+ 	struct blk_major_name **n;
+@@ -808,14 +808,26 @@ void blk_request_module(dev_t devt)
+ 		if ((*n)->major == major && (*n)->probe) {
+ 			(*n)->probe(devt);
+ 			mutex_unlock(&major_names_lock);
+-			return;
++			return true;
+ 		}
+ 	}
+ 	mutex_unlock(&major_names_lock);
++	return false;
++}
++
++void blk_request_module(dev_t devt)
++{
++	int error;
++
++	if (blk_probe_dev(devt))
++		return;
+ 
+-	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+-		/* Make old-style 2.4 aliases work */
+-		request_module("block-major-%d", MAJOR(devt));
++	error = request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt));
++	/* Make old-style 2.4 aliases work */
++	if (error > 0)
++		error = request_module("block-major-%d", MAJOR(devt));
++	if (!error)
++		blk_probe_dev(devt);
+ }
+ #endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */
+ 
+diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
+index e259180c89148b..aa3bd050d8cdd0 100644
+--- a/block/partitions/ldm.h
++++ b/block/partitions/ldm.h
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+-/**
++/*
+  * ldm - Part of the Linux-NTFS project.
+  *
+  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 16f7c7a9d8ab66..7e061d8a1d52dd 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -1016,6 +1016,8 @@ static void __init crypto_start_tests(void)
+ 	if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ 		return;
+ 
++	set_crypto_boot_test_finished();
++
+ 	for (;;) {
+ 		struct crypto_larval *larval = NULL;
+ 		struct crypto_alg *q;
+@@ -1047,8 +1049,6 @@ static void __init crypto_start_tests(void)
+ 		if (!larval)
+ 			break;
+ 	}
+-
+-	set_crypto_boot_test_finished();
+ }
+ 
+ static int __init crypto_algapi_init(void)
+diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
+index 79bbfe00d241f9..b8543a34caeada 100644
+--- a/drivers/acpi/acpica/achware.h
++++ b/drivers/acpi/acpica/achware.h
+@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
+ 
+ acpi_status acpi_hw_enable_all_runtime_gpes(void);
+ 
+-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+-
+ u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
+ 
+ acpi_status
+diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
+index 3ea9cfcff46e79..10016f52f4f40e 100644
+--- a/drivers/acpi/fan_core.c
++++ b/drivers/acpi/fan_core.c
+@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
+ 	result = sysfs_create_link(&pdev->dev.kobj,
+ 				   &cdev->device.kobj,
+ 				   "thermal_cooling");
+-	if (result)
++	if (result) {
+ 		dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
++		goto err_unregister;
++	}
+ 
+ 	result = sysfs_create_link(&cdev->device.kobj,
+ 				   &pdev->dev.kobj,
+ 				   "device");
+ 	if (result) {
+ 		dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
+-		goto err_end;
++		goto err_remove_link;
+ 	}
+ 
+ 	return 0;
+ 
++err_remove_link:
++	sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
++err_unregister:
++	thermal_cooling_device_unregister(cdev);
+ err_end:
+ 	if (fan->acpi4)
+ 		acpi_fan_delete_attributes(device);
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index 582b5a02a5c410..d57f277978dc90 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ 	struct subsys_private *sp = class_to_subsys(class);
+ 	struct klist_node *start_knode = NULL;
+ 
+-	if (!sp)
++	memset(iter, 0, sizeof(*iter));
++	if (!sp) {
++		pr_crit("%s: class %p was not registered yet\n",
++			__func__, class);
+ 		return;
++	}
+ 
+ 	if (start)
+ 		start_knode = &start->p->knode_class;
+@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
+ 	struct klist_node *knode;
+ 	struct device *dev;
+ 
++	if (!iter->sp)
++		return NULL;
++
+ 	while (1) {
+ 		knode = klist_next(&iter->ki);
+ 		if (!knode)
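The drivers/base/class.c change zeroes the iterator up front and makes
class_dev_iter_next() return NULL when the class was never registered, so the
standard iteration pattern stays safe instead of walking an uninitialized
klist. For reference (process_one() is a placeholder):

	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		process_one(dev);	/* placeholder per-device work */
	class_dev_iter_exit(&iter);
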
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index b852050d8a9665..450458267e6e64 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2180,6 +2180,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
+ 	flush_workqueue(nbd->recv_workq);
+ 	nbd_clear_que(nbd);
+ 	nbd->task_setup = NULL;
++	clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
+ 	mutex_unlock(&nbd->config_lock);
+ 
+ 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
+diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
+index ff45ed76646957..226ffc743238e9 100644
+--- a/drivers/block/ps3disk.c
++++ b/drivers/block/ps3disk.c
+@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
+ 	unsigned int devidx;
+ 	struct queue_limits lim = {
+ 		.logical_block_size	= dev->blk_size,
+-		.max_hw_sectors		= dev->bounce_size >> 9,
++		.max_hw_sectors		= BOUNCE_SIZE >> 9,
+ 		.max_segments		= -1,
+-		.max_segment_size	= dev->bounce_size,
++		.max_segment_size	= BOUNCE_SIZE,
+ 		.dma_alignment		= dev->blk_size - 1,
+ 		.features		= BLK_FEAT_WRITE_CACHE |
+ 					  BLK_FEAT_ROTATIONAL,
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 3efe378f138662..5afc8aac62ab9b 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1106,9 +1106,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ 		lim.features |= BLK_FEAT_WRITE_CACHE;
+ 	else
+ 		lim.features &= ~BLK_FEAT_WRITE_CACHE;
+-	blk_mq_freeze_queue(disk->queue);
+-	i = queue_limits_commit_update(disk->queue, &lim);
+-	blk_mq_unfreeze_queue(disk->queue);
++	i = queue_limits_commit_update_frozen(disk->queue, &lim);
+ 	if (i)
+ 		return i;
+ 	return count;
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index a1153ada74d206..0a60660fc8ce80 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -553,6 +553,9 @@ static const char *btbcm_get_board_name(struct device *dev)
+ 
+ 	/* get rid of any '/' in the compatible string */
+ 	board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++	if (!board_type)
++		return NULL;
++
+ 	strreplace(board_type, '/', '-');
+ 
+ 	return board_type;
+diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
+index 1230045d78a5f0..aa5ec1d444a9d9 100644
+--- a/drivers/bluetooth/btnxpuart.c
++++ b/drivers/bluetooth/btnxpuart.c
+@@ -1381,13 +1381,12 @@ static void btnxpuart_tx_work(struct work_struct *work)
+ 
+ 	while ((skb = nxp_dequeue(nxpdev))) {
+ 		len = serdev_device_write_buf(serdev, skb->data, skb->len);
+-		serdev_device_wait_until_sent(serdev, 0);
+ 		hdev->stat.byte_tx += len;
+ 
+ 		skb_pull(skb, len);
+ 		if (skb->len > 0) {
+ 			skb_queue_head(&nxpdev->txq, skb);
+-			break;
++			continue;
+ 		}
+ 
+ 		switch (hci_skb_pkt_type(skb)) {
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 83025f457ca044..d3eba0d4a57d3b 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -1351,12 +1351,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
+ 
+ 	btrtl_set_quirks(hdev, btrtl_dev);
+ 
+-	hci_set_hw_info(hdev,
++	if (btrtl_dev->ic_info) {
++		hci_set_hw_info(hdev,
+ 			"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
+ 			btrtl_dev->ic_info->lmp_subver,
+ 			btrtl_dev->ic_info->hci_rev,
+ 			btrtl_dev->ic_info->hci_ver,
+ 			btrtl_dev->ic_info->hci_bus);
++	}
+ 
+ 	btrtl_free(btrtl_dev);
+ 	return ret;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 279fe6c115fac5..f69df515d668b6 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2638,8 +2638,15 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
+ 	struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ 	int err;
+ 
++	/*
++	 * The function usb_driver_claim_interface() is documented to need
++	 * locks held if it's not called from a probe routine. The code here
++	 * is called from the hci_power_on workqueue, so grab the lock.
++	 */
++	device_lock(&btmtk_data->isopkt_intf->dev);
+ 	err = usb_driver_claim_interface(&btusb_driver,
+ 					 btmtk_data->isopkt_intf, data);
++	device_unlock(&btmtk_data->isopkt_intf->dev);
+ 	if (err < 0) {
+ 		btmtk_data->isopkt_intf = NULL;
+ 		bt_dev_err(data->hdev, "Failed to claim iso interface");
+diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
+index 7296127181eca3..8a14fd0291d89b 100644
+--- a/drivers/char/ipmi/ipmb_dev_int.c
++++ b/drivers/char/ipmi/ipmb_dev_int.c
+@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
+ 	ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
+ 						"%s%d", "ipmb-",
+ 						client->adapter->nr);
++	if (!ipmb_dev->miscdev.name)
++		return -ENOMEM;
++
+ 	ipmb_dev->miscdev.fops = &ipmb_fops;
+ 	ipmb_dev->miscdev.parent = &client->dev;
+ 	ret = misc_register(&ipmb_dev->miscdev);
+diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
+index a14fafc583d4d8..310f17dd9511a5 100644
+--- a/drivers/char/ipmi/ssif_bmc.c
++++ b/drivers/char/ipmi/ssif_bmc.c
+@@ -292,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
+ 	ssif_bmc->nbytes_processed = 0;
+ 	ssif_bmc->remain_len = 0;
+ 	ssif_bmc->busy = false;
+-	memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ 	wake_up_all(&ssif_bmc->wait_queue);
+ }
+ 
+@@ -744,9 +743,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
+ 			ssif_bmc->aborting = true;
+ 		}
+ 	} else if (ssif_bmc->state == SSIF_RES_SENDING) {
+-		if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
++		if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
++			memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
+ 			/* Invalidate response buffer to denote it is sent */
+ 			complete_response(ssif_bmc);
++		}
+ 		ssif_bmc->state = SSIF_READY;
+ 	}
+ 
+diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
+index 65d422a588e1f1..9d178afc73bdd1 100644
+--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
++++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
+@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ 			vco = vco_pre * f;
+ 		}
+ 
+-		delta = abs(target_rate - vco);
++		delta = abs(target_vco_rate - vco);
+ 		if (delta < best_delta) {
+ 			best_delta = delta;
+ 			best_r = r;
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 9b45fa005030f5..cf7720b9172ff2 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -5385,8 +5385,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
+ 		count++;
+ 	}
+ 	/* We went off the end of 'clock-indices' without finding it */
+-	if (of_property_present(clkspec.np, "clock-indices") && !found)
++	if (of_property_present(clkspec.np, "clock-indices") && !found) {
++		of_node_put(clkspec.np);
+ 		return NULL;
++	}
+ 
+ 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
+ 					  index,
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 516dbd170c8a35..fb18f507f12135 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -399,8 +399,9 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
+ 
+ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
+ 						  "dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
+-						  "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
+-						  "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
++						  "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
++						  "sys_pll3_out", "dummy", "dummy", "osc_24m",
++						  "dummy", "osc_32k"};
+ 
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
+index 58a516dd385bf5..c5f358a75f307b 100644
+--- a/drivers/clk/imx/clk-imx93.c
++++ b/drivers/clk/imx/clk-imx93.c
+@@ -15,7 +15,7 @@
+ 
+ #include "clk.h"
+ 
+-#define IMX93_CLK_END 207
++#define IMX93_CLK_END 208
+ 
+ #define PLAT_IMX93 BIT(0)
+ #define PLAT_IMX91 BIT(1)
+@@ -38,6 +38,7 @@ static u32 share_count_sai2;
+ static u32 share_count_sai3;
+ static u32 share_count_mub;
+ static u32 share_count_pdm;
++static u32 share_count_spdif;
+ 
+ static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
+ static const char *parent_names[MAX_SEL][4] = {
+@@ -70,8 +71,8 @@ static const struct imx93_clk_root {
+ 	{ IMX93_CLK_WAKEUP_AXI,		"wakeup_axi_root",	0x0380,	FAST_SEL, CLK_IS_CRITICAL },
+ 	{ IMX93_CLK_SWO_TRACE,		"swo_trace_root",	0x0400,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_M33_SYSTICK,	"m33_systick_root",	0x0480,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+-	{ IMX93_CLK_FLEXIO1,		"flexio1_root",		0x0500,	LOW_SPEED_IO_SEL, },
+-	{ IMX93_CLK_FLEXIO2,		"flexio2_root",		0x0580,	LOW_SPEED_IO_SEL, },
++	{ IMX93_CLK_FLEXIO1,		"flexio1_root",		0x0500,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
++	{ IMX93_CLK_FLEXIO2,		"flexio2_root",		0x0580,	LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ 	{ IMX93_CLK_LPTMR1,		"lptmr1_root",		0x0700,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_LPTMR2,		"lptmr2_root",		0x0780,	LOW_SPEED_IO_SEL, },
+ 	{ IMX93_CLK_TPM2,		"tpm2_root",		0x0880,	TPM_SEL, },
+@@ -177,10 +178,10 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_WDOG5_GATE,		"wdog5",	"osc_24m",		0x8400, },
+ 	{ IMX93_CLK_SEMA1_GATE,		"sema1",	"bus_aon_root",		0x8440, },
+ 	{ IMX93_CLK_SEMA2_GATE,		"sema2",	"bus_wakeup_root",	0x8480, },
+-	{ IMX93_CLK_MU1_A_GATE,		"mu1_a",	"bus_aon_root",		0x84c0, CLK_IGNORE_UNUSED },
+-	{ IMX93_CLK_MU2_A_GATE,		"mu2_a",	"bus_wakeup_root",	0x84c0, CLK_IGNORE_UNUSED },
+-	{ IMX93_CLK_MU1_B_GATE,		"mu1_b",	"bus_aon_root",		0x8500, 0, &share_count_mub },
+-	{ IMX93_CLK_MU2_B_GATE,		"mu2_b",	"bus_wakeup_root",	0x8500, 0, &share_count_mub },
++	{ IMX93_CLK_MU1_A_GATE,		"mu1_a",	"bus_aon_root",		0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_MU2_A_GATE,		"mu2_a",	"bus_wakeup_root",	0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_MU1_B_GATE,		"mu1_b",	"bus_aon_root",		0x8500, 0, &share_count_mub, PLAT_IMX93 },
++	{ IMX93_CLK_MU2_B_GATE,		"mu2_b",	"bus_wakeup_root",	0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ 	{ IMX93_CLK_EDMA1_GATE,		"edma1",	"m33_root",		0x8540, },
+ 	{ IMX93_CLK_EDMA2_GATE,		"edma2",	"wakeup_axi_root",	0x8580, },
+ 	{ IMX93_CLK_FLEXSPI1_GATE,	"flexspi1",	"flexspi1_root",	0x8640, },
+@@ -188,8 +189,8 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_GPIO2_GATE,		"gpio2",	"bus_wakeup_root",	0x88c0, },
+ 	{ IMX93_CLK_GPIO3_GATE,		"gpio3",	"bus_wakeup_root",	0x8900, },
+ 	{ IMX93_CLK_GPIO4_GATE,		"gpio4",	"bus_wakeup_root",	0x8940, },
+-	{ IMX93_CLK_FLEXIO1_GATE,	"flexio1",	"flexio1_root",		0x8980, },
+-	{ IMX93_CLK_FLEXIO2_GATE,	"flexio2",	"flexio2_root",		0x89c0, },
++	{ IMX93_CLK_FLEXIO1_GATE,	"flexio1",	"flexio1_root",		0x8980, 0, NULL, PLAT_IMX93},
++	{ IMX93_CLK_FLEXIO2_GATE,	"flexio2",	"flexio2_root",		0x89c0, 0, NULL, PLAT_IMX93},
+ 	{ IMX93_CLK_LPIT1_GATE,		"lpit1",	"bus_aon_root",		0x8a00, },
+ 	{ IMX93_CLK_LPIT2_GATE,		"lpit2",	"bus_wakeup_root",	0x8a40, },
+ 	{ IMX93_CLK_LPTMR1_GATE,	"lptmr1",	"lptmr1_root",		0x8a80, },
+@@ -238,10 +239,10 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_SAI3_GATE,          "sai3",         "sai3_root",            0x94c0, 0, &share_count_sai3},
+ 	{ IMX93_CLK_SAI3_IPG,		"sai3_ipg_clk", "bus_wakeup_root",	0x94c0, 0, &share_count_sai3},
+ 	{ IMX93_CLK_MIPI_CSI_GATE,	"mipi_csi",	"media_apb_root",	0x9580, },
+-	{ IMX93_CLK_MIPI_DSI_GATE,	"mipi_dsi",	"media_apb_root",	0x95c0, },
+-	{ IMX93_CLK_LVDS_GATE,		"lvds",		"media_ldb_root",	0x9600, },
++	{ IMX93_CLK_MIPI_DSI_GATE,	"mipi_dsi",	"media_apb_root",	0x95c0, 0, NULL, PLAT_IMX93 },
++	{ IMX93_CLK_LVDS_GATE,		"lvds",		"media_ldb_root",	0x9600, 0, NULL, PLAT_IMX93 },
+ 	{ IMX93_CLK_LCDIF_GATE,		"lcdif",	"media_apb_root",	0x9640, },
+-	{ IMX93_CLK_PXP_GATE,		"pxp",		"media_apb_root",	0x9680, },
++	{ IMX93_CLK_PXP_GATE,		"pxp",		"media_apb_root",	0x9680, 0, NULL, PLAT_IMX93 },
+ 	{ IMX93_CLK_ISI_GATE,		"isi",		"media_apb_root",	0x96c0, },
+ 	{ IMX93_CLK_NIC_MEDIA_GATE,	"nic_media",	"media_axi_root",	0x9700, },
+ 	{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root",		0x9a00, },
+@@ -252,12 +253,13 @@ static const struct imx93_clk_ccgr {
+ 	{ IMX93_CLK_MQS1_GATE,		"mqs1",		"sai1_root",		0x9b00, },
+ 	{ IMX93_CLK_MQS2_GATE,		"mqs2",		"sai3_root",		0x9b40, },
+ 	{ IMX93_CLK_AUD_XCVR_GATE,	"aud_xcvr",	"audio_xcvr_root",	0x9b80, },
+-	{ IMX93_CLK_SPDIF_GATE,		"spdif",	"spdif_root",		0x9c00, },
++	{ IMX93_CLK_SPDIF_IPG,		"spdif_ipg_clk", "bus_wakeup_root",	0x9c00, 0, &share_count_spdif},
++	{ IMX93_CLK_SPDIF_GATE,		"spdif",	"spdif_root",		0x9c00, 0, &share_count_spdif},
+ 	{ IMX93_CLK_HSIO_32K_GATE,	"hsio_32k",	"osc_32k",		0x9dc0, },
+ 	{ IMX93_CLK_ENET1_GATE,		"enet1",	"wakeup_axi_root",	0x9e00, 0, NULL, PLAT_IMX93, },
+ 	{ IMX93_CLK_ENET_QOS_GATE,	"enet_qos",	"wakeup_axi_root",	0x9e40, 0, NULL, PLAT_IMX93, },
+-	{ IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular",        "wakeup_axi_root",      0x9e00, 0, NULL, PLAT_IMX91, },
+-	{ IMX91_CLK_ENET1_QOS_TSN_GATE,     "enet1_qos_tsn",        "wakeup_axi_root",      0x9e40, 0, NULL, PLAT_IMX91, },
++	{ IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root",	0x9e00, 0, NULL, PLAT_IMX91, },
++	{ IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root",	0x9e40, 0, NULL, PLAT_IMX91, },
+ 	/* Critical because clk accessed during CPU idle */
+ 	{ IMX93_CLK_SYS_CNT_GATE,	"sys_cnt",	"osc_24m",		0x9e80, CLK_IS_CRITICAL},
+ 	{ IMX93_CLK_TSTMR1_GATE,	"tstmr1",	"bus_aon_root",		0x9ec0, },
+diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c
+index b93d0846619856..3fd7b5e644f3b6 100644
+--- a/drivers/clk/mmp/clk-pxa1908-apbc.c
++++ b/drivers/clk/mmp/clk-pxa1908-apbc.c
+@@ -96,8 +96,8 @@ static int pxa1908_apbc_probe(struct platform_device *pdev)
+ 	struct pxa1908_clk_unit *pxa_unit;
+ 
+ 	pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+-	if (IS_ERR(pxa_unit))
+-		return PTR_ERR(pxa_unit);
++	if (!pxa_unit)
++		return -ENOMEM;
+ 
+ 	pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(pxa_unit->base))
+diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c
+index 08f3845cbb1bec..f638d7e89b472f 100644
+--- a/drivers/clk/mmp/clk-pxa1908-apbcp.c
++++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c
+@@ -48,8 +48,8 @@ static int pxa1908_apbcp_probe(struct platform_device *pdev)
+ 	struct pxa1908_clk_unit *pxa_unit;
+ 
+ 	pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+-	if (IS_ERR(pxa_unit))
+-		return PTR_ERR(pxa_unit);
++	if (!pxa_unit)
++		return -ENOMEM;
+ 
+ 	pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(pxa_unit->base))
+diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c
+index e3337bacaadd5a..90b4b248857406 100644
+--- a/drivers/clk/mmp/clk-pxa1908-mpmu.c
++++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c
+@@ -78,8 +78,8 @@ static int pxa1908_mpmu_probe(struct platform_device *pdev)
+ 	struct pxa1908_clk_unit *pxa_unit;
+ 
+ 	pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+-	if (IS_ERR(pxa_unit))
+-		return PTR_ERR(pxa_unit);
++	if (!pxa_unit)
++		return -ENOMEM;
+ 
+ 	pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(pxa_unit->base))
+diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
+index 85e76c7712ad84..b73524ae64b1b2 100644
+--- a/drivers/clk/qcom/camcc-x1e80100.c
++++ b/drivers/clk/qcom/camcc-x1e80100.c
+@@ -2212,6 +2212,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
+ 	},
+ };
+ 
++static struct gdsc cam_cc_titan_top_gdsc;
++
+ static struct gdsc cam_cc_bps_gdsc = {
+ 	.gdscr = 0x10004,
+ 	.en_rest_wait_val = 0x2,
+@@ -2221,6 +2223,7 @@ static struct gdsc cam_cc_bps_gdsc = {
+ 		.name = "cam_cc_bps_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2233,6 +2236,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
+ 		.name = "cam_cc_ife_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2245,6 +2249,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
+ 		.name = "cam_cc_ife_1_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2257,6 +2262,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
+ 		.name = "cam_cc_ipe_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -2269,6 +2275,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
+ 		.name = "cam_cc_sfe_0_gdsc",
+ 	},
+ 	.pwrsts = PWRSTS_OFF_ON,
++	.parent = &cam_cc_titan_top_gdsc.pd,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index dc3aa7014c3ed1..c6692808a8228c 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -454,7 +454,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s0_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+@@ -470,7 +470,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s1_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+@@ -486,7 +486,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s2_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+@@ -502,7 +502,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s3_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+@@ -518,7 +518,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s4_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+@@ -534,7 +534,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s5_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+@@ -550,7 +550,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s6_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+@@ -566,7 +566,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap0_s7_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+@@ -582,7 +582,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s0_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+@@ -598,7 +598,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s1_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+@@ -614,7 +614,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s2_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+@@ -630,7 +630,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s3_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+@@ -646,7 +646,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s4_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+@@ -662,7 +662,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s5_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+@@ -678,7 +678,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s6_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+@@ -694,7 +694,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ 	.name = "gcc_qupv3_wrap1_s7_clk_src",
+ 	.parent_data = gcc_parent_data_0,
+ 	.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-	.ops = &clk_rcg2_shared_ops,
++	.ops = &clk_rcg2_ops,
+ };
+ 
+ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
+index 8ea25aa25dff04..7288af845434d8 100644
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
+ 	.pd = {
+ 		.name = "gcc_usb20_prim_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
+index 97b8ca0f91816f..19d433034884ae 100644
+--- a/drivers/clk/ralink/clk-mtmips.c
++++ b/drivers/clk/ralink/clk-mtmips.c
+@@ -266,7 +266,6 @@ static int mtmips_register_pherip_clocks(struct device_node *np,
+ 	}
+ 
+ static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
+-	CLK_FIXED("xtal", NULL, 40000000),
+ 	CLK_FIXED("periph", "xtal", 40000000)
+ };
+ 
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 79e7a90c3b1bef..bf85501709f03b 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -979,7 +979,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
+ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
+ 					 const struct cpg_mssr_info *info)
+ {
+-	struct device_node *soc = of_find_node_by_path("/soc");
++	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
+ 	struct device_node *node;
+ 	uint32_t args[MAX_PHANDLE_ARGS];
+ 	unsigned int *ids = NULL;
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+index 3a7d61c816672d..ba1ad267f12333 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 				 CLK_SET_RATE_PARENT);
+ 
+ /*
+- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
+- * the mux from reparenting.
++ * Experiments showed that RGB output requires pll-video0-2x, while DSI
++ * requires pll-mipi. It will not work with incorrect clock, the screen will
++ * be blank.
++ * sun50i-a64.dtsi assigns pll-mipi as TCON0 parent by default
+  */
+-#define SUN50I_A64_TCON0_CLK_REG	0x118
+-
+ static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
+ static const u8 tcon0_table[] = { 0, 2, };
+ static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
+@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
+ 
+ 	writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
+ 
+-	/* Set PLL MIPI as parent for TCON0 */
+-	val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
+-	val &= ~GENMASK(26, 24);
+-	writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
+-
+ 	ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 1015fab9525157..4c9555fc61844d 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -657,7 +657,7 @@ static struct ccu_div apb_pclk = {
+ 		.hw.init	= CLK_HW_INIT_PARENTS_DATA("apb-pclk",
+ 						      apb_parents,
+ 						      &ccu_div_ops,
+-						      0),
++						      CLK_IGNORE_UNUSED),
+ 	},
+ };
+ 
+@@ -794,13 +794,13 @@ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_
+ 		0x134, BIT(7), 0);
+ static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
+ static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
+-		0x140, BIT(9), 0);
++		0x140, BIT(9), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
+ 		0x150, BIT(9), 0);
+ static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
+-		0x150, BIT(10), 0);
++		0x150, BIT(10), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
+-		0x150, BIT(11), 0);
++		0x150, BIT(11), CLK_IGNORE_UNUSED);
+ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
+ 		0x150, BIT(12), 0);
+ static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
+@@ -896,7 +896,6 @@ static struct ccu_common *th1520_div_clks[] = {
+ 	&vo_axi_clk.common,
+ 	&vp_apb_clk.common,
+ 	&vp_axi_clk.common,
+-	&cpu2vp_clk.common,
+ 	&venc_clk.common,
+ 	&dpu0_clk.common,
+ 	&dpu1_clk.common,
+@@ -916,6 +915,7 @@ static struct ccu_common *th1520_gate_clks[] = {
+ 	&bmu_clk.common,
+ 	&cpu2aon_x2h_clk.common,
+ 	&cpu2peri_x2h_clk.common,
++	&cpu2vp_clk.common,
+ 	&perisys_apb1_hclk.common,
+ 	&perisys_apb2_hclk.common,
+ 	&perisys_apb3_hclk.common,
+@@ -1048,7 +1048,8 @@ static int th1520_clk_probe(struct platform_device *pdev)
+ 		hw = devm_clk_hw_register_gate_parent_data(dev,
+ 							   cg->common.hw.init->name,
+ 							   cg->common.hw.init->parent_data,
+-							   0, base + cg->common.cfg0,
++							   cg->common.hw.init->flags,
++							   base + cg->common.cfg0,
+ 							   ffs(cg->enable) - 1, 0, NULL);
+ 		if (IS_ERR(hw))
+ 			return PTR_ERR(hw);
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index c9ebacf5c88e24..302df42d688757 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -623,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+ #endif
+ 
+ #ifdef CONFIG_ACPI_CPPC_LIB
+-static u64 get_max_boost_ratio(unsigned int cpu)
++/*
++ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
++ * between the highest_perf and the nominal_perf.
++ *
++ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
++ * frequency via @nominal_freq if it is non-NULL pointer.
++ */
++static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+ {
+ 	struct cppc_perf_caps perf_caps;
+ 	u64 highest_perf, nominal_perf;
+@@ -652,6 +659,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
++	if (nominal_freq)
++		*nominal_freq = perf_caps.nominal_freq;
++
+ 	if (!highest_perf || !nominal_perf) {
+ 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ 		return 0;
+@@ -664,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 
+ 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ }
++
+ #else
+-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
++static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
++{
++	return 0;
++}
+ #endif
+ 
+ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+@@ -675,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	struct acpi_cpufreq_data *data;
+ 	unsigned int cpu = policy->cpu;
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
++	u64 max_boost_ratio, nominal_freq = 0;
+ 	unsigned int valid_states = 0;
+ 	unsigned int result = 0;
+-	u64 max_boost_ratio;
+ 	unsigned int i;
+ #ifdef CONFIG_SMP
+ 	static int blacklisted;
+@@ -827,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	}
+ 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ 
+-	max_boost_ratio = get_max_boost_ratio(cpu);
++	max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
+ 	if (max_boost_ratio) {
+-		unsigned int freq = freq_table[0].frequency;
++		unsigned int freq = nominal_freq;
+ 
+ 		/*
+-		 * Because the loop above sorts the freq_table entries in the
+-		 * descending order, freq is the maximum frequency in the table.
+-		 * Assume that it corresponds to the CPPC nominal frequency and
+-		 * use it to set cpuinfo.max_freq.
++		 * The loop above sorts the freq_table entries in
++		 * descending order. If ACPI CPPC has not advertised
++		 * the nominal frequency (this is possible in CPPC
++		 * revisions prior to 3), then use the first entry in
++		 * the pstate table as a proxy for nominal frequency.
+ 		 */
++		if (!freq)
++			freq = freq_table[0].frequency;
++
+ 		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ 	} else {
+ 		/*
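For context on the arithmetic above: get_max_boost_ratio() returns highest_perf/nominal_perf scaled by 2^SCHED_CAPACITY_SHIFT, and cpuinfo.max_freq is the nominal frequency multiplied by that ratio and shifted back down. A small runnable sketch of the fixed-point math, assuming SCHED_CAPACITY_SHIFT == 10 as in the kernel and using made-up CPPC numbers:

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	uint64_t highest_perf = 240, nominal_perf = 160; /* invented CPPC caps */
	uint64_t nominal_freq = 2000000;		 /* 2 GHz in kHz */

	/* ratio scaled by 2^10, mirroring div_u64() in the driver */
	uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

	/* cpuinfo.max_freq = nominal_freq * ratio / 2^10 */
	uint64_t max_freq = (nominal_freq * ratio) >> SCHED_CAPACITY_SHIFT;

	printf("ratio=%llu max_freq=%llu kHz\n",
	       (unsigned long long)ratio, (unsigned long long)max_freq);
	return 0;
}

With a highest_perf of 240 against a nominal_perf of 160, the ratio comes out as 1536 (1.5 in Q10), so a 2 GHz nominal frequency yields a 3 GHz boost ceiling.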
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 66e5dfc711c0c5..f6d04eb40af94d 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -802,7 +802,7 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+ 	 * sched_set_itmt_support(true) has been called and it is valid to
+ 	 * update them at any time after it has been called.
+ 	 */
+-	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
++	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
+ 
+ 	schedule_work(&sched_prefcore_work);
+ }
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 98129565acb8e0..b2e7e89feaac41 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+ }
+ 
+ /* Get the frequency requested by the cpufreq core for the CPU */
+-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
++static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
+ {
+ 	struct qcom_cpufreq_data *data;
+ 	const struct qcom_cpufreq_soc_data *soc_data;
+-	struct cpufreq_policy *policy;
+ 	unsigned int index;
+ 
+-	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+ 	return policy->freq_table[index].frequency;
+ }
+ 
+-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
+ {
+ 	struct qcom_cpufreq_data *data;
+-	struct cpufreq_policy *policy;
+ 
+-	policy = cpufreq_cpu_get_raw(cpu);
+ 	if (!policy)
+ 		return 0;
+ 
+@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+ 	if (data->throttle_irq >= 0)
+ 		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ 
+-	return qcom_cpufreq_get_freq(cpu);
++	return qcom_cpufreq_get_freq(policy);
++}
++
++static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
++{
++	return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
+ }
+ 
+ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ 	 * If h/w throttled frequency is higher than what cpufreq has requested
+ 	 * for, then stop polling and switch back to interrupt mechanism.
+ 	 */
+-	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
++	if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
+ 		enable_irq(data->throttle_irq);
+ 	else
+ 		mod_delayed_work(system_highpri_wq, &data->throttle_work,
+@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+ 		return data->throttle_irq;
+ 
+ 	data->cancel_throttle = false;
+-	data->policy = policy;
+ 
+ 	mutex_init(&data->throttle_lock);
+ 	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	policy->driver_data = data;
+ 	policy->dvfs_possible_from_any_cpu = true;
++	data->policy = policy;
+ 
+ 	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
+ 	if (ret) {
+@@ -622,11 +623,24 @@ static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned lon
+ {
+ 	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
+ 
+-	return qcom_lmh_get_throttle_freq(data);
++	return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
++}
++
++/*
++ * Since we cannot determine the closest rate to the target rate, just
++ * return the actual rate at which the clock is running. This is needed to
++ * make clk_set_rate() API work properly.
++ */
++static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
++{
++	req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
++
++	return 0;
+ }
+ 
+ static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
+ 	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
++	.determine_rate = qcom_cpufreq_hw_determine_rate,
+ };
+ 
+ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
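The new determine_rate callback above exists because the common clock framework routes clk_set_rate() through determine_rate(); without it, rate requests on this clock cannot be rounded and fail. A compilable mock of that contract follows, using simplified stand-ins for the kernel's clk types rather than the real API.

#include <stdio.h>

struct clk_hw { unsigned long cur_rate; };
struct clk_rate_request { unsigned long rate; };

static unsigned long mock_recalc_rate(struct clk_hw *hw, unsigned long parent)
{
	(void)parent;
	return hw->cur_rate;	/* hardware-reported running rate */
}

static int mock_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/* cannot round: report the running rate as the only choice */
	req->rate = mock_recalc_rate(hw, 0);
	return 0;
}

int main(void)
{
	struct clk_hw hw = { .cur_rate = 1804800000UL };
	struct clk_rate_request req = { .rate = 2000000000UL };

	mock_determine_rate(&hw, &req);
	printf("requested rate rounded to %lu Hz\n", req.rate);
	return 0;
}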
+diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
+index 87781c1534ee5b..079a22cc9f02be 100644
+--- a/drivers/crypto/caam/blob_gen.c
++++ b/drivers/crypto/caam/blob_gen.c
+@@ -2,6 +2,7 @@
+ /*
+  * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+  * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
++ * Copyright 2024 NXP
+  */
+ 
+ #define pr_fmt(fmt) "caam blob_gen: " fmt
+@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
+ 	}
+ 
+ 	ctrlpriv = dev_get_drvdata(jrdev->parent);
+-	moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
++	moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+ 	if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
+ 		dev_warn(jrdev,
+ 			 "using insecure test key, enable HAB to use unique device key!\n");
+diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
+index 356188bee6fbcb..4b997023082287 100644
+--- a/drivers/crypto/hisilicon/sec2/sec.h
++++ b/drivers/crypto/hisilicon/sec2/sec.h
+@@ -37,6 +37,7 @@ struct sec_aead_req {
+ 	u8 *a_ivin;
+ 	dma_addr_t a_ivin_dma;
+ 	struct aead_request *aead_req;
++	bool fallback;
+ };
+ 
+ /* SEC request of Crypto */
+@@ -90,9 +91,7 @@ struct sec_auth_ctx {
+ 	dma_addr_t a_key_dma;
+ 	u8 *a_key;
+ 	u8 a_key_len;
+-	u8 mac_len;
+ 	u8 a_alg;
+-	bool fallback;
+ 	struct crypto_shash *hash_tfm;
+ 	struct crypto_aead *fallback_aead_tfm;
+ };
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index ae9ebbb4103d41..66bc07da9eb6f7 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
+ 	struct aead_request *aead_req = req->aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ 	size_t authsize = crypto_aead_authsize(tfm);
+-	u8 *mac_out = req->out_mac;
+ 	struct scatterlist *sgl = aead_req->src;
++	u8 *mac_out = req->out_mac;
+ 	size_t copy_size;
+ 	off_t skip_size;
+ 
+ 	/* Copy input mac */
+ 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
+-	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
+-				       authsize, skip_size);
++	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
+ 	if (unlikely(copy_size != authsize))
+ 		return -EINVAL;
+ 
+@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+ 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ 
+-	if (unlikely(a_ctx->fallback_aead_tfm))
+-		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+-
+-	return 0;
++	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+ }
+ 
+ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 			   const u32 keylen, const enum sec_hash_alg a_alg,
+ 			   const enum sec_calg c_alg,
+-			   const enum sec_mac_len mac_len,
+ 			   const enum sec_cmode c_mode)
+ {
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 
+ 	ctx->a_ctx.a_alg = a_alg;
+ 	ctx->c_ctx.c_alg = c_alg;
+-	ctx->a_ctx.mac_len = mac_len;
+ 	c_ctx->c_mode = c_mode;
+ 
+ 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
+@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		}
+ 		memcpy(c_ctx->c_key, key, keylen);
+ 
+-		if (unlikely(a_ctx->fallback_aead_tfm)) {
+-			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+-			if (ret)
+-				return ret;
+-		}
+-
+-		return 0;
++		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ 	}
+ 
+ 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ 		goto bad_key;
+ 	}
+ 
+-	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK)  ||
+-	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
++	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
+ 		ret = -EINVAL;
+-		dev_err(dev, "MAC or AUTH key length error!\n");
++		dev_err(dev, "AUTH key length error!\n");
++		goto bad_key;
++	}
++
++	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
++	if (ret) {
++		dev_err(dev, "set sec fallback key err!\n");
+ 		goto bad_key;
+ 	}
+ 
+@@ -1202,27 +1195,19 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ }
+ 
+ 
+-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
+-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
+-	u32 keylen)							\
+-{									\
+-	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
+-}
+-
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
+-			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
+-			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
+-			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
+-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
+-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
+-			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
+-			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
++#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
++static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
++{											\
++	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
++}
++
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
++GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
++GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
++GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
+ 
+ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
+ {
+@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+ {
+ 	struct aead_request *aead_req = req->aead_req.aead_req;
+-	struct sec_cipher_req *c_req = &req->c_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_aead_req *a_req = &req->aead_req;
+-	size_t authsize = ctx->a_ctx.mac_len;
++	struct sec_cipher_req *c_req = &req->c_req;
+ 	u32 data_size = aead_req->cryptlen;
+ 	u8 flage = 0;
+ 	u8 cm, cl;
+@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
+ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
+ {
+ 	struct aead_request *aead_req = req->aead_req.aead_req;
+-	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+-	size_t authsize = crypto_aead_authsize(tfm);
+-	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct sec_aead_req *a_req = &req->aead_req;
++	struct sec_cipher_req *c_req = &req->c_req;
+ 
+ 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+ 
+@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
+ 		/*
+ 		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
+ 		 * the counter must be set to 0x01
++		 * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
+ 		 */
+-		ctx->a_ctx.mac_len = authsize;
+-		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
+ 		set_aead_auth_iv(ctx, req);
+-	}
+-
+-	/* GCM 12Byte Cipher_IV == Auth_IV */
+-	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+-		ctx->a_ctx.mac_len = authsize;
++	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
++		/* GCM 12Byte Cipher_IV == Auth_IV */
+ 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
+ 	}
+ }
+@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
+ {
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+-	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
++	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
+ 
+ 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
+@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
+ {
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+-	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
++	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
+ 
+ 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+ 	sqe3->a_key_addr = sqe3->c_key_addr;
+@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+-	sec_sqe->type2.mac_key_alg =
+-			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
++	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
+ 
+ 	sec_sqe->type2.mac_key_alg |=
+ 			cpu_to_le32((u32)((ctx->a_key_len) /
+@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
+ 	struct sec_aead_req *a_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+ 	struct aead_request *aq = a_req->aead_req;
++	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 
+ 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
+ 
+ 	sqe3->auth_mac_key |=
+-			cpu_to_le32((u32)(ctx->mac_len /
++			cpu_to_le32((u32)(authsize /
+ 			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+ 
+ 	sqe3->auth_mac_key |=
+@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+ {
+ 	struct aead_request *a_req = req->aead_req.aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
++	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_aead_req *aead_req = &req->aead_req;
+ 	struct sec_cipher_req *c_req = &req->c_req;
+-	size_t authsize = crypto_aead_authsize(tfm);
+ 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ 	struct aead_request *backlog_aead_req;
+ 	struct sec_req *backlog_req;
+@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
+ 	if (!err && c_req->encrypt) {
+ 		struct scatterlist *sgl = a_req->dst;
+ 
+-		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
+-					  aead_req->out_mac,
+-					  authsize, a_req->cryptlen +
+-					  a_req->assoclen);
++		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
++					  authsize, a_req->cryptlen + a_req->assoclen);
+ 		if (unlikely(sz != authsize)) {
+ 			dev_err(c->dev, "copy out mac err!\n");
+ 			err = -EINVAL;
+@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
+ 
+ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+ {
++	struct aead_alg *alg = crypto_aead_alg(tfm);
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+-	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
++	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
++	const char *aead_name = alg->base.cra_name;
+ 	int ret;
+ 
+ 	ret = sec_aead_init(tfm);
+@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
+ 		return ret;
+ 	}
+ 
+-	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+-	if (IS_ERR(auth_ctx->hash_tfm)) {
++	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
++	if (IS_ERR(a_ctx->hash_tfm)) {
+ 		dev_err(ctx->dev, "aead alloc shash error!\n");
+ 		sec_aead_exit(tfm);
+-		return PTR_ERR(auth_ctx->hash_tfm);
++		return PTR_ERR(a_ctx->hash_tfm);
++	}
++
++	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
++						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
++	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
++		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
++		crypto_free_shash(ctx->a_ctx.hash_tfm);
++		sec_aead_exit(tfm);
++		return PTR_ERR(a_ctx->fallback_aead_tfm);
+ 	}
+ 
+ 	return 0;
+@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
+ {
+ 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ 
++	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
+ 	crypto_free_shash(ctx->a_ctx.hash_tfm);
+ 	sec_aead_exit(tfm);
+ }
+@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
+ 		sec_aead_exit(tfm);
+ 		return PTR_ERR(a_ctx->fallback_aead_tfm);
+ 	}
+-	a_ctx->fallback = false;
+ 
+ 	return 0;
+ }
+@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ {
+ 	struct aead_request *req = sreq->aead_req.aead_req;
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+-	size_t authsize = crypto_aead_authsize(tfm);
++	size_t sz = crypto_aead_authsize(tfm);
+ 	u8 c_mode = ctx->c_ctx.c_mode;
+ 	struct device *dev = ctx->dev;
+ 	int ret;
+ 
+-	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+-	    req->assoclen > SEC_MAX_AAD_LEN)) {
+-		dev_err(dev, "aead input spec error!\n");
++	/* Hardware does not handle cases where authsize is less than 4 bytes */
++	if (unlikely(sz < MIN_MAC_LEN)) {
++		sreq->aead_req.fallback = true;
+ 		return -EINVAL;
+ 	}
+ 
+-	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
+-	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
+-		authsize & MAC_LEN_MASK)))) {
+-		dev_err(dev, "aead input mac length error!\n");
++	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
++	    req->assoclen > SEC_MAX_AAD_LEN)) {
++		dev_err(dev, "aead input spec error!\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 	if (sreq->c_req.encrypt)
+ 		sreq->c_req.c_len = req->cryptlen;
+ 	else
+-		sreq->c_req.c_len = req->cryptlen - authsize;
++		sreq->c_req.c_len = req->cryptlen - sz;
+ 	if (c_mode == SEC_CMODE_CBC) {
+ 		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+ 			dev_err(dev, "aead crypto length error!\n");
+@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+ 
+ 	if (ctx->sec->qm.ver == QM_HW_V2) {
+ 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+-		    req->cryptlen <= authsize))) {
+-			ctx->a_ctx.fallback = true;
++			     req->cryptlen <= authsize))) {
++			sreq->aead_req.fallback = true;
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ 				bool encrypt)
+ {
+ 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+-	struct device *dev = ctx->dev;
+ 	struct aead_request *subreq;
+ 	int ret;
+ 
+-	/* Kunpeng920 aead mode not support input 0 size */
+-	if (!a_ctx->fallback_aead_tfm) {
+-		dev_err(dev, "aead fallback tfm is NULL!\n");
+-		return -EINVAL;
+-	}
+-
+ 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
+ 	if (!subreq)
+ 		return -ENOMEM;
+@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
+ 	req->aead_req.aead_req = a_req;
+ 	req->c_req.encrypt = encrypt;
+ 	req->ctx = ctx;
++	req->aead_req.fallback = false;
+ 
+ 	ret = sec_aead_param_check(ctx, req);
+ 	if (unlikely(ret)) {
+-		if (ctx->a_ctx.fallback)
++		if (req->aead_req.fallback)
+ 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
+index 27a0ee5ad9131c..04725b514382f8 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
+@@ -23,17 +23,6 @@ enum sec_hash_alg {
+ 	SEC_A_HMAC_SHA512 = 0x15,
+ };
+ 
+-enum sec_mac_len {
+-	SEC_HMAC_CCM_MAC   = 16,
+-	SEC_HMAC_GCM_MAC   = 16,
+-	SEC_SM3_MAC        = 32,
+-	SEC_HMAC_SM3_MAC   = 32,
+-	SEC_HMAC_MD5_MAC   = 16,
+-	SEC_HMAC_SHA1_MAC   = 20,
+-	SEC_HMAC_SHA256_MAC = 32,
+-	SEC_HMAC_SHA512_MAC = 64,
+-};
+-
+ enum sec_cmode {
+ 	SEC_CMODE_ECB    = 0x0,
+ 	SEC_CMODE_CBC    = 0x1,
+diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+index 9e557649e5d084..c3776b0de51d76 100644
+--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
++++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
+@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
+ 		async_mode = false;
+ 		use_irq = false;
+ 	} else if (sysfs_streq(name, "async")) {
+-		async_mode = true;
++		async_mode = false;
+ 		use_irq = false;
+ 	} else if (sysfs_streq(name, "async_irq")) {
+ 		async_mode = true;
+diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+index 449c6d3ab2db14..fcc0cf4df637d2 100644
+--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
++++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		npe_id = npe_spec.args[0];
++		of_node_put(npe_spec.np);
+ 
+ 		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+ 						       &queue_spec);
+@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		recv_qid = queue_spec.args[0];
++		of_node_put(queue_spec.np);
+ 
+ 		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+ 						       &queue_spec);
+@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
+ 			return -ENODEV;
+ 		}
+ 		send_qid = queue_spec.args[0];
++		of_node_put(queue_spec.np);
+ 	} else {
+ 		/*
+ 		 * Hardcoded engine when using platform data, this goes away
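The of_node_put() calls added above follow the usual OF refcount rule: each successful of_parse_phandle_with_fixed_args() returns with the node's refcount raised, and the caller must drop that reference once the args have been copied. A self-contained mock of the pattern; nothing here is the real OF API.

#include <stdio.h>

struct mock_node { int refcount; };
struct mock_args { struct mock_node *np; int args[1]; };

static struct mock_node npe_node = { .refcount = 1 };

static int mock_parse_phandle(struct mock_args *out)
{
	npe_node.refcount++;		/* reference taken for the caller */
	out->np = &npe_node;
	out->args[0] = 0;
	return 0;
}

static void mock_node_put(struct mock_node *np)
{
	np->refcount--;			/* caller's reference dropped */
}

int main(void)
{
	struct mock_args spec;

	if (mock_parse_phandle(&spec))
		return 1;
	int id = spec.args[0];		/* copy what we need ... */
	mock_node_put(spec.np);		/* ... then balance the refcount */

	printf("id=%d refcount=%d\n", id, npe_node.refcount);
	return 0;
}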
+diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
+index 9d130592cc0acd..d734c9a567868f 100644
+--- a/drivers/crypto/tegra/tegra-se-aes.c
++++ b/drivers/crypto/tegra/tegra-se-aes.c
+@@ -1750,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
++	int ret;
+ 
+-	tegra_cmac_init(req);
+-	rctx->task |= SHA_UPDATE | SHA_FINAL;
++	ret = tegra_cmac_init(req);
++	if (ret)
++		return ret;
+ 
++	rctx->task |= SHA_UPDATE | SHA_FINAL;
+ 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+ }
+ 
+diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
+index 4d4bd727f49869..0b5cdd5676b17e 100644
+--- a/drivers/crypto/tegra/tegra-se-hash.c
++++ b/drivers/crypto/tegra/tegra-se-hash.c
+@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
+ 	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
++	int ret;
+ 
+ 	if (ctx->fallback)
+ 		return tegra_sha_fallback_digest(req);
+ 
+-	tegra_sha_init(req);
+-	rctx->task |= SHA_UPDATE | SHA_FINAL;
++	ret = tegra_sha_init(req);
++	if (ret)
++		return ret;
+ 
++	rctx->task |= SHA_UPDATE | SHA_FINAL;
+ 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+ }
+ 
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 343e986e66e7ce..171ab168402677 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -208,7 +208,6 @@ struct edma_desc {
+ struct edma_cc;
+ 
+ struct edma_tc {
+-	struct device_node		*node;
+ 	u16				id;
+ };
+ 
+@@ -2466,13 +2465,13 @@ static int edma_probe(struct platform_device *pdev)
+ 			if (ret || i == ecc->num_tc)
+ 				break;
+ 
+-			ecc->tc_list[i].node = tc_args.np;
+ 			ecc->tc_list[i].id = i;
+ 			queue_priority_mapping[i][1] = tc_args.args[0];
+ 			if (queue_priority_mapping[i][1] > lowest_priority) {
+ 				lowest_priority = queue_priority_mapping[i][1];
+ 				info->default_queue = i;
+ 			}
++			of_node_put(tc_args.np);
+ 		}
+ 
+ 		/* See if we have optional dma-channel-mask array */
+diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
+index 2f123c6b0a1659..97478a96d1c965 100644
+--- a/drivers/firewire/device-attribute-test.c
++++ b/drivers/firewire/device-attribute-test.c
+@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
+ 	struct device *unit0_dev = (struct device *)&unit0.device;
+ 	static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
+ 	char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ 	int ids[4] = {0, 0, 0, 0};
+ 
+ 	// Ensure associations for node and unit devices.
+@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
+ 	struct device *unit0_dev = (struct device *)&unit0.device;
+ 	static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
+ 	char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
++	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ 	int ids[4] = {0, 0, 0, 0};
+ 
+ 	// Ensure associations for node and unit devices.
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index cc807ed35aedf7..1e509595ac0343 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+ 		_ret_;						\
+ 	})
+ 
++#ifdef CONFIG_EFI
+ static int __init efifb_set_system(const struct dmi_system_id *id)
+ {
+ 	struct efifb_dmi_info *info = id->driver_data;
+@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
+ 	.add_links = efifb_add_links,
+ };
+ 
+-#ifdef CONFIG_EFI
+ static struct fwnode_handle efifb_fwnode;
+ 
+ __init void sysfb_apply_efi_quirks(void)
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 72bf87ddcd9698..26312a5131d2ab 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -2029,13 +2029,17 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 
+ 	irq = platform_get_irq_optional(pdev, 0);
+ 	if (irq < 0) {
+-		if (irq != -ENXIO)
+-			return irq;
++		if (irq != -ENXIO) {
++			ret = irq;
++			goto err;
++		}
+ 	} else {
+ 		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ 						IRQF_ONESHOT, "qcom-scm", __scm);
+-		if (ret < 0)
+-			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
++		if (ret < 0) {
++			dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
++			goto err;
++		}
+ 	}
+ 
+ 	__get_convention();
+@@ -2054,14 +2058,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 		qcom_scm_disable_sdi();
+ 
+ 	ret = of_reserved_mem_device_init(__scm->dev);
+-	if (ret && ret != -ENODEV)
+-		return dev_err_probe(__scm->dev, ret,
+-				     "Failed to setup the reserved memory region for TZ mem\n");
++	if (ret && ret != -ENODEV) {
++		dev_err_probe(__scm->dev, ret,
++			      "Failed to setup the reserved memory region for TZ mem\n");
++		goto err;
++	}
+ 
+ 	ret = qcom_tzmem_enable(__scm->dev);
+-	if (ret)
+-		return dev_err_probe(__scm->dev, ret,
+-				     "Failed to enable the TrustZone memory allocator\n");
++	if (ret) {
++		dev_err_probe(__scm->dev, ret,
++			      "Failed to enable the TrustZone memory allocator\n");
++		goto err;
++	}
+ 
+ 	memset(&pool_config, 0, sizeof(pool_config));
+ 	pool_config.initial_size = 0;
+@@ -2069,9 +2077,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	pool_config.max_size = SZ_256K;
+ 
+ 	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
+-	if (IS_ERR(__scm->mempool))
+-		return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+-				     "Failed to create the SCM memory pool\n");
++	if (IS_ERR(__scm->mempool)) {
++		dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
++			      "Failed to create the SCM memory pool\n");
++		goto err;
++	}
+ 
+ 	/*
+ 	 * Initialize the QSEECOM interface.
+@@ -2087,6 +2097,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ 
+ 	return 0;
++
++err:
++	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
++	smp_store_release(&__scm, NULL);
++
++	return ret;
+ }
+ 
+ static void qcom_scm_shutdown(struct platform_device *pdev)
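The error path above retracts the global __scm pointer with smp_store_release() so that qcom_scm_is_available() callers, who read it with smp_load_acquire(), can never observe a pointer to a partially torn-down device. A userspace illustration of that release/acquire pairing using C11 atomics; all names below are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

struct scm { int ready; };

static struct scm the_scm = { .ready = 1 };
static _Atomic(struct scm *) scm_ptr;

static void probe_failed(void)
{
	/* pairs with the acquire load in is_available() */
	atomic_store_explicit(&scm_ptr, NULL, memory_order_release);
}

static int is_available(void)
{
	struct scm *s = atomic_load_explicit(&scm_ptr, memory_order_acquire);

	return s && s->ready;
}

int main(void)
{
	/* publish a fully initialized object */
	atomic_store_explicit(&scm_ptr, &the_scm, memory_order_release);
	printf("available=%d\n", is_available());

	probe_failed();
	printf("available=%d\n", is_available());
	return 0;
}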
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index 4cb455b2bdee71..619b6fb9d833a4 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -490,8 +490,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
+ 	port->gc.request = mxc_gpio_request;
+ 	port->gc.free = mxc_gpio_free;
+ 	port->gc.to_irq = mxc_gpio_to_irq;
+-	port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+-					     pdev->id * 32;
++	port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ 
+ 	err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
+ 	if (err)
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 272febc3230e90..be4c9981ebc404 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -1088,7 +1088,8 @@ static int pca953x_probe(struct i2c_client *client)
+ 		 */
+ 		reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ 		if (IS_ERR(reset_gpio))
+-			return PTR_ERR(reset_gpio);
++			return dev_err_probe(dev, PTR_ERR(reset_gpio),
++					     "Failed to get reset gpio\n");
+ 	}
+ 
+ 	chip->client = client;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index cc66ebb7bae15f..441568163e20e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -1131,6 +1131,9 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ 	uint32_t low, high;
+ 	uint64_t queue_addr = 0;
+ 
++	if (!amdgpu_gpu_recovery)
++		return 0;
++
+ 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+ 
+@@ -1179,6 +1182,9 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ 	uint32_t low, high, pipe_reset_data = 0;
+ 	uint64_t queue_addr = 0;
+ 
++	if (!amdgpu_gpu_recovery)
++		return 0;
++
+ 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 1d155463d044b2..9a4dad3e415290 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -2058,7 +2058,7 @@ static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+ 	if (!adev)
+ 		return -ENODEV;
+ 
+-	mask = (1 << adev->gfx.num_gfx_rings) - 1;
++	mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
+ 	if ((val & mask) == 0)
+ 		return -EINVAL;
+ 
+@@ -2086,7 +2086,7 @@ static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+ 	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ 		ring = &adev->gfx.gfx_ring[i];
+ 		if (ring->sched.ready)
+-			mask |= 1 << i;
++			mask |= 1ULL << i;
+ 	}
+ 
+ 	*val = mask;
+@@ -2128,7 +2128,7 @@ static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+ 	if (!adev)
+ 		return -ENODEV;
+ 
+-	mask = (1 << adev->gfx.num_compute_rings) - 1;
++	mask = (1ULL << adev->gfx.num_compute_rings) - 1;
+ 	if ((val & mask) == 0)
+ 		return -EINVAL;
+ 
+@@ -2157,7 +2157,7 @@ static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+ 	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ 		ring = &adev->gfx.compute_ring[i];
+ 		if (ring->sched.ready)
+-			mask |= 1 << i;
++			mask |= 1ULL << i;
+ 	}
+ 
+ 	*val = mask;
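The 1ULL widenings above fix a classic shift overflow: with a plain int constant the shift is performed in 32 bits, so building a mask for 32 or more rings is undefined behavior and typically truncates. A short runnable demonstration with a hypothetical ring count:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int num_rings = 40;	/* hypothetical count > 31 */

	/* 64-bit arithmetic keeps every ring's bit */
	uint64_t mask = (1ULL << num_rings) - 1;

	printf("mask=0x%llx\n", (unsigned long long)mask);
	/* (1 << num_rings) would shift a 32-bit int by 40: undefined */
	return 0;
}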
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+index 113f0d2426187e..f40531fea11add 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+@@ -358,13 +358,13 @@ static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
+ 	if (!adev)
+ 		return -ENODEV;
+ 
+-	mask = (1 << adev->sdma.num_instances) - 1;
++	mask = BIT_ULL(adev->sdma.num_instances) - 1;
+ 	if ((val & mask) == 0)
+ 		return -EINVAL;
+ 
+ 	for (i = 0; i < adev->sdma.num_instances; ++i) {
+ 		ring = &adev->sdma.instance[i].ring;
+-		if (val & (1 << i))
++		if (val & BIT_ULL(i))
+ 			ring->sched.ready = true;
+ 		else
+ 			ring->sched.ready = false;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index c8180cad0abdd8..c4da62d111052e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2065,6 +2065,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+ 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
++	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ 	ttm_device_fini(&adev->mman.bdev);
+ 	adev->mman.initialized = false;
+ 	DRM_INFO("amdgpu: ttm finalized\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+index 3f69b9b2bcd079..18cc1aefb2a2be 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+@@ -957,6 +957,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ 		vcn_inst = GET_INST(VCN, i);
+ 
++		vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
++
+ 		memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
+ 		header.version = MMSCH_VERSION;
+ 		header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 5f216d626cbb57..53694baca96637 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -12227,10 +12227,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ 
+ 	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ 		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
+-		amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+-		amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+-		if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+-			freesync_capable = true;
++		if (amdgpu_dm_connector->dc_link &&
++		    amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
++			amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
++			amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
++			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
++				freesync_capable = true;
++		}
++
+ 		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ 
+ 		if (vsdb_info.replay_mode) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index e1da48b05d0094..961d8936150ab7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -194,6 +194,9 @@ void dpp_reset(struct dpp *dpp_base)
+ 	dpp->filter_h = NULL;
+ 	dpp->filter_v = NULL;
+ 
++	memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
++	memset(&dpp_base->att, 0, sizeof(dpp_base->att));
++
+ 	memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
+ 	memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+index 22ac2b7e49aeae..da963f73829f6c 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+@@ -532,6 +532,12 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ 			SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+ }
+ 
++void hubp_reset(struct hubp *hubp)
++{
++	memset(&hubp->pos, 0, sizeof(hubp->pos));
++	memset(&hubp->att, 0, sizeof(hubp->att));
++}
++
+ void hubp1_program_surface_config(
+ 	struct hubp *hubp,
+ 	enum surface_pixel_format format,
+@@ -1337,8 +1343,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
+ 
+ void hubp1_init(struct hubp *hubp)
+ {
+-	//do nothing
++	hubp_reset(hubp);
+ }
++
+ static const struct hubp_funcs dcn10_hubp_funcs = {
+ 	.hubp_program_surface_flip_and_addr =
+ 			hubp1_program_surface_flip_and_addr,
+@@ -1351,6 +1358,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
+ 	.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_hubp_blank_en = hubp1_set_hubp_blank_en,
+ 	.set_cursor_attributes	= hubp1_cursor_set_attributes,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+index 69119b2fdce23b..193e48b440ef18 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+@@ -746,6 +746,8 @@ void hubp1_dcc_control(struct hubp *hubp,
+ 		bool enable,
+ 		enum hubp_ind_block_size independent_64b_blks);
+ 
++void hubp_reset(struct hubp *hubp);
++
+ bool hubp1_program_surface_flip_and_addr(
+ 	struct hubp *hubp,
+ 	const struct dc_plane_address *address,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+index 0637e4c552d8a2..b405fa22f87a9e 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+@@ -1660,6 +1660,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp2_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+index cd2bfcc5127650..6efcb10abf3dee 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
+ 	.set_cursor_position	= hubp1_cursor_set_position,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.hubp_clk_cntl = hubp1_clk_cntl,
+ 	.hubp_vtg_sel = hubp1_vtg_sel,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+index e13d69a22c1c7f..4e2d9d381db393 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+@@ -811,6 +811,8 @@ static void hubp21_init(struct hubp *hubp)
+ 	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
++
++	hubp_reset(hubp);
+ }
+ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+@@ -823,6 +825,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
+ 	.set_blank = hubp1_set_blank,
+ 	.dcc_control = hubp1_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = hubp21_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp1_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+index 60a64d29035274..c55b1b8be8ffd6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+@@ -483,6 +483,8 @@ void hubp3_init(struct hubp *hubp)
+ 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
++
++	hubp_reset(hubp);
+ }
+ 
+ static struct hubp_funcs dcn30_hubp_funcs = {
+@@ -497,6 +499,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+index 8394e8c069199f..a65a0ddee64672 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+index ca5b4b28a66441..45023fa9b708dc 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+@@ -181,6 +181,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp32_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+index d1f05b82b3dd5c..e7625290c0e467 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.dcc_control = hubp3_dcc_control,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = min_set_viewport,
+ 	.set_cursor_attributes	= hubp2_cursor_set_attributes,
+ 	.set_cursor_position	= hubp2_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+index b1ebf5053b4fc3..2d52100510f05f 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+@@ -141,7 +141,7 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
+ 
+ void hubp401_init(struct hubp *hubp)
+ {
+-	//For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
++	hubp_reset(hubp);
+ }
+ 
+ void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+@@ -974,6 +974,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
+ 	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ 	.set_blank = hubp2_set_blank,
+ 	.set_blank_regs = hubp2_set_blank_regs,
++	.hubp_reset = hubp_reset,
+ 	.mem_program_viewport = hubp401_set_viewport,
+ 	.set_cursor_attributes	= hubp32_cursor_set_attributes,
+ 	.set_cursor_position	= hubp401_cursor_set_position,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+index 681bb92c60690d..44e405e9bc9715 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+@@ -1286,6 +1286,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
+ 		if (hws->funcs.hubp_pg_control)
+ 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
+ 		/* Disable on the current state so the new one isn't cleared. */
+ 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		pipe_ctx->stream_res.tg = tg;
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index e599cdc465bfd2..463f7abe35a7dd 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -788,6 +788,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
+ 		/* Disable on the current state so the new one isn't cleared. */
+ 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
++		hubp->funcs->hubp_reset(hubp);
+ 		dpp->funcs->dpp_reset(dpp);
+ 
+ 		pipe_ctx->stream_res.tg = tg;
+@@ -944,6 +945,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
+ /* to do, need to support both cases */
+ 	hubp->power_gated = true;
+ 
++	hubp->funcs->hubp_reset(hubp);
+ 	dpp->funcs->dpp_reset(dpp);
+ 
+ 	pipe_ctx->stream = NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 16580d62427891..eec16b0a199dd4 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -152,6 +152,8 @@ struct hubp_funcs {
+ 	void (*dcc_control)(struct hubp *hubp, bool enable,
+ 			enum hubp_ind_block_size blk_size);
+ 
++	void (*hubp_reset)(struct hubp *hubp);
++
+ 	void (*mem_program_viewport)(
+ 			struct hubp *hubp,
+ 			const struct rect *viewport,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index fe24219c3bf48e..4bd92fd782be6a 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -992,6 +992,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
+ 			GetIndexIntoMasterTable(DATA, SMU_Info),
+ 			&size, &frev, &crev);
+ 
++	if (!psmu_info)
++		return -EINVAL;
+ 
+ 	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
+ 		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+index 3007b054c873c9..776d58ea63ae90 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+@@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+ 	result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ 	result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ 	if (0 != result)
+-		return result;
++		goto exit_safe_mode;
+ 
+ 	vega10_didt_set_mask(hwmgr, false);
+ 
++exit_safe_mode:
+ 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ 
+-	return 0;
++	return result;
+ }
+ 
+ static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 008d86cc562af7..cf891e7677c0e2 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -300,7 +300,7 @@
+ #define MAX_CR_LEVEL 0x03
+ #define MAX_EQ_LEVEL 0x03
+ #define AUX_WAIT_TIMEOUT_MS 15
+-#define AUX_FIFO_MAX_SIZE 32
++#define AUX_FIFO_MAX_SIZE 16
+ #define PIXEL_CLK_DELAY 1
+ #define PIXEL_CLK_INVERSE 0
+ #define ADJUST_PHASE_THRESHOLD 80000
+diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+index feb7a3a759811a..936a8f95d80f7e 100644
+--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
++++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+@@ -347,6 +347,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
+ 		is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
+ 	if (ret)
+ 		return ret;
+@@ -376,6 +378,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
+ 		&infoframe->data.spd;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	ret = hdmi_spd_infoframe_init(frame,
+ 				      connector->hdmi.vendor,
+ 				      connector->hdmi.product);
+@@ -398,6 +402,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
+ 		&infoframe->data.drm;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	if (connector->max_bpc < 10)
+ 		return 0;
+ 
+@@ -425,6 +431,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
+ 		&infoframe->data.vendor.hdmi;
+ 	int ret;
+ 
++	infoframe->set = false;
++
+ 	if (!info->has_hdmi_infoframe)
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 16473c371444c2..da30179de02b53 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
+ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+ {
+ 	struct page **pages;
++	pgprot_t prot;
+ 
+ 	lockdep_assert_held(&obj->lock);
+ 
+@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+ 	if (IS_ERR(pages))
+ 		return NULL;
+ 
+-	return vmap(pages, obj->base.size >> PAGE_SHIFT,
+-			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
++	switch (obj->flags & ETNA_BO_CACHE_MASK) {
++	case ETNA_BO_CACHED:
++		prot = PAGE_KERNEL;
++		break;
++	case ETNA_BO_UNCACHED:
++		prot = pgprot_noncached(PAGE_KERNEL);
++		break;
++	case ETNA_BO_WC:
++	default:
++		prot = pgprot_writecombine(PAGE_KERNEL);
++	}
++
++	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
+ }
+ 
+ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
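The switch added above selects the kernel mapping attributes from the BO's cache flags instead of unconditionally using write-combine. A userspace sketch of that selection; the flag values below are placeholders, not the real etnaviv_drm.h definitions.

#include <stdio.h>

#define ETNA_BO_CACHE_MASK	0x3	/* placeholder values */
#define ETNA_BO_CACHED		0x1
#define ETNA_BO_UNCACHED	0x2
#define ETNA_BO_WC		0x3

static const char *vmap_prot(unsigned int flags)
{
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_CACHED:
		return "PAGE_KERNEL";			/* cacheable */
	case ETNA_BO_UNCACHED:
		return "pgprot_noncached(PAGE_KERNEL)";	/* uncached */
	case ETNA_BO_WC:
	default:
		return "pgprot_writecombine(PAGE_KERNEL)";
	}
}

int main(void)
{
	printf("%s\n", vmap_prot(ETNA_BO_CACHED));
	printf("%s\n", vmap_prot(ETNA_BO_WC));
	return 0;
}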
+diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
+index 74c1983fe07eaa..1be55bdb48b966 100644
+--- a/drivers/gpu/drm/i915/display/intel_crt.c
++++ b/drivers/gpu/drm/i915/display/intel_crt.c
+@@ -244,7 +244,7 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
+ 			    const struct intel_crtc_state *old_crtc_state,
+ 			    const struct drm_connector_state *old_conn_state)
+ {
+-	struct intel_display *display = to_intel_display(state);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
+ 	drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
+@@ -257,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
+ 				 const struct intel_crtc_state *old_crtc_state,
+ 				 const struct drm_connector_state *old_conn_state)
+ {
+-	struct intel_display *display = to_intel_display(state);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
+@@ -287,7 +287,7 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+ 				   const struct intel_crtc_state *crtc_state,
+ 				   const struct drm_connector_state *conn_state)
+ {
+-	struct intel_display *display = to_intel_display(state);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
+ 	drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
+@@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+ 			       const struct intel_crtc_state *crtc_state,
+ 			       const struct drm_connector_state *conn_state)
+ {
+-	struct intel_display *display = to_intel_display(state);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ 	enum pipe pipe = crtc->pipe;
+@@ -319,7 +319,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
+ 			   const struct intel_crtc_state *crtc_state,
+ 			   const struct drm_connector_state *conn_state)
+ {
+-	struct intel_display *display = to_intel_display(state);
++	struct intel_display *display = to_intel_display(encoder);
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ 	enum pipe pipe = crtc->pipe;
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 14db7376c712d1..e386b059187acf 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1603,7 +1603,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ 
+ 	gmu->dev = &pdev->dev;
+ 
+-	of_dma_configure(gmu->dev, node, true);
++	ret = of_dma_configure(gmu->dev, node, true);
++	if (ret)
++		return ret;
+ 
+ 	pm_runtime_enable(gmu->dev);
+ 
+@@ -1668,7 +1670,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ 
+ 	gmu->dev = &pdev->dev;
+ 
+-	of_dma_configure(gmu->dev, node, true);
++	ret = of_dma_configure(gmu->dev, node, true);
++	if (ret)
++		return ret;
+ 
+ 	/* For now, don't do anything fancy until we get our feet under us */
+ 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+index eb5dfff2ec4f48..e187e7b1cef167 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x400,
+@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x400,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+index cbbdaebe357ec4..daef07924886a5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+@@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
+ 	},
+ };
+ 
++static const struct dpu_lm_cfg sdm670_lm[] = {
++	{
++		.name = "lm_0", .id = LM_0,
++		.base = 0x44000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_1,
++		.pingpong = PINGPONG_0,
++		.dspp = DSPP_0,
++	}, {
++		.name = "lm_1", .id = LM_1,
++		.base = 0x45000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_0,
++		.pingpong = PINGPONG_1,
++		.dspp = DSPP_1,
++	}, {
++		.name = "lm_2", .id = LM_2,
++		.base = 0x46000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_5,
++		.pingpong = PINGPONG_2,
++	}, {
++		.name = "lm_5", .id = LM_5,
++		.base = 0x49000, .len = 0x320,
++		.features = MIXER_SDM845_MASK,
++		.sblk = &sdm845_lm_sblk,
++		.lm_pair = LM_2,
++		.pingpong = PINGPONG_3,
++	},
++};
++
++static const struct dpu_dspp_cfg sdm670_dspp[] = {
++	{
++		.name = "dspp_0", .id = DSPP_0,
++		.base = 0x54000, .len = 0x1800,
++		.features = DSPP_SC7180_MASK,
++		.sblk = &sdm845_dspp_sblk,
++	}, {
++		.name = "dspp_1", .id = DSPP_1,
++		.base = 0x56000, .len = 0x1800,
++		.features = DSPP_SC7180_MASK,
++		.sblk = &sdm845_dspp_sblk,
++	},
++};
++
+ static const struct dpu_dsc_cfg sdm670_dsc[] = {
+ 	{
+ 		.name = "dsc_0", .id = DSC_0,
+@@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
+ 	.ctl = sdm845_ctl,
+ 	.sspp_count = ARRAY_SIZE(sdm670_sspp),
+ 	.sspp = sdm670_sspp,
+-	.mixer_count = ARRAY_SIZE(sdm845_lm),
+-	.mixer = sdm845_lm,
++	.mixer_count = ARRAY_SIZE(sdm670_lm),
++	.mixer = sdm670_lm,
++	.dspp_count = ARRAY_SIZE(sdm670_dspp),
++	.dspp = sdm670_dspp,
+ 	.pingpong_count = ARRAY_SIZE(sdm845_pp),
+ 	.pingpong = sdm845_pp,
+ 	.dsc_count = ARRAY_SIZE(sdm670_dsc),
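SDM670 previously borrowed the SDM845 mixer table; the new sdm670_lm/sdm670_dspp tables describe the actual hardware, where only the first mixer pair has DSPPs attached. The catalog idiom used throughout these headers is the same: per-SoC hardware is expressed as const tables, and the top-level config references them by pointer plus ARRAY_SIZE so the count can never drift from the data. A runnable reduction (field names simplified from the dpu_lm_cfg layout):

#include <stdio.h>
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct lm_cfg { const char *name; int pingpong; int dspp; /* -1 = none */ };

static const struct lm_cfg sdm670_lm[] = {
        { .name = "lm_0", .pingpong = 0, .dspp = 0 },
        { .name = "lm_1", .pingpong = 1, .dspp = 1 },
        { .name = "lm_2", .pingpong = 2, .dspp = -1 },
        { .name = "lm_5", .pingpong = 3, .dspp = -1 },
};

struct mdss_cfg { const struct lm_cfg *mixer; unsigned int mixer_count; };

int main(void)
{
        struct mdss_cfg cfg = {
                .mixer = sdm670_lm,
                .mixer_count = ARRAY_SIZE(sdm670_lm),
        };
        printf("%u mixers\n", cfg.mixer_count);
        return 0;
}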
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 6ccfde82fecdb4..421afacb724803 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index bab19ddd1d4f97..641023b102bf59 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+index a57d50b1f02807..e8916ae826a6da 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+index aced16e350daa1..f7c08e89c88203 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+index ad48defa154f7d..a1dbbf5c652ff9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+index a3e60ac70689e7..e084406ebb0711 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+@@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_3,
+ 		.pingpong = PINGPONG_2,
++		.dspp = DSPP_2,
+ 	}, {
+ 		.name = "lm_3", .id = LM_3,
+ 		.base = 0x47000, .len = 0x320,
+@@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
+ 		.sblk = &sdm845_lm_sblk,
+ 		.lm_pair = LM_2,
+ 		.pingpong = PINGPONG_3,
++		.dspp = DSPP_3,
+ 	}, {
+ 		.name = "lm_4", .id = LM_4,
+ 		.base = 0x48000, .len = 0x320,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 3ffac24333a2a5..703e58901d53f2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -1335,12 +1335,15 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
+ 
+ 	drm_printf(p, "\tstage=%d\n", pstate->stage);
+ 
+-	drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
+-	drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
+-	drm_printf(p, "\tmultirect_index[0]=%s\n",
+-		   dpu_get_multirect_index(pipe->multirect_index));
+-	drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
+-	drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
++	if (pipe->sspp) {
++		drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
++		drm_printf(p, "\tmultirect_mode[0]=%s\n",
++			   dpu_get_multirect_mode(pipe->multirect_mode));
++		drm_printf(p, "\tmultirect_index[0]=%s\n",
++			   dpu_get_multirect_index(pipe->multirect_index));
++		drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
++		drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
++	}
+ 
+ 	if (r_pipe->sspp) {
+ 		drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
+diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+index 576995ddce37e9..8bbc7fb881d599 100644
+--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+@@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ 
+ 	/* TODO: different regulators in other cases? */
+ 	mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
+-	mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
++	mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
+ 	mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
+ 
+ 	ret = devm_regulator_bulk_get(dev->dev,
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 74e01a5dd4195d..5cbb11986460d1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -329,10 +329,10 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
+ 		safe_to_exit_level = 5;
+ 		break;
+ 	default:
++		safe_to_exit_level = 14;
+ 		drm_dbg_dp(audio->drm_dev,
+ 				"setting the default safe_to_exit_level = %u\n",
+ 				safe_to_exit_level);
+-		safe_to_exit_level = 14;
+ 		break;
+ 	}
+ 
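The dp_audio reordering fixes a log-before-assign slip: in the default branch, safe_to_exit_level was printed by the debug message before it was given its value, so the message reported stale contents. A userspace reduction of the bug class:

#include <stdio.h>

static unsigned int safe_to_exit_level(unsigned int lane_count)
{
        unsigned int level;

        switch (lane_count) {
        case 1: level = 14; break;
        case 2: level = 8;  break;
        case 4: level = 5;  break;
        default:
                level = 14;     /* assign first ... */
                printf("default safe_to_exit_level = %u\n", level); /* ... then log */
                break;
        }
        return level;
}

int main(void)
{
        printf("%u\n", safe_to_exit_level(3));
        return 0;
}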
+diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
+index b4c8856fb25d01..2a755a06ac4905 100644
+--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
++++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
+@@ -1036,7 +1036,6 @@ void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
+ 	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+ 
+ 
+-	msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+ 	msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ 	msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ 			hsync_period);
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index bc2ca8133b790f..9c463ae2f8fae9 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -178,7 +178,6 @@ static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl
+ 	u32 cc, tb;
+ 
+ 	msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+-	msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ 	msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ 
+ 	msm_dp_ctrl_config_ctrl(ctrl);
+@@ -2071,6 +2070,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
+ 
+ 	msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ 
++	dev_pm_opp_set_rate(ctrl->dev, 0);
+ 	msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
+ 
+ 	DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
+diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
+index 2a40f07fe2d5e2..4a5ebb0c33b85e 100644
+--- a/drivers/gpu/drm/msm/dp/dp_utils.c
++++ b/drivers/gpu/drm/msm/dp/dp_utils.c
+@@ -74,14 +74,8 @@ u8 msm_dp_utils_calculate_parity(u32 data)
+ 	return parity_byte;
+ }
+ 
+-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
++void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2])
+ {
+-	size_t length;
+-
+-	length = sizeof(header_buff);
+-	if (length < DP_SDP_HEADER_SIZE)
+-		return -ENOSPC;
+-
+ 	header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
+ 		FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
+ 		FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
+@@ -91,6 +85,4 @@ ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *head
+ 		FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
+ 		FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
+ 		FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
+-
+-	return length;
+ }
+diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
+index 88d53157f5b59e..2e4f98a863c4cb 100644
+--- a/drivers/gpu/drm/msm/dp/dp_utils.h
++++ b/drivers/gpu/drm/msm/dp/dp_utils.h
+@@ -31,6 +31,6 @@
+ u8 msm_dp_utils_get_g0_value(u8 data);
+ u8 msm_dp_utils_get_g1_value(u8 data);
+ u8 msm_dp_utils_calculate_parity(u32 data);
+-ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
++void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2]);
+ 
+ #endif /* _DP_UTILS_H_ */
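The removed length check in dp_utils.c could never fire: header_buff is a pointer parameter, so sizeof(header_buff) is the pointer size (8 on LP64), which always passes the comparison against the 4-byte DP_SDP_HEADER_SIZE. The fix encodes the requirement in the prototype (u32 header_buff[2]) and drops the meaningless ssize_t result. A runnable demonstration of the decay pitfall:

#include <stdio.h>
#include <stdint.h>

static void pack(uint32_t *header_buff)
{
        /* measures the pointer, not the caller's buffer: 8 on LP64 */
        printf("inside callee: %zu\n", sizeof(header_buff));
}

int main(void)
{
        uint32_t buf[16];

        printf("at call site:  %zu\n", sizeof(buf));    /* 64 */
        pack(buf);
        return 0;
}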
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+index a719fd33d9d8d5..33bb48ae58a2da 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+@@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ 
+ 	base <<= (digclk_divsel == 2 ? 1 : 0);
+ 
+-	return (base <= 2046 ? base : 2046);
++	return base;
+ }
+ 
+ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
+index f3326d09bdbce1..4cfad12f4dc1f9 100644
+--- a/drivers/gpu/drm/msm/msm_kms.c
++++ b/drivers/gpu/drm/msm/msm_kms.c
+@@ -244,7 +244,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
+ 	ret = priv->kms_init(ddev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev, "failed to load kms\n");
+-		priv->kms = NULL;
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
+index 6fbff516c1c1f0..01dff89bed4e1d 100644
+--- a/drivers/gpu/drm/panthor/panthor_device.c
++++ b/drivers/gpu/drm/panthor/panthor_device.c
+@@ -445,8 +445,8 @@ int panthor_device_resume(struct device *dev)
+ 	    drm_dev_enter(&ptdev->base, &cookie)) {
+ 		panthor_gpu_resume(ptdev);
+ 		panthor_mmu_resume(ptdev);
+-		ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
+-		if (!ret) {
++		ret = panthor_fw_resume(ptdev);
++		if (!drm_WARN_ON(&ptdev->base, ret)) {
+ 			panthor_sched_resume(ptdev);
+ 		} else {
+ 			panthor_mmu_suspend(ptdev);
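The panthor change fixes a classic WARN_ON misuse: drm_WARN_ON() evaluates its condition and returns whether it fired as 0/1, so assigning its result threw away the real error code from panthor_fw_resume(). Capture the return value first, then warn on it. A runnable userspace reduction with a simplified WARN_ON (GCC statement expression, mirroring the kernel macro's shape):

#include <stdio.h>

#define WARN_ON(cond) ({                                \
        int __fired = !!(cond);                         \
        if (__fired)                                    \
                fprintf(stderr, "warn: %s\n", #cond);   \
        __fired;                                        \
})

static int fw_resume(void) { return -110; /* -ETIMEDOUT */ }

int main(void)
{
        int bad = WARN_ON(fw_resume());         /* bad == 1, errno lost */
        int ret = fw_resume();

        WARN_ON(ret);                           /* ret still -110 */
        printf("bad=%d ret=%d\n", bad, ret);
        return 0;
}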
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 9873172e3fd331..5880d87fe6b3aa 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -33,7 +33,6 @@
+ #include <uapi/linux/videodev2.h>
+ #include <dt-bindings/soc/rockchip,vop2.h>
+ 
+-#include "rockchip_drm_drv.h"
+ #include "rockchip_drm_gem.h"
+ #include "rockchip_drm_vop2.h"
+ #include "rockchip_rgb.h"
+@@ -550,6 +549,25 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
+ 	if (modifier == DRM_FORMAT_MOD_INVALID)
+ 		return false;
+ 
++	if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
++		if (vop2_cluster_window(win)) {
++			if (modifier == DRM_FORMAT_MOD_LINEAR) {
++				drm_dbg_kms(vop2->drm,
++					    "Cluster window only supports format with afbc\n");
++				return false;
++			}
++		}
++	}
++
++	if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
++		if (vop2->data->soc_id == 3588) {
++			if (!rockchip_afbc(plane, modifier)) {
++				drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
++				return false;
++			}
++		}
++	}
++
+ 	if (modifier == DRM_FORMAT_MOD_LINEAR)
+ 		return true;
+ 
+@@ -1320,6 +1338,12 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
+ 		&fb->format->format,
+ 		afbc_en ? "AFBC" : "", &yrgb_mst);
+ 
++	if (vop2->data->soc_id > 3568) {
++		vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
++		vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
++		vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
++	}
++
+ 	if (vop2_cluster_window(win))
+ 		vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
+ 
+@@ -1721,9 +1745,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ 		else
+ 			dclk_out_rate = v_pixclk >> 2;
+ 
+-		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
++		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ 		if (!dclk_rate) {
+-			drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
++			drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
+ 				dclk_out_rate);
+ 			return 0;
+ 		}
+@@ -1738,9 +1762,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ 		 * dclk_rate = N * dclk_core_rate N = (1,2,4 ),
+ 		 * we get a little factor here
+ 		 */
+-		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
++		dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
+ 		if (!dclk_rate) {
+-			drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
++			drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
+ 				dclk_out_rate);
+ 			return 0;
+ 		}
+@@ -2159,7 +2183,6 @@ static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
+ 
+ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
+ {
+-	u32 offset = (main_win->data->phys_id * 0x10);
+ 	struct vop2_alpha_config alpha_config;
+ 	struct vop2_alpha alpha;
+ 	struct drm_plane_state *bottom_win_pstate;
+@@ -2167,6 +2190,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
+ 	u16 src_glb_alpha_val, dst_glb_alpha_val;
+ 	bool premulti_en = false;
+ 	bool swap = false;
++	u32 offset = 0;
+ 
+ 	/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
+ 	bottom_win_pstate = main_win->base.state;
+@@ -2185,6 +2209,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
+ 	vop2_parse_alpha(&alpha_config, &alpha);
+ 
+ 	alpha.src_color_ctrl.bits.src_dst_swap = swap;
++
++	switch (main_win->data->phys_id) {
++	case ROCKCHIP_VOP2_CLUSTER0:
++		offset = 0x0;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER1:
++		offset = 0x10;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER2:
++		offset = 0x20;
++		break;
++	case ROCKCHIP_VOP2_CLUSTER3:
++		offset = 0x30;
++		break;
++	}
++
+ 	vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
+ 		    alpha.src_color_ctrl.val);
+ 	vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
+@@ -2232,6 +2272,12 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
+ 		struct vop2_win *win = to_vop2_win(plane);
+ 		int zpos = plane->state->normalized_zpos;
+ 
++		/*
++		 * Alpha only needs to be configured from the second layer up.
++		 */
++		if (zpos == 0)
++			continue;
++
+ 		if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ 			premulti_en = 1;
+ 		else
+@@ -2308,7 +2354,10 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 	struct drm_plane *plane;
+ 	u32 layer_sel = 0;
+ 	u32 port_sel;
+-	unsigned int nlayer, ofs;
++	u8 layer_id;
++	u8 old_layer_id;
++	u8 layer_sel_id;
++	unsigned int ofs;
+ 	u32 ovl_ctrl;
+ 	int i;
+ 	struct vop2_video_port *vp0 = &vop2->vps[0];
+@@ -2352,9 +2401,30 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 	for (i = 0; i < vp->id; i++)
+ 		ofs += vop2->vps[i].nlayers;
+ 
+-	nlayer = 0;
+ 	drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
+ 		struct vop2_win *win = to_vop2_win(plane);
++		struct vop2_win *old_win;
++
++		layer_id = (u8)(plane->state->normalized_zpos + ofs);
++
++		/*
++		 * Find the layer this win bind in old state.
++		 */
++		for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
++			layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
++			if (layer_sel_id == win->data->layer_sel_id)
++				break;
++		}
++
++		/*
++		 * Find the win bound to this layer in the old state.
++		 */
++		for (i = 0; i < vop2->data->win_size; i++) {
++			old_win = &vop2->win[i];
++			layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
++			if (layer_sel_id == old_win->data->layer_sel_id)
++				break;
++		}
+ 
+ 		switch (win->data->phys_id) {
+ 		case ROCKCHIP_VOP2_CLUSTER0:
+@@ -2399,17 +2469,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
+ 			break;
+ 		}
+ 
+-		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
+-							  0x7);
+-		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
+-							 win->data->layer_sel_id);
+-		nlayer++;
+-	}
+-
+-	/* configure unused layers to 0x5 (reserved) */
+-	for (; nlayer < vp->nlayers; nlayer++) {
+-		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
+-		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
++		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
++		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
++		/*
++		 * When we bind a window from layerM to layerN, we also need to move the old
++		 * window on layerN to layerM to avoid one window being selected by two or more layers.
++		 */
++		layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
++		layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
+ 	}
+ 
+ 	vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
+@@ -2444,9 +2511,11 @@ static void vop2_setup_dly_for_windows(struct vop2 *vop2)
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
+ 			break;
+ 		case ROCKCHIP_VOP2_SMART0:
++		case ROCKCHIP_VOP2_ESMART2:
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
+ 			break;
+ 		case ROCKCHIP_VOP2_SMART1:
++		case ROCKCHIP_VOP2_ESMART3:
+ 			sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
+ 			break;
+ 		}
+@@ -2865,6 +2934,10 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
+ 	[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
+ 	[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
+ 	[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
++	[VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
++	[VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
++	/* RK3588 only, reserved bit on rk3568 */
++	[VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
+ 
+ 	/* Scale */
+ 	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
+@@ -2957,6 +3030,10 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
+ 	[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
+ 	[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
+ 	[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
++	[VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
++	[VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
++	/* RK3588 only, reserved register on rk3568 */
++	[VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
+ 
+ 	/* Scale */
+ 	[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+index 615a16196aff6b..130aaa40316d13 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/regmap.h>
+ #include <drm/drm_modes.h>
++#include "rockchip_drm_drv.h"
+ #include "rockchip_drm_vop.h"
+ 
+ #define VOP2_VP_FEATURE_OUTPUT_10BIT        BIT(0)
+@@ -78,6 +79,9 @@ enum vop2_win_regs {
+ 	VOP2_WIN_COLOR_KEY,
+ 	VOP2_WIN_COLOR_KEY_EN,
+ 	VOP2_WIN_DITHER_UP,
++	VOP2_WIN_AXI_BUS_ID,
++	VOP2_WIN_AXI_YRGB_R_ID,
++	VOP2_WIN_AXI_UV_R_ID,
+ 
+ 	/* scale regs */
+ 	VOP2_WIN_SCALE_YRGB_X,
+@@ -140,6 +144,10 @@ struct vop2_win_data {
+ 	unsigned int layer_sel_id;
+ 	uint64_t feature;
+ 
++	uint8_t axi_bus_id;
++	uint8_t axi_yrgb_r_id;
++	uint8_t axi_uv_r_id;
++
+ 	unsigned int max_upscale_factor;
+ 	unsigned int max_downscale_factor;
+ 	const u8 dly[VOP2_DLY_MODE_MAX];
+@@ -308,6 +316,7 @@ enum dst_factor_mode {
+ 
+ #define RK3568_CLUSTER_WIN_CTRL0		0x00
+ #define RK3568_CLUSTER_WIN_CTRL1		0x04
++#define RK3568_CLUSTER_WIN_CTRL2		0x08
+ #define RK3568_CLUSTER_WIN_YRGB_MST		0x10
+ #define RK3568_CLUSTER_WIN_CBR_MST		0x14
+ #define RK3568_CLUSTER_WIN_VIR			0x18
+@@ -330,6 +339,7 @@ enum dst_factor_mode {
+ /* (E)smart register definition, offset relative to window base */
+ #define RK3568_SMART_CTRL0			0x00
+ #define RK3568_SMART_CTRL1			0x04
++#define RK3588_SMART_AXI_CTRL			0x08
+ #define RK3568_SMART_REGION0_CTRL		0x10
+ #define RK3568_SMART_REGION0_YRGB_MST		0x14
+ #define RK3568_SMART_REGION0_CBR_MST		0x18
+diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+index f9d87a0abc8b02..6a6c15f2c9cc7a 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
++++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+@@ -313,7 +313,7 @@ static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
+  * AXI1 is a read only bus.
+  *
+  * Every window on an AXI bus must be assigned two unique
+- * read id(yrgb_id/uv_id, valid id are 0x1~0xe).
++ * read ids (yrgb_r_id/uv_r_id; valid ids are 0x1~0xe).
+  *
+  * AXI0:
+  * Cluster0/1, Esmart0/1, WriteBack
+@@ -333,6 +333,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 0,
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 2,
++		.axi_uv_r_id = 3,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -349,6 +352,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 6,
++		.axi_uv_r_id = 7,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -364,6 +370,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 2,
++		.axi_uv_r_id = 3,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -379,6 +388,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ 				       DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_PRIMARY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 6,
++		.axi_uv_r_id = 7,
+ 		.max_upscale_factor = 4,
+ 		.max_downscale_factor = 4,
+ 		.dly = { 4, 26, 29 },
+@@ -393,6 +405,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 2,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 0x0a,
++		.axi_uv_r_id = 0x0b,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -406,6 +421,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 3,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 0,
++		.axi_yrgb_r_id = 0x0c,
++		.axi_uv_r_id = 0x01,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -419,6 +437,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 6,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 0x0a,
++		.axi_uv_r_id = 0x0b,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+@@ -432,6 +453,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
+ 		.layer_sel_id = 7,
+ 		.supported_rotations = DRM_MODE_REFLECT_Y,
+ 		.type = DRM_PLANE_TYPE_OVERLAY,
++		.axi_bus_id = 1,
++		.axi_yrgb_r_id = 0x0c,
++		.axi_uv_r_id = 0x0d,
+ 		.max_upscale_factor = 8,
+ 		.max_downscale_factor = 8,
+ 		.dly = { 23, 45, 48 },
+diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
+index 19e3ee7ac897fe..76816f2551c100 100644
+--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
++++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
+@@ -237,8 +237,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
+ 	if (v3d->ver >= 40) {
+ 		int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
+-			       V3D_SET_FIELD(cycle_count_reg,
+-					     V3D_PCTR_S0));
++			       V3D_SET_FIELD_VER(cycle_count_reg,
++						 V3D_PCTR_S0, v3d->ver));
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
+ 		V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
+ 	} else {
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index 924814cab46a17..ecf06e8e9fbccb 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -240,17 +240,18 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
+ 
+ 	for (i = 0; i < ncounters; i++) {
+ 		u32 source = i / 4;
+-		u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);
++		u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0,
++						v3d->ver);
+ 
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S1);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S1, v3d->ver);
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S2);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S2, v3d->ver);
+ 		i++;
+-		channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
+-					 V3D_PCTR_S3);
++		channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
++					     V3D_PCTR_S3, v3d->ver);
+ 		V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
+index 1b1a62ad95852b..6da3c69082bd6d 100644
+--- a/drivers/gpu/drm/v3d/v3d_regs.h
++++ b/drivers/gpu/drm/v3d/v3d_regs.h
+@@ -15,6 +15,14 @@
+ 		fieldval & field##_MASK;				\
+ 	 })
+ 
++#define V3D_SET_FIELD_VER(value, field, ver)				\
++	({								\
++		typeof(ver) _ver = (ver);				\
++		u32 fieldval = (value) << field##_SHIFT(_ver);		\
++		WARN_ON((fieldval & ~field##_MASK(_ver)) != 0);		\
++		fieldval & field##_MASK(_ver);				\
++	 })
++
+ #define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >>		\
+ 				    field##_SHIFT)
+ 
+@@ -354,18 +362,15 @@
+ #define V3D_V4_PCTR_0_SRC_28_31                        0x0067c
+ #define V3D_V4_PCTR_0_SRC_X(x)                         (V3D_V4_PCTR_0_SRC_0_3 + \
+ 							4 * (x))
+-# define V3D_PCTR_S0_MASK                              V3D_MASK(6, 0)
+-# define V3D_V7_PCTR_S0_MASK                           V3D_MASK(7, 0)
+-# define V3D_PCTR_S0_SHIFT                             0
+-# define V3D_PCTR_S1_MASK                              V3D_MASK(14, 8)
+-# define V3D_V7_PCTR_S1_MASK                           V3D_MASK(15, 8)
+-# define V3D_PCTR_S1_SHIFT                             8
+-# define V3D_PCTR_S2_MASK                              V3D_MASK(22, 16)
+-# define V3D_V7_PCTR_S2_MASK                           V3D_MASK(23, 16)
+-# define V3D_PCTR_S2_SHIFT                             16
+-# define V3D_PCTR_S3_MASK                              V3D_MASK(30, 24)
+-# define V3D_V7_PCTR_S3_MASK                           V3D_MASK(31, 24)
+-# define V3D_PCTR_S3_SHIFT                             24
++# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
++# define V3D_PCTR_S0_SHIFT(ver)                        0
++# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8))
++# define V3D_PCTR_S1_SHIFT(ver)                        8
++# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? V3D_MASK(23, 16) : V3D_MASK(22, 16))
++# define V3D_PCTR_S2_SHIFT(ver)                        16
++# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24))
++# define V3D_PCTR_S3_SHIFT(ver)                        24
++
+ #define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
+ 
+ /* Output values of the counters. */
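Instead of keeping parallel V3D_PCTR_Sn_MASK / V3D_V7_PCTR_Sn_MASK constants and choosing between them at every call site, the masks and shifts become function-like macros taking the hardware version, and V3D_SET_FIELD_VER() pastes the field name onto both. A runnable reduction of the token-pasting technique (names shortened from the originals):

#include <stdio.h>
#include <stdint.h>

#define MASK(hi, lo) ((uint32_t)(((1ull << ((hi) - (lo) + 1)) - 1) << (lo)))
#define PCTR_S0_MASK(ver)  (((ver) >= 71) ? MASK(7, 0) : MASK(6, 0))
#define PCTR_S0_SHIFT(ver) 0

/* field##_MASK / field##_SHIFT resolve to the version-aware macros above */
#define SET_FIELD_VER(value, field, ver)                                \
        (((uint32_t)(value) << field##_SHIFT(ver)) & field##_MASK(ver))

int main(void)
{
        printf("v42: %#x\n", SET_FIELD_VER(0xff, PCTR_S0, 42)); /* 0x7f */
        printf("v71: %#x\n", SET_FIELD_VER(0xff, PCTR_S0, 71)); /* 0xff */
        return 0;
}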
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 33a19197332488..5ab8e10bbd7602 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1163,6 +1163,8 @@ static void hid_apply_multiplier(struct hid_device *hid,
+ 	while (multiplier_collection->parent_idx != -1 &&
+ 	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ 		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
++	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
++		multiplier_collection = NULL;
+ 
+ 	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
+ 
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index fda9dce3da9980..9d80635a91ebd8 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -810,10 +810,23 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 		}
+ 
+-		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
+-			switch (usage->hid & 0xf) {
+-			case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
+-			default: goto ignore;
++		if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */
++			switch (usage->hid) {
++			case HID_GD_UP:	   usage->hat_dir = 1; break;
++			case HID_GD_DOWN:  usage->hat_dir = 5; break;
++			case HID_GD_RIGHT: usage->hat_dir = 3; break;
++			case HID_GD_LEFT:  usage->hat_dir = 7; break;
++			case HID_GD_DO_NOT_DISTURB:
++				map_key_clear(KEY_DO_NOT_DISTURB); break;
++			default: goto unknown;
++			}
++
++			if (usage->hid <= HID_GD_LEFT) {
++				if (field->dpad) {
++					map_abs(field->dpad);
++					goto ignore;
++				}
++				map_abs(ABS_HAT0X);
+ 			}
+ 			break;
+ 		}
+@@ -844,22 +857,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		if (field->application == HID_GD_SYSTEM_CONTROL)
+ 			goto ignore;
+ 
+-		if ((usage->hid & 0xf0) == 0x90) {	/* D-pad */
+-			switch (usage->hid) {
+-			case HID_GD_UP:	   usage->hat_dir = 1; break;
+-			case HID_GD_DOWN:  usage->hat_dir = 5; break;
+-			case HID_GD_RIGHT: usage->hat_dir = 3; break;
+-			case HID_GD_LEFT:  usage->hat_dir = 7; break;
+-			default: goto unknown;
+-			}
+-			if (field->dpad) {
+-				map_abs(field->dpad);
+-				goto ignore;
+-			}
+-			map_abs(ABS_HAT0X);
+-			break;
+-		}
+-
+ 		switch (usage->hid) {
+ 		/* These usage IDs map directly to the usage codes. */
+ 		case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 65023bfe30ed28..42c0bd9d2f31e3 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2084,7 +2084,7 @@ static const struct hid_device_id mt_devices[] = {
+ 		     I2C_DEVICE_ID_GOODIX_01E8) },
+ 	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ 	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+-		     I2C_DEVICE_ID_GOODIX_01E8) },
++		     I2C_DEVICE_ID_GOODIX_01E9) },
+ 
+ 	/* GoodTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index cf1679b0d4fbb5..6c3e758bbb09e3 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -170,6 +170,14 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
+ 	ep = &usbif->cur_altsetting->endpoint[1];
+ 	b_ep = ep->desc.bEndpointAddress;
+ 
++	/* Are the expected endpoints present? */
++	u8 ep_addr[1] = {b_ep};
++
++	if (!usb_check_int_endpoints(usbif, ep_addr)) {
++		hid_err(hdev, "Unexpected non-int endpoint\n");
++		return;
++	}
++
+ 	for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
+ 		memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
+ 
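The thrustmaster guard refuses to drive the init sequence against a device whose descriptor does not expose the interrupt endpoint the driver expects, closing off crashes from malicious or broken devices. A hedged sketch of the same check as a helper; note that, as I read the usb_check_int_endpoints() contract, the address list it takes is zero-terminated, which the sketch makes explicit:

#include <linux/usb.h>
#include <linux/hid.h>

static int tm_check_endpoint(struct hid_device *hdev,
                             struct usb_interface *usbif, u8 b_ep)
{
        u8 ep_addr[2] = { b_ep, 0 };    /* zero-terminated address list */

        if (!usb_check_int_endpoints(usbif, ep_addr)) {
                hid_err(hdev, "Unexpected non-int endpoint\n");
                return -ENODEV;
        }
        return 0;
}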
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index dd376602f3f19c..9afa70f877cc12 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -413,7 +413,7 @@ config SENSORS_ASPEED
+ 	  will be called aspeed_pwm_tacho.
+ 
+ config SENSORS_ASPEED_G6
+-	tristate "ASPEED g6 PWM and Fan tach driver"
++	tristate "ASPEED G6 PWM and Fan tach driver"
+ 	depends on ARCH_ASPEED || COMPILE_TEST
+ 	depends on PWM
+ 	help
+@@ -421,7 +421,7 @@ config SENSORS_ASPEED_G6
+ 	  controllers.
+ 
+ 	  This driver can also be built as a module. If so, the module
+-	  will be called aspeed_pwm_tacho.
++	  will be called aspeed_g6_pwm_tach.
+ 
+ config SENSORS_ATXP1
+ 	tristate "Attansic ATXP1 VID controller"
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index c243b51837d2fb..fa3351351825b7 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -42,6 +42,9 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#undef DEFAULT_SYMBOL_NAMESPACE
++#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
++
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -56,9 +59,6 @@
+ #include "lm75.h"
+ #include "nct6775.h"
+ 
+-#undef DEFAULT_SYMBOL_NAMESPACE
+-#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
+-
+ #define USE_ALTERNATE
+ 
+ /* used to set data->name = nct6775_device_names[data->sio_kind] */
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index 183a35038eef91..8eb7bd640f8d3c 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -8,6 +8,9 @@
+  * Copyright (C) 2007 MontaVista Software Inc.
+  * Copyright (C) 2009 Provigent Ltd.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW_COMMON"
++
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -29,8 +32,6 @@
+ #include <linux/types.h>
+ #include <linux/units.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW_COMMON"
+-
+ #include "i2c-designware-core.h"
+ 
+ static const char *const abort_sources[] = {
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index c8cbe5b1aeb197..2569bf1a72e0ea 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -8,6 +8,9 @@
+  * Copyright (C) 2007 MontaVista Software Inc.
+  * Copyright (C) 2009 Provigent Ltd.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
++
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+@@ -22,8 +25,6 @@
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
+-
+ #include "i2c-designware-core.h"
+ 
+ #define AMD_TIMEOUT_MIN_US	25
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index dc2b788eac5bdc..5cd4a5f7a472e4 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -6,6 +6,9 @@
+  *
+  * Copyright (C) 2016 Synopsys Inc.
+  */
++
++#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
++
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+@@ -16,8 +19,6 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
+ 
+-#define DEFAULT_SYMBOL_NAMESPACE	"I2C_DW"
+-
+ #include "i2c-designware-core.h"
+ 
+ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
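These three designware hunks (and the nct6775 one before them) apply the same mechanical rule: now that symbol namespaces are string literals, DEFAULT_SYMBOL_NAMESPACE has to be visible before any header that might expand export or namespace-import macros, so the define is hoisted above the include block. A sketch of the intended ordering (dw_example is an illustrative symbol, not one from the driver):

/* Define the namespace before anything that could expand EXPORT_SYMBOL() */
#define DEFAULT_SYMBOL_NAMESPACE        "I2C_DW"

#include <linux/module.h>

int dw_example;
EXPORT_SYMBOL_GPL(dw_example);  /* lands in the "I2C_DW" namespace */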
+diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
+index d4b80eb8cecdf4..343b2f9ca63c3c 100644
+--- a/drivers/i3c/master/dw-i3c-master.c
++++ b/drivers/i3c/master/dw-i3c-master.c
+@@ -1647,6 +1647,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
+ 
+ void dw_i3c_common_remove(struct dw_i3c_master *master)
+ {
++	cancel_work_sync(&master->hj_work);
+ 	i3c_master_unregister(&master->base);
+ 
+ 	pm_runtime_disable(master->dev);
+diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
+index 1211f4317a9f4f..aba96ca9bce5df 100644
+--- a/drivers/infiniband/hw/Makefile
++++ b/drivers/infiniband/hw/Makefile
+@@ -11,7 +11,7 @@ obj-$(CONFIG_INFINIBAND_OCRDMA)		+= ocrdma/
+ obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA)	+= vmw_pvrdma/
+ obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
+ obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
+-obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
++obj-$(CONFIG_INFINIBAND_HNS_HIP08)	+= hns/
+ obj-$(CONFIG_INFINIBAND_QEDR)		+= qedr/
+ obj-$(CONFIG_INFINIBAND_BNXT_RE)	+= bnxt_re/
+ obj-$(CONFIG_INFINIBAND_ERDMA)		+= erdma/
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index e3d26bd6de05a3..1ff2e176b0369c 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -4467,9 +4467,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
+ 	case BNXT_RE_MMAP_TOGGLE_PAGE:
+ 		/* Driver doesn't expect write access for user space */
+ 		if (vma->vm_flags & VM_WRITE)
+-			return -EFAULT;
+-		ret = vm_insert_page(vma, vma->vm_start,
+-				     virt_to_page((void *)bnxt_entry->mem_offset));
++			ret = -EFAULT;
++		else
++			ret = vm_insert_page(vma, vma->vm_start,
++					     virt_to_page((void *)bnxt_entry->mem_offset));
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 80970a1738f8a6..034b85c4225555 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+ 	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+ 	 * cpl_rx_pkt.
+ 	 */
+-	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+-			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
++	skb = alloc_skb(size_add(gl->tot_len,
++				 sizeof(struct cpl_pass_accept_req) +
++				 sizeof(struct rss_header)) - pktshift,
++			GFP_ATOMIC);
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
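The cxgb4 allocation previously took a raw sum of a wire-derived length and two header sizes. size_add() from <linux/overflow.h> saturates at SIZE_MAX instead of wrapping, and a saturated sum stays enormous after the pktshift subtraction, so alloc_skb() fails cleanly rather than returning an undersized buffer. A hedged sketch of the idiom:

#include <linux/overflow.h>
#include <linux/skbuff.h>

static struct sk_buff *copy_alloc(size_t tot_len, size_t hdr_len,
                                  size_t pktshift)
{
        /* size_add() cannot wrap; an oversized request makes alloc_skb()
         * return NULL instead of succeeding with a short buffer */
        return alloc_skb(size_add(tot_len, hdr_len) - pktshift, GFP_ATOMIC);
}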
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index 7b5c4522b426a6..955f061a55e9ae 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1599,6 +1599,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ 	int count;
+ 	int rq_flushed = 0, sq_flushed;
+ 	unsigned long flag;
++	struct ib_event ev;
+ 
+ 	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
+ 
+@@ -1607,6 +1608,13 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ 	if (schp != rchp)
+ 		spin_lock(&schp->lock);
+ 	spin_lock(&qhp->lock);
++	if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
++	    qhp->ibqp.event_handler) {
++		ev.device = qhp->ibqp.device;
++		ev.element.qp = &qhp->ibqp;
++		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
++		qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
++	}
+ 
+ 	if (qhp->wq.flushed) {
+ 		spin_unlock(&qhp->lock);
+diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
+index ab3fbba70789ca..44cdb706fe276d 100644
+--- a/drivers/infiniband/hw/hns/Kconfig
++++ b/drivers/infiniband/hw/hns/Kconfig
+@@ -1,21 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-config INFINIBAND_HNS
+-	tristate "HNS RoCE Driver"
+-	depends on NET_VENDOR_HISILICON
+-	depends on ARM64 || (COMPILE_TEST && 64BIT)
+-	depends on (HNS_DSAF && HNS_ENET) || HNS3
+-	help
+-	  This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
+-
+-	  To compile HIP08 driver as module, choose M here.
+-
+ config INFINIBAND_HNS_HIP08
+-	bool "Hisilicon Hip08 Family RoCE support"
+-	depends on INFINIBAND_HNS && PCI && HNS3
+-	depends on INFINIBAND_HNS=m || HNS3=y
++	tristate "Hisilicon Hip08 Family RoCE support"
++	depends on ARM64 || (COMPILE_TEST && 64BIT)
++	depends on PCI && HNS3
+ 	help
+ 	  RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+ 	  The RoCE engine is a PCI device.
+ 
+-	  To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
+-	  module will be called hns-roce-hw-v2.
++	  To compile this driver, choose M here. This module will be called
++	  hns-roce-hw-v2.
+diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
+index be1e1cdbcfa8a8..7917af8e6380e8 100644
+--- a/drivers/infiniband/hw/hns/Makefile
++++ b/drivers/infiniband/hw/hns/Makefile
+@@ -5,12 +5,9 @@
+ 
+ ccflags-y :=  -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ 
+-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
++hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+ 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ 	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
+-	hns_roce_debugfs.o
++	hns_roce_debugfs.o hns_roce_hw_v2.o
+ 
+-ifdef CONFIG_INFINIBAND_HNS_HIP08
+-hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
+-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+-endif
++obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 529db874d67c69..b1bbdcff631d56 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -351,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+ 	struct mlx4_port_gid_table   *port_gid_table;
+ 	int ret = 0;
+ 	int hw_update = 0;
+-	struct gid_entry *gids;
++	struct gid_entry *gids = NULL;
+ 
+ 	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
+ 		return -EINVAL;
+@@ -389,10 +389,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
+ 	}
+ 	spin_unlock_bh(&iboe->lock);
+ 
+-	if (!ret && hw_update) {
++	if (gids)
+ 		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
+-		kfree(gids);
+-	}
++
++	kfree(gids);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 4b37446758fd4e..64b441542cd5dd 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -228,13 +228,27 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+ 	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ 	struct mlx5_ib_mr *imr = mr->parent;
+ 
++	/*
++	 * If userspace is racing freeing the parent implicit ODP MR then we can
++	 * lose the race with parent destruction. In this case
++	 * mlx5_ib_free_odp_mr() will free everything in the implicit_children
++	 * xarray so NOP is fine. This child MR cannot be destroyed here because
++	 * we are under its umem_mutex.
++	 */
+ 	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ 		return;
+ 
+-	xa_erase(&imr->implicit_children, idx);
++	xa_lock(&imr->implicit_children);
++	if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
++	    mr) {
++		xa_unlock(&imr->implicit_children);
++		return;
++	}
++
+ 	if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
+-		xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+-			 mlx5_base_mkey(mr->mmkey.key));
++		__xa_erase(&mr_to_mdev(mr)->odp_mkeys,
++			   mlx5_base_mkey(mr->mmkey.key));
++	xa_unlock(&imr->implicit_children);
+ 
+ 	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ 	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+@@ -500,18 +514,18 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ 		refcount_inc(&ret->mmkey.usecount);
+ 		goto out_lock;
+ 	}
+-	xa_unlock(&imr->implicit_children);
+ 
+ 	if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
+-		ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+-			       &mr->mmkey, GFP_KERNEL);
++		ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
++				 &mr->mmkey, GFP_KERNEL);
+ 		if (xa_is_err(ret)) {
+ 			ret = ERR_PTR(xa_err(ret));
+-			xa_erase(&imr->implicit_children, idx);
+-			goto out_mr;
++			__xa_erase(&imr->implicit_children, idx);
++			goto out_lock;
+ 		}
+ 		mr->mmkey.type = MLX5_MKEY_IMPLICIT_CHILD;
+ 	}
++	xa_unlock(&imr->implicit_children);
+ 	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
+ 	return mr;
+ 
+@@ -944,8 +958,7 @@ static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
+ /*
+  * Handle a single data segment in a page-fault WQE or RDMA region.
+  *
+- * Returns number of OS pages retrieved on success. The caller may continue to
+- * the next data segment.
++ * Returns zero on success. The caller may continue to the next data segment.
+  * Can return the following error codes:
+  * -EAGAIN to designate a temporary error. The caller will abort handling the
+  *  page fault and resolve it.
+@@ -958,7 +971,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 					 u32 *bytes_committed,
+ 					 u32 *bytes_mapped)
+ {
+-	int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
++	int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range;
+ 	struct pf_frame *head = NULL, *frame;
+ 	struct mlx5_ib_mkey *mmkey;
+ 	struct mlx5_ib_mr *mr;
+@@ -993,13 +1006,20 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 	case MLX5_MKEY_MR:
+ 		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ 
++		pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) -
++				  (io_virt & PAGE_MASK)) >>
++				 PAGE_SHIFT;
+ 		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
+ 		if (ret < 0)
+ 			goto end;
+ 
+ 		mlx5_update_odp_stats(mr, faults, ret);
+ 
+-		npages += ret;
++		if (ret < pages_in_range) {
++			ret = -EFAULT;
++			goto end;
++		}
++
+ 		ret = 0;
+ 		break;
+ 
+@@ -1090,7 +1110,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ 	kfree(out);
+ 
+ 	*bytes_committed = 0;
+-	return ret ? ret : npages;
++	return ret;
+ }
+ 
+ /*
+@@ -1109,8 +1129,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+  *                   the committed bytes).
+  * @receive_queue: receive WQE end of sg list
+  *
+- * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+- * negative error code.
++ * Returns zero for success or a negative error code.
+  */
+ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 				   struct mlx5_pagefault *pfault,
+@@ -1118,7 +1137,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 				   void *wqe_end, u32 *bytes_mapped,
+ 				   u32 *total_wqe_bytes, bool receive_queue)
+ {
+-	int ret = 0, npages = 0;
++	int ret = 0;
+ 	u64 io_virt;
+ 	__be32 key;
+ 	u32 byte_count;
+@@ -1175,10 +1194,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ 						    bytes_mapped);
+ 		if (ret < 0)
+ 			break;
+-		npages += ret;
+ 	}
+ 
+-	return ret < 0 ? ret : npages;
++	return ret;
+ }
+ 
+ /*
+@@ -1414,12 +1432,6 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+ 	free_page((unsigned long)wqe_start);
+ }
+ 
+-static int pages_in_range(u64 address, u32 length)
+-{
+-	return (ALIGN(address + length, PAGE_SIZE) -
+-		(address & PAGE_MASK)) >> PAGE_SHIFT;
+-}
+-
+ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ 					   struct mlx5_pagefault *pfault)
+ {
+@@ -1458,7 +1470,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ 	if (ret == -EAGAIN) {
+ 		/* We're racing with an invalidation, don't prefetch */
+ 		prefetch_activated = 0;
+-	} else if (ret < 0 || pages_in_range(address, length) > ret) {
++	} else if (ret < 0) {
+ 		mlx5_ib_page_fault_resume(dev, pfault, 1);
+ 		if (ret != -ENOENT)
+ 			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%llx, type: 0x%x\n",
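The mlx5 ODP rework closes two races with the same tool: take xa_lock() once, and use the caller-locked primitives so the "is this still our entry?" test and the erase happen as a single atomic step. __xa_cmpxchg() returns the previous entry, so comparing it against the expected MR tells us whether teardown already won. A hedged sketch of the check-then-erase:

#include <linux/xarray.h>

/* Erase children[idx] only if it still points at mr; returns true if we
 * were the ones to remove it. */
static bool remove_child_if_ours(struct xarray *children,
                                 unsigned long idx, void *mr)
{
        bool ours;

        xa_lock(children);
        /* storing NULL never allocates, so the GFP flag is unused here */
        ours = __xa_cmpxchg(children, idx, mr, NULL, GFP_KERNEL) == mr;
        xa_unlock(children);
        return ours;
}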
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index d2f57ead78ad12..003f681e5dc022 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -129,7 +129,7 @@ enum rxe_device_param {
+ enum rxe_port_param {
+ 	RXE_PORT_GID_TBL_LEN		= 1024,
+ 	RXE_PORT_PORT_CAP_FLAGS		= IB_PORT_CM_SUP,
+-	RXE_PORT_MAX_MSG_SZ		= 0x800000,
++	RXE_PORT_MAX_MSG_SZ		= (1UL << 31),
+ 	RXE_PORT_BAD_PKEY_CNTR		= 0,
+ 	RXE_PORT_QKEY_VIOL_CNTR		= 0,
+ 	RXE_PORT_LID			= 0,
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index 67567d62195e86..d9cb682fd71f88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -178,7 +178,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ {
+ 	struct rxe_pool *pool = elem->pool;
+ 	struct xarray *xa = &pool->xa;
+-	static int timeout = RXE_POOL_TIMEOUT;
+ 	int ret, err = 0;
+ 	void *xa_ret;
+ 
+@@ -202,19 +201,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ 	 * return to rdma-core
+ 	 */
+ 	if (sleepable) {
+-		if (!completion_done(&elem->complete) && timeout) {
++		if (!completion_done(&elem->complete)) {
+ 			ret = wait_for_completion_timeout(&elem->complete,
+-					timeout);
++					msecs_to_jiffies(50000));
+ 
+ 			/* Shouldn't happen. There are still references to
+ 			 * the object but, rather than deadlock, free the
+ 			 * object or pass back to rdma-core.
+ 			 */
+ 			if (WARN_ON(!ret))
+-				err = -EINVAL;
++				err = -ETIMEDOUT;
+ 		}
+ 	} else {
+-		unsigned long until = jiffies + timeout;
++		unsigned long until = jiffies + RXE_POOL_TIMEOUT;
+ 
+ 		/* AH objects are unique in that the destroy_ah verb
+ 		 * can be called in atomic context. This delay
+@@ -226,7 +225,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ 			mdelay(1);
+ 
+ 		if (WARN_ON(!completion_done(&elem->complete)))
+-			err = -EINVAL;
++			err = -ETIMEDOUT;
+ 	}
+ 
+ 	if (pool->cleanup)
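Two fixes ride together in rxe_pool.c: the completion wait now converts its timeout explicitly, since wait_for_completion_timeout() takes jiffies and a raw constant means different wall-clock times depending on HZ, and a timeout now reports -ETIMEDOUT instead of the misleading -EINVAL. A hedged sketch of the wait:

#include <linux/completion.h>
#include <linux/jiffies.h>

static int wait_for_cleanup(struct completion *done)
{
        unsigned long left;

        /* 50 s expressed in jiffies, whatever HZ is configured */
        left = wait_for_completion_timeout(done, msecs_to_jiffies(50000));
        return left ? 0 : -ETIMEDOUT;
}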
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 8a5fc20fd18692..589ac0d8489dbd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -696,7 +696,7 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ 		for (i = 0; i < ibwr->num_sge; i++)
+ 			length += ibwr->sg_list[i].length;
+ 
+-		if (length > (1UL << 31)) {
++		if (length > RXE_PORT_MAX_MSG_SZ) {
+ 			rxe_err_qp(qp, "message length too long\n");
+ 			break;
+ 		}
+@@ -980,8 +980,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	for (i = 0; i < num_sge; i++)
+ 		length += ibwr->sg_list[i].length;
+ 
+-	/* IBA max message size is 2^31 */
+-	if (length >= (1UL<<31)) {
++	if (length > RXE_PORT_MAX_MSG_SZ) {
+ 		err = -EINVAL;
+ 		rxe_dbg("message length too long\n");
+ 		goto err_out;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 4e17d546d4ccf3..bf38ac6f87c47a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -584,6 +584,9 @@ static void dev_free(struct kref *ref)
+ 	list_del(&dev->entry);
+ 	mutex_unlock(&pool->mutex);
+ 
++	if (pool->ops && pool->ops->deinit)
++		pool->ops->deinit(dev);
++
+ 	ib_dealloc_pd(dev->ib_pd);
+ 	kfree(dev);
+ }
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 2916e77f589b81..7289ae0b83aced 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3978,7 +3978,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
+ 	return host;
+ 
+ put_host:
+-	device_del(&host->dev);
+ 	put_device(&host->dev);
+ 	return NULL;
+ }
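The srp error path appears to have run device_del() on a device whose device_add() had failed, which unbalances the kobject/sysfs state. The rule the fix restores: after device_initialize(), a failed device_add() is rolled back with put_device() alone; device_del() is only legal once device_add() has succeeded. A hedged sketch:

#include <linux/device.h>

static int register_host_dev(struct device *dev)
{
        int ret;

        device_initialize(dev);
        ret = device_add(dev);
        if (ret) {
                put_device(dev);  /* drops the init reference; no device_del() */
                return ret;
        }
        return 0;
}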
+diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
+index 1bef5d55b2f9dd..6eb0af2e033907 100644
+--- a/drivers/iommu/amd/amd_iommu.h
++++ b/drivers/iommu/amd/amd_iommu.h
+@@ -41,13 +41,13 @@ void amd_iommu_disable(void);
+ int amd_iommu_reenable(int mode);
+ int amd_iommu_enable_faulting(unsigned int cpu);
+ extern int amd_iommu_guest_ir;
+-extern enum io_pgtable_fmt amd_iommu_pgtable;
++extern enum protection_domain_mode amd_iommu_pgtable;
+ extern int amd_iommu_gpt_level;
+ extern unsigned long amd_iommu_pgsize_bitmap;
+ 
+ /* Protection domain ops */
+ void amd_iommu_init_identity_domain(void);
+-struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
++struct protection_domain *protection_domain_alloc(int nid);
+ void protection_domain_free(struct protection_domain *domain);
+ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+ 						struct mm_struct *mm);
+@@ -89,7 +89,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+  */
+ void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
+ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+-void amd_iommu_domain_update(struct protection_domain *domain);
+ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ 				  u64 address, size_t size);
+ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 0e0a531042acb3..db4b52aae1fcf1 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -152,7 +152,7 @@ struct ivmd_header {
+ bool amd_iommu_dump;
+ bool amd_iommu_irq_remap __read_mostly;
+ 
+-enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
++enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
+ /* Guest page table level */
+ int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
+ 
+@@ -2145,7 +2145,7 @@ static void print_iommu_info(void)
+ 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ 			pr_info("X2APIC enabled\n");
+ 	}
+-	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
++	if (amd_iommu_pgtable == PD_MODE_V2) {
+ 		pr_info("V2 page table enabled (Paging mode : %d level)\n",
+ 			amd_iommu_gpt_level);
+ 	}
+@@ -3059,10 +3059,10 @@ static int __init early_amd_iommu_init(void)
+ 	    FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
+ 		amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+ 
+-	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
++	if (amd_iommu_pgtable == PD_MODE_V2) {
+ 		if (!amd_iommu_v2_pgtbl_supported()) {
+ 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+-			amd_iommu_pgtable = AMD_IOMMU_V1;
++			amd_iommu_pgtable = PD_MODE_V1;
+ 		}
+ 	}
+ 
+@@ -3185,7 +3185,7 @@ static void iommu_snp_enable(void)
+ 		goto disable_snp;
+ 	}
+ 
+-	if (amd_iommu_pgtable != AMD_IOMMU_V1) {
++	if (amd_iommu_pgtable != PD_MODE_V1) {
+ 		pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ 		goto disable_snp;
+ 	}
+@@ -3464,9 +3464,9 @@ static int __init parse_amd_iommu_options(char *str)
+ 		} else if (strncmp(str, "force_isolation", 15) == 0) {
+ 			amd_iommu_force_isolation = true;
+ 		} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
+-			amd_iommu_pgtable = AMD_IOMMU_V1;
++			amd_iommu_pgtable = PD_MODE_V1;
+ 		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+-			amd_iommu_pgtable = AMD_IOMMU_V2;
++			amd_iommu_pgtable = PD_MODE_V2;
+ 		} else if (strncmp(str, "irtcachedis", 11) == 0) {
+ 			amd_iommu_irtcachedis = true;
+ 		} else if (strncmp(str, "nohugepages", 11) == 0) {
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 16f40b8000d798..80b2c9eb438f2f 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1603,15 +1603,6 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+ 	domain_flush_complete(domain);
+ }
+ 
+-void amd_iommu_domain_update(struct protection_domain *domain)
+-{
+-	/* Update device table */
+-	amd_iommu_update_and_flush_device_table(domain);
+-
+-	/* Flush domain TLB(s) and wait for completion */
+-	amd_iommu_domain_flush_all(domain);
+-}
+-
+ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
+ {
+ 	struct iommu_dev_data *dev_data;
+@@ -2285,7 +2276,7 @@ static void protection_domain_init(struct protection_domain *domain, int nid)
+ 	domain->iop.pgtbl.cfg.amd.nid = nid;
+ }
+ 
+-struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
++struct protection_domain *protection_domain_alloc(int nid)
+ {
+ 	struct protection_domain *domain;
+ 	int domid;
+@@ -2306,37 +2297,30 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
+ 	return domain;
+ }
+ 
+-static int pdom_setup_pgtable(struct protection_domain *domain,
+-			      unsigned int type, int pgtable)
++static int pdom_setup_pgtable(struct protection_domain *domain)
+ {
+ 	struct io_pgtable_ops *pgtbl_ops;
++	enum io_pgtable_fmt fmt;
+ 
+-	/* No need to allocate io pgtable ops in passthrough mode */
+-	if (!(type & __IOMMU_DOMAIN_PAGING))
+-		return 0;
+-
+-	switch (pgtable) {
+-	case AMD_IOMMU_V1:
+-		domain->pd_mode = PD_MODE_V1;
++	switch (domain->pd_mode) {
++	case PD_MODE_V1:
++		fmt = AMD_IOMMU_V1;
+ 		break;
+-	case AMD_IOMMU_V2:
+-		domain->pd_mode = PD_MODE_V2;
++	case PD_MODE_V2:
++		fmt = AMD_IOMMU_V2;
+ 		break;
+-	default:
+-		return -EINVAL;
+ 	}
+ 
+-	pgtbl_ops =
+-		alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
++	pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
+ 	if (!pgtbl_ops)
+ 		return -ENOMEM;
+ 
+ 	return 0;
+ }
+ 
+-static inline u64 dma_max_address(int pgtable)
++static inline u64 dma_max_address(enum protection_domain_mode pgtable)
+ {
+-	if (pgtable == AMD_IOMMU_V1)
++	if (pgtable == PD_MODE_V1)
+ 		return ~0ULL;
+ 
+ 	/* V2 with 4/5 level page table */
+@@ -2348,31 +2332,21 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
+ 	return iommu && (iommu->features & FEATURE_HDSUP);
+ }
+ 
+-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+-						  struct device *dev,
+-						  u32 flags, int pgtable)
++static struct iommu_domain *
++do_iommu_domain_alloc(struct device *dev, u32 flags,
++		      enum protection_domain_mode pgtable)
+ {
+ 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
++	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+ 	struct protection_domain *domain;
+-	struct amd_iommu *iommu = NULL;
+ 	int ret;
+ 
+-	if (dev)
+-		iommu = get_amd_iommu_from_dev(dev);
+-
+-	/*
+-	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
+-	 * default to use IOMMU_DOMAIN_DMA[_FQ].
+-	 */
+-	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
+-		return ERR_PTR(-EINVAL);
+-
+-	domain = protection_domain_alloc(type,
+-					 dev ? dev_to_node(dev) : NUMA_NO_NODE);
++	domain = protection_domain_alloc(dev_to_node(dev));
+ 	if (!domain)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	ret = pdom_setup_pgtable(domain, type, pgtable);
++	domain->pd_mode = pgtable;
++	ret = pdom_setup_pgtable(domain);
+ 	if (ret) {
+ 		pdom_id_free(domain->id);
+ 		kfree(domain);
+@@ -2384,72 +2358,45 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+ 	domain->domain.geometry.force_aperture = true;
+ 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
+ 
+-	if (iommu) {
+-		domain->domain.type = type;
+-		domain->domain.ops = iommu->iommu.ops->default_domain_ops;
++	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
++	domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+ 
+-		if (dirty_tracking)
+-			domain->domain.dirty_ops = &amd_dirty_ops;
+-	}
++	if (dirty_tracking)
++		domain->domain.dirty_ops = &amd_dirty_ops;
+ 
+ 	return &domain->domain;
+ }
+ 
+-static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+-{
+-	struct iommu_domain *domain;
+-	int pgtable = amd_iommu_pgtable;
+-
+-	/*
+-	 * Force IOMMU v1 page table when allocating
+-	 * domain for pass-through devices.
+-	 */
+-	if (type == IOMMU_DOMAIN_UNMANAGED)
+-		pgtable = AMD_IOMMU_V1;
+-
+-	domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
+-	if (IS_ERR(domain))
+-		return NULL;
+-
+-	return domain;
+-}
+-
+ static struct iommu_domain *
+ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+ 				    const struct iommu_user_data *user_data)
+ 
+ {
+-	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+-	struct amd_iommu *iommu = NULL;
++	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+ 	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ 						IOMMU_HWPT_ALLOC_PASID;
+ 
+-	if (dev)
+-		iommu = get_amd_iommu_from_dev(dev);
+-
+ 	if ((flags & ~supported_flags) || user_data)
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 
+-	/* Allocate domain with v2 page table if IOMMU supports PASID. */
+-	if (flags & IOMMU_HWPT_ALLOC_PASID) {
++	switch (flags & supported_flags) {
++	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
++		/* Allocate domain with v1 page table for dirty tracking */
++		if (!amd_iommu_hd_support(iommu))
++			break;
++		return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
++	case IOMMU_HWPT_ALLOC_PASID:
++		/* Allocate domain with v2 page table if IOMMU supports PASID. */
+ 		if (!amd_iommu_pasid_supported())
+-			return ERR_PTR(-EOPNOTSUPP);
+-
+-		return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+-	}
+-
+-	/* Allocate domain with v1 page table for dirty tracking */
+-	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
+-		if (iommu && amd_iommu_hd_support(iommu)) {
+-			return do_iommu_domain_alloc(type, dev,
+-						     flags, AMD_IOMMU_V1);
+-		}
+-
+-		return ERR_PTR(-EOPNOTSUPP);
++			break;
++		return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
++	case 0:
++		/* If nothing specific is required, use the kernel command-line default */
++		return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
++	default:
++		break;
+ 	}
+-
+-	/* If nothing specific is required use the kernel commandline default */
+-	return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
++	return ERR_PTR(-EOPNOTSUPP);
+ }
+ 
+ void amd_iommu_domain_free(struct iommu_domain *dom)
+@@ -2890,7 +2837,6 @@ const struct iommu_ops amd_iommu_ops = {
+ 	.blocked_domain = &blocked_domain,
+ 	.release_domain = &release_domain,
+ 	.identity_domain = &identity_domain.domain,
+-	.domain_alloc = amd_iommu_domain_alloc,
+ 	.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
+ 	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
+ 	.probe_device = amd_iommu_probe_device,
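The rewritten amd_iommu_domain_alloc_paging_flags() dispatches on the
exact flag combination instead of a chain of if-tests. A sketch of the
pattern with invented names: mask down to the supported bits, switch on
the result, and let any unsupported mix fall through to -EOPNOTSUPP.

switch (flags & SUPPORTED_FLAGS) {
case FLAG_DIRTY_TRACKING:
	if (!hw_supports_dirty(iommu))
		break;
	return alloc_domain(dev, flags, MODE_V1);
case FLAG_PASID:
	if (!hw_supports_pasid())
		break;
	return alloc_domain(dev, flags, MODE_V2);
case 0:		/* nothing requested: use the command-line default */
	return alloc_domain(dev, 0, default_mode);
default:	/* e.g. both flags at once */
	break;
}
return ERR_PTR(-EOPNOTSUPP);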
+diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
+index 8c73a30c2800e7..9101d07b11d3f7 100644
+--- a/drivers/iommu/amd/pasid.c
++++ b/drivers/iommu/amd/pasid.c
+@@ -185,12 +185,13 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+ 	struct protection_domain *pdom;
+ 	int ret;
+ 
+-	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
++	pdom = protection_domain_alloc(dev_to_node(dev));
+ 	if (!pdom)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	pdom->domain.ops = &amd_sva_domain_ops;
+ 	pdom->mn.ops = &sva_mn;
++	pdom->domain.type = IOMMU_DOMAIN_SVA;
+ 
+ 	ret = mmu_notifier_register(&pdom->mn, mm);
+ 	if (ret) {
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index a5c7002ff75bb0..6d15405f0ea3e9 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2745,9 +2745,14 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
+ 		 * Translation Requests and Translated transactions are denied
+ 		 * as though ATS is disabled for the stream (STE.EATS == 0b00),
+ 		 * causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
+-		 * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
+-		 * enabled if we have arm_smmu_domain, those always have page
+-		 * tables.
++		 * (IHI0070Ea 5.2 Stream Table Entry).
++		 *
++		 * However, if we have installed a CD table and are using S1DSS
++		 * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
++		 * skipping stage 1".
++		 *
++		 * Disable ATS if we are going to create a normal 0b100 bypass
++		 * STE.
+ 		 */
+ 		state->ats_enabled = !state->disable_ats &&
+ 				     arm_smmu_ats_supported(master);
+@@ -3070,8 +3075,10 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
+ 	if (arm_smmu_ssids_in_use(&master->cd_table)) {
+ 		/*
+ 		 * If a CD table has to be present then we need to run with ATS
+-		 * on even though the RID will fail ATS queries with UR. This is
+-		 * because we have no idea what the PASID's need.
++		 * on because we have to assume a PASID is using ATS. For
++		 * IDENTITY this will set things up so that S1DSS=bypass, which
++		 * follows the explanation in "13.6.4 Full ATS skipping stage 1"
++		 * and allows for ATS on the RID to work.
+ 		 */
+ 		state.cd_needs_ats = true;
+ 		arm_smmu_attach_prepare(&state, domain);
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 5b7d85f1e143c0..fb59a7d35958f5 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -244,11 +244,31 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
+ 
+ 	spin_lock(&iommu->lock);
+ 	pte = intel_pasid_get_entry(dev, pasid);
+-	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
++	if (WARN_ON(!pte)) {
+ 		spin_unlock(&iommu->lock);
+ 		return;
+ 	}
+ 
++	if (!pasid_pte_is_present(pte)) {
++		if (!pasid_pte_is_fault_disabled(pte)) {
++			WARN_ON(READ_ONCE(pte->val[0]) != 0);
++			spin_unlock(&iommu->lock);
++			return;
++		}
++
++		/*
++		 * When a PASID is used for SVA by a device, it's possible
++		 * that the pasid entry is non-present with the Fault
++		 * Processing Disabled bit set. Clear the pasid entry and
++		 * drain the PRQ for the PASID before returning.
++		 */
++		pasid_clear_entry(pte);
++		spin_unlock(&iommu->lock);
++		intel_iommu_drain_pasid_prq(dev, pasid);
++
++		return;
++	}
++
+ 	did = pasid_get_domain_id(pte);
+ 	pgtt = pasid_pte_get_pgtt(pte);
+ 	intel_pasid_clear_entry(dev, pasid, fault_ignore);
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index 082f4fe20216a7..668d8ece6b143c 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -73,6 +73,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
+ 	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
+ }
+ 
++/* Get FPD(Fault Processing Disable) bit of a PASID table entry */
++static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
++{
++	return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
++}
++
+ /* Get PGTT field of a PASID table entry */
+ static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+ {
+diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
+index ab665cf38ef4a9..39a86a4a1d3af4 100644
+--- a/drivers/iommu/iommufd/iova_bitmap.c
++++ b/drivers/iommu/iommufd/iova_bitmap.c
+@@ -130,7 +130,7 @@ struct iova_bitmap {
+ static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
+ 						 unsigned long iova)
+ {
+-	unsigned long pgsize = 1 << bitmap->mapped.pgshift;
++	unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
+ 
+ 	return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+ }
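The one-character iova_bitmap fix matters because a plain 1 is an int:
the shift happens in 32-bit arithmetic before the assignment widens the
result, so a pgshift of 31 or more is undefined behaviour. With 1UL the
shift is done in unsigned long, which is 64 bits on a 64-bit kernel:

unsigned long bad  = 1   << 38;	/* UB: shift count >= width of int */
unsigned long good = 1UL << 38;	/* OK on 64-bit: yields 2^38 */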
+diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
+index 97c5e3567d33e4..d898d05be690fd 100644
+--- a/drivers/iommu/iommufd/main.c
++++ b/drivers/iommu/iommufd/main.c
+@@ -104,7 +104,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
+ 	if (wait_event_timeout(ictx->destroy_wait,
+ 				refcount_read(&to_destroy->shortterm_users) ==
+ 					0,
+-				msecs_to_jiffies(10000)))
++				msecs_to_jiffies(60000)))
+ 		return 0;
+ 
+ 	pr_crit("Time out waiting for iommufd object to become free\n");
+diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
+index 8a05def774bdb5..38d381164385a1 100644
+--- a/drivers/iommu/riscv/iommu.c
++++ b/drivers/iommu/riscv/iommu.c
+@@ -1270,7 +1270,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
+ 					    dma_addr_t iova)
+ {
+ 	struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
+-	unsigned long pte_size;
++	size_t pte_size;
+ 	unsigned long *ptr;
+ 
+ 	ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
+diff --git a/drivers/leds/leds-cht-wcove.c b/drivers/leds/leds-cht-wcove.c
+index 8246f048edcb4b..9a609dd5acdc88 100644
+--- a/drivers/leds/leds-cht-wcove.c
++++ b/drivers/leds/leds-cht-wcove.c
+@@ -394,7 +394,7 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
+ 		led->cdev.pattern_clear = cht_wc_leds_pattern_clear;
+ 		led->cdev.max_brightness = 255;
+ 
+-		ret = led_classdev_register(&pdev->dev, &led->cdev);
++		ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -406,10 +406,6 @@ static int cht_wc_leds_probe(struct platform_device *pdev)
+ static void cht_wc_leds_remove(struct platform_device *pdev)
+ {
+ 	struct cht_wc_leds *leds = platform_get_drvdata(pdev);
+-	int i;
+-
+-	for (i = 0; i < CHT_WC_LED_COUNT; i++)
+-		led_classdev_unregister(&leds->leds[i].cdev);
+ 
+ 	/* Restore LED1 regs if hw-control was active else leave LED1 off */
+ 	if (!(leds->led1_initial_regs.ctrl & CHT_WC_LED1_SWCTL))
+diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
+index af5a908b8d9edd..e95287416ef879 100644
+--- a/drivers/leds/leds-netxbig.c
++++ b/drivers/leds/leds-netxbig.c
+@@ -439,6 +439,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
+ 	}
+ 	gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
+ 	if (!gpio_ext_pdev) {
++		of_node_put(gpio_ext_np);
+ 		dev_err(dev, "Failed to find platform device for gpio-ext\n");
+ 		return -ENODEV;
+ 	}
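The netxbig hunk plugs an OF node refcount leak. The rule it applies,
sketched here with an assumed of_parse_phandle() lookup: every node
reference taken by of_parse_phandle()/of_find_node_by_*() must be
dropped with of_node_put() on every exit path, error paths included.

np = of_parse_phandle(dev->of_node, "gpio-ext", 0);
if (!np)
	return -ENODEV;

ext_pdev = of_find_device_by_node(np);
if (!ext_pdev) {
	of_node_put(np);	/* don't leak the ref on failure */
	return -ENODEV;
}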
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
+index 4df546e3b7eaeb..d5d9effece9797 100644
+--- a/drivers/mailbox/mailbox-mpfs.c
++++ b/drivers/mailbox/mailbox-mpfs.c
+@@ -251,7 +251,7 @@ static inline int mpfs_mbox_syscon_probe(struct mpfs_mbox *mbox, struct platform
+ 		return PTR_ERR(mbox->sysreg_scb);
+ 
+ 	mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(mbox->ctrl_base))
++	if (IS_ERR(mbox->mbox_base))
+ 		return PTR_ERR(mbox->mbox_base);
+ 
+ 	return 0;
+diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
+index 4e84640ac3b876..a6b2aa9ae95206 100644
+--- a/drivers/mailbox/mailbox-th1520.c
++++ b/drivers/mailbox/mailbox-th1520.c
+@@ -41,7 +41,7 @@
+ #ifdef CONFIG_PM_SLEEP
+ /* store MBOX context across system-wide suspend/resume transitions */
+ struct th1520_mbox_context {
+-	u32 intr_mask[TH_1520_MBOX_CHANS - 1];
++	u32 intr_mask[TH_1520_MBOX_CHANS];
+ };
+ #endif
+ 
+@@ -387,8 +387,10 @@ static void __iomem *th1520_map_mmio(struct platform_device *pdev,
+ 
+ 	mapped = devm_ioremap(&pdev->dev, res->start + offset,
+ 			      resource_size(res) - offset);
+-	if (IS_ERR(mapped))
++	if (!mapped) {
+ 		dev_err(&pdev->dev, "Failed to map resource: %s\n", res_name);
++		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	return mapped;
+ }
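Both mailbox fixes above come down to checking the value the mapping
helper actually returns: devm_platform_ioremap_resource() reports
failure as an ERR_PTR, while devm_ioremap() returns NULL. A sketch of
the two correct checks side by side:

base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))	/* ERR_PTR-style API */
	return PTR_ERR(base);

regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!regs)		/* NULL-on-failure API */
	return -ENOMEM;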
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index c3a42dd66ce551..2e3087556adb37 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1671,24 +1671,13 @@ __acquires(bitmap->lock)
+ }
+ 
+ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+-			     unsigned long sectors, bool behind)
++			     unsigned long sectors)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+ 
+ 	if (!bitmap)
+ 		return 0;
+ 
+-	if (behind) {
+-		int bw;
+-		atomic_inc(&bitmap->behind_writes);
+-		bw = atomic_read(&bitmap->behind_writes);
+-		if (bw > bitmap->behind_writes_used)
+-			bitmap->behind_writes_used = bw;
+-
+-		pr_debug("inc write-behind count %d/%lu\n",
+-			 bw, bitmap->mddev->bitmap_info.max_write_behind);
+-	}
+-
+ 	while (sectors) {
+ 		sector_t blocks;
+ 		bitmap_counter_t *bmc;
+@@ -1737,21 +1726,13 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
+ }
+ 
+ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+-			    unsigned long sectors, bool success, bool behind)
++			    unsigned long sectors)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+ 
+ 	if (!bitmap)
+ 		return;
+ 
+-	if (behind) {
+-		if (atomic_dec_and_test(&bitmap->behind_writes))
+-			wake_up(&bitmap->behind_wait);
+-		pr_debug("dec write-behind count %d/%lu\n",
+-			 atomic_read(&bitmap->behind_writes),
+-			 bitmap->mddev->bitmap_info.max_write_behind);
+-	}
+-
+ 	while (sectors) {
+ 		sector_t blocks;
+ 		unsigned long flags;
+@@ -1764,15 +1745,16 @@ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+ 			return;
+ 		}
+ 
+-		if (success && !bitmap->mddev->degraded &&
+-		    bitmap->events_cleared < bitmap->mddev->events) {
+-			bitmap->events_cleared = bitmap->mddev->events;
+-			bitmap->need_sync = 1;
+-			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
+-		}
+-
+-		if (!success && !NEEDED(*bmc))
++		if (!bitmap->mddev->degraded) {
++			if (bitmap->events_cleared < bitmap->mddev->events) {
++				bitmap->events_cleared = bitmap->mddev->events;
++				bitmap->need_sync = 1;
++				sysfs_notify_dirent_safe(
++						bitmap->sysfs_can_clear);
++			}
++		} else if (!NEEDED(*bmc)) {
+ 			*bmc |= NEEDED_MASK;
++		}
+ 
+ 		if (COUNTER(*bmc) == COUNTER_MAX)
+ 			wake_up(&bitmap->overflow_wait);
+@@ -2062,6 +2044,37 @@ static void md_bitmap_free(void *data)
+ 	kfree(bitmap);
+ }
+ 
++static void bitmap_start_behind_write(struct mddev *mddev)
++{
++	struct bitmap *bitmap = mddev->bitmap;
++	int bw;
++
++	if (!bitmap)
++		return;
++
++	atomic_inc(&bitmap->behind_writes);
++	bw = atomic_read(&bitmap->behind_writes);
++	if (bw > bitmap->behind_writes_used)
++		bitmap->behind_writes_used = bw;
++
++	pr_debug("inc write-behind count %d/%lu\n",
++		 bw, bitmap->mddev->bitmap_info.max_write_behind);
++}
++
++static void bitmap_end_behind_write(struct mddev *mddev)
++{
++	struct bitmap *bitmap = mddev->bitmap;
++
++	if (!bitmap)
++		return;
++
++	if (atomic_dec_and_test(&bitmap->behind_writes))
++		wake_up(&bitmap->behind_wait);
++	pr_debug("dec write-behind count %d/%lu\n",
++		 atomic_read(&bitmap->behind_writes),
++		 bitmap->mddev->bitmap_info.max_write_behind);
++}
++
+ static void bitmap_wait_behind_writes(struct mddev *mddev)
+ {
+ 	struct bitmap *bitmap = mddev->bitmap;
+@@ -2342,7 +2355,10 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+ 
+ 	if (!bitmap)
+ 		return -ENOENT;
+-
++	if (bitmap->mddev->bitmap_info.external)
++		return -ENOENT;
++	if (!bitmap->storage.sb_page) /* no superblock */
++		return -EINVAL;
+ 	sb = kmap_local_page(bitmap->storage.sb_page);
+ 	stats->sync_size = le64_to_cpu(sb->sync_size);
+ 	kunmap_local(sb);
+@@ -2981,6 +2997,9 @@ static struct bitmap_operations bitmap_ops = {
+ 	.dirty_bits		= bitmap_dirty_bits,
+ 	.unplug			= bitmap_unplug,
+ 	.daemon_work		= bitmap_daemon_work,
++
++	.start_behind_write	= bitmap_start_behind_write,
++	.end_behind_write	= bitmap_end_behind_write,
+ 	.wait_behind_writes	= bitmap_wait_behind_writes,
+ 
+ 	.startwrite		= bitmap_startwrite,
+diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
+index 662e6fc141a775..31c93019c76bf3 100644
+--- a/drivers/md/md-bitmap.h
++++ b/drivers/md/md-bitmap.h
+@@ -84,12 +84,15 @@ struct bitmap_operations {
+ 			   unsigned long e);
+ 	void (*unplug)(struct mddev *mddev, bool sync);
+ 	void (*daemon_work)(struct mddev *mddev);
++
++	void (*start_behind_write)(struct mddev *mddev);
++	void (*end_behind_write)(struct mddev *mddev);
+ 	void (*wait_behind_writes)(struct mddev *mddev);
+ 
+ 	int (*startwrite)(struct mddev *mddev, sector_t offset,
+-			  unsigned long sectors, bool behind);
++			  unsigned long sectors);
+ 	void (*endwrite)(struct mddev *mddev, sector_t offset,
+-			 unsigned long sectors, bool success, bool behind);
++			 unsigned long sectors);
+ 	bool (*start_sync)(struct mddev *mddev, sector_t offset,
+ 			   sector_t *blocks, bool degraded);
+ 	void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index aebe12b0ee279f..f0d007f967f1d3 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8376,6 +8376,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 		return 0;
+ 
+ 	spin_unlock(&all_mddevs_lock);
++
++	/* prevent the bitmap from being freed after checking */
++	mutex_lock(&mddev->bitmap_info.mutex);
++
+ 	spin_lock(&mddev->lock);
+ 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
+ 		seq_printf(seq, "%s : ", mdname(mddev));
+@@ -8451,6 +8455,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 	}
+ 	spin_unlock(&mddev->lock);
++	mutex_unlock(&mddev->bitmap_info.mutex);
+ 	spin_lock(&all_mddevs_lock);
+ 
+ 	if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+@@ -8745,12 +8750,32 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
+ }
+ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+ 
++static void md_bitmap_start(struct mddev *mddev,
++			    struct md_io_clone *md_io_clone)
++{
++	if (mddev->pers->bitmap_sector)
++		mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
++					   &md_io_clone->sectors);
++
++	mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
++				      md_io_clone->sectors);
++}
++
++static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
++{
++	mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
++				    md_io_clone->sectors);
++}
++
+ static void md_end_clone_io(struct bio *bio)
+ {
+ 	struct md_io_clone *md_io_clone = bio->bi_private;
+ 	struct bio *orig_bio = md_io_clone->orig_bio;
+ 	struct mddev *mddev = md_io_clone->mddev;
+ 
++	if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++		md_bitmap_end(mddev, md_io_clone);
++
+ 	if (bio->bi_status && !orig_bio->bi_status)
+ 		orig_bio->bi_status = bio->bi_status;
+ 
+@@ -8775,6 +8800,12 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
+ 	if (blk_queue_io_stat(bdev->bd_disk->queue))
+ 		md_io_clone->start_time = bio_start_io_acct(*bio);
+ 
++	if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
++		md_io_clone->offset = (*bio)->bi_iter.bi_sector;
++		md_io_clone->sectors = bio_sectors(*bio);
++		md_bitmap_start(mddev, md_io_clone);
++	}
++
+ 	clone->bi_end_io = md_end_clone_io;
+ 	clone->bi_private = md_io_clone;
+ 	*bio = clone;
+@@ -8793,6 +8824,9 @@ void md_free_cloned_bio(struct bio *bio)
+ 	struct bio *orig_bio = md_io_clone->orig_bio;
+ 	struct mddev *mddev = md_io_clone->mddev;
+ 
++	if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++		md_bitmap_end(mddev, md_io_clone);
++
+ 	if (bio->bi_status && !orig_bio->bi_status)
+ 		orig_bio->bi_status = bio->bi_status;
+ 
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 4ba93af36126dd..def808064ad8ef 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -746,6 +746,9 @@ struct md_personality
+ 	void *(*takeover) (struct mddev *mddev);
+ 	/* Changes the consistency policy of an active array. */
+ 	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
++	/* convert io ranges from array to bitmap */
++	void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
++			      unsigned long *sectors);
+ };
+ 
+ struct md_sysfs_entry {
+@@ -828,6 +831,8 @@ struct md_io_clone {
+ 	struct mddev	*mddev;
+ 	struct bio	*orig_bio;
+ 	unsigned long	start_time;
++	sector_t	offset;
++	unsigned long	sectors;
+ 	struct bio	bio_clone;
+ };
+ 
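A hedged sketch of the bracketing the md core now performs around
cloned write bios (the helper names are invented): the write range is
captured when the clone is created, and the bitmap accounting is
balanced exactly once when the clone completes or is freed.

static void start_write_sketch(struct mddev *mddev, struct bio *bio,
			       struct md_io_clone *mc)
{
	mc->offset  = bio->bi_iter.bi_sector;
	mc->sectors = bio_sectors(bio);
	mddev->bitmap_ops->startwrite(mddev, mc->offset, mc->sectors);
}

static void end_write_sketch(struct mddev *mddev, struct md_io_clone *mc)
{
	/* must mirror every startwrite exactly once */
	mddev->bitmap_ops->endwrite(mddev, mc->offset, mc->sectors);
}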
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 519c56f0ee3d40..a5cd6522fc2d4d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -420,10 +420,8 @@ static void close_write(struct r1bio *r1_bio)
+ 		r1_bio->behind_master_bio = NULL;
+ 	}
+ 
+-	/* clear the bitmap if all writes complete successfully */
+-	mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
+-				    !test_bit(R1BIO_Degraded, &r1_bio->state),
+-				    test_bit(R1BIO_BehindIO, &r1_bio->state));
++	if (test_bit(R1BIO_BehindIO, &r1_bio->state))
++		mddev->bitmap_ops->end_behind_write(mddev);
+ 	md_write_end(mddev);
+ }
+ 
+@@ -480,8 +478,6 @@ static void raid1_end_write_request(struct bio *bio)
+ 		if (!test_bit(Faulty, &rdev->flags))
+ 			set_bit(R1BIO_WriteError, &r1_bio->state);
+ 		else {
+-			/* Fail the request */
+-			set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			/* Finished with this branch */
+ 			r1_bio->bios[mirror] = NULL;
+ 			to_put = bio;
+@@ -1535,11 +1531,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 			write_behind = true;
+ 
+ 		r1_bio->bios[i] = NULL;
+-		if (!rdev || test_bit(Faulty, &rdev->flags)) {
+-			if (i < conf->raid_disks)
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
++		if (!rdev || test_bit(Faulty, &rdev->flags))
+ 			continue;
+-		}
+ 
+ 		atomic_inc(&rdev->nr_pending);
+ 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
+@@ -1558,16 +1551,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 					 */
+ 					max_sectors = bad_sectors;
+ 				rdev_dec_pending(rdev, mddev);
+-				/* We don't set R1BIO_Degraded as that
+-				 * only applies if the disk is
+-				 * missing, so it might be re-added,
+-				 * and we want to know to recover this
+-				 * chunk.
+-				 * In this case the device is here,
+-				 * and the fact that this chunk is not
+-				 * in-sync is recorded in the bad
+-				 * block log
+-				 */
+ 				continue;
+ 			}
+ 			if (is_bad) {
+@@ -1645,9 +1628,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ 			    stats.behind_writes < max_write_behind)
+ 				alloc_behind_master_bio(r1_bio, bio);
+ 
+-			mddev->bitmap_ops->startwrite(
+-				mddev, r1_bio->sector, r1_bio->sectors,
+-				test_bit(R1BIO_BehindIO, &r1_bio->state));
++			if (test_bit(R1BIO_BehindIO, &r1_bio->state))
++				mddev->bitmap_ops->start_behind_write(mddev);
+ 			first_clone = 0;
+ 		}
+ 
+@@ -2614,12 +2596,10 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+ 			 * errors.
+ 			 */
+ 			fail = true;
+-			if (!narrow_write_error(r1_bio, m)) {
++			if (!narrow_write_error(r1_bio, m))
+ 				md_error(conf->mddev,
+ 					 conf->mirrors[m].rdev);
+ 				/* an I/O failed, we can't clear the bitmap */
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
+-			}
+ 			rdev_dec_pending(conf->mirrors[m].rdev,
+ 					 conf->mddev);
+ 		}
+@@ -2710,8 +2690,6 @@ static void raid1d(struct md_thread *thread)
+ 			list_del(&r1_bio->retry_list);
+ 			idx = sector_to_idx(r1_bio->sector);
+ 			atomic_dec(&conf->nr_queued[idx]);
+-			if (mddev->degraded)
+-				set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ 				close_write(r1_bio);
+ 			raid_end_bio_io(r1_bio);
+diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
+index 5300cbaa58a415..33f318fcc268d8 100644
+--- a/drivers/md/raid1.h
++++ b/drivers/md/raid1.h
+@@ -188,7 +188,6 @@ struct r1bio {
+ enum r1bio_state {
+ 	R1BIO_Uptodate,
+ 	R1BIO_IsSync,
+-	R1BIO_Degraded,
+ 	R1BIO_BehindIO,
+ /* Set ReadError on bios that experience a readerror so that
+  * raid1d knows what to do with them.
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 7d7a8a2524dcab..e1e6cd7fb125e1 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -428,10 +428,6 @@ static void close_write(struct r10bio *r10_bio)
+ {
+ 	struct mddev *mddev = r10_bio->mddev;
+ 
+-	/* clear the bitmap if all writes complete successfully */
+-	mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
+-				    !test_bit(R10BIO_Degraded, &r10_bio->state),
+-				    false);
+ 	md_write_end(mddev);
+ }
+ 
+@@ -501,7 +497,6 @@ static void raid10_end_write_request(struct bio *bio)
+ 				set_bit(R10BIO_WriteError, &r10_bio->state);
+ 			else {
+ 				/* Fail the request */
+-				set_bit(R10BIO_Degraded, &r10_bio->state);
+ 				r10_bio->devs[slot].bio = NULL;
+ 				to_put = bio;
+ 				dec_rdev = 1;
+@@ -1438,10 +1433,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 		r10_bio->devs[i].bio = NULL;
+ 		r10_bio->devs[i].repl_bio = NULL;
+ 
+-		if (!rdev && !rrdev) {
+-			set_bit(R10BIO_Degraded, &r10_bio->state);
++		if (!rdev && !rrdev)
+ 			continue;
+-		}
+ 		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ 			sector_t first_bad;
+ 			sector_t dev_sector = r10_bio->devs[i].addr;
+@@ -1458,14 +1451,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 					 * to other devices yet
+ 					 */
+ 					max_sectors = bad_sectors;
+-				/* We don't set R10BIO_Degraded as that
+-				 * only applies if the disk is missing,
+-				 * so it might be re-added, and we want to
+-				 * know to recover this chunk.
+-				 * In this case the device is here, and the
+-				 * fact that this chunk is not in-sync is
+-				 * recorded in the bad block log.
+-				 */
+ 				continue;
+ 			}
+ 			if (is_bad) {
+@@ -1519,8 +1504,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ 	md_account_bio(mddev, &bio);
+ 	r10_bio->master_bio = bio;
+ 	atomic_set(&r10_bio->remaining, 1);
+-	mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors,
+-				      false);
+ 
+ 	for (i = 0; i < conf->copies; i++) {
+ 		if (r10_bio->devs[i].bio)
+@@ -2966,11 +2949,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+ 				rdev_dec_pending(rdev, conf->mddev);
+ 			} else if (bio != NULL && bio->bi_status) {
+ 				fail = true;
+-				if (!narrow_write_error(r10_bio, m)) {
++				if (!narrow_write_error(r10_bio, m))
+ 					md_error(conf->mddev, rdev);
+-					set_bit(R10BIO_Degraded,
+-						&r10_bio->state);
+-				}
+ 				rdev_dec_pending(rdev, conf->mddev);
+ 			}
+ 			bio = r10_bio->devs[m].repl_bio;
+@@ -3029,8 +3009,6 @@ static void raid10d(struct md_thread *thread)
+ 			r10_bio = list_first_entry(&tmp, struct r10bio,
+ 						   retry_list);
+ 			list_del(&r10_bio->retry_list);
+-			if (mddev->degraded)
+-				set_bit(R10BIO_Degraded, &r10_bio->state);
+ 
+ 			if (test_bit(R10BIO_WriteError,
+ 				     &r10_bio->state))
+diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
+index 2e75e88d08023f..3f16ad6904a9fb 100644
+--- a/drivers/md/raid10.h
++++ b/drivers/md/raid10.h
+@@ -161,7 +161,6 @@ enum r10bio_state {
+ 	R10BIO_IsSync,
+ 	R10BIO_IsRecover,
+ 	R10BIO_IsReshape,
+-	R10BIO_Degraded,
+ /* Set ReadError on bios that experience a read error
+  * so that raid10d knows what to do with them.
+  */
+diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
+index b4f7b79fd187d0..011246e16a99e5 100644
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -313,10 +313,6 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
+ 		if (sh->dev[i].written) {
+ 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ 			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					!test_bit(STRIPE_DEGRADED, &sh->state),
+-					false);
+ 		}
+ 	}
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index f09e7677ee9f1b..5c79429acc64da 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
+ 	if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ 		return false;
+ 	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+-		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
+-		is_full_stripe_write(sh);
++	       is_full_stripe_write(sh);
+ }
+ 
+ /* we only do back search */
+@@ -1345,8 +1344,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ 				submit_bio_noacct(rbi);
+ 		}
+ 		if (!rdev && !rrdev) {
+-			if (op_is_write(op))
+-				set_bit(STRIPE_DEGRADED, &sh->state);
+ 			pr_debug("skip op %d on disc %d for sector %llu\n",
+ 				bi->bi_opf, i, (unsigned long long)sh->sector);
+ 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
+@@ -2884,7 +2881,6 @@ static void raid5_end_write_request(struct bio *bi)
+ 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ 	} else {
+ 		if (bi->bi_status) {
+-			set_bit(STRIPE_DEGRADED, &sh->state);
+ 			set_bit(WriteErrorSeen, &rdev->flags);
+ 			set_bit(R5_WriteError, &sh->dev[i].flags);
+ 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
+@@ -3548,29 +3544,9 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
+ 		 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ 		 sh->dev[dd_idx].sector);
+ 
+-	if (conf->mddev->bitmap && firstwrite) {
+-		/* Cannot hold spinlock over bitmap_startwrite,
+-		 * but must ensure this isn't added to a batch until
+-		 * we have added to the bitmap and set bm_seq.
+-		 * So set STRIPE_BITMAP_PENDING to prevent
+-		 * batching.
+-		 * If multiple __add_stripe_bio() calls race here they
+-		 * much all set STRIPE_BITMAP_PENDING.  So only the first one
+-		 * to complete "bitmap_startwrite" gets to set
+-		 * STRIPE_BIT_DELAY.  This is important as once a stripe
+-		 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+-		 * any more.
+-		 */
+-		set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+-		spin_unlock_irq(&sh->stripe_lock);
+-		conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
+-					RAID5_STRIPE_SECTORS(conf), false);
+-		spin_lock_irq(&sh->stripe_lock);
+-		clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+-		if (!sh->batch_head) {
+-			sh->bm_seq = conf->seq_flush+1;
+-			set_bit(STRIPE_BIT_DELAY, &sh->state);
+-		}
++	if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
++		sh->bm_seq = conf->seq_flush+1;
++		set_bit(STRIPE_BIT_DELAY, &sh->state);
+ 	}
+ }
+ 
+@@ -3621,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 	BUG_ON(sh->batch_head);
+ 	for (i = disks; i--; ) {
+ 		struct bio *bi;
+-		int bitmap_end = 0;
+ 
+ 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ 			struct md_rdev *rdev = conf->disks[i].rdev;
+@@ -3646,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 		sh->dev[i].towrite = NULL;
+ 		sh->overwrite_disks = 0;
+ 		spin_unlock_irq(&sh->stripe_lock);
+-		if (bi)
+-			bitmap_end = 1;
+ 
+ 		log_stripe_write_finished(sh);
+ 
+@@ -3662,11 +3635,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 			bio_io_error(bi);
+ 			bi = nextbi;
+ 		}
+-		if (bitmap_end)
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					false, false);
+-		bitmap_end = 0;
+ 		/* and fail all 'written' */
+ 		bi = sh->dev[i].written;
+ 		sh->dev[i].written = NULL;
+@@ -3675,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 			sh->dev[i].page = sh->dev[i].orig_page;
+ 		}
+ 
+-		if (bi) bitmap_end = 1;
+ 		while (bi && bi->bi_iter.bi_sector <
+ 		       sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
+ 			struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
+@@ -3709,10 +3676,6 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
+ 				bi = nextbi;
+ 			}
+ 		}
+-		if (bitmap_end)
+-			conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					false, false);
+ 		/* If we were in the middle of a write the parity block might
+ 		 * still be locked - so just clear all R5_LOCKED flags
+ 		 */
+@@ -4061,10 +4024,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
+ 					bio_endio(wbi);
+ 					wbi = wbi2;
+ 				}
+-				conf->mddev->bitmap_ops->endwrite(conf->mddev,
+-					sh->sector, RAID5_STRIPE_SECTORS(conf),
+-					!test_bit(STRIPE_DEGRADED, &sh->state),
+-					false);
++
+ 				if (head_sh->batch_head) {
+ 					sh = list_first_entry(&sh->batch_list,
+ 							      struct stripe_head,
+@@ -4341,7 +4301,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
+ 		s->locked++;
+ 		set_bit(R5_Wantwrite, &dev->flags);
+ 
+-		clear_bit(STRIPE_DEGRADED, &sh->state);
+ 		set_bit(STRIPE_INSYNC, &sh->state);
+ 		break;
+ 	case check_state_run:
+@@ -4498,7 +4457,6 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
+ 			clear_bit(R5_Wantwrite, &dev->flags);
+ 			s->locked--;
+ 		}
+-		clear_bit(STRIPE_DEGRADED, &sh->state);
+ 
+ 		set_bit(STRIPE_INSYNC, &sh->state);
+ 		break;
+@@ -4891,8 +4849,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 					  (1 << STRIPE_COMPUTE_RUN)  |
+ 					  (1 << STRIPE_DISCARD) |
+ 					  (1 << STRIPE_BATCH_READY) |
+-					  (1 << STRIPE_BATCH_ERR) |
+-					  (1 << STRIPE_BITMAP_PENDING)),
++					  (1 << STRIPE_BATCH_ERR)),
+ 			"stripe state: %lx\n", sh->state);
+ 		WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+ 					      (1 << STRIPE_REPLACED)),
+@@ -4900,7 +4857,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ 					    (1 << STRIPE_PREREAD_ACTIVE) |
+-					    (1 << STRIPE_DEGRADED) |
+ 					    (1 << STRIPE_ON_UNPLUG_LIST)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+@@ -5784,10 +5740,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
+ 		}
+ 		spin_unlock_irq(&sh->stripe_lock);
+ 		if (conf->mddev->bitmap) {
+-			for (d = 0; d < conf->raid_disks - conf->max_degraded;
+-			     d++)
+-				mddev->bitmap_ops->startwrite(mddev, sh->sector,
+-					RAID5_STRIPE_SECTORS(conf), false);
+ 			sh->bm_seq = conf->seq_flush + 1;
+ 			set_bit(STRIPE_BIT_DELAY, &sh->state);
+ 		}
+@@ -5928,6 +5880,54 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
+ 	return LOC_BEHIND_RESHAPE;
+ }
+ 
++static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
++				unsigned long *sectors)
++{
++	struct r5conf *conf = mddev->private;
++	sector_t start = *offset;
++	sector_t end = start + *sectors;
++	sector_t prev_start = start;
++	sector_t prev_end = end;
++	int sectors_per_chunk;
++	enum reshape_loc loc;
++	int dd_idx;
++
++	sectors_per_chunk = conf->chunk_sectors *
++		(conf->raid_disks - conf->max_degraded);
++	start = round_down(start, sectors_per_chunk);
++	end = round_up(end, sectors_per_chunk);
++
++	start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
++	end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
++
++	/*
++	 * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
++	 * progress, hence it's the same as LOC_BEHIND_RESHAPE.
++	 */
++	loc = get_reshape_loc(mddev, conf, prev_start);
++	if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
++		*offset = start;
++		*sectors = end - start;
++		return;
++	}
++
++	sectors_per_chunk = conf->prev_chunk_sectors *
++		(conf->previous_raid_disks - conf->max_degraded);
++	prev_start = round_down(prev_start, sectors_per_chunk);
++	prev_end = round_down(prev_end, sectors_per_chunk);
++
++	prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
++	prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
++
++	/*
++	 * For LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
++	 * is handled in make_stripe_request(); we can't know that here, hence
++	 * we set bits for both ranges.
++	 */
++	*offset = min(start, prev_start);
++	*sectors = max(end, prev_end) - *offset;
++}
++
+ static enum stripe_result make_stripe_request(struct mddev *mddev,
+ 		struct r5conf *conf, struct stripe_request_ctx *ctx,
+ 		sector_t logical_sector, struct bio *bi)
+@@ -8976,6 +8976,7 @@ static struct md_personality raid6_personality =
+ 	.takeover	= raid6_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ static struct md_personality raid5_personality =
+ {
+@@ -9001,6 +9002,7 @@ static struct md_personality raid5_personality =
+ 	.takeover	= raid5_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ 
+ static struct md_personality raid4_personality =
+@@ -9027,6 +9029,7 @@ static struct md_personality raid4_personality =
+ 	.takeover	= raid4_takeover,
+ 	.change_consistency_policy = raid5_change_consistency_policy,
+ 	.prepare_suspend = raid5_prepare_suspend,
++	.bitmap_sector	= raid5_bitmap_sector,
+ };
+ 
+ static int __init raid5_init(void)
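raid5_bitmap_sector() above widens an IO range to whole data-chunk
multiples before translating it to bitmap sectors. A worked example of
the rounding, assuming a power-of-two data stride (which the kernel's
round_down()/round_up() macros require) of 4096 sectors:

sector_t start = 10240, end = 10304;	/* example IO, in sectors */
int sectors_per_chunk = 4096;		/* chunk_sectors * data disks */

start = round_down(start, sectors_per_chunk);	/* 10240 -> 8192  */
end   = round_up(end, sectors_per_chunk);	/* 10304 -> 12288 */
/* the widened range covers every stripe the IO can touch */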
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index d174e586698f04..eafc6e9ed6ee1c 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -358,7 +358,6 @@ enum {
+ 	STRIPE_REPLACED,
+ 	STRIPE_PREREAD_ACTIVE,
+ 	STRIPE_DELAYED,
+-	STRIPE_DEGRADED,
+ 	STRIPE_BIT_DELAY,
+ 	STRIPE_EXPANDING,
+ 	STRIPE_EXPAND_SOURCE,
+@@ -372,9 +371,6 @@ enum {
+ 	STRIPE_ON_RELEASE_LIST,
+ 	STRIPE_BATCH_READY,
+ 	STRIPE_BATCH_ERR,
+-	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
+-				 * to batch yet.
+-				 */
+ 	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
+ 				 * this bit is used in two scenarios:
+ 				 *
+diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
+index f5ee6bd3b52d63..c3a707deee3f55 100644
+--- a/drivers/media/i2c/imx290.c
++++ b/drivers/media/i2c/imx290.c
+@@ -267,7 +267,6 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
+ 	{ IMX290_WINWV, 1097 },
+ 	{ IMX290_XSOUTSEL, IMX290_XSOUTSEL_XVSOUTSEL_VSYNC |
+ 			   IMX290_XSOUTSEL_XHSOUTSEL_HSYNC },
+-	{ CCI_REG8(0x3011), 0x02 },
+ 	{ CCI_REG8(0x3012), 0x64 },
+ 	{ CCI_REG8(0x3013), 0x00 },
+ };
+@@ -275,6 +274,7 @@ static const struct cci_reg_sequence imx290_global_init_settings[] = {
+ static const struct cci_reg_sequence imx290_global_init_settings_290[] = {
+ 	{ CCI_REG8(0x300f), 0x00 },
+ 	{ CCI_REG8(0x3010), 0x21 },
++	{ CCI_REG8(0x3011), 0x00 },
+ 	{ CCI_REG8(0x3016), 0x09 },
+ 	{ CCI_REG8(0x3070), 0x02 },
+ 	{ CCI_REG8(0x3071), 0x11 },
+@@ -328,6 +328,7 @@ static const struct cci_reg_sequence xclk_regs[][IMX290_NUM_CLK_REGS] = {
+ };
+ 
+ static const struct cci_reg_sequence imx290_global_init_settings_327[] = {
++	{ CCI_REG8(0x3011), 0x02 },
+ 	{ CCI_REG8(0x309e), 0x4A },
+ 	{ CCI_REG8(0x309f), 0x4A },
+ 	{ CCI_REG8(0x313b), 0x61 },
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index 0bfe3046fcc872..c74097a59c4285 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -547,7 +547,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain)
+ 
+ 	lpfr = imx412->vblank + imx412->cur_mode->height;
+ 
+-	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u",
++	dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u\n",
+ 		exposure, gain, lpfr);
+ 
+ 	ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1);
+@@ -594,7 +594,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 	case V4L2_CID_VBLANK:
+ 		imx412->vblank = imx412->vblank_ctrl->val;
+ 
+-		dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u",
++		dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u\n",
+ 			imx412->vblank,
+ 			imx412->vblank + imx412->cur_mode->height);
+ 
+@@ -613,7 +613,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 		exposure = ctrl->val;
+ 		analog_gain = imx412->again_ctrl->val;
+ 
+-		dev_dbg(imx412->dev, "Received exp %u, analog gain %u",
++		dev_dbg(imx412->dev, "Received exp %u, analog gain %u\n",
+ 			exposure, analog_gain);
+ 
+ 		ret = imx412_update_exp_gain(imx412, exposure, analog_gain);
+@@ -622,7 +622,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl)
+ 
+ 		break;
+ 	default:
+-		dev_err(imx412->dev, "Invalid control %d", ctrl->id);
++		dev_err(imx412->dev, "Invalid control %d\n", ctrl->id);
+ 		ret = -EINVAL;
+ 	}
+ 
+@@ -803,14 +803,14 @@ static int imx412_start_streaming(struct imx412 *imx412)
+ 	ret = imx412_write_regs(imx412, reg_list->regs,
+ 				reg_list->num_of_regs);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to write initial registers");
++		dev_err(imx412->dev, "fail to write initial registers\n");
+ 		return ret;
+ 	}
+ 
+ 	/* Setup handler will write actual exposure and gain */
+ 	ret =  __v4l2_ctrl_handler_setup(imx412->sd.ctrl_handler);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to setup handler");
++		dev_err(imx412->dev, "fail to setup handler\n");
+ 		return ret;
+ 	}
+ 
+@@ -821,7 +821,7 @@ static int imx412_start_streaming(struct imx412 *imx412)
+ 	ret = imx412_write_reg(imx412, IMX412_REG_MODE_SELECT,
+ 			       1, IMX412_MODE_STREAMING);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to start streaming");
++		dev_err(imx412->dev, "fail to start streaming\n");
+ 		return ret;
+ 	}
+ 
+@@ -895,7 +895,7 @@ static int imx412_detect(struct imx412 *imx412)
+ 		return ret;
+ 
+ 	if (val != IMX412_ID) {
+-		dev_err(imx412->dev, "chip id mismatch: %x!=%x",
++		dev_err(imx412->dev, "chip id mismatch: %x!=%x\n",
+ 			IMX412_ID, val);
+ 		return -ENXIO;
+ 	}
+@@ -927,7 +927,7 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 	imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset",
+ 						     GPIOD_OUT_LOW);
+ 	if (IS_ERR(imx412->reset_gpio)) {
+-		dev_err(imx412->dev, "failed to get reset gpio %ld",
++		dev_err(imx412->dev, "failed to get reset gpio %ld\n",
+ 			PTR_ERR(imx412->reset_gpio));
+ 		return PTR_ERR(imx412->reset_gpio);
+ 	}
+@@ -935,13 +935,13 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 	/* Get sensor input clock */
+ 	imx412->inclk = devm_clk_get(imx412->dev, NULL);
+ 	if (IS_ERR(imx412->inclk)) {
+-		dev_err(imx412->dev, "could not get inclk");
++		dev_err(imx412->dev, "could not get inclk\n");
+ 		return PTR_ERR(imx412->inclk);
+ 	}
+ 
+ 	rate = clk_get_rate(imx412->inclk);
+ 	if (rate != IMX412_INCLK_RATE) {
+-		dev_err(imx412->dev, "inclk frequency mismatch");
++		dev_err(imx412->dev, "inclk frequency mismatch\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -966,14 +966,14 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
+ 
+ 	if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX412_NUM_DATA_LANES) {
+ 		dev_err(imx412->dev,
+-			"number of CSI2 data lanes %d is not supported",
++			"number of CSI2 data lanes %d is not supported\n",
+ 			bus_cfg.bus.mipi_csi2.num_data_lanes);
+ 		ret = -EINVAL;
+ 		goto done_endpoint_free;
+ 	}
+ 
+ 	if (!bus_cfg.nr_of_link_frequencies) {
+-		dev_err(imx412->dev, "no link frequencies defined");
++		dev_err(imx412->dev, "no link frequencies defined\n");
+ 		ret = -EINVAL;
+ 		goto done_endpoint_free;
+ 	}
+@@ -1034,7 +1034,7 @@ static int imx412_power_on(struct device *dev)
+ 
+ 	ret = clk_prepare_enable(imx412->inclk);
+ 	if (ret) {
+-		dev_err(imx412->dev, "fail to enable inclk");
++		dev_err(imx412->dev, "fail to enable inclk\n");
+ 		goto error_reset;
+ 	}
+ 
+@@ -1145,7 +1145,7 @@ static int imx412_init_controls(struct imx412 *imx412)
+ 		imx412->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ 
+ 	if (ctrl_hdlr->error) {
+-		dev_err(imx412->dev, "control init failed: %d",
++		dev_err(imx412->dev, "control init failed: %d\n",
+ 			ctrl_hdlr->error);
+ 		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
+@@ -1183,7 +1183,7 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_parse_hw_config(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "HW configuration is not supported");
++		dev_err(imx412->dev, "HW configuration is not supported\n");
+ 		return ret;
+ 	}
+ 
+@@ -1191,14 +1191,14 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_power_on(imx412->dev);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to power-on the sensor");
++		dev_err(imx412->dev, "failed to power-on the sensor\n");
+ 		goto error_mutex_destroy;
+ 	}
+ 
+ 	/* Check module identity */
+ 	ret = imx412_detect(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to find sensor: %d", ret);
++		dev_err(imx412->dev, "failed to find sensor: %d\n", ret);
+ 		goto error_power_off;
+ 	}
+ 
+@@ -1208,7 +1208,7 @@ static int imx412_probe(struct i2c_client *client)
+ 
+ 	ret = imx412_init_controls(imx412);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to init controls: %d", ret);
++		dev_err(imx412->dev, "failed to init controls: %d\n", ret);
+ 		goto error_power_off;
+ 	}
+ 
+@@ -1222,14 +1222,14 @@ static int imx412_probe(struct i2c_client *client)
+ 	imx412->pad.flags = MEDIA_PAD_FL_SOURCE;
+ 	ret = media_entity_pads_init(&imx412->sd.entity, 1, &imx412->pad);
+ 	if (ret) {
+-		dev_err(imx412->dev, "failed to init entity pads: %d", ret);
++		dev_err(imx412->dev, "failed to init entity pads: %d\n", ret);
+ 		goto error_handler_free;
+ 	}
+ 
+ 	ret = v4l2_async_register_subdev_sensor(&imx412->sd);
+ 	if (ret < 0) {
+ 		dev_err(imx412->dev,
+-			"failed to register async subdev: %d", ret);
++			"failed to register async subdev: %d\n", ret);
+ 		goto error_media_entity;
+ 	}
+ 
+diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
+index 9f52af6f047f3c..87e5d7ce5a47ee 100644
+--- a/drivers/media/i2c/ov9282.c
++++ b/drivers/media/i2c/ov9282.c
+@@ -40,7 +40,7 @@
+ /* Exposure control */
+ #define OV9282_REG_EXPOSURE	0x3500
+ #define OV9282_EXPOSURE_MIN	1
+-#define OV9282_EXPOSURE_OFFSET	12
++#define OV9282_EXPOSURE_OFFSET	25
+ #define OV9282_EXPOSURE_STEP	1
+ #define OV9282_EXPOSURE_DEFAULT	0x0282
+ 
+diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
+index 9ec01228f90731..b8360d37000a7d 100644
+--- a/drivers/media/platform/marvell/mcam-core.c
++++ b/drivers/media/platform/marvell/mcam-core.c
+@@ -935,7 +935,12 @@ static int mclk_enable(struct clk_hw *hw)
+ 	ret = pm_runtime_resume_and_get(cam->dev);
+ 	if (ret < 0)
+ 		return ret;
+-	clk_enable(cam->clk[0]);
++	ret = clk_enable(cam->clk[0]);
++	if (ret) {
++		pm_runtime_put(cam->dev);
++		return ret;
++	}
++
+ 	mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ 	mcam_ctlr_power_up(cam);
+ 
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 7f5fe551179b9e..1221b309a91639 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -2677,11 +2677,12 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg)
+ 	int i;
+ 
+ 	for (i = 0; i < jpeg->num_domains; i++) {
+-		if (jpeg->pd_dev[i] && !pm_runtime_suspended(jpeg->pd_dev[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]) &&
++		    !pm_runtime_suspended(jpeg->pd_dev[i]))
+ 			pm_runtime_force_suspend(jpeg->pd_dev[i]);
+-		if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_link[i]))
+ 			device_link_del(jpeg->pd_link[i]);
+-		if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i]))
++		if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]))
+ 			dev_pm_domain_detach(jpeg->pd_dev[i], true);
+ 		jpeg->pd_dev[i] = NULL;
+ 		jpeg->pd_link[i] = NULL;
+diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+index c0ba34ea82fd79..8654150728a869 100644
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+@@ -861,6 +861,7 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
+ 				 const struct mxc_isi_format_info *info,
+ 				 const struct v4l2_pix_format_mplane *pix)
+ {
++	struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb2);
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < info->mem_planes; i++) {
+@@ -875,6 +876,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
+ 		vb2_set_plane_payload(vb2, i, size);
+ 	}
+ 
++	v4l2_buf->field = pix->field;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+index 63f3eecdd7e699..452880b5350cd1 100644
+--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
++++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+@@ -940,13 +940,19 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime)
+ 					       state->supplies);
+ 			goto unlock;
+ 		}
+-		clk_enable(state->clock[CSIS_CLK_GATE]);
++		ret = clk_enable(state->clock[CSIS_CLK_GATE]);
++		if (ret) {
++			phy_power_off(state->phy);
++			regulator_bulk_disable(CSIS_NUM_SUPPLIES,
++					       state->supplies);
++			goto unlock;
++		}
+ 	}
+ 	if (state->flags & ST_STREAMING)
+ 		s5pcsis_start_stream(state);
+ 
+ 	state->flags &= ~ST_SUSPENDED;
+- unlock:
++unlock:
+ 	mutex_unlock(&state->lock);
+ 	return ret ? -EAGAIN : 0;
+ }
+diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
+index de6e8f1518496a..221e3c447f361d 100644
+--- a/drivers/media/platform/samsung/s3c-camif/camif-core.c
++++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
+@@ -527,10 +527,19 @@ static void s3c_camif_remove(struct platform_device *pdev)
+ static int s3c_camif_runtime_resume(struct device *dev)
+ {
+ 	struct camif_dev *camif = dev_get_drvdata(dev);
++	int ret;
++
++	ret = clk_enable(camif->clock[CLK_GATE]);
++	if (ret)
++		return ret;
+ 
+-	clk_enable(camif->clock[CLK_GATE]);
+ 	/* null op on s3c244x */
+-	clk_enable(camif->clock[CLK_CAM]);
++	ret = clk_enable(camif->clock[CLK_CAM]);
++	if (ret) {
++		clk_disable(camif->clock[CLK_GATE]);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
+index 276bf3c8a8cb49..8af94246e5916e 100644
+--- a/drivers/media/rc/iguanair.c
++++ b/drivers/media/rc/iguanair.c
+@@ -194,8 +194,10 @@ static int iguanair_send(struct iguanair *ir, unsigned size)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0)
++	if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) {
++		usb_kill_urb(ir->urb_out);
+ 		return -ETIMEDOUT;
++	}
+ 
+ 	return rc;
+ }
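The iguanair fix applies a standard USB rule: if the completion never
fires, the URB is still in flight, so it must be reaped before the
submit path returns and its buffers can be reused. usb_kill_urb() is
synchronous, so no completion handler runs after it. Generic sketch:

if (!wait_for_completion_timeout(&done, timeout)) {
	usb_kill_urb(urb);	/* guarantees the URB is idle */
	return -ETIMEDOUT;
}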
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 0d2c42819d3909..218f712f56b17c 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -322,13 +322,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			/* demod access via firmware interface */
++			u32 reg;
++
+ 			if (msg[0].len < 3 || msg[1].len < 1) {
+ 				ret = -EOPNOTSUPP;
+ 				goto unlock;
+ 			}
+-			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+-					msg[0].buf[2];
++
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++				msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
+@@ -385,13 +388,16 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ 			ret = -EOPNOTSUPP;
+ 		} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ 			   (msg[0].addr == state->af9033_i2c_addr[1])) {
++			/* demod access via firmware interface */
++			u32 reg;
++
+ 			if (msg[0].len < 3) {
+ 				ret = -EOPNOTSUPP;
+ 				goto unlock;
+ 			}
+-			/* demod access via firmware interface */
+-			u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+-					msg[0].buf[2];
++
++			reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
++				msg[0].buf[2];
+ 
+ 			if (msg[0].addr == state->af9033_i2c_addr[1])
+ 				reg |= 0x100000;
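
Both hunks in this file move the u32 reg declaration to the top of the block: the kernel builds with -Wdeclaration-after-statement, so a declaration following the length check is rejected, and initializing reg before the check would compute from unvalidated buffer contents. A sketch of the declare/validate/use ordering; parse_reg() is hypothetical:

#include <linux/i2c.h>

static int parse_reg(const struct i2c_msg *msg, u32 *reg)
{
	const u8 *b = msg->buf;		/* declarations first ... */

	if (msg->len < 3)		/* ... validation second ... */
		return -EOPNOTSUPP;

	*reg = b[0] << 16 | b[1] << 8 | b[2];	/* ... use last */
	return 0;
}
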
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 8a34e6c0d6a6d1..f0537b741d1352 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -373,6 +373,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 	struct dvb_usb_device *d = adap_to_d(adap);
+ 	struct lme2510_state *lme_int = adap_to_priv(adap);
+ 	struct usb_host_endpoint *ep;
++	int ret;
+ 
+ 	lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 
+@@ -390,11 +391,20 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 
+ 	/* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
+ 	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
++	if (!ep) {
++		usb_free_urb(lme_int->lme_urb);
++		return -ENODEV;
++	}
+ 
+ 	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ 		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+ 
+-	usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
++	ret = usb_submit_urb(lme_int->lme_urb, GFP_KERNEL);
++	if (ret) {
++		usb_free_urb(lme_int->lme_urb);
++		return ret;
++	}
++
+ 	info("INT Interrupt Service Started");
+ 
+ 	return 0;
+diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
+index 26ee85657fc89d..f8464f0aae1b83 100644
+--- a/drivers/media/usb/uvc/uvc_queue.c
++++ b/drivers/media/usb/uvc/uvc_queue.c
+@@ -479,7 +479,8 @@ static void uvc_queue_buffer_complete(struct kref *ref)
+ 
+ 	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ 	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+-	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
++	vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
++							VB2_BUF_STATE_DONE);
+ }
+ 
+ /*
+diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
+index 06c867510c8fe6..eca5e6c3b06158 100644
+--- a/drivers/media/usb/uvc/uvc_status.c
++++ b/drivers/media/usb/uvc/uvc_status.c
+@@ -271,6 +271,7 @@ int uvc_status_init(struct uvc_device *dev)
+ 	dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 	if (!dev->int_urb) {
+ 		kfree(dev->status);
++		dev->status = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -294,6 +295,9 @@ int uvc_status_init(struct uvc_device *dev)
+ 
+ void uvc_status_unregister(struct uvc_device *dev)
+ {
++	if (!dev->status)
++		return;
++
+ 	uvc_status_suspend(dev);
+ 	uvc_input_unregister(dev);
+ }
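
The init error path now clears dev->status after freeing it, and unregister bails out when dev->status is unset, so a probe that failed half-way no longer causes a double free or use-after-free at teardown. The free-and-NULL idiom, sketched on a hypothetical struct thing:

#include <linux/slab.h>
#include <linux/usb.h>

struct thing {			/* hypothetical device state */
	void *status;
	struct urb *urb;
};

static int thing_init(struct thing *t)
{
	t->status = kzalloc(64, GFP_KERNEL);
	if (!t->status)
		return -ENOMEM;

	t->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!t->urb) {
		kfree(t->status);
		t->status = NULL;	/* mark init as not completed */
		return -ENOMEM;
	}
	return 0;
}

static void thing_unregister(struct thing *t)
{
	if (!t->status)		/* init failed or never ran */
		return;
	/* ... real teardown ... */
}
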
+diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
+index 7193f848d17e66..9b7d30a21a5bd0 100644
+--- a/drivers/memory/tegra/tegra20-emc.c
++++ b/drivers/memory/tegra/tegra20-emc.c
+@@ -474,14 +474,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+ 
+ 	ram_code = tegra_read_ram_code();
+ 
+-	for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
+-	     np = of_find_node_by_name(np, "emc-tables")) {
++	for_each_child_of_node(dev->of_node, np) {
++		if (!of_node_name_eq(np, "emc-tables"))
++			continue;
+ 		err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ 		if (err || value != ram_code) {
+ 			struct device_node *lpddr2_np;
+ 			bool cfg_mismatches = false;
+ 
+-			lpddr2_np = of_find_node_by_name(np, "lpddr2");
++			lpddr2_np = of_get_child_by_name(np, "lpddr2");
+ 			if (lpddr2_np) {
+ 				const struct lpddr2_info *info;
+ 
+@@ -518,7 +519,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+ 			}
+ 
+ 			if (cfg_mismatches) {
+-				of_node_put(np);
+ 				continue;
+ 			}
+ 		}
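
of_find_node_by_name() searches the flattened tree from the given node onwards and drops the reference it was handed, which is the wrong tool for walking direct children; the rewrite uses for_each_child_of_node(), which scopes the walk to the parent and manages the refcounts, plus of_get_child_by_name() for the nested lookup. A sketch of the child walk; find_table() is hypothetical:

#include <linux/of.h>

/* Walk only the direct children named "emc-tables"; the iterator
 * takes and drops node references itself, so no of_node_put() is
 * needed when skipping an entry. */
static struct device_node *find_table(struct device_node *parent, u32 want)
{
	struct device_node *np;
	u32 val;

	for_each_child_of_node(parent, np) {
		if (!of_node_name_eq(np, "emc-tables"))
			continue;
		if (!of_property_read_u32(np, "nvidia,ram-code", &val) &&
		    val == want)
			return np;	/* still referenced: caller must put */
	}
	return NULL;
}
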
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index 3e1d699ba9340f..72f20de9652da2 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -15,6 +15,7 @@
+ #include <linux/io.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+@@ -27,7 +28,7 @@
+ 
+ static struct platform_driver syscon_driver;
+ 
+-static DEFINE_SPINLOCK(syscon_list_slock);
++static DEFINE_MUTEX(syscon_list_lock);
+ static LIST_HEAD(syscon_list);
+ 
+ struct syscon {
+@@ -54,6 +55,8 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
+ 	struct resource res;
+ 	struct reset_control *reset;
+ 
++	WARN_ON(!mutex_is_locked(&syscon_list_lock));
++
+ 	struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL);
+ 	if (!syscon)
+ 		return ERR_PTR(-ENOMEM);
+@@ -146,9 +149,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
+ 	syscon->regmap = regmap;
+ 	syscon->np = np;
+ 
+-	spin_lock(&syscon_list_slock);
+ 	list_add_tail(&syscon->list, &syscon_list);
+-	spin_unlock(&syscon_list_slock);
+ 
+ 	return_ptr(syscon);
+ 
+@@ -169,7 +170,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
+ {
+ 	struct syscon *entry, *syscon = NULL;
+ 
+-	spin_lock(&syscon_list_slock);
++	mutex_lock(&syscon_list_lock);
+ 
+ 	list_for_each_entry(entry, &syscon_list, list)
+ 		if (entry->np == np) {
+@@ -177,11 +178,11 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
+ 			break;
+ 		}
+ 
+-	spin_unlock(&syscon_list_slock);
+-
+ 	if (!syscon)
+ 		syscon = of_syscon_register(np, check_res);
+ 
++	mutex_unlock(&syscon_list_lock);
++
+ 	if (IS_ERR(syscon))
+ 		return ERR_CAST(syscon);
+ 
+@@ -212,7 +213,7 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
+ 		return -ENOMEM;
+ 
+ 	/* check if syscon entry already exists */
+-	spin_lock(&syscon_list_slock);
++	mutex_lock(&syscon_list_lock);
+ 
+ 	list_for_each_entry(entry, &syscon_list, list)
+ 		if (entry->np == np) {
+@@ -225,12 +226,12 @@ int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap)
+ 
+ 	/* register the regmap in syscon list */
+ 	list_add_tail(&syscon->list, &syscon_list);
+-	spin_unlock(&syscon_list_slock);
++	mutex_unlock(&syscon_list_lock);
+ 
+ 	return 0;
+ 
+ err_unlock:
+-	spin_unlock(&syscon_list_slock);
++	mutex_unlock(&syscon_list_lock);
+ 	kfree(syscon);
+ 	return ret;
+ }
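
of_syscon_register() can sleep (GFP_KERNEL allocation, regmap and reset handling), so the list could never safely be serialized by a spinlock across the register step; the patch converts it to a mutex and keeps the lock held over the whole look-up-or-create sequence, which also closes the race where two callers both miss the list and register the same node twice. The shape of the pattern, on hypothetical obj/obj_create_locked():

#include <linux/list.h>
#include <linux/mutex.h>

struct obj {				/* hypothetical cached object */
	struct list_head list;
	void *key;
};

static DEFINE_MUTEX(obj_list_lock);
static LIST_HEAD(obj_list);

static struct obj *obj_create_locked(void *key);  /* sleeps; adds to obj_list */

static struct obj *obj_get(void *key)
{
	struct obj *o, *found = NULL;

	mutex_lock(&obj_list_lock);	/* a mutex: creation can sleep */
	list_for_each_entry(o, &obj_list, list)
		if (o->key == key) {
			found = o;
			break;
		}
	if (!found)	/* miss: create while still holding the lock */
		found = obj_create_locked(key);
	mutex_unlock(&obj_list_lock);

	return found;
}
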
+diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
+index 77b0490a1b38d7..e0174da5e9fc39 100644
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -286,6 +286,7 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
+ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ {
+ 	int ret;
++	u8 interrupt_val = 0;
+ 	u16 *buf;
+ 
+ 	if (!status)
+@@ -308,6 +309,20 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ 		ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ 	}
+ 
++	rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
++	/* Cross check presence with interrupts */
++	if (*status & XD_CD)
++		if (!(interrupt_val & XD_INT))
++			*status &= ~XD_CD;
++
++	if (*status & SD_CD)
++		if (!(interrupt_val & SD_INT))
++			*status &= ~SD_CD;
++
++	if (*status & MS_CD)
++		if (!(interrupt_val & MS_INT))
++			*status &= ~MS_CD;
++
+ 	/* usb_control_msg may return positive when success */
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
+index 217f4e69233fbf..edad2ab0f2fa96 100644
+--- a/drivers/mtd/hyperbus/hbmc-am654.c
++++ b/drivers/mtd/hyperbus/hbmc-am654.c
+@@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ 	priv->hbdev.np = of_get_next_child(np, NULL);
+ 	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
+ 	if (ret)
+-		return ret;
++		goto put_node;
+ 
+ 	if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ 		struct mux_control *control = devm_mux_control_get(dev, NULL);
+ 
+-		if (IS_ERR(control))
+-			return PTR_ERR(control);
++		if (IS_ERR(control)) {
++			ret = PTR_ERR(control);
++			goto put_node;
++		}
+ 
+ 		ret = mux_control_select(control, 1);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to select HBMC mux\n");
+-			return ret;
++			goto put_node;
+ 		}
+ 		priv->mux_ctrl = control;
+ 	}
+ 
+ 	priv->hbdev.map.size = resource_size(&res);
+ 	priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
+-	if (IS_ERR(priv->hbdev.map.virt))
+-		return PTR_ERR(priv->hbdev.map.virt);
++	if (IS_ERR(priv->hbdev.map.virt)) {
++		ret = PTR_ERR(priv->hbdev.map.virt);
++		goto disable_mux;
++	}
+ 
+ 	priv->ctlr.dev = dev;
+ 	priv->ctlr.ops = &am654_hbmc_ops;
+@@ -226,6 +230,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ disable_mux:
+ 	if (priv->mux_ctrl)
+ 		mux_control_deselect(priv->mux_ctrl);
++put_node:
++	of_node_put(priv->hbdev.np);
+ 	return ret;
+ }
+ 
+@@ -241,6 +247,7 @@ static void am654_hbmc_remove(struct platform_device *pdev)
+ 
+ 	if (dev_priv->rx_chan)
+ 		dma_release_channel(dev_priv->rx_chan);
++	of_node_put(priv->hbdev.np);
+ }
+ 
+ static const struct of_device_id am654_hbmc_dt_ids[] = {
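
of_get_next_child() returns its result with an elevated refcount, so every exit from probe, and the remove path, must drop it with of_node_put(); the fix routes the early returns through a put_node label. A minimal goto-ladder sketch; example_probe() and do_setup() are hypothetical, and the real driver keeps the reference for the device's lifetime and puts it in remove():

#include <linux/of.h>
#include <linux/platform_device.h>

static int do_setup(struct device_node *np);	/* hypothetical setup step */

static int example_probe(struct platform_device *pdev)
{
	struct device_node *child;
	int ret;

	/* of_get_next_child() returns the node with its refcount raised */
	child = of_get_next_child(pdev->dev.of_node, NULL);
	if (!child)
		return -ENODEV;

	ret = do_setup(child);
	if (ret)
		goto put_node;	/* every early exit drops the reference */

	return 0;

put_node:
	of_node_put(child);
	return ret;
}
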
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 9c253a511e45a5..fea5b611995635 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2342,6 +2342,11 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ 		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ 		status = brcmnand_waitfunc(chip);
+ 
++		if (status < 0) {
++			ret = status;
++			goto out;
++		}
++
+ 		if (status & NAND_STATUS_FAIL) {
+ 			dev_info(ctrl->dev, "program failed at %llx\n",
+ 				(unsigned long long)addr);
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index 26cc53ad34ec77..c792b9bcab9bce 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -549,7 +549,6 @@ struct ubi_debug_info {
+  * @peb_buf: a buffer of PEB size used for different purposes
+  * @buf_mutex: protects @peb_buf
+  * @ckvol_mutex: serializes static volume checking when opening
+- * @wl_reboot_notifier: close all wear-leveling work before reboot
+  *
+  * @dbg: debugging information for this UBI device
+  */
+@@ -652,7 +651,6 @@ struct ubi_device {
+ 	void *peb_buf;
+ 	struct mutex buf_mutex;
+ 	struct mutex ckvol_mutex;
+-	struct notifier_block wl_reboot_notifier;
+ 
+ 	struct ubi_debug_info dbg;
+ };
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 4f6f339d8fb8ab..fbd399cf650337 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -89,7 +89,6 @@
+ #include <linux/crc32.h>
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+-#include <linux/reboot.h>
+ #include "ubi.h"
+ #include "wl.h"
+ 
+@@ -128,8 +127,6 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ 				 struct ubi_wl_entry *e, struct rb_root *root);
+ static int self_check_in_pq(const struct ubi_device *ubi,
+ 			    struct ubi_wl_entry *e);
+-static int ubi_wl_reboot_notifier(struct notifier_block *n,
+-				  unsigned long state, void *cmd);
+ 
+ /**
+  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
+@@ -1953,13 +1950,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 	if (!ubi->ro_mode && !ubi->fm_disabled)
+ 		ubi_ensure_anchor_pebs(ubi);
+ #endif
+-
+-	if (!ubi->wl_reboot_notifier.notifier_call) {
+-		ubi->wl_reboot_notifier.notifier_call = ubi_wl_reboot_notifier;
+-		ubi->wl_reboot_notifier.priority = 1; /* Higher than MTD */
+-		register_reboot_notifier(&ubi->wl_reboot_notifier);
+-	}
+-
+ 	return 0;
+ 
+ out_free:
+@@ -2005,17 +1995,6 @@ void ubi_wl_close(struct ubi_device *ubi)
+ 	kfree(ubi->lookuptbl);
+ }
+ 
+-static int ubi_wl_reboot_notifier(struct notifier_block *n,
+-				  unsigned long state, void *cmd)
+-{
+-	struct ubi_device *ubi;
+-
+-	ubi = container_of(n, struct ubi_device, wl_reboot_notifier);
+-	ubi_wl_close(ubi);
+-
+-	return NOTIFY_DONE;
+-}
+-
+ /**
+  * self_check_ec - make sure that the erase counter of a PEB is correct.
+  * @ubi: UBI device description object
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 7b78c2bada8149..e45bba240cbcda 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1538,17 +1538,20 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
+ 				 NETIF_F_HIGHDMA | NETIF_F_LRO)
+ 
+ #define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
+-				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
++				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
++				 NETIF_F_GSO_PARTIAL)
+ 
+ #define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
+ 				 NETIF_F_GSO_SOFTWARE)
+ 
++#define BOND_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
++
+ 
+ static void bond_compute_features(struct bonding *bond)
+ {
++	netdev_features_t gso_partial_features = BOND_GSO_PARTIAL_FEATURES;
+ 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+ 					IFF_XMIT_DST_RELEASE_PERM;
+-	netdev_features_t gso_partial_features = NETIF_F_GSO_ESP;
+ 	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+ 	netdev_features_t enc_features  = BOND_ENC_FEATURES;
+ #ifdef CONFIG_XFRM_OFFLOAD
+@@ -1582,8 +1585,9 @@ static void bond_compute_features(struct bonding *bond)
+ 							  BOND_XFRM_FEATURES);
+ #endif /* CONFIG_XFRM_OFFLOAD */
+ 
+-		if (slave->dev->hw_enc_features & NETIF_F_GSO_PARTIAL)
+-			gso_partial_features &= slave->dev->gso_partial_features;
++		gso_partial_features = netdev_increment_features(gso_partial_features,
++								 slave->dev->gso_partial_features,
++								 BOND_GSO_PARTIAL_FEATURES);
+ 
+ 		mpls_features = netdev_increment_features(mpls_features,
+ 							  slave->dev->mpls_features,
+@@ -1598,12 +1602,8 @@ static void bond_compute_features(struct bonding *bond)
+ 	}
+ 	bond_dev->hard_header_len = max_hard_header_len;
+ 
+-	if (gso_partial_features & NETIF_F_GSO_ESP)
+-		bond_dev->gso_partial_features |= NETIF_F_GSO_ESP;
+-	else
+-		bond_dev->gso_partial_features &= ~NETIF_F_GSO_ESP;
+-
+ done:
++	bond_dev->gso_partial_features = gso_partial_features;
+ 	bond_dev->vlan_features = vlan_features;
+ 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ 				    NETIF_F_HW_VLAN_CTAG_TX |
+@@ -6046,6 +6046,7 @@ void bond_setup(struct net_device *bond_dev)
+ 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ 	bond_dev->features |= bond_dev->hw_features;
+ 	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
++	bond_dev->features |= NETIF_F_GSO_PARTIAL;
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 	bond_dev->hw_features |= BOND_XFRM_FEATURES;
+ 	/* Only enable XFRM features if this is an active-backup config */
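
Rather than special-casing NETIF_F_GSO_ESP, the slave loop now folds each lower device's gso_partial_features through netdev_increment_features(), the helper the function already uses for VLAN and MPLS features, and writes the accumulated set back once at done:. The accumulate step, sketched over a hypothetical array of lower devices:

#include <linux/netdevice.h>

/* netdev_increment_features() merges one device's features into the
 * running set according to each feature's one-for-all/all-for-all
 * class, restricted by the mask. */
static netdev_features_t ex_gso_partial(struct net_device **lowers, int n)
{
	netdev_features_t feat = NETIF_F_GSO_ESP;
	int i;

	for (i = 0; i < n; i++)
		feat = netdev_increment_features(feat,
				lowers[i]->gso_partial_features,
				NETIF_F_GSO_ESP);
	return feat;
}
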
+diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
+index d73ef262991d61..6fee9a41839c0b 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -328,8 +328,7 @@
+ #define BGMAC_RX_FRAME_OFFSET			30		/* There are 2 unused bytes between header and real data */
+ #define BGMAC_RX_BUF_OFFSET			(NET_SKB_PAD + NET_IP_ALIGN - \
+ 						 BGMAC_RX_FRAME_OFFSET)
+-/* Jumbo frame size with FCS */
+-#define BGMAC_RX_MAX_FRAME_SIZE			9724
++#define BGMAC_RX_MAX_FRAME_SIZE			1536
+ #define BGMAC_RX_BUF_SIZE			(BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+ #define BGMAC_RX_ALLOC_SIZE			(SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
+ 						 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 8735e333034cf4..b87eaf0c250ce9 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1777,10 +1777,11 @@ static void dm9000_drv_remove(struct platform_device *pdev)
+ 
+ 	unregister_netdev(ndev);
+ 	dm9000_release_board(pdev, dm);
+-	free_netdev(ndev);		/* free device structure */
+ 	if (dm->power_supply)
+ 		regulator_disable(dm->power_supply);
+ 
++	free_netdev(ndev);		/* free device structure */
++
+ 	dev_dbg(&pdev->dev, "released and freed device\n");
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 4566848e1d7c6a..2f706d08481998 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -840,6 +840,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 	int hdr_len, total_len, data_left;
+ 	struct bufdesc *bdp = txq->bd.cur;
++	struct bufdesc *tmp_bdp;
++	struct bufdesc_ex *ebdp;
+ 	struct tso_t tso;
+ 	unsigned int index = 0;
+ 	int ret;
+@@ -913,7 +915,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ 	return 0;
+ 
+ err_release:
+-	/* TODO: Release all used data descriptors for TSO */
++	/* Release all used data descriptors for TSO */
++	tmp_bdp = txq->bd.cur;
++
++	while (tmp_bdp != bdp) {
++		/* Unmap data buffers */
++		if (tmp_bdp->cbd_bufaddr &&
++		    !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
++			dma_unmap_single(&fep->pdev->dev,
++					 fec32_to_cpu(tmp_bdp->cbd_bufaddr),
++					 fec16_to_cpu(tmp_bdp->cbd_datlen),
++					 DMA_TO_DEVICE);
++
++		/* Clear standard buffer descriptor fields */
++		tmp_bdp->cbd_sc = 0;
++		tmp_bdp->cbd_datlen = 0;
++		tmp_bdp->cbd_bufaddr = 0;
++
++		/* Handle extended descriptor if enabled */
++		if (fep->bufdesc_ex) {
++			ebdp = (struct bufdesc_ex *)tmp_bdp;
++			ebdp->cbd_esc = 0;
++		}
++
++		tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
++	}
++
++	dev_kfree_skb_any(skb);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+index 9a63fbc6940831..b25fb400f4767e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+@@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+  */
+ static DEFINE_MUTEX(hnae3_common_lock);
+ 
++/* ensure the drivers are unloaded one by one */

++static DEFINE_MUTEX(hnae3_unload_lock);
++
++void hnae3_acquire_unload_lock(void)
++{
++	mutex_lock(&hnae3_unload_lock);
++}
++EXPORT_SYMBOL(hnae3_acquire_unload_lock);
++
++void hnae3_release_unload_lock(void)
++{
++	mutex_unlock(&hnae3_unload_lock);
++}
++EXPORT_SYMBOL(hnae3_release_unload_lock);
++
+ static bool hnae3_client_match(enum hnae3_client_type client_type)
+ {
+ 	if (client_type == HNAE3_CLIENT_KNIC ||
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+index 12ba380eb7019a..4e44f28288f902 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -963,4 +963,6 @@ int hnae3_register_client(struct hnae3_client *client);
+ void hnae3_set_client_init_flag(struct hnae3_client *client,
+ 				struct hnae3_ae_dev *ae_dev,
+ 				unsigned int inited);
++void hnae3_acquire_unload_lock(void);
++void hnae3_release_unload_lock(void);
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index a7e3b22f641c85..9ff797fb36c456 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -6002,9 +6002,11 @@ module_init(hns3_init_module);
+  */
+ static void __exit hns3_exit_module(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	pci_unregister_driver(&hns3_driver);
+ 	hnae3_unregister_client(&client);
+ 	hns3_dbg_unregister_debugfs();
++	hnae3_release_unload_lock();
+ }
+ module_exit(hns3_exit_module);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index db784500925261..3f17b3073e50fd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -12919,9 +12919,11 @@ static int __init hclge_init(void)
+ 
+ static void __exit hclge_exit(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	hnae3_unregister_ae_algo_prepare(&ae_algo);
+ 	hnae3_unregister_ae_algo(&ae_algo);
+ 	destroy_workqueue(hclge_wq);
++	hnae3_release_unload_lock();
+ }
+ module_init(hclge_init);
+ module_exit(hclge_exit);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 163c6e59ea4c15..9ba767740a043f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -3410,8 +3410,10 @@ static int __init hclgevf_init(void)
+ 
+ static void __exit hclgevf_exit(void)
+ {
++	hnae3_acquire_unload_lock();
+ 	hnae3_unregister_ae_algo(&ae_algovf);
+ 	destroy_workqueue(hclgevf_wq);
++	hnae3_release_unload_lock();
+ }
+ module_init(hclgevf_init);
+ module_exit(hclgevf_exit);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index a9e54866ae6bfa..2b8700abe56bb4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -773,6 +773,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		f->state = IAVF_VLAN_ADD;
+ 		adapter->num_vlan_filters++;
+ 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
++	} else if (f->state == IAVF_VLAN_REMOVE) {
++		/* IAVF_VLAN_REMOVE means the VLAN hasn't been removed yet,
++		 * so it is safe to just change the state here.
++		 */
++		f->state = IAVF_VLAN_ACTIVE;
+ 	}
+ 
+ clearout:
+@@ -793,8 +798,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+-		f->state = IAVF_VLAN_REMOVE;
+-		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
++		/* IAVF_VLAN_ADD means that the VLAN wasn't even added yet.
++		 * Remove it from the list.
++		 */
++		if (f->state == IAVF_VLAN_ADD) {
++			list_del(&f->list);
++			kfree(f);
++			adapter->num_vlan_filters--;
++		} else {
++			f->state = IAVF_VLAN_REMOVE;
++			iavf_schedule_aq_request(adapter,
++						 IAVF_FLAG_AQ_DEL_VLAN_FILTER);
++		}
+ 	}
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+index 46f9726d9a8a86..f5858e89dadd79 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+@@ -1491,7 +1491,6 @@ struct ice_aqc_dnl_equa_param {
+ #define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
+ #define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 3072634bf049c4..f241493a6ac883 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -710,7 +710,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ 		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
+ 		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
+ 		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
+-		{ ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
+ 		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
+ 		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
+ 		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
+index 8f2ad1c172c061..23b2cfbc9684c0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
+@@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool {
+ 	int rx_equ_post1;
+ 	int rx_equ_bflf;
+ 	int rx_equ_bfhf;
+-	int rx_equ_drate;
+ 	int rx_equ_ctle_gainhf;
+ 	int rx_equ_ctle_gainlf;
+ 	int rx_equ_ctle_gaindc;
+diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
+index 6509d807627cee..4f56d53d56b9ad 100644
+--- a/drivers/net/ethernet/intel/ice/ice_parser.h
++++ b/drivers/net/ethernet/intel/ice/ice_parser.h
+@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+ #define ICE_BST_TCAM_TABLE_SIZE		256
+ #define ICE_BST_TCAM_KEY_SIZE		20
+-#define ICE_BST_KEY_TCAM_SIZE		19
+ 
+ /* Boost TCAM item */
+ struct ice_bst_tcam_item {
+@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+ #define ICE_PARSER_GPR_NUM	128
+ #define ICE_PARSER_FLG_NUM	64
+ #define ICE_PARSER_ERR_NUM	16
+-#define ICE_BST_KEY_SIZE	10
+ #define ICE_MARKER_ID_SIZE	9
+ #define ICE_MARKER_MAX_SIZE	\
+ 		(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
+@@ -431,13 +429,13 @@ struct ice_parser_rt {
+ 	u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ 	u16 pkt_len;
+ 	u16 po;
+-	u8 bst_key[ICE_BST_KEY_SIZE];
++	u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
+ 	struct ice_pg_cam_key pg_key;
++	u8 pg_prio;
+ 	struct ice_alu *alu0;
+ 	struct ice_alu *alu1;
+ 	struct ice_alu *alu2;
+ 	struct ice_pg_cam_action *action;
+-	u8 pg_prio;
+ 	struct ice_gpr_pu pu;
+ 	u8 markers[ICE_MARKER_ID_SIZE];
+ 	bool protocols[ICE_PO_PAIR_SIZE];
+diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+index dedf5e854e4b76..3995d662e05099 100644
+--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
++++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
+ 	else
+ 		key[idd] = imem->b_kb.prio;
+ 
+-	idd = ICE_BST_KEY_TCAM_SIZE - 1;
++	idd = ICE_BST_TCAM_KEY_SIZE - 2;
+ 	for (i = idd; i >= 0; i--) {
+ 		int j;
+ 
+ 		j = ho + idd - i;
+ 		if (j < ICE_PARSER_MAX_PKT_LEN)
+-			key[i] = rt->pkt_buf[ho + idd - i];
++			key[i] = rt->pkt_buf[j];
+ 		else
+ 			key[i] = 0;
+ 	}
+ 
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+-		  key[0], key[1], key[2], key[3], key[4],
+-		  key[5], key[6], key[7], key[8], key[9]);
+-	ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
++	ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
++				 KBUILD_MODNAME ": Generated Boost TCAM Key",
++				 key, ICE_BST_TCAM_KEY_SIZE);
+ }
+ 
+ static u16 ice_bit_rev_u16(u16 v, int len)
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+index 4849590a5591f1..b28991dd187036 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+@@ -376,6 +376,9 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ 		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
+ 			break;
+ 
++		/* Ensure no other fields are read until DD flag is checked */
++		dma_rmb();
++
+ 		/* strip off FW internal code */
+ 		desc_err = le16_to_cpu(desc->ret_val) & 0xff;
+ 
+@@ -563,6 +566,9 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ 		if (!(flags & IDPF_CTLQ_FLAG_DD))
+ 			break;
+ 
++		/* Ensure no other fields are read until DD flag is checked */
++		dma_rmb();
++
+ 		q_msg[i].vmvf_type = (flags &
+ 				      (IDPF_CTLQ_FLAG_FTYPE_VM |
+ 				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
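
On weakly ordered CPUs the loads of ret_val and the other descriptor words can be satisfied before the load of the DD flag, so the CPU can observe the flag set yet read stale payload; dma_rmb() placed after the flag check orders the subsequent reads against the device's DMA writes. Sketch, with struct ex_desc and EX_FLAG_DD as hypothetical names:

#include <asm/barrier.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define EX_FLAG_DD	BIT(0)		/* hypothetical "descriptor done" */

struct ex_desc {			/* hypothetical DMA descriptor */
	__le16 flags;
	__le16 ret_val;
};

static bool ex_desc_done(const struct ex_desc *d, u16 *err)
{
	if (!(le16_to_cpu(READ_ONCE(d->flags)) & EX_FLAG_DD))
		return false;

	dma_rmb();	/* DD observed: payload reads are now ordered */
	*err = le16_to_cpu(d->ret_val) & 0xff;
	return true;
}
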
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
+index f71d3182580b6a..b6c515d14cbf08 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
+@@ -174,7 +174,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pci_set_master(pdev);
+ 	pci_set_drvdata(pdev, adapter);
+ 
+-	adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
++	adapter->init_wq = alloc_workqueue("%s-%s-init",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					   dev_driver_string(dev),
+ 					   dev_name(dev));
+ 	if (!adapter->init_wq) {
+@@ -183,7 +184,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_free;
+ 	}
+ 
+-	adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
++	adapter->serv_wq = alloc_workqueue("%s-%s-service",
++					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					   dev_driver_string(dev),
+ 					   dev_name(dev));
+ 	if (!adapter->serv_wq) {
+@@ -192,7 +194,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_serv_wq_alloc;
+ 	}
+ 
+-	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
++	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
++					  WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					  dev_driver_string(dev),
+ 					  dev_name(dev));
+ 	if (!adapter->mbx_wq) {
+@@ -201,7 +204,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_mbx_wq_alloc;
+ 	}
+ 
+-	adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
++	adapter->stats_wq = alloc_workqueue("%s-%s-stats",
++					    WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					    dev_driver_string(dev),
+ 					    dev_name(dev));
+ 	if (!adapter->stats_wq) {
+@@ -210,7 +214,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_stats_wq_alloc;
+ 	}
+ 
+-	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
++	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
++					       WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
+ 					       dev_driver_string(dev),
+ 					       dev_name(dev));
+ 	if (!adapter->vc_event_wq) {
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+index d46c95f91b0d81..99bdb95bf22661 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+@@ -612,14 +612,15 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ 		return -EINVAL;
+ 	}
+ 	xn = &adapter->vcxn_mngr->ring[xn_idx];
++	idpf_vc_xn_lock(xn);
+ 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ 	if (xn->salt != salt) {
+ 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ 				    xn->salt, salt);
++		idpf_vc_xn_unlock(xn);
+ 		return -EINVAL;
+ 	}
+ 
+-	idpf_vc_xn_lock(xn);
+ 	switch (xn->state) {
+ 	case IDPF_VC_XN_WAITING:
+ 		/* success */
+@@ -3077,12 +3078,21 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
+  */
+ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
+ {
++	bool remove_in_prog;
++
+ 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ 		return;
+ 
++	/* Avoid transaction timeouts when called during reset */
++	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
++	if (!remove_in_prog)
++		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
++
+ 	idpf_deinit_task(adapter);
+ 	idpf_intr_rel(adapter);
+-	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
++
++	if (remove_in_prog)
++		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ 
+ 	cancel_delayed_work_sync(&adapter->serv_task);
+ 	cancel_delayed_work_sync(&adapter->mbx_task);
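
In the forward-reply fix above, the salt identifies which reuse generation of the transaction slot a reply belongs to, but it is updated under the per-transaction lock, so checking it before taking the lock can match a slot that is recycled a moment later; the fix takes the lock first and validates under it. The claim pattern, on a hypothetical slot type:

#include <linux/errno.h>
#include <linux/mutex.h>

struct ex_xn {			/* hypothetical transaction slot */
	struct mutex lock;
	u8 salt;
};

/* On success, returns with the lock held so the slot cannot be
 * recycled while the caller processes the reply. */
static int ex_claim(struct ex_xn *xn, u8 salt)
{
	mutex_lock(&xn->lock);		/* lock first ... */
	if (xn->salt != salt) {		/* ... validate under the lock */
		mutex_unlock(&xn->lock);
		return -EINVAL;
	}
	return 0;
}
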
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 549436efc20488..730aa5632cceee 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -995,12 +995,6 @@ static void octep_get_stats64(struct net_device *netdev,
+ 	struct octep_device *oct = netdev_priv(netdev);
+ 	int q;
+ 
+-	if (netif_running(netdev))
+-		octep_ctrl_net_get_if_stats(oct,
+-					    OCTEP_CTRL_NET_INVALID_VFID,
+-					    &oct->iface_rx_stats,
+-					    &oct->iface_tx_stats);
+-
+ 	tx_packets = 0;
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+@@ -1018,10 +1012,6 @@ static void octep_get_stats64(struct net_device *netdev,
+ 	stats->tx_bytes = tx_bytes;
+ 	stats->rx_packets = rx_packets;
+ 	stats->rx_bytes = rx_bytes;
+-	stats->multicast = oct->iface_rx_stats.mcast_pkts;
+-	stats->rx_errors = oct->iface_rx_stats.err_pkts;
+-	stats->collisions = oct->iface_tx_stats.xscol;
+-	stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 7e6771c9cdbbab..4c699514fd57a0 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -799,14 +799,6 @@ static void octep_vf_get_stats64(struct net_device *netdev,
+ 	stats->tx_bytes = tx_bytes;
+ 	stats->rx_packets = rx_packets;
+ 	stats->rx_bytes = rx_bytes;
+-	if (!octep_vf_get_if_stats(oct)) {
+-		stats->multicast = oct->iface_rx_stats.mcast_pkts;
+-		stats->rx_errors = oct->iface_rx_stats.err_pkts;
+-		stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+-				    oct->iface_rx_stats.err_pkts;
+-		stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+-		stats->tx_dropped = oct->iface_tx_stats.dropped;
+-	}
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
+index 6c683a12d5aa52..00b80fb9d87ac5 100644
+--- a/drivers/net/ethernet/mediatek/airoha_eth.c
++++ b/drivers/net/ethernet/mediatek/airoha_eth.c
+@@ -258,11 +258,11 @@
+ #define REG_GDM3_FWD_CFG		GDM3_BASE
+ #define GDM3_PAD_EN_MASK		BIT(28)
+ 
+-#define REG_GDM4_FWD_CFG		(GDM4_BASE + 0x100)
++#define REG_GDM4_FWD_CFG		GDM4_BASE
+ #define GDM4_PAD_EN_MASK		BIT(28)
+ #define GDM4_SPORT_OFFSET0_MASK		GENMASK(11, 8)
+ 
+-#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x33c)
++#define REG_GDM4_SRC_PORT_SET		(GDM4_BASE + 0x23c)
+ #define GDM4_SPORT_OFF2_MASK		GENMASK(19, 16)
+ #define GDM4_SPORT_OFF1_MASK		GENMASK(15, 12)
+ #define GDM4_SPORT_OFF0_MASK		GENMASK(11, 8)
+@@ -2138,17 +2138,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
+ 		if (!qdma->q_rx[i].ndesc)
+ 			continue;
+ 
+-		napi_disable(&qdma->q_rx[i].napi);
+ 		netif_napi_del(&qdma->q_rx[i].napi);
+ 		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ 		if (qdma->q_rx[i].page_pool)
+ 			page_pool_destroy(qdma->q_rx[i].page_pool);
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+-		napi_disable(&qdma->q_tx_irq[i].napi);
++	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ 		netif_napi_del(&qdma->q_tx_irq[i].napi);
+-	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ 		if (!qdma->q_tx[i].ndesc)
+@@ -2173,6 +2170,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
+ 	}
+ }
+ 
++static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
++		napi_disable(&qdma->q_tx_irq[i].napi);
++
++	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++		if (!qdma->q_rx[i].ndesc)
++			continue;
++
++		napi_disable(&qdma->q_rx[i].napi);
++	}
++}
++
+ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+ {
+ 	struct airoha_eth *eth = port->qdma->eth;
+@@ -2738,7 +2750,7 @@ static int airoha_probe(struct platform_device *pdev)
+ 
+ 	err = airoha_hw_init(pdev, eth);
+ 	if (err)
+-		goto error;
++		goto error_hw_cleanup;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ 		airoha_qdma_start_napi(&eth->qdma[i]);
+@@ -2753,13 +2765,16 @@ static int airoha_probe(struct platform_device *pdev)
+ 		err = airoha_alloc_gdm_port(eth, np);
+ 		if (err) {
+ 			of_node_put(np);
+-			goto error;
++			goto error_napi_stop;
+ 		}
+ 	}
+ 
+ 	return 0;
+ 
+-error:
++error_napi_stop:
++	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++		airoha_qdma_stop_napi(&eth->qdma[i]);
++error_hw_cleanup:
+ 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ 		airoha_hw_cleanup(&eth->qdma[i]);
+ 
+@@ -2780,8 +2795,10 @@ static void airoha_remove(struct platform_device *pdev)
+ 	struct airoha_eth *eth = platform_get_drvdata(pdev);
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
++	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
++		airoha_qdma_stop_napi(&eth->qdma[i]);
+ 		airoha_hw_cleanup(&eth->qdma[i]);
++	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ 		struct airoha_gdm_port *port = eth->ports[i];
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 0ec17c276bdd2d..cb93f46eaa7c39 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2087,7 +2087,7 @@ static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
+ 	struct mlx5e_xdpsq *xdpsq;
+ 	int err;
+ 
+-	xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
++	xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
+ 	if (!xdpsq)
+ 		return ERR_PTR(-ENOMEM);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+index 8fe96eb76baff7..10ece7df1cfaff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+@@ -70,7 +70,7 @@
+ 			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ 			_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ 			_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+-				    (bit_off) % BITS_IN_DW, second_dw_mask); \
++				    (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
+ 		} else { \
+ 			_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ 		} \
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+index 46245e0b24623d..43c84900369a36 100644
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+@@ -14,7 +14,6 @@
+ #define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
+ #define MLXFW_FSM_STATE_WAIT_ROUNDS \
+ 	(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
+-#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
+ 
+ static const int mlxfw_fsm_state_errno[] = {
+ 	[MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
+@@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
+ 		return err;
+ 	}
+ 
+-	comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
+ 	if (comp->data_size > comp_max_size) {
+ 		MLXFW_ERR_MSG(mlxfw_dev, extack,
+ 			      "Component size is bigger than limit", -EINVAL);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+index 69cd689dbc83e9..5afe6b155ef0d5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
+ 	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
+ 				&bytes);
+ 
+-	if (mr_route->mfc->mfc_un.res.pkt != packets)
+-		mr_route->mfc->mfc_un.res.lastuse = jiffies;
+-	mr_route->mfc->mfc_un.res.pkt = packets;
+-	mr_route->mfc->mfc_un.res.bytes = bytes;
++	if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
++		WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
++	atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
++	atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
+ }
+ 
+ static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index bc395294a32df9..c9f4976a35275a 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -3217,10 +3217,15 @@ static int ravb_suspend(struct device *dev)
+ 
+ 	netif_device_detach(ndev);
+ 
+-	if (priv->wol_enabled)
+-		return ravb_wol_setup(ndev);
++	rtnl_lock();
++	if (priv->wol_enabled) {
++		ret = ravb_wol_setup(ndev);
++		rtnl_unlock();
++		return ret;
++	}
+ 
+ 	ret = ravb_close(ndev);
++	rtnl_unlock();
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3245,19 +3250,20 @@ static int ravb_resume(struct device *dev)
+ 	if (!netif_running(ndev))
+ 		return 0;
+ 
++	rtnl_lock();
+ 	/* If WoL is enabled restore the interface. */
+-	if (priv->wol_enabled) {
++	if (priv->wol_enabled)
+ 		ret = ravb_wol_restore(ndev);
+-		if (ret)
+-			return ret;
+-	} else {
++	else
+ 		ret = pm_runtime_force_resume(dev);
+-		if (ret)
+-			return ret;
++	if (ret) {
++		rtnl_unlock();
++		return ret;
+ 	}
+ 
+ 	/* Reopening the interface will restore the device to the working state. */
+ 	ret = ravb_open(ndev);
++	rtnl_unlock();
+ 	if (ret < 0)
+ 		goto out_rpm_put;
+ 
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 8887b89210093b..5fc8027c92c7c9 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -3494,10 +3494,12 @@ static int sh_eth_suspend(struct device *dev)
+ 
+ 	netif_device_detach(ndev);
+ 
++	rtnl_lock();
+ 	if (mdp->wol_enabled)
+ 		ret = sh_eth_wol_setup(ndev);
+ 	else
+ 		ret = sh_eth_close(ndev);
++	rtnl_unlock();
+ 
+ 	return ret;
+ }
+@@ -3511,10 +3513,12 @@ static int sh_eth_resume(struct device *dev)
+ 	if (!netif_running(ndev))
+ 		return 0;
+ 
++	rtnl_lock();
+ 	if (mdp->wol_enabled)
+ 		ret = sh_eth_wol_restore(ndev);
+ 	else
+ 		ret = sh_eth_open(ndev);
++	rtnl_unlock();
+ 
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index c81ea8cdfe6eb8..1bed3e7629faab 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -7175,6 +7175,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
+ 	if (priv->dma_cap.tsoen)
+ 		dev_info(priv->device, "TSO supported\n");
+ 
++	if (priv->dma_cap.number_rx_queues &&
++	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
++		dev_warn(priv->device,
++			 "Number of Rx queues (%u) exceeds dma capability\n",
++			 priv->plat->rx_queues_to_use);
++		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
++	}
++	if (priv->dma_cap.number_tx_queues &&
++	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
++		dev_warn(priv->device,
++			 "Number of Tx queues (%u) exceeds dma capability\n",
++			 priv->plat->tx_queues_to_use);
++		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
++	}
++
++	if (priv->dma_cap.rx_fifo_size &&
++	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
++		dev_warn(priv->device,
++			 "Rx FIFO size (%u) exceeds dma capability\n",
++			 priv->plat->rx_fifo_size);
++		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
++	}
++	if (priv->dma_cap.tx_fifo_size &&
++	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
++		dev_warn(priv->device,
++			 "Tx FIFO size (%u) exceeds dma capability\n",
++			 priv->plat->tx_fifo_size);
++		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
++	}
++
+ 	priv->hw->vlan_fail_q_en =
+ 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
+ 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
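
All four checks added above are the same clamp: a platform-supplied value is trusted only up to what the DMA capability register reports, and a capability of 0 means "not reported", so no clamping applies. Factored out as a sketch; ex_clamp() is hypothetical, the driver open-codes each case:

#include <linux/device.h>

static u32 ex_clamp(struct device *dev, const char *what, u32 want, u32 cap)
{
	if (cap && want > cap) {	/* cap == 0 means "not reported" */
		dev_warn(dev, "%s (%u) exceeds dma capability (%u)\n",
			 what, want, cap);
		return cap;
	}
	return want;
}

Usage would look like: plat->rx_queues_to_use = ex_clamp(dev, "Number of Rx queues", plat->rx_queues_to_use, dma_cap.number_rx_queues);
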
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 5465bf872734a3..e1de45fb18aeea 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2248,7 +2248,7 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ 
+-		if (tx_chn->irq)
++		if (tx_chn->irq > 0)
+ 			devm_free_irq(dev, tx_chn->irq, tx_chn);
+ 
+ 		netif_napi_del(&tx_chn->napi_tx);
+diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
+index bf02efa10956a6..84181dcb98831f 100644
+--- a/drivers/net/netdevsim/netdevsim.h
++++ b/drivers/net/netdevsim/netdevsim.h
+@@ -129,6 +129,7 @@ struct netdevsim {
+ 		u32 sleep;
+ 		u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ 		u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
++		struct dentry *ddir;
+ 		struct debugfs_u32_array dfs_ports[2];
+ 	} udp_ports;
+ 
+diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
+index 02dc3123eb6c16..640b4983a9a0d1 100644
+--- a/drivers/net/netdevsim/udp_tunnels.c
++++ b/drivers/net/netdevsim/udp_tunnels.c
+@@ -112,9 +112,11 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
+ 	struct net_device *dev = file->private_data;
+ 	struct netdevsim *ns = netdev_priv(dev);
+ 
+-	memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ 	rtnl_lock();
+-	udp_tunnel_nic_reset_ntf(dev);
++	if (dev->reg_state == NETREG_REGISTERED) {
++		memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
++		udp_tunnel_nic_reset_ntf(dev);
++	}
+ 	rtnl_unlock();
+ 
+ 	return count;
+@@ -144,23 +146,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ 	else
+ 		ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
+ 
+-	debugfs_create_u32("udp_ports_inject_error", 0600,
+-			   ns->nsim_dev_port->ddir,
++	ns->udp_ports.ddir = debugfs_create_dir("udp_ports",
++						ns->nsim_dev_port->ddir);
++
++	debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir,
+ 			   &ns->udp_ports.inject_error);
+ 
+ 	ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
+ 	ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+-	debugfs_create_u32_array("udp_ports_table0", 0400,
+-				 ns->nsim_dev_port->ddir,
++	debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir,
+ 				 &ns->udp_ports.dfs_ports[0]);
+ 
+ 	ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
+ 	ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+-	debugfs_create_u32_array("udp_ports_table1", 0400,
+-				 ns->nsim_dev_port->ddir,
++	debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir,
+ 				 &ns->udp_ports.dfs_ports[1]);
+ 
+-	debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
++	debugfs_create_file("reset", 0200, ns->udp_ports.ddir,
+ 			    dev, &nsim_udp_tunnels_info_reset_fops);
+ 
+ 	/* Note: it's not normal to allocate the info struct like this!
+@@ -196,6 +198,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ 
+ void nsim_udp_tunnels_info_destroy(struct net_device *dev)
+ {
++	struct netdevsim *ns = netdev_priv(dev);
++
++	debugfs_remove_recursive(ns->udp_ports.ddir);
+ 	kfree(dev->udp_tunnel_nic_info);
+ 	dev->udp_tunnel_nic_info = NULL;
+ }
+diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
+index 5107f58338aff4..376b499d6e8ebe 100644
+--- a/drivers/net/phy/marvell-88q2xxx.c
++++ b/drivers/net/phy/marvell-88q2xxx.c
+@@ -95,6 +95,10 @@
+ 
+ #define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF			65246
+ 
++struct mv88q2xxx_priv {
++	bool enable_temp;
++};
++
+ struct mmd_val {
+ 	int devad;
+ 	u32 regnum;
+@@ -710,17 +714,12 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ 
+ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv = phydev->priv;
+ 	struct device *dev = &phydev->mdio.dev;
+ 	struct device *hwmon;
+ 	char *hwmon_name;
+-	int ret;
+-
+-	/* Enable temperature sense */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+-			     MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+-	if (ret < 0)
+-		return ret;
+ 
++	priv->enable_temp = true;
+ 	hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ 	if (IS_ERR(hwmon_name))
+ 		return PTR_ERR(hwmon_name);
+@@ -743,6 +742,14 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+ 
+ static int mv88q2xxx_probe(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv;
++
++	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	phydev->priv = priv;
++
+ 	return mv88q2xxx_hwmon_probe(phydev);
+ }
+ 
+@@ -810,6 +817,18 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
+ 
+ static int mv88q222x_config_init(struct phy_device *phydev)
+ {
++	struct mv88q2xxx_priv *priv = phydev->priv;
++	int ret;
++
++	/* Enable temperature sense */
++	if (priv->enable_temp) {
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     MDIO_MMD_PCS_MV_TEMP_SENSOR2,
++				     MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
+ 		return mv88q222x_revb0_config_init(phydev);
+ 	else
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index f65d7f1f348e73..9cefca1aefa1b1 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -952,15 +952,15 @@ static int rtl822x_read_status(struct phy_device *phydev)
+ {
+ 	int lpadv, ret;
+ 
++	mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
++
+ 	ret = rtlgen_read_status(phydev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	if (phydev->autoneg == AUTONEG_DISABLE ||
+-	    !phydev->autoneg_complete) {
+-		mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
++	    !phydev->autoneg_complete)
+ 		return 0;
+-	}
+ 
+ 	lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+ 	if (lpadv < 0)
+@@ -1023,26 +1023,25 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
+ {
+ 	int ret, val;
+ 
+-	ret = genphy_c45_read_status(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (phydev->autoneg == AUTONEG_DISABLE ||
+-	    !genphy_c45_aneg_done(phydev))
+-		mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+-
+ 	/* Vendor register as C45 has no standardized support for 1000BaseT */
+-	if (phydev->autoneg == AUTONEG_ENABLE) {
++	if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
+ 		val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ 				   RTL822X_VND2_GANLPAR);
+ 		if (val < 0)
+ 			return val;
+-
+-		mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
++	} else {
++		val = 0;
+ 	}
++	mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ 
+-	if (!phydev->link)
++	ret = genphy_c45_read_status(phydev);
++	if (ret < 0)
++		return ret;
++
++	if (!phydev->link) {
++		phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ 		return 0;
++	}
+ 
+ 	/* Read actual speed from vendor register. */
+ 	val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_VND2_PHYSR);
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 5aa41d5f7765a6..5ca6ecf0ce5fbc 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1329,9 +1329,9 @@ int tap_queue_resize(struct tap_dev *tap)
+ 	list_for_each_entry(q, &tap->queue_list, next)
+ 		rings[i++] = &q->ring;
+ 
+-	ret = ptr_ring_resize_multiple(rings, n,
+-				       dev->tx_queue_len, GFP_KERNEL,
+-				       __skb_array_destroy_skb);
++	ret = ptr_ring_resize_multiple_bh(rings, n,
++					  dev->tx_queue_len, GFP_KERNEL,
++					  __skb_array_destroy_skb);
+ 
+ 	kfree(rings);
+ 	return ret;
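
tap and tun consumers run in bottom-half context, so the resize must serialize against them by disabling BHs around the ring swap rather than blocking hard interrupts; that is what the _bh variant of the helper does. The call shape, sketched (ex_resize_all() is hypothetical; the destroy callback frees entries evicted when shrinking):

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static int ex_resize_all(struct ptr_ring **rings, unsigned int n, int size)
{
	/* swaps each ring's backing array under the consumer lock,
	 * taken with BHs disabled; kfree handles evicted entries */
	return ptr_ring_resize_multiple_bh(rings, n, size, GFP_KERNEL, kfree);
}
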
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index c7690adec8db72..dc7cbd6a9798a7 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -1175,6 +1175,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
+ 		return -EBUSY;
+ 	}
+ 
++	if (netdev_has_upper_dev(port_dev, dev)) {
++		NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
++		netdev_err(dev, "Device %s is already a lower device of the team interface\n",
++			   portname);
++		return -EBUSY;
++	}
++
+ 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ 	    vlan_uses_dev(dev)) {
+ 		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index e816aaba8e5f2e..148c7bc66c0af1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3697,9 +3697,9 @@ static int tun_queue_resize(struct tun_struct *tun)
+ 	list_for_each_entry(tfile, &tun->disabled, next)
+ 		rings[i++] = &tfile->tx_ring;
+ 
+-	ret = ptr_ring_resize_multiple(rings, n,
+-				       dev->tx_queue_len, GFP_KERNEL,
+-				       tun_ptr_free);
++	ret = ptr_ring_resize_multiple_bh(rings, n,
++					  dev->tx_queue_len, GFP_KERNEL,
++					  tun_ptr_free);
+ 
+ 	kfree(rings);
+ 	return ret;
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 01a3b2417a5401..ddff6f19ff98eb 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -71,6 +71,14 @@
+ #define MSR_SPEED		(1<<3)
+ #define MSR_LINK		(1<<2)
+ 
++/* USB endpoints */
++enum rtl8150_usb_ep {
++	RTL8150_USB_EP_CONTROL = 0,
++	RTL8150_USB_EP_BULK_IN = 1,
++	RTL8150_USB_EP_BULK_OUT = 2,
++	RTL8150_USB_EP_INT_IN = 3,
++};
++
+ /* Interrupt pipe data */
+ #define INT_TSR			0x00
+ #define INT_RSR			0x01
+@@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_interface *intf,
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	rtl8150_t *dev;
+ 	struct net_device *netdev;
++	static const u8 bulk_ep_addr[] = {
++		RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
++		RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
++		0};
++	static const u8 int_ep_addr[] = {
++		RTL8150_USB_EP_INT_IN | USB_DIR_IN,
++		0};
+ 
+ 	netdev = alloc_etherdev(sizeof(rtl8150_t));
+ 	if (!netdev)
+@@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_interface *intf,
+ 		return -ENOMEM;
+ 	}
+ 
++	/* Verify that all required endpoints are present */
++	if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++	    !usb_check_int_endpoints(intf, int_ep_addr)) {
++		dev_err(&intf->dev, "couldn't find required endpoints\n");
++		goto out;
++	}
++
+ 	tasklet_setup(&dev->tl, rx_fixup);
+ 	spin_lock_init(&dev->rx_pool_lock);
+ 
+diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
+index d2023e7131bd4f..6e6e9f05509ab0 100644
+--- a/drivers/net/vxlan/vxlan_vnifilter.c
++++ b/drivers/net/vxlan/vxlan_vnifilter.c
+@@ -411,6 +411,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
+ 	struct tunnel_msg *tmsg;
+ 	struct net_device *dev;
+ 
++	if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
++		NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
++		return -EINVAL;
++	}
++
+ 	tmsg = nlmsg_data(cb->nlh);
+ 
+ 	if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 40088e62572e12..40b52d12b43235 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ 						 &rbm);
+ 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
++		    rbm != HAL_RX_BUF_RBM_SW1_BM &&
+ 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ 			ab->soc_stats.invalid_rbm++;
+ 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
+index 8f7dd43dc1bd8e..753bd93f02123d 100644
+--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
++++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
+@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ 
+ 	ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ 				wbm_desc->buf_addr_info.info1);
+-	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
++	if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
++	    ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ 		ab->soc_stats.invalid_rbm++;
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index d493ec812055f8..ef2736fb5f53fd 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -4316,7 +4316,23 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
+ 				       struct ieee80211_sta *sta,
+ 				       struct ieee80211_key_conf *key)
+ {
+-	struct ath12k_key_conf *key_conf = NULL, *tmp;
++	struct ath12k_key_conf *key_conf, *tmp;
++
++	list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
++		if (key_conf->key != key)
++			continue;
++
++		/* If a SET key entry is already present in the cache, there is
++		 * nothing to do, just return.
++		 */
++		if (cmd == SET_KEY)
++			return 0;
++
++		/* DEL key for an old SET key which the driver hasn't flushed yet.
++		 */
++		list_del(&key_conf->list);
++		kfree(key_conf);
++	}
+ 
+ 	if (cmd == SET_KEY) {
+ 		key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL);
+@@ -4330,17 +4346,7 @@ static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache,
+ 		list_add_tail(&key_conf->list,
+ 			      &cache->key_conf.list);
+ 	}
+-	if (list_empty(&cache->key_conf.list))
+-		return 0;
+-	list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) {
+-		if (key_conf->key == key) {
+-			/* DEL key for an old SET key which driver hasn't flushed yet.
+-			 */
+-			list_del(&key_conf->list);
+-			kfree(key_conf);
+-			break;
+-		}
+-	}
++
+ 	return 0;
+ }
+ 
+@@ -7173,9 +7179,6 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+ 
+ 	ab = ar->ab;
+ 
+-	if (arvif->is_created)
+-		goto flush;
+-
+ 	/* Assign arvif again here since previous radio switch block
+ 	 * would've unassigned and cleared it.
+ 	 */
+@@ -7186,6 +7189,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+ 		goto unlock;
+ 	}
+ 
++	if (arvif->is_created)
++		goto flush;
++
+ 	if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ 		ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ 			    TARGET_NUM_VDEVS);
+@@ -7658,9 +7664,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
+ 							chandef->chan->band,
+ 							ahvif->vif->type);
+ 	arg.min_power = 0;
+-	arg.max_power = chandef->chan->max_power * 2;
+-	arg.max_reg_power = chandef->chan->max_reg_power * 2;
+-	arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
++	arg.max_power = chandef->chan->max_power;
++	arg.max_reg_power = chandef->chan->max_reg_power;
++	arg.max_antenna_gain = chandef->chan->max_antenna_gain;
+ 
+ 	arg.pref_tx_streams = ar->num_tx_chains;
+ 	arg.pref_rx_streams = ar->num_rx_chains;
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index 8557d4826a46e4..94d08d6ae1a3c2 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+-	wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
++	wcn->chan_survey = devm_kcalloc(wcn->dev,
++					n_channels,
++					sizeof(struct wcn36xx_chan_survey),
++					GFP_KERNEL);
+ 	if (!wcn->chan_survey) {
+ 		ret = -ENOMEM;
+ 		goto out_wq;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+index 31e080e4da6697..ab3d6cfcb02bde 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+@@ -6,6 +6,8 @@
+ #ifndef _fwil_h_
+ #define _fwil_h_
+ 
++#include "debug.h"
++
+ /*******************************************************************************
+  * Dongle command codes that are interpreted by firmware
+  ******************************************************************************/
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+index 091fb6fd7c787c..834f7c9bb9e92d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+@@ -13,9 +13,12 @@
+ #include <linux/efi.h>
+ #include "fw/runtime.h"
+ 
+-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,	\
+-				  0xb2, 0xec, 0xf5, 0xa3,	\
+-				  0x59, 0x4f, 0x4a, 0xea)
++#define IWL_EFI_WIFI_GUID	EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,	\
++					 0xb2, 0xec, 0xf5, 0xa3,	\
++					 0x59, 0x4f, 0x4a, 0xea)
++#define IWL_EFI_WIFI_BT_GUID	EFI_GUID(0xe65d8884, 0xd4af, 0x4b20,	\
++					 0x8d, 0x03, 0x77, 0x2e,	\
++					 0xcc, 0x3d, 0xa5, 0x31)
+ 
+ struct iwl_uefi_pnvm_mem_desc {
+ 	__le32 addr;
+@@ -61,7 +64,7 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+ 
+ 	*len = 0;
+ 
+-	data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID,
++	data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_WIFI_GUID,
+ 				     &package_size);
+ 	if (IS_ERR(data)) {
+ 		IWL_DEBUG_FW(trans,
+@@ -76,18 +79,18 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+ 	return data;
+ }
+ 
+-static
+-void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+-				     efi_char16_t *uefi_var_name,
+-				     char *var_name,
+-				     unsigned int expected_size,
+-				     unsigned long *size)
++static void *
++iwl_uefi_get_verified_variable_guid(struct iwl_trans *trans,
++				    efi_guid_t *guid,
++				    efi_char16_t *uefi_var_name,
++				    char *var_name,
++				    unsigned int expected_size,
++				    unsigned long *size)
+ {
+ 	void *var;
+ 	unsigned long var_size;
+ 
+-	var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+-				    &var_size);
++	var = iwl_uefi_get_variable(uefi_var_name, guid, &var_size);
+ 
+ 	if (IS_ERR(var)) {
+ 		IWL_DEBUG_RADIO(trans,
+@@ -112,6 +115,18 @@ void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ 	return var;
+ }
+ 
++static void *
++iwl_uefi_get_verified_variable(struct iwl_trans *trans,
++			       efi_char16_t *uefi_var_name,
++			       char *var_name,
++			       unsigned int expected_size,
++			       unsigned long *size)
++{
++	return iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_GUID,
++						   uefi_var_name, var_name,
++						   expected_size, size);
++}
++
+ int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
+ 				 u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
+ {
+@@ -311,8 +326,9 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
+ 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ 		return;
+ 
+-	data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+-					      "STEP", sizeof(*data), NULL);
++	data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
++						   IWL_UEFI_STEP_NAME,
++						   "STEP", sizeof(*data), NULL);
+ 	if (IS_ERR(data))
+ 		return;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+index 36726ea4b822a4..21641d41a958cf 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+@@ -530,18 +530,15 @@ static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
+ 					   struct ieee80211_vif *vif)
+ {
+ 	struct iwl_mvm *mvm = _data;
++	struct ieee80211_bss_conf *link_conf;
++	unsigned int link_id;
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+ 	if (vif->type != NL80211_IFTYPE_STATION)
+ 		return;
+ 
+-	for (int link_id = 0;
+-	     link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+-	     link_id++) {
+-		struct ieee80211_bss_conf *link_conf =
+-			rcu_dereference_check(vif->link_conf[link_id],
+-					      lockdep_is_held(&mvm->mutex));
++	for_each_vif_active_link(vif, link_conf, link_id) {
+ 		struct ieee80211_chanctx_conf *chanctx_conf =
+ 			rcu_dereference_check(link_conf->chanctx_conf,
+ 					      lockdep_is_held(&mvm->mutex));
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 7d973546c9fb85..4d1daff1e070d7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2498,12 +2498,6 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ 	u32 expected_len = sizeof(*data) +
+ 		data->num_mlo_link_keys * sizeof(status->mlo_keys[0]);
+ 
+-	if (!data) {
+-		IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
+-		status = NULL;
+-		return;
+-	}
+-
+ 	if (len < expected_len) {
+ 		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ 		status = NULL;
+@@ -2555,12 +2549,6 @@ iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm,
+ 	u32 i;
+ 	u32 expected_len = sizeof(*data);
+ 
+-	if (!data) {
+-		IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
+-		status = NULL;
+-		return;
+-	}
+-
+ 	if (has_mlo_keys)
+ 		expected_len += (data->num_mlo_link_keys *
+ 				 sizeof(status->mlo_keys[0]));
+@@ -2609,12 +2597,6 @@ iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm,
+ {
+ 	u32 i;
+ 
+-	if (!data) {
+-		IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
+-		status = NULL;
+-		return;
+-	}
+-
+ 	if (len < sizeof(*data)) {
+ 		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ 		status = NULL;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index c9867d26361b6a..998a390a70bbcc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1880,7 +1880,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ 				IWL_DEBUG_TX_REPLY(mvm,
+ 						   "Next reclaimed packet:%d\n",
+ 						   next_reclaimed);
+-				iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
++				if (tid < IWL_MAX_TID_COUNT)
++					iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
++							   true, 0);
+ 			} else {
+ 				IWL_DEBUG_TX_REPLY(mvm,
+ 						   "NDP - don't update next_reclaimed\n");
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 9d5561f441347b..0ca83f1a3e3ea2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -958,11 +958,11 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ 
+ 	if (chandef->chan != phy->main_chan)
+ 		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+-	mt76_worker_enable(&dev->tx_worker);
+ 
+ 	ret = dev->drv->set_channel(phy);
+ 
+ 	clear_bit(MT76_RESET, &phy->state);
++	mt76_worker_enable(&dev->tx_worker);
+ 	mt76_worker_schedule(&dev->tx_worker);
+ 
+ 	mutex_unlock(&dev->mutex);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 96e34277fece9b..1cc8fc8fefe740 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -1113,7 +1113,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ {
+ 	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ 
+-	return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
++	return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, &mvif->mt76,
+ 					   &mvif->sta.wcid, enable);
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 864246f9408899..7d07e720e4ec1d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1137,10 +1137,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
+ 
+ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 				struct ieee80211_bss_conf *bss_conf,
++				struct mt76_vif *mvif,
+ 				struct mt76_wcid *wcid,
+ 				bool enable)
+ {
+-	struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
+ 	struct mt76_dev *dev = phy->dev;
+ 	struct {
+ 		struct {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index 1b0e80dfc346b8..57a8340fa70097 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -1938,6 +1938,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
+ 				bool enable, bool tx);
+ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
+ 				struct ieee80211_bss_conf *bss_conf,
++				struct mt76_vif *mvif,
+ 				struct mt76_wcid *wcid,
+ 				bool enable);
+ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 6bef96e3d2a3d9..77d82ccd73079d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -82,7 +82,7 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ 		return ret;
+ 
+ 	mutex_lock(&phy->dev->mt76.mutex);
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 60 * 1000, 130 * 1000), 1000);
+ 
+ 	if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
+ 	     val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index cf77ce0c875991..799e8d2cc7e6ec 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1388,6 +1388,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
+ 	if (dev_is_pci(mdev->dev)) {
+ 		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ 		if (dev->hif2) {
++			mt76_wr(dev, MT_PCIE_RECOG_ID,
++				dev->hif2->index | MT_PCIE_RECOG_ID_SEM);
+ 			if (is_mt7915(mdev))
+ 				mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ 			else
+@@ -1442,9 +1444,11 @@ static void
+ mt7915_mac_full_reset(struct mt7915_dev *dev)
+ {
+ 	struct mt76_phy *ext_phy;
++	struct mt7915_phy *phy2;
+ 	int i;
+ 
+ 	ext_phy = dev->mt76.phys[MT_BAND1];
++	phy2 = ext_phy ? ext_phy->priv : NULL;
+ 
+ 	dev->recovery.hw_full_reset = true;
+ 
+@@ -1474,6 +1478,9 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
+ 
+ 	memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ 	dev->mt76.vif_mask = 0;
++	dev->phy.omac_mask = 0;
++	if (phy2)
++		phy2->omac_mask = 0;
+ 
+ 	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ 	dev->mt76.global_wcid.idx = i;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index c6f498fc81ffdc..351285daac99f7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -246,8 +246,10 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
+ 	phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ 
+ 	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+-	if (idx < 0)
+-		return -ENOSPC;
++	if (idx < 0) {
++		ret = -ENOSPC;
++		goto out;
++	}
+ 
+ 	INIT_LIST_HEAD(&mvif->sta.rc_list);
+ 	INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
+@@ -619,8 +621,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ASSOC)
+ 		set_bss_info = vif->cfg.assoc;
+ 	if (changed & BSS_CHANGED_BEACON_ENABLED &&
++	    info->enable_beacon &&
+ 	    vif->type != NL80211_IFTYPE_AP)
+-		set_bss_info = set_sta = info->enable_beacon;
++		set_bss_info = set_sta = 1;
+ 
+ 	if (set_bss_info == 1)
+ 		mt7915_mcu_add_bss_info(phy, vif, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 44e112b8b5b368..2e7604eed27b02 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -484,7 +484,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+ 			continue;
+ 
+ 		ofs = addr - dev->reg.map[i].phys;
+-		if (ofs > dev->reg.map[i].size)
++		if (ofs >= dev->reg.map[i].size)
+ 			continue;
+ 
+ 		return dev->reg.map[i].maps + ofs;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index ac0b1f0eb27c14..5fe872ef2e939b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -191,6 +191,7 @@ struct mt7915_hif {
+ 	struct device *dev;
+ 	void __iomem *regs;
+ 	int irq;
++	u32 index;
+ };
+ 
+ struct mt7915_phy {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+index 39132894e8ea29..07b0a5766eab7d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+@@ -42,6 +42,7 @@ static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
+ 			continue;
+ 
+ 		get_device(hif->dev);
++		hif->index = idx;
+ 		goto out;
+ 	}
+ 	hif = NULL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+index 047106b65d2bc6..bd1455698ebe5f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+@@ -647,6 +647,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
+ 		ieee80211_disconnect(vif, true);
+ 
+ 	mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
++				    &mvif->bss_conf.mt76,
+ 				    &mvif->sta.deflink.wcid, true);
+ 	mt7921_mcu_set_tx(dev, vif);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index a7f5bfbc02ed1f..e2dfd3670c4c93 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -308,6 +308,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ 
+ 	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
++					  &mvif->bss_conf.mt76,
+ 					  &mvif->sta.deflink.wcid, true);
+ 	if (ret)
+ 		goto out;
+@@ -531,7 +532,13 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	} else {
+ 		if (idx == *wcid_keyidx)
+ 			*wcid_keyidx = -1;
+-		goto out;
++
++		/* For security reasons we don't trigger the key deletion when
++		 * reassociating. But we should trigger the deletion process
++		 * to avoid using an incorrect cipher after disconnection.
++		 */
++		if (vif->type != NL80211_IFTYPE_STATION || vif->cfg.assoc)
++			goto out;
+ 	}
+ 
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index 634c42bbf23f67..a095fb31e391a1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -49,7 +49,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
+ 			break;
+ 		mlink = list_first_entry(&sta_poll_list,
+ 					 struct mt792x_link_sta, wcid.poll_list);
+-		msta = container_of(mlink, struct mt792x_sta, deflink);
++		msta = mlink->sta;
+ 		spin_lock_bh(&dev->mt76.sta_poll_lock);
+ 		list_del_init(&mlink->wcid.poll_list);
+ 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
+@@ -1271,6 +1271,7 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
+ 	struct mt792x_dev *dev = mvif->phy->dev;
+ 	struct ieee80211_hw *hw = mt76_hw(dev);
+ 	struct ieee80211_bss_conf *bss_conf;
++	struct mt792x_bss_conf *mconf;
+ 	int i;
+ 
+ 	if (vif->type == NL80211_IFTYPE_STATION)
+@@ -1278,8 +1279,9 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
+ 
+ 	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ 		bss_conf = mt792x_vif_to_bss_conf(vif, i);
++		mconf = mt792x_vif_to_link(mvif, i);
+ 
+-		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
++		mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
+ 					    &mvif->sta.deflink.wcid, true);
+ 		mt7925_mcu_set_tx(dev, bss_conf);
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+index 791c8b00e11264..ddc67423efe2cb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+@@ -365,18 +365,14 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
+ 			       0 : mconf->mt76.idx;
+ 	mconf->mt76.band_idx = 0xff;
+-	mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
++	mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
++			      0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ 
+ 	if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
+ 		mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
+ 	else
+ 		mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
+ 
+-	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
+-					  &mlink->wcid, true);
+-	if (ret)
+-		goto out;
+-
+ 	dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
+ 	mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
+ 
+@@ -384,7 +380,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 
+ 	INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ 	mlink->wcid.idx = idx;
+-	mlink->wcid.phy_idx = mconf->mt76.band_idx;
++	mlink->wcid.phy_idx = 0;
+ 	mlink->wcid.hw_key_idx = -1;
+ 	mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	mt76_wcid_init(&mlink->wcid);
+@@ -395,6 +391,12 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ 	ewma_rssi_init(&mconf->rssi);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
++
++	ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
++					  &mlink->wcid, true);
++	if (ret)
++		goto out;
++
+ 	if (vif->txq) {
+ 		mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ 		mtxq->wcid = idx;
+@@ -837,6 +839,7 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	u8 link_id = link_sta->link_id;
+ 	struct mt792x_link_sta *mlink;
+ 	struct mt792x_sta *msta;
++	struct mt76_wcid *wcid;
+ 	int ret, idx;
+ 
+ 	msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+@@ -850,11 +853,20 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ 	mlink->wcid.sta = 1;
+ 	mlink->wcid.idx = idx;
+-	mlink->wcid.phy_idx = mconf->mt76.band_idx;
++	mlink->wcid.phy_idx = 0;
+ 	mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ 	mlink->last_txs = jiffies;
+ 	mlink->wcid.link_id = link_sta->link_id;
+ 	mlink->wcid.link_valid = !!link_sta->sta->valid_links;
++	mlink->sta = msta;
++
++	wcid = &mlink->wcid;
++	ewma_signal_init(&wcid->rssi);
++	rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
++	mt76_wcid_init(wcid);
++	ewma_avg_signal_init(&mlink->avg_ack_signal);
++	memset(mlink->airtime_ac, 0,
++	       sizeof(msta->deflink.airtime_ac));
+ 
+ 	ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ 	if (ret)
+@@ -866,9 +878,14 @@ static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ 	link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ 
+ 	/* should update bss info before STA add */
+-	if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
+-		mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+-					link_conf, link_sta, false);
++	if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
++		if (ieee80211_vif_is_mld(vif))
++			mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
++						link_conf, link_sta, link_sta != mlink->pri_link);
++		else
++			mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
++						link_conf, link_sta, false);
++	}
+ 
+ 	if (ieee80211_vif_is_mld(vif) &&
+ 	    link_sta == mlink->pri_link) {
+@@ -904,7 +921,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 			 struct ieee80211_sta *sta, unsigned long new_links)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+-	struct mt76_wcid *wcid;
+ 	unsigned int link_id;
+ 	int err = 0;
+ 
+@@ -921,14 +937,6 @@ mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 				err = -ENOMEM;
+ 				break;
+ 			}
+-
+-			wcid = &mlink->wcid;
+-			ewma_signal_init(&wcid->rssi);
+-			rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+-			mt76_wcid_init(wcid);
+-			ewma_avg_signal_init(&mlink->avg_ack_signal);
+-			memset(mlink->airtime_ac, 0,
+-			       sizeof(msta->deflink.airtime_ac));
+ 		}
+ 
+ 		msta->valid_links |= BIT(link_id);
+@@ -1141,8 +1149,7 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
+ 		struct mt792x_bss_conf *mconf;
+ 
+ 		mconf = mt792x_link_conf_to_mconf(link_conf);
+-		mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+-					link_sta, false);
++		mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ 	}
+ 
+ 	spin_lock_bh(&mdev->sta_poll_lock);
+@@ -1200,12 +1207,45 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ {
+ 	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
++	struct {
++		struct {
++			u8 omac_idx;
++			u8 band_idx;
++			__le16 pad;
++		} __packed hdr;
++		struct req_tlv {
++			__le16 tag;
++			__le16 len;
++			u8 active;
++			u8 link_idx; /* hw link idx */
++			u8 omac_addr[ETH_ALEN];
++		} __packed tlv;
++	} dev_req = {
++		.hdr = {
++			.omac_idx = 0,
++			.band_idx = 0,
++		},
++		.tlv = {
++			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
++			.len = cpu_to_le16(sizeof(struct req_tlv)),
++			.active = true,
++		},
++	};
+ 	unsigned long rem;
+ 
+ 	rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+ 
+ 	mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ 
++	if (ieee80211_vif_is_mld(vif)) {
++		mt7925_mcu_set_dbdc(&dev->mphy, false);
++
++		/* recover the omac address for the legacy interface */
++		memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
++		mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
++				  &dev_req, sizeof(dev_req), true);
++	}
++
+ 	if (vif->type == NL80211_IFTYPE_STATION) {
+ 		struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ 
+@@ -1250,22 +1290,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_RX_START:
+ 		mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
+ 				   params->buf_size);
+-		mt7925_mcu_uni_rx_ba(dev, params, true);
++		mt7925_mcu_uni_rx_ba(dev, vif, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_RX_STOP:
+ 		mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+-		mt7925_mcu_uni_rx_ba(dev, params, false);
++		mt7925_mcu_uni_rx_ba(dev, vif, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_OPERATIONAL:
+ 		mtxq->aggr = true;
+ 		mtxq->send_bar = false;
+-		mt7925_mcu_uni_tx_ba(dev, params, true);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, true);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, params, false);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ 		break;
+ 	case IEEE80211_AMPDU_TX_START:
+ 		set_bit(tid, &msta->deflink.wcid.ampdu_state);
+@@ -1274,7 +1314,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	case IEEE80211_AMPDU_TX_STOP_CONT:
+ 		mtxq->aggr = false;
+ 		clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+-		mt7925_mcu_uni_tx_ba(dev, params, false);
++		mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ 		break;
+ 	}
+@@ -1895,6 +1935,13 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ 		mt7925_mcu_set_tx(dev, info);
+ 
++	if (changed & BSS_CHANGED_BSSID) {
++		if (ieee80211_vif_is_mld(vif) &&
++		    hweight16(mvif->valid_links) == 2)
++			/* Indicate that the secondary link setup is done */
++			mt7925_mcu_uni_bss_bcnft(dev, info, true);
++	}
++
+ 	mt792x_mutex_release(dev);
+ }
+ 
+@@ -1946,6 +1993,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 					     GFP_KERNEL);
+ 			mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
+ 					     GFP_KERNEL);
++			if (!mconf || !mlink)
++				return -ENOMEM;
+ 		}
+ 
+ 		mconfs[link_id] = mconf;
+@@ -1974,6 +2023,8 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			goto free;
+ 
+ 		if (mconf != &mvif->bss_conf) {
++			mt7925_mcu_set_bss_pm(dev, link_conf, true);
++
+ 			err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
+ 						 vif->active_links);
+ 			if (err < 0)
+@@ -2071,18 +2122,16 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ 	struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
+-	struct ieee80211_bss_conf *pri_link_conf;
+ 	struct mt792x_bss_conf *mconf;
+ 
+ 	mutex_lock(&dev->mt76.mutex);
+ 
+ 	if (ieee80211_vif_is_mld(vif)) {
+ 		mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+-		pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+ 
+ 		if (vif->type == NL80211_IFTYPE_STATION &&
+ 		    mconf == &mvif->bss_conf)
+-			mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
++			mt7925_mcu_add_bss_info(&dev->phy, NULL, link_conf,
+ 						NULL, false);
+ 	} else {
+ 		mconf = &mvif->bss_conf;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+index 748ea6adbc6b39..ce3d8197b026a6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -123,10 +123,8 @@ EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
+ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
+ 				 struct ieee80211_bss_conf *link_conf)
+ {
+-	struct ieee80211_vif *mvif = container_of((void *)link_conf->vif,
+-						  struct ieee80211_vif,
+-						  drv_priv);
+ 	struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
++	struct ieee80211_vif *mvif = link_conf->vif;
+ 	struct sk_buff *skb;
+ 	int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
+ 			   IEEE80211_BSS_ARP_ADDR_LIST_LEN);
+@@ -531,10 +529,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
+ 
+ static int
+ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
++		  struct mt76_wcid *wcid,
+ 		  struct ieee80211_ampdu_params *params,
+ 		  bool enable, bool tx)
+ {
+-	struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
+ 	struct sta_rec_ba_uni *ba;
+ 	struct sk_buff *skb;
+ 	struct tlv *tlv;
+@@ -562,28 +560,60 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+ 
+ /** starec & wtbl **/
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = msta->vif;
++	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++	struct mt792x_link_sta *mlink;
++	struct mt792x_bss_conf *mconf;
++	unsigned long usable_links = ieee80211_vif_usable_links(vif);
++	struct mt76_wcid *wcid;
++	int link_id, ret = 0;
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		mconf = mt792x_vif_to_link(mvif, link_id);
++		mlink = mt792x_sta_to_link(msta, link_id);
++		wcid = &mlink->wcid;
+ 
+-	if (enable && !params->amsdu)
+-		msta->deflink.wcid.amsdu = false;
++		if (enable && !params->amsdu)
++			mlink->wcid.amsdu = false;
+ 
+-	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+-				 enable, true);
++		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
++					enable, true);
++		if (ret < 0)
++			break;
++	}
++
++	return ret;
+ }
+ 
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable)
+ {
+ 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
+-	struct mt792x_vif *mvif = msta->vif;
++	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
++	struct mt792x_link_sta *mlink;
++	struct mt792x_bss_conf *mconf;
++	unsigned long usable_links = ieee80211_vif_usable_links(vif);
++	struct mt76_wcid *wcid;
++	int link_id, ret = 0;
++
++	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
++		mconf = mt792x_vif_to_link(mvif, link_id);
++		mlink = mt792x_sta_to_link(msta, link_id);
++		wcid = &mlink->wcid;
++
++		ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
++					enable, false);
++		if (ret < 0)
++			break;
++	}
+ 
+-	return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+-				 enable, false);
++	return ret;
+ }
+ 
+ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+@@ -638,7 +668,7 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
+ 	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
+ 		clc = (const struct mt7925_clc *)(clc_base + offset);
+ 
+-		if (clc->idx > ARRAY_SIZE(phy->clc))
++		if (clc->idx >= ARRAY_SIZE(phy->clc))
+ 			break;
+ 
+ 		/* do not init buf again if chip reset triggered */
+@@ -823,7 +853,7 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
+ 			mt7925_mcu_parse_phy_cap(dev, tlv->data);
+ 			break;
+ 		case MT_NIC_CAP_CHIP_CAP:
+-			memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64));
++			dev->phy.chip_cap = le64_to_cpu(*(__le64 *)tlv->data);
+ 			break;
+ 		case MT_NIC_CAP_EML_CAP:
+ 			mt7925_mcu_parse_eml_cap(dev, tlv->data);
+@@ -1153,7 +1183,12 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 			u8 rsv[4];
+ 		} __packed hdr;
+ 		struct roc_acquire_tlv roc[2];
+-	} __packed req;
++	} __packed req = {
++			.roc[0].tag = cpu_to_le16(UNI_ROC_NUM),
++			.roc[0].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
++			.roc[1].tag = cpu_to_le16(UNI_ROC_NUM),
++			.roc[1].len = cpu_to_le16(sizeof(struct roc_acquire_tlv))
++	};
+ 
+ 	if (!mconf || hweight16(vif->valid_links) < 2 ||
+ 	    hweight16(sel_links) != 2)
+@@ -1200,6 +1235,8 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 		req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
+ 		req.roc[i].center_chan = center_ch;
+ 		req.roc[i].center_chan_from_ap = center_ch;
++		req.roc[i].center_chan2 = 0;
++		req.roc[i].center_chan2_from_ap = 0;
+ 
+ 		/* STR : 0xfe indicates BAND_ALL with enabling DBDC
+ 		 * EMLSR : 0xff indicates (BAND_AUTO) without DBDC
+@@ -1215,7 +1252,7 @@ int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ 	}
+ 
+ 	return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+@@ -1264,7 +1301,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+ 	}
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+@@ -1294,7 +1331,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
+ 	};
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
+@@ -1357,7 +1394,7 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ 				 &ps_req, sizeof(ps_req), true);
+ }
+ 
+-static int
++int
+ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+ 			 struct ieee80211_bss_conf *link_conf, bool enable)
+ {
+@@ -1447,12 +1484,12 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ 	int err;
+ 
+ 	err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+-				&req1, sizeof(req1), false);
++				&req1, sizeof(req1), true);
+ 	if (err < 0 || !enable)
+ 		return err;
+ 
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+-				 &req, sizeof(req), false);
++				 &req, sizeof(req), true);
+ }
+ 
+ static void
+@@ -1898,7 +1935,11 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ 		mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ 	}
+ 	info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+-	info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
++
++	if (link_sta)
++		info.newly = state != MT76_STA_INFO_STATE_ASSOC;
++	else
++		info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
+ 
+ 	if (ieee80211_vif_is_mld(vif))
+ 		err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
+@@ -1914,32 +1955,21 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
+ {
+ #define MT7925_FIF_BIT_CLR		BIT(1)
+ #define MT7925_FIF_BIT_SET		BIT(0)
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-	unsigned long valid = ieee80211_vif_is_mld(vif) ?
+-				      mvif->valid_links : BIT(0);
+-	struct ieee80211_bss_conf *bss_conf;
+ 	int err = 0;
+-	int i;
+ 
+ 	if (enable) {
+-		for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+-			bss_conf = mt792x_vif_to_bss_conf(vif, i);
+-			err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
+-			if (err < 0)
+-				return err;
+-		}
++		err = mt7925_mcu_uni_bss_bcnft(dev, &vif->bss_conf, true);
++		if (err < 0)
++			return err;
+ 
+ 		return mt7925_mcu_set_rxfilter(dev, 0,
+ 					       MT7925_FIF_BIT_SET,
+ 					       MT_WF_RFCR_DROP_OTHER_BEACON);
+ 	}
+ 
+-	for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+-		bss_conf = mt792x_vif_to_bss_conf(vif, i);
+-		err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
+-		if (err)
+-			return err;
+-	}
++	err = mt7925_mcu_set_bss_pm(dev, &vif->bss_conf, false);
++	if (err < 0)
++		return err;
+ 
+ 	return mt7925_mcu_set_rxfilter(dev, 0,
+ 				       MT7925_FIF_BIT_CLR,
+@@ -1976,8 +2006,6 @@ int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txp
+ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 			   bool enable)
+ {
+-	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+-
+ 	struct {
+ 		struct {
+ 			u8 band_idx;
+@@ -1991,7 +2019,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ 		} __packed enable;
+ 	} __packed req = {
+ 		.hdr = {
+-			.band_idx = mvif->bss_conf.mt76.band_idx,
++			.band_idx = 0,
+ 		},
+ 		.enable = {
+ 			.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
+@@ -2050,7 +2078,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
+ 		} __packed tlv;
+ 	} __packed req = {
+ 		.hdr = {
+-			.band_idx = vif->bss_conf.mt76.band_idx,
++			.band_idx = 0,
+ 		},
+ 		.tlv = {
+ 			.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
+@@ -2179,11 +2207,27 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ 	req = (struct bss_rlm_tlv *)tlv;
+ 	req->control_channel = chandef->chan->hw_value;
+ 	req->center_chan = ieee80211_frequency_to_channel(freq1);
+-	req->center_chan2 = ieee80211_frequency_to_channel(freq2);
++	req->center_chan2 = 0;
+ 	req->tx_streams = hweight8(phy->antenna_mask);
+ 	req->ht_op_info = 4; /* set HT 40M allowed */
+ 	req->rx_streams = hweight8(phy->antenna_mask);
+-	req->band = band;
++	req->center_chan2 = 0;
++	req->sco = 0;
++	req->band = 1;
++
++	switch (band) {
++	case NL80211_BAND_2GHZ:
++		req->band = 1;
++		break;
++	case NL80211_BAND_5GHZ:
++		req->band = 2;
++		break;
++	case NL80211_BAND_6GHZ:
++		req->band = 3;
++		break;
++	default:
++		break;
++	}
+ 
+ 	switch (chandef->width) {
+ 	case NL80211_CHAN_WIDTH_40:
+@@ -2194,6 +2238,7 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ 		break;
+ 	case NL80211_CHAN_WIDTH_80P80:
+ 		req->bw = CMD_CBW_8080MHZ;
++		req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ 		break;
+ 	case NL80211_CHAN_WIDTH_160:
+ 		req->bw = CMD_CBW_160MHZ;
+@@ -2463,6 +2508,7 @@ static void
+ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
+ 		       struct ieee80211_bss_conf *link_conf)
+ {
++	struct ieee80211_vif *vif = link_conf->vif;
+ 	struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ 	struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ 	struct bss_mld_tlv *mld;
+@@ -2483,7 +2529,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
+ 	mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ 			     IEEE80211_EML_CAP_EMLSR_SUPP);
+ 
+-	memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
++	memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ }
+ 
+ static void
+@@ -2614,7 +2660,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
+ 				     MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+ }
+ 
+-int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
++int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
+ {
+ 	struct mt76_dev *mdev = phy->dev;
+ 
+@@ -2634,7 +2680,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
+ 	tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf));
+ 	conf = (struct mbmc_conf_tlv *)tlv;
+ 
+-	conf->mbmc_en = 1;
++	conf->mbmc_en = enable;
+ 	conf->band = 0; /* unused */
+ 
+ 	err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+index ac53bdc993322f..fe6a613ba00889 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+@@ -616,7 +616,7 @@ mt7925_mcu_get_cipher(int cipher)
+ 	}
+ }
+ 
+-int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
++int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable);
+ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		       struct ieee80211_scan_request *scan_req);
+ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
+@@ -643,4 +643,7 @@ int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
+ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
+ 				 struct ieee80211_bss_conf *link_conf);
++int
++mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
++			 struct ieee80211_bss_conf *link_conf, bool enable);
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+index f5c02e5f506633..df3c705d1cb3fa 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -242,9 +242,11 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
+ 				 struct ieee80211_vif *vif,
+ 				 bool enable);
+ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
++			 struct ieee80211_vif *vif,
+ 			 struct ieee80211_ampdu_params *params,
+ 			 bool enable);
+ void mt7925_scan_work(struct work_struct *work);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
+index ab12616ec2b87c..2b8b9b2977f74a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
+@@ -241,6 +241,7 @@ static inline struct mt792x_bss_conf *
+ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+ {
+ 	struct ieee80211_vif *vif;
++	struct mt792x_bss_conf *bss_conf;
+ 
+ 	vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+ 
+@@ -248,8 +249,10 @@ mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+ 	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+ 		return &mvif->bss_conf;
+ 
+-	return rcu_dereference_protected(mvif->link_conf[link_id],
+-		lockdep_is_held(&mvif->phy->dev->mt76.mutex));
++	bss_conf = rcu_dereference_protected(mvif->link_conf[link_id],
++					     lockdep_is_held(&mvif->phy->dev->mt76.mutex));
++
++	return bss_conf ? bss_conf : &mvif->bss_conf;
+ }
+ 
+ static inline struct mt792x_link_sta *
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+index 78fe37c2e07b59..b87eed4d168df5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+@@ -147,7 +147,8 @@ void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ 	link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+ 
+ 	mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
+-	mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
++	mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mconf->mt76,
++				    &mlink->wcid, false);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+index 106273935b267f..05978d9c7b916a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+@@ -153,7 +153,7 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
+ 		return NULL;
+ 
+ 	link = container_of(wcid, struct mt792x_link_sta, wcid);
+-	sta = container_of(link, struct mt792x_sta, deflink);
++	sta = link->sta;
+ 	if (!sta->vif)
+ 		return NULL;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+index 5e96973226bbb5..d8a013812d1e37 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+@@ -16,9 +16,6 @@
+ 
+ static const struct ieee80211_iface_limit if_limits[] = {
+ 	{
+-		.max = 1,
+-		.types = BIT(NL80211_IFTYPE_ADHOC)
+-	}, {
+ 		.max = 16,
+ 		.types = BIT(NL80211_IFTYPE_AP)
+ #ifdef CONFIG_MAC80211_MESH
+@@ -85,7 +82,7 @@ static ssize_t mt7996_thermal_temp_store(struct device *dev,
+ 		return ret;
+ 
+ 	mutex_lock(&phy->dev->mt76.mutex);
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 40, 130);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 40 * 1000, 130 * 1000), 1000);
+ 
+ 	/* add a safety margin ~10 */
+ 	if ((i - 1 == MT7996_CRIT_TEMP_IDX &&
+@@ -1080,6 +1077,9 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 	he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ 				       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ 
++	he_cap_elem->phy_cap_info[7] =
++			IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++
+ 	switch (iftype) {
+ 	case NL80211_IFTYPE_AP:
+ 		he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
+@@ -1119,8 +1119,7 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 			IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ 			IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ 		he_cap_elem->phy_cap_info[7] |=
+-			IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+-			IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++			IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP;
+ 		he_cap_elem->phy_cap_info[8] |=
+ 			IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ 			IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+@@ -1190,7 +1189,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 
+ 	eht_cap_elem->mac_cap_info[0] =
+ 		IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+-		IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
++		IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
++		u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
++			       IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[0] =
+ 		IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+@@ -1233,21 +1234,20 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ 		IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
+ 
+ 	eht_cap_elem->phy_cap_info[4] =
++		IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ 		u8_encode_bits(min_t(int, sts - 1, 2),
+ 			       IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+ 
+ 	eht_cap_elem->phy_cap_info[5] =
+ 		u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ 			       IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+-		u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
++		u8_encode_bits(u8_get_bits(1, GENMASK(1, 0)),
+ 			       IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
+ 
+ 	val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
+ 	      width == NL80211_CHAN_WIDTH_160 ? 0x7 :
+ 	      width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
+ 	eht_cap_elem->phy_cap_info[6] =
+-		u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
+-			       IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ 		u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+ 
+ 	val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 0d21414e2c884a..f590902fdeea37 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -819,6 +819,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 			   struct ieee80211_key_conf *key, int pid,
+ 			   enum mt76_txq_id qid, u32 changed)
+ {
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_vif *vif = info->control.vif;
+ 	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+@@ -886,8 +887,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+ 	if (is_mt7996(&dev->mt76))
+ 		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+-	else
++	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
+ 		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
++
+ 	txwi[6] = cpu_to_le32(val);
+ 	txwi[7] = 0;
+ 
+@@ -897,7 +899,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+ 
+ 	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
+-		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 		bool mcast = ieee80211_is_data(hdr->frame_control) &&
+ 			     is_multicast_ether_addr(hdr->addr1);
+ 		u8 idx = MT7996_BASIC_RATES_TBL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index 2b34ae5e0cb57b..cc4c010d28b83f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -496,8 +496,7 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
+ 
+ 	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ 			     MT_WF_RFCR_DROP_RTS |
+-			     MT_WF_RFCR_DROP_CTL_RSV |
+-			     MT_WF_RFCR_DROP_NDPA);
++			     MT_WF_RFCR_DROP_CTL_RSV);
+ 
+ 	*total_flags = flags;
+ 	mt76_wr(dev, MT_WF_RFCR(phy->mt76->band_idx), phy->rxfilter);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 6c445a9dbc03d8..265958f7b78711 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -2070,7 +2070,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
+ 			cap |= STA_CAP_VHT_TX_STBC;
+ 		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ 			cap |= STA_CAP_VHT_RX_STBC;
+-		if (vif->bss_conf.vht_ldpc &&
++		if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
+ 		    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ 			cap |= STA_CAP_VHT_LDPC;
+ 
+@@ -3666,6 +3666,13 @@ int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
+ 
+ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ {
++	enum {
++		IDX_TX_TIME,
++		IDX_RX_TIME,
++		IDX_OBSS_AIRTIME,
++		IDX_NON_WIFI_TIME,
++		IDX_NUM
++	};
+ 	struct {
+ 		struct {
+ 			u8 band;
+@@ -3675,16 +3682,15 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 			__le16 tag;
+ 			__le16 len;
+ 			__le32 offs;
+-		} data[4];
++		} data[IDX_NUM];
+ 	} __packed req = {
+ 		.hdr.band = phy->mt76->band_idx,
+ 	};
+-	/* strict order */
+ 	static const u32 offs[] = {
+-		UNI_MIB_TX_TIME,
+-		UNI_MIB_RX_TIME,
+-		UNI_MIB_OBSS_AIRTIME,
+-		UNI_MIB_NON_WIFI_TIME,
++		[IDX_TX_TIME] = UNI_MIB_TX_TIME,
++		[IDX_RX_TIME] = UNI_MIB_RX_TIME,
++		[IDX_OBSS_AIRTIME] = UNI_MIB_OBSS_AIRTIME,
++		[IDX_NON_WIFI_TIME] = UNI_MIB_NON_WIFI_TIME,
+ 	};
+ 	struct mt76_channel_state *state = phy->mt76->chan_state;
+ 	struct mt76_channel_state *state_ts = &phy->state_ts;
+@@ -3693,7 +3699,7 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 	struct sk_buff *skb;
+ 	int i, ret;
+ 
+-	for (i = 0; i < 4; i++) {
++	for (i = 0; i < IDX_NUM; i++) {
+ 		req.data[i].tag = cpu_to_le16(UNI_CMD_MIB_DATA);
+ 		req.data[i].len = cpu_to_le16(sizeof(req.data[i]));
+ 		req.data[i].offs = cpu_to_le32(offs[i]);
+@@ -3712,17 +3718,24 @@ int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
+ 		goto out;
+ 
+ #define __res_u64(s) le64_to_cpu(res[s].data)
+-	state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+-	state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+-	state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+-	state->cc_busy += __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3) -
++	state->cc_tx += __res_u64(IDX_TX_TIME) - state_ts->cc_tx;
++	state->cc_bss_rx += __res_u64(IDX_RX_TIME) - state_ts->cc_bss_rx;
++	state->cc_rx += __res_u64(IDX_RX_TIME) +
++			__res_u64(IDX_OBSS_AIRTIME) -
++			state_ts->cc_rx;
++	state->cc_busy += __res_u64(IDX_TX_TIME) +
++			  __res_u64(IDX_RX_TIME) +
++			  __res_u64(IDX_OBSS_AIRTIME) +
++			  __res_u64(IDX_NON_WIFI_TIME) -
+ 			  state_ts->cc_busy;
+-
+ out:
+-	state_ts->cc_tx = __res_u64(1);
+-	state_ts->cc_bss_rx = __res_u64(2);
+-	state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+-	state_ts->cc_busy = __res_u64(0) + __res_u64(1) + __res_u64(2) + __res_u64(3);
++	state_ts->cc_tx = __res_u64(IDX_TX_TIME);
++	state_ts->cc_bss_rx = __res_u64(IDX_RX_TIME);
++	state_ts->cc_rx = __res_u64(IDX_RX_TIME) + __res_u64(IDX_OBSS_AIRTIME);
++	state_ts->cc_busy = __res_u64(IDX_TX_TIME) +
++			    __res_u64(IDX_RX_TIME) +
++			    __res_u64(IDX_OBSS_AIRTIME) +
++			    __res_u64(IDX_NON_WIFI_TIME);
+ #undef __res_u64
+ 
+ 	dev_kfree_skb(skb);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 40e45fb2b62607..442f72450352b0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -177,7 +177,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+ 			continue;
+ 
+ 		ofs = addr - dev->reg.map[i].phys;
+-		if (ofs > dev->reg.map[i].size)
++		if (ofs >= dev->reg.map[i].size)
+ 			continue;
+ 
+ 		return dev->reg.map[i].mapped + ofs;
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 58ff068233894e..f9e67b8c3b3c89 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -33,9 +33,9 @@ int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ 
+ 		ret = usb_control_msg(udev, pipe, req, req_type, val,
+ 				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
+-		if (ret == -ENODEV)
++		if (ret == -ENODEV || ret == -EPROTO)
+ 			set_bit(MT76_REMOVED, &dev->phy.state);
+-		if (ret >= 0 || ret == -ENODEV)
++		if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
+ 			return ret;
+ 		usleep_range(5000, 10000);
+ 	}
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 7e84fc0fd91188..af298021e05041 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -925,8 +925,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
+ 
+ 	wilc_wlan_cfg_deinit(wilc);
+ 	wlan_deinit_locks(wilc);
+-	wiphy_unregister(wilc->wiphy);
+-	wiphy_free(wilc->wiphy);
+ }
+ EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 5262c8846c13df..af970f99911108 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -193,7 +193,7 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ 	ret = wilc_load_mac_from_nv(wilc);
+ 	if (ret) {
+ 		pr_err("Can not retrieve MAC address from chip\n");
+-		goto dispose_irq;
++		goto unregister_wiphy;
+ 	}
+ 
+ 	wilc_sdio_deinit(wilc);
+@@ -202,15 +202,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ 				   NL80211_IFTYPE_STATION, false);
+ 	if (IS_ERR(vif)) {
+ 		ret = PTR_ERR(vif);
+-		goto dispose_irq;
++		goto unregister_wiphy;
+ 	}
+ 
+ 	dev_info(&func->dev, "Driver Initializing success\n");
+ 	return 0;
+ 
++unregister_wiphy:
++	wiphy_unregister(wilc->wiphy);
+ dispose_irq:
+ 	irq_dispose_mapping(wilc->dev_irq_num);
+ 	wilc_netdev_cleanup(wilc);
++	wiphy_free(wilc->wiphy);
+ free:
+ 	kfree(sdio_priv->cmd53_buf);
+ 	kfree(sdio_priv);
+@@ -223,6 +226,8 @@ static void wilc_sdio_remove(struct sdio_func *func)
+ 	struct wilc_sdio *sdio_priv = wilc->bus_data;
+ 
+ 	wilc_netdev_cleanup(wilc);
++	wiphy_unregister(wilc->wiphy);
++	wiphy_free(wilc->wiphy);
+ 	kfree(sdio_priv->cmd53_buf);
+ 	kfree(sdio_priv);
+ }
+diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
+index ce2a9cdd6aa787..5bcabb7decea0f 100644
+--- a/drivers/net/wireless/microchip/wilc1000/spi.c
++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
+@@ -256,7 +256,7 @@ static int wilc_bus_probe(struct spi_device *spi)
+ 	ret = wilc_load_mac_from_nv(wilc);
+ 	if (ret) {
+ 		pr_err("Can not retrieve MAC address from chip\n");
+-		goto power_down;
++		goto unregister_wiphy;
+ 	}
+ 
+ 	wilc_wlan_power(wilc, false);
+@@ -264,14 +264,17 @@ static int wilc_bus_probe(struct spi_device *spi)
+ 				   NL80211_IFTYPE_STATION, false);
+ 	if (IS_ERR(vif)) {
+ 		ret = PTR_ERR(vif);
+-		goto power_down;
++		goto unregister_wiphy;
+ 	}
+ 	return 0;
+ 
++unregister_wiphy:
++	wiphy_unregister(wilc->wiphy);
+ power_down:
+ 	wilc_wlan_power(wilc, false);
+ netdev_cleanup:
+ 	wilc_netdev_cleanup(wilc);
++	wiphy_free(wilc->wiphy);
+ free:
+ 	kfree(spi_priv);
+ 	return ret;
+@@ -283,6 +286,8 @@ static void wilc_bus_remove(struct spi_device *spi)
+ 	struct wilc_spi *spi_priv = wilc->bus_data;
+ 
+ 	wilc_netdev_cleanup(wilc);
++	wiphy_unregister(wilc->wiphy);
++	wiphy_free(wilc->wiphy);
+ 	kfree(spi_priv);
+ }
+ 
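
Across the wilc1000 netdev.c, sdio.c and spi.c hunks, wiphy_unregister()/wiphy_free() move out of the shared cleanup helper so the probe error paths and the remove paths can order them correctly. A sketch of the ordered-error-label pattern, all functions being hypothetical stand-ins:

#include <stdio.h>

static int register_wiphy(void)    { return 0; }
static void unregister_wiphy(void) { puts("unregister_wiphy"); }
static void free_wiphy(void)       { puts("free_wiphy"); }
static int load_mac(void)          { return -1; /* simulate failure */ }
static void netdev_cleanup(void)   { puts("netdev_cleanup"); }

static int probe(void)
{
	int ret;

	ret = register_wiphy();
	if (ret)
		return ret;

	ret = load_mac();
	if (ret)
		goto unregister;	/* wiphy registered: unregister first */

	return 0;

unregister:
	unregister_wiphy();
	netdev_cleanup();
	free_wiphy();		/* free only after every user is gone */
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
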
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index aab4605de9c47c..ff61867d142fa4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
+ 
+ void rtl_deinit_core(struct ieee80211_hw *hw)
+ {
++	struct rtl_priv *rtlpriv = rtl_priv(hw);
++
+ 	rtl_c2hcmd_launcher(hw, 0);
+ 	rtl_free_entries_from_scan_list(hw);
+ 	rtl_free_entries_from_ack_queue(hw, false);
++	if (rtlpriv->works.rtl_wq) {
++		destroy_workqueue(rtlpriv->works.rtl_wq);
++		rtlpriv->works.rtl_wq = NULL;
++	}
+ }
+ EXPORT_SYMBOL_GPL(rtl_deinit_core);
+ 
+@@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+ 
+-struct rtl_global_var rtl_global_var = {};
+-EXPORT_SYMBOL_GPL(rtl_global_var);
+-
+ static int __init rtl_core_module_init(void)
+ {
+ 	BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
+@@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
+ 	/* add debugfs */
+ 	rtl_debugfs_add_topdir();
+ 
+-	/* init some global vars */
+-	INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+-	spin_lock_init(&rtl_global_var.glb_list_lock);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
+index f081a9a90563f5..f3a6a43a42eca8 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.h
++++ b/drivers/net/wireless/realtek/rtlwifi/base.h
+@@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
+ u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
+ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
+ u8 rtl_tid_to_ac(u8 tid);
+-extern struct rtl_global_var rtl_global_var;
+ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
+ 
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 11709b6c83f1aa..0eafc4d125f91d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+ 	return status;
+ }
+ 
+-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
+-				     struct rtl_priv **buddy_priv)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+-	struct rtl_priv *tpriv = NULL, *iter;
+-	struct rtl_pci_priv *tpcipriv = NULL;
+-
+-	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
+-		list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
+-				    list) {
+-			tpcipriv = (struct rtl_pci_priv *)iter->priv;
+-			rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-				"pcipriv->ndis_adapter.funcnumber %x\n",
+-				pcipriv->ndis_adapter.funcnumber);
+-			rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-				"tpcipriv->ndis_adapter.funcnumber %x\n",
+-				tpcipriv->ndis_adapter.funcnumber);
+-
+-			if (pcipriv->ndis_adapter.busnumber ==
+-			    tpcipriv->ndis_adapter.busnumber &&
+-			    pcipriv->ndis_adapter.devnumber ==
+-			    tpcipriv->ndis_adapter.devnumber &&
+-			    pcipriv->ndis_adapter.funcnumber !=
+-			    tpcipriv->ndis_adapter.funcnumber) {
+-				tpriv = iter;
+-				break;
+-			}
+-		}
+-	}
+-
+-	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+-		"find_buddy_priv %d\n", tpriv != NULL);
+-
+-	if (tpriv)
+-		*buddy_priv = tpriv;
+-
+-	return tpriv != NULL;
+-}
+-
+ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ 					struct ieee80211_hw *hw)
+ {
+@@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
+ 	synchronize_irq(rtlpci->pdev->irq);
+ 	tasklet_kill(&rtlpriv->works.irq_tasklet);
+ 	cancel_work_sync(&rtlpriv->works.lps_change_work);
+-
+-	destroy_workqueue(rtlpriv->works.rtl_wq);
+ }
+ 
+ static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+@@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ 		pcipriv->ndis_adapter.amd_l1_patch);
+ 
+ 	rtl_pci_parse_configuration(pdev, hw);
+-	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
+ 
+ 	return true;
+ }
+@@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	rtlpriv->rtlhal.interface = INTF_PCI;
+ 	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+ 	rtlpriv->intf_ops = &rtl_pci_ops;
+-	rtlpriv->glb_var = &rtl_global_var;
+ 	rtl_efuse_ops_init(hw);
+ 
+ 	/* MEM map */
+@@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ 		pr_err("Can't init_sw_vars\n");
+ 		err = -ENODEV;
+-		goto fail3;
++		goto fail2;
+ 	}
+ 	rtl_init_sw_leds(hw);
+ 
+@@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	err = rtl_pci_init(hw, pdev);
+ 	if (err) {
+ 		pr_err("Failed to init PCI\n");
+-		goto fail3;
++		goto fail4;
+ 	}
+ 
+ 	err = ieee80211_register_hw(hw);
+ 	if (err) {
+ 		pr_err("Can't register mac80211 hw.\n");
+ 		err = -ENODEV;
+-		goto fail3;
++		goto fail5;
+ 	}
+ 	rtlpriv->mac80211.mac80211_registered = 1;
+ 
+@@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ 	return 0;
+ 
+-fail3:
+-	pci_set_drvdata(pdev, NULL);
++fail5:
++	rtl_pci_deinit(hw);
++fail4:
+ 	rtl_deinit_core(hw);
++fail3:
++	wait_for_completion(&rtlpriv->firmware_loading_complete);
++	rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ 
+ fail2:
+ 	if (rtlpriv->io.pci_mem_start != 0)
+ 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ 
+ 	pci_release_regions(pdev);
+-	complete(&rtlpriv->firmware_loading_complete);
+ 
+ fail1:
+ 	if (hw)
+@@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
+ 	if (rtlpci->using_msi)
+ 		pci_disable_msi(rtlpci->pdev);
+ 
+-	list_del(&rtlpriv->list);
+ 	if (rtlpriv->io.pci_mem_start != 0) {
+ 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ 		pci_release_regions(pdev);
+@@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
+ const struct rtl_intf_ops rtl_pci_ops = {
+ 	.adapter_start = rtl_pci_start,
+ 	.adapter_stop = rtl_pci_stop,
+-	.check_buddy_priv = rtl_pci_check_buddy_priv,
+ 	.adapter_tx = rtl_pci_tx,
+ 	.flush = rtl_pci_flush,
+ 	.reset_trx_ring = rtl_pci_reset_trx_ring,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index bbf8ff63dcedb4..e63c67b1861b5f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
+ 
+ 	rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ 		"Firmware callback routine entered!\n");
+-	complete(&rtlpriv->firmware_loading_complete);
+ 	if (!firmware) {
+ 		pr_err("Firmware %s not available\n", fw_name);
+ 		rtlpriv->max_fw_size = 0;
+-		return;
++		goto exit;
+ 	}
+ 	if (firmware->size > rtlpriv->max_fw_size) {
+ 		pr_err("Firmware is too big!\n");
+ 		rtlpriv->max_fw_size = 0;
+ 		release_firmware(firmware);
+-		return;
++		goto exit;
+ 	}
+ 	pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
+ 	memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
+ 	pfirmware->sz_fw_tmpbufferlen = firmware->size;
+ 	release_firmware(firmware);
++exit:
++	complete(&rtlpriv->firmware_loading_complete);
+ }
+ 
+ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index 1be51ea3f3c820..9eddbada8af12c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ 			if (!_rtl8821ae_check_condition(hw, v1)) {
+ 				i += 2; /* skip the pair of expression*/
+ 				v2 = array[i+1];
+-				while (v2 != 0xDEAD)
++				while (v2 != 0xDEAD) {
+ 					i += 3;
++					v2 = array[i + 1];
++				}
+ 			}
+ 		}
+ 	}
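
The loop above originally read v2 once and never refreshed it, so any table whose next entry was not 0xDEAD spun forever. A standalone sketch of the fixed loop over a triplet table terminated by the 0xDEAD sentinel:

#include <stdio.h>

int main(void)
{
	unsigned int array[] = { 1, 2, 3, 4, 5, 6, 0, 0xDEAD, 0 };
	unsigned int i = 0, v2;

	v2 = array[i + 1];
	while (v2 != 0xDEAD) {
		i += 3;
		v2 = array[i + 1];	/* the fix: refresh v2 each pass */
	}
	printf("sentinel at i=%u\n", i);	/* i=6, array[7]==0xDEAD */
	return 0;
}
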
+diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
+index d37a017b2b814f..f5718e570011e6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
+@@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
+ 	tasklet_kill(&rtlusb->rx_work_tasklet);
+ 	cancel_work_sync(&rtlpriv->works.lps_change_work);
+ 
+-	if (rtlpriv->works.rtl_wq) {
+-		destroy_workqueue(rtlpriv->works.rtl_wq);
+-		rtlpriv->works.rtl_wq = NULL;
+-	}
+-
+ 	skb_queue_purge(&rtlusb->rx_queue);
+ 
+ 	while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
+@@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
+ 	err = ieee80211_register_hw(hw);
+ 	if (err) {
+ 		pr_err("Can't register mac80211 hw.\n");
+-		goto error_out;
++		goto error_init_vars;
+ 	}
+ 	rtlpriv->mac80211.mac80211_registered = 1;
+ 
+ 	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ 	return 0;
+ 
++error_init_vars:
++	wait_for_completion(&rtlpriv->firmware_loading_complete);
++	rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ error_out:
++	rtl_usb_deinit(hw);
+ 	rtl_deinit_core(hw);
+ error_out2:
+ 	_rtl_usb_io_handler_release(hw);
+ 	usb_put_dev(udev);
+-	complete(&rtlpriv->firmware_loading_complete);
+ 	kfree(rtlpriv->usb_data);
+ 	ieee80211_free_hw(hw);
+ 	return -ENODEV;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index ae6e351bc83c91..f1830ddcdd8c19 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
+ 	/*com */
+ 	int (*adapter_start)(struct ieee80211_hw *hw);
+ 	void (*adapter_stop)(struct ieee80211_hw *hw);
+-	bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+-				 struct rtl_priv **buddy_priv);
+ 
+ 	int (*adapter_tx)(struct ieee80211_hw *hw,
+ 			  struct ieee80211_sta *sta,
+@@ -2514,14 +2512,6 @@ struct dig_t {
+ 	u32 rssi_max;
+ };
+ 
+-struct rtl_global_var {
+-	/* from this list we can get
+-	 * other adapter's rtl_priv
+-	 */
+-	struct list_head glb_priv_list;
+-	spinlock_t glb_list_lock;
+-};
+-
+ #define IN_4WAY_TIMEOUT_TIME	(30 * MSEC_PER_SEC)	/* 30 seconds */
+ 
+ struct rtl_btc_info {
+@@ -2667,9 +2657,7 @@ struct rtl_scan_list {
+ struct rtl_priv {
+ 	struct ieee80211_hw *hw;
+ 	struct completion firmware_loading_complete;
+-	struct list_head list;
+ 	struct rtl_priv *buddy_priv;
+-	struct rtl_global_var *glb_var;
+ 	struct rtl_dmsp_ctl dmsp_ctl;
+ 	struct rtl_locks locks;
+ 	struct rtl_works works;
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
+index fb9449930c40aa..4df4e04c3e67d7 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.c
++++ b/drivers/net/wireless/realtek/rtw89/chan.c
+@@ -391,11 +391,12 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
+ 
+ 				list_del(&role->mgnt_entry);
+ 				list_add(&role->mgnt_entry, &mgnt->active_list);
+-				break;
++				goto fill;
+ 			}
+ 		}
+ 	}
+ 
++fill:
+ 	list_for_each_entry(role, &mgnt->active_list, mgnt_entry) {
+ 		if (unlikely(pos >= RTW89_MAX_INTERFACE_NUM)) {
+ 			rtw89_warn(rtwdev,
+@@ -801,7 +802,7 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
+ 
+ 	mcc_role->limit.max_toa = max_toa_us / 1024;
+ 	mcc_role->limit.max_tob = max_tob_us / 1024;
+-	mcc_role->limit.max_dur = max_dur_us / 1024;
++	mcc_role->limit.max_dur = mcc_role->limit.max_toa + mcc_role->limit.max_tob;
+ 	mcc_role->limit.enable = true;
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+@@ -2530,7 +2531,25 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+ 	hal->entity_pause = true;
+ }
+ 
+-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
++static void rtw89_chanctx_proceed_cb(struct rtw89_dev *rtwdev,
++				     const struct rtw89_chanctx_cb_parm *parm)
++{
++	int ret;
++
++	if (!parm || !parm->cb)
++		return;
++
++	ret = parm->cb(rtwdev, parm->data);
++	if (ret)
++		rtw89_warn(rtwdev, "%s (%s): cb failed: %d\n", __func__,
++			   parm->caller ?: "unknown", ret);
++}
++
++/* Pass @cb_parm if there is a @cb_parm->cb that needs to be invoked right
++ * after calling rtw89_set_channel() and before proceeding with the entity mode.
++ */
++void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
++			   const struct rtw89_chanctx_cb_parm *cb_parm)
+ {
+ 	struct rtw89_hal *hal = &rtwdev->hal;
+ 	enum rtw89_entity_mode mode;
+@@ -2538,14 +2557,18 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+ 
+ 	lockdep_assert_held(&rtwdev->mutex);
+ 
+-	if (!hal->entity_pause)
++	if (unlikely(!hal->entity_pause)) {
++		rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
+ 		return;
++	}
+ 
+ 	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");
+ 
+ 	hal->entity_pause = false;
+ 	rtw89_set_channel(rtwdev);
+ 
++	rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
++
+ 	mode = rtw89_get_entity_mode(rtwdev);
+ 	switch (mode) {
+ 	case RTW89_ENTITY_MODE_MCC:
+diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
+index 2eb31dff208310..092a6f676894f5 100644
+--- a/drivers/net/wireless/realtek/rtw89/chan.h
++++ b/drivers/net/wireless/realtek/rtw89/chan.h
+@@ -38,6 +38,12 @@ enum rtw89_chanctx_pause_reasons {
+ 	RTW89_CHANCTX_PAUSE_REASON_ROC,
+ };
+ 
++struct rtw89_chanctx_cb_parm {
++	int (*cb)(struct rtw89_dev *rtwdev, void *data);
++	void *data;
++	const char *caller;
++};
++
+ struct rtw89_entity_weight {
+ 	unsigned int active_chanctxs;
+ 	unsigned int active_roles;
+@@ -100,7 +106,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
+ void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
+ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+ 			 enum rtw89_chanctx_pause_reasons rsn);
+-void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev);
++void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
++			   const struct rtw89_chanctx_cb_parm *cb_parm);
+ 
+ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
+ 					       const char *caller_message,
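
rtw89_chanctx_proceed() now takes an optional callback bundle so a caller can have work run at a fixed point in the sequence. A minimal sketch of that pattern; only cb/data/caller mirror the patch, the rest is hypothetical:

#include <stdio.h>

struct cb_parm {
	int (*cb)(void *data);
	void *data;
	const char *caller;
};

static void run_cb(const struct cb_parm *parm)
{
	int ret;

	if (!parm || !parm->cb)
		return;		/* the callback is optional */

	ret = parm->cb(parm->data);
	if (ret)
		fprintf(stderr, "%s: cb failed: %d\n",
			parm->caller ? parm->caller : "unknown", ret);
}

static int my_cb(void *data)
{
	printf("cb ran with %d\n", *(int *)data);
	return 0;
}

int main(void)
{
	int v = 42;
	const struct cb_parm parm = {
		.cb = my_cb, .data = &v, .caller = __func__,
	};

	/* set_channel(); the callback then runs right after, as in the patch */
	run_cb(&parm);
	return 0;
}
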
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index e5b2968c1431f2..4027cda39024cc 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -931,6 +931,11 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
+ 	bool is_bmc;
+ 	u16 seq;
+ 
++	if (tx_req->sta)
++		desc_info->mlo = tx_req->sta->mlo;
++	else if (tx_req->vif)
++		desc_info->mlo = ieee80211_vif_is_mld(tx_req->vif);
++
+ 	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ 	if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
+ 		tx_type = rtw89_core_get_tx_type(rtwdev, skb);
+@@ -938,7 +943,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
+ 
+ 		addr_cam = rtw89_get_addr_cam_of(tx_req->rtwvif_link,
+ 						 tx_req->rtwsta_link);
+-		if (addr_cam->valid)
++		if (addr_cam->valid && desc_info->mlo)
+ 			upd_wlan_hdr = true;
+ 	}
+ 	is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
+@@ -1078,6 +1083,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ 	}
+ 
+ 	tx_req.skb = skb;
++	tx_req.vif = vif;
++	tx_req.sta = sta;
+ 	tx_req.rtwvif_link = rtwvif_link;
+ 	tx_req.rtwsta_link = rtwsta_link;
+ 
+@@ -3257,7 +3264,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+ 
+ 	roc->state = RTW89_ROC_IDLE;
+ 	rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
+-	rtw89_chanctx_proceed(rtwdev);
++	rtw89_chanctx_proceed(rtwdev, NULL);
+ 	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
+ 	if (ret)
+ 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
+index 5ad32eacd0d50c..41bec362ac2295 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -1163,12 +1163,15 @@ struct rtw89_tx_desc_info {
+ 	bool stbc;
+ 	bool ldpc;
+ 	bool upd_wlan_hdr;
++	bool mlo;
+ };
+ 
+ struct rtw89_core_tx_request {
+ 	enum rtw89_core_tx_type tx_type;
+ 
+ 	struct sk_buff *skb;
++	struct ieee80211_vif *vif;
++	struct ieee80211_sta *sta;
+ 	struct rtw89_vif_link *rtwvif_link;
+ 	struct rtw89_sta_link *rtwsta_link;
+ 	struct rtw89_tx_desc_info desc_info;
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index 2191c037d72e40..9146a7e1ed66aa 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -6780,22 +6780,25 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ 	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
+ }
+ 
+-void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+-			    struct rtw89_vif_link *rtwvif_link,
+-			    bool aborted)
++struct rtw89_hw_scan_complete_cb_data {
++	struct rtw89_vif_link *rtwvif_link;
++	bool aborted;
++};
++
++static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
+ {
+ 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
++	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
++	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
+ 	struct cfg80211_scan_info info = {
+-		.aborted = aborted,
++		.aborted = cb_data->aborted,
+ 	};
+ 	struct rtw89_vif *rtwvif;
+ 	u32 reg;
+ 
+ 	if (!rtwvif_link)
+-		return;
+-
+-	rtw89_chanctx_proceed(rtwdev);
++		return -EINVAL;
+ 
+ 	rtwvif = rtwvif_link->rtwvif;
+ 
+@@ -6814,6 +6817,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
+ 	scan_info->last_chan_idx = 0;
+ 	scan_info->scanning_vif = NULL;
+ 	scan_info->abort = false;
++
++	return 0;
++}
++
++void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
++			    struct rtw89_vif_link *rtwvif_link,
++			    bool aborted)
++{
++	struct rtw89_hw_scan_complete_cb_data cb_data = {
++		.rtwvif_link = rtwvif_link,
++		.aborted = aborted,
++	};
++	const struct rtw89_chanctx_cb_parm cb_parm = {
++		.cb = rtw89_hw_scan_complete_cb,
++		.data = &cb_data,
++		.caller = __func__,
++	};
++
++	/* These steps need to be done after setting the channel (for coex)
++	 * and before proceeding with the entity mode (for MCC), so pass them
++	 * as a callback to get the right sequence rather than doing them directly.
++	 */
++	rtw89_chanctx_proceed(rtwdev, &cb_parm);
+ }
+ 
+ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
+diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
+index 619d2d3771d52f..03fb076e8c9546 100644
+--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
+@@ -189,10 +189,10 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ 
+ 	rtw89_core_txq_init(rtwdev, vif->txq);
+ 
+-	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif))
++	if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
+ 		list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+-
+-	INIT_LIST_HEAD(&rtwvif->mgnt_entry);
++		INIT_LIST_HEAD(&rtwvif->mgnt_entry);
++	}
+ 
+ 	ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ 
+@@ -1273,11 +1273,11 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ 	if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
+ 		return;
+ 
+-	if (!rtwdev->scanning)
+-		return;
+-
+ 	mutex_lock(&rtwdev->mutex);
+ 
++	if (!rtwdev->scanning)
++		goto out;
++
+ 	rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ 	if (unlikely(!rtwvif_link)) {
+ 		rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 986b07bfa0ee81..8fb58a5d911cb7 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -2612,24 +2612,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ 		ret = -EBUSY;
+-		goto out;
++		goto out_unlock;
+ 	}
+ 
+ 
+ 	ret = wl12xx_init_vif_data(wl, vif);
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	wlvif->wl = wl;
+ 	role_type = wl12xx_get_role_type(wl, wlvif);
+ 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ 		ret = -EINVAL;
+-		goto out;
++		goto out_unlock;
+ 	}
+ 
+ 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	/*
+ 	 * TODO: after the nvs issue will be solved, move this block
+@@ -2644,7 +2644,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ 
+ 		ret = wl12xx_init_fw(wl);
+ 		if (ret < 0)
+-			goto out;
++			goto out_unlock;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a970168a3014e6..12e7ae1f99e208 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3092,7 +3092,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
+ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ 				struct nvme_effects_log **log)
+ {
+-	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
++	struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
+ 	int ret;
+ 
+ 	if (cel)
+@@ -3109,7 +3109,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ 		return ret;
+ 	}
+ 
+-	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
++	old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
++	if (xa_is_err(old)) {
++		kfree(cel);
++		return xa_err(old);
++	}
+ out:
+ 	*log = cel;
+ 	return 0;
+@@ -3171,6 +3175,25 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+ 	return ret;
+ }
+ 
++static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
++		u8 csi, struct nvme_effects_log **log)
++{
++	struct nvme_effects_log *effects, *old;
++
++	effects = kzalloc(sizeof(*effects), GFP_KERNEL);
++	if (!effects)
++		return -ENOMEM;
++
++	old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
++	if (xa_is_err(old)) {
++		kfree(effects);
++		return xa_err(old);
++	}
++
++	*log = effects;
++	return 0;
++}
++
+ static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+ {
+ 	struct nvme_effects_log	*log = ctrl->effects;
+@@ -3217,10 +3240,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	}
+ 
+ 	if (!ctrl->effects) {
+-		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+-		if (!ctrl->effects)
+-			return -ENOMEM;
+-		xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
++		ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
++		if (ret < 0)
++			return ret;
+ 	}
+ 
+ 	nvme_init_known_nvm_effects(ctrl);
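
Both nvme hunks above now check the store's return value and free the fresh allocation when insertion fails. A generic sketch of that leak fix, where cache_store() is a hypothetical stand-in for xa_store():

#include <errno.h>
#include <stdlib.h>

static void *cache[8];

static int cache_store(unsigned int idx, void *obj)
{
	if (idx >= 8)
		return -ENOMEM;	/* simulated insertion failure */
	cache[idx] = obj;
	return 0;
}

static int init_entry(unsigned int idx, int **out)
{
	int *obj, ret;

	obj = calloc(1, sizeof(*obj));
	if (!obj)
		return -ENOMEM;

	ret = cache_store(idx, obj);
	if (ret) {
		free(obj);	/* the fix: don't leak on store failure */
		return ret;
	}

	*out = obj;
	return 0;
}

int main(void)
{
	int *entry;

	if (init_entry(9, &entry) != -ENOMEM)	/* store fails, obj freed */
		return 1;
	return init_entry(0, &entry);		/* succeeds */
}
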
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index b127d41dbbfee1..841238f38fddab 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
+ 		 "nvme TLS handshake timeout in seconds (default 10)");
+ #endif
+ 
++static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /* lockdep can detect a circular dependency of the form
+  *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+@@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags {
+ 	NVME_TCP_Q_ALLOCATED	= 0,
+ 	NVME_TCP_Q_LIVE		= 1,
+ 	NVME_TCP_Q_POLLING	= 2,
++	NVME_TCP_Q_IO_CPU_SET	= 3,
+ };
+ 
+ enum nvme_tcp_recv_state {
+@@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+ 			  ctrl->io_queues[HCTX_TYPE_POLL];
+ }
+ 
++/*
++ * Track the number of queues assigned to each cpu using a global per-cpu
++ * counter and select the least used cpu from the mq_map. Our goal is to
++ * spread different controllers' I/O threads across different cpu cores.
++ *
++ * Note that the accounting is not 100% perfect, but it doesn't need to be;
++ * we're simply making a best effort to select the best candidate cpu core
++ * that we find at any given point.
++ */
+ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+ {
+ 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+-	int qid = nvme_tcp_queue_id(queue);
+-	int n = 0;
++	struct blk_mq_tag_set *set = &ctrl->tag_set;
++	int qid = nvme_tcp_queue_id(queue) - 1;
++	unsigned int *mq_map = NULL;
++	int cpu, min_queues = INT_MAX, io_cpu;
++
++	if (wq_unbound)
++		goto out;
+ 
+ 	if (nvme_tcp_default_queue(queue))
+-		n = qid - 1;
++		mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
+ 	else if (nvme_tcp_read_queue(queue))
+-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
++		mq_map = set->map[HCTX_TYPE_READ].mq_map;
+ 	else if (nvme_tcp_poll_queue(queue))
+-		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+-				ctrl->io_queues[HCTX_TYPE_READ] - 1;
+-	if (wq_unbound)
+-		queue->io_cpu = WORK_CPU_UNBOUND;
+-	else
+-		queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
++		mq_map = set->map[HCTX_TYPE_POLL].mq_map;
++
++	if (WARN_ON(!mq_map))
++		goto out;
++
++	/* Search for the least used cpu from the mq_map */
++	io_cpu = WORK_CPU_UNBOUND;
++	for_each_online_cpu(cpu) {
++		int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
++
++		if (mq_map[cpu] != qid)
++			continue;
++		if (num_queues < min_queues) {
++			io_cpu = cpu;
++			min_queues = num_queues;
++		}
++	}
++	if (io_cpu != WORK_CPU_UNBOUND) {
++		queue->io_cpu = io_cpu;
++		atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
++		set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
++	}
++out:
++	dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
++		qid, queue->io_cpu);
+ }
+ 
+ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
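
The comment in the hunk above describes the selection policy: among the cpus mapped to this queue, pick the one with the fewest queues already assigned. A userspace sketch with plain arrays standing in for the per-cpu atomics and the block-mq map:

#include <limits.h>
#include <stdio.h>

#define NCPUS 4

int main(void)
{
	int cpu_queues[NCPUS] = { 2, 1, 3, 1 };	/* current load per cpu */
	int mq_map[NCPUS]     = { 0, 1, 1, 1 };	/* cpu -> queue id */
	int qid = 1, cpu, io_cpu = -1, min_queues = INT_MAX;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;	/* cpu not mapped to this queue */
		if (cpu_queues[cpu] < min_queues) {
			io_cpu = cpu;
			min_queues = cpu_queues[cpu];
		}
	}
	if (io_cpu >= 0)
		cpu_queues[io_cpu]++;	/* account for the new queue */

	printf("queue %d -> cpu %d\n", qid, io_cpu);	/* cpu 1 */
	return 0;
}
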
+@@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
+ 
+ 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
+ 	queue->sock->sk->sk_use_task_frag = false;
+-	nvme_tcp_set_queue_io_cpu(queue);
++	queue->io_cpu = WORK_CPU_UNBOUND;
+ 	queue->request = NULL;
+ 	queue->data_remaining = 0;
+ 	queue->ddgst_remaining = 0;
+@@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ 	if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ 		return;
+ 
++	if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
++		atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
++
+ 	mutex_lock(&queue->queue_lock);
+ 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ 		__nvme_tcp_stop_queue(queue);
+@@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+ 	nvme_tcp_init_recv_ctx(queue);
+ 	nvme_tcp_setup_sock_ops(queue);
+ 
+-	if (idx)
++	if (idx) {
++		nvme_tcp_set_queue_io_cpu(queue);
+ 		ret = nvmf_connect_io_queue(nctrl, idx);
+-	else
++	} else
+ 		ret = nvmf_connect_admin_queue(nctrl);
+ 
+ 	if (!ret) {
+@@ -2845,6 +2885,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
+ static int __init nvme_tcp_init_module(void)
+ {
+ 	unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
++	int cpu;
+ 
+ 	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+ 	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+@@ -2862,6 +2903,9 @@ static int __init nvme_tcp_init_module(void)
+ 	if (!nvme_tcp_wq)
+ 		return -ENOMEM;
+ 
++	for_each_possible_cpu(cpu)
++		atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
++
+ 	nvmf_register_transport(&nvme_tcp_transport);
+ 	return 0;
+ }
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 0121100372b41d..3b29a5c50e2e58 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -8,7 +8,6 @@
+ 
+ #define pr_fmt(fmt)	"OF: fdt: " fmt
+ 
+-#include <linux/acpi.h>
+ #include <linux/crash_dump.h>
+ #include <linux/crc32.h>
+ #include <linux/kernel.h>
+@@ -1215,14 +1214,7 @@ void __init unflatten_device_tree(void)
+ 	/* Save the statically-placed regions in the reserved_mem array */
+ 	fdt_scan_reserved_mem_reg_nodes();
+ 
+-	/* Don't use the bootloader provided DTB if ACPI is enabled */
+-	if (!acpi_disabled)
+-		fdt = NULL;
+-
+-	/*
+-	 * Populate an empty root node when ACPI is enabled or bootloader
+-	 * doesn't provide one.
+-	 */
++	/* Populate an empty root node when bootloader doesn't provide one */
+ 	if (!fdt) {
+ 		fdt = (void *) __dtb_empty_root_begin;
+ 		/* fdt_totalsize() will be used for copy size */
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 45517b9e57b1ad..b47559f11f079c 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -52,7 +52,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 			memblock_phys_free(base, size);
+ 	}
+ 
+-	kmemleak_ignore_phys(base);
++	if (!err)
++		kmemleak_ignore_phys(base);
+ 
+ 	return err;
+ }
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index cfc8aea002e439..b0633f3589de83 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1390,9 +1390,9 @@ static struct device_node *parse_interrupt_map(struct device_node *np,
+ 	addrcells = of_bus_n_addr_cells(np);
+ 
+ 	imap = of_get_property(np, "interrupt-map", &imaplen);
+-	imaplen /= sizeof(*imap);
+ 	if (!imap)
+ 		return NULL;
++	imaplen /= sizeof(*imap);
+ 
+ 	imap_end = imap + imaplen;
+ 
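
The property.c hunk moves the division below the NULL check, since the returned length is only meaningful when the lookup succeeded. A sketch of the corrected ordering, with get_blob() as a hypothetical stand-in for of_get_property():

#include <stdint.h>
#include <stdio.h>

static const uint32_t *get_blob(int present, int *lenp)
{
	static const uint32_t blob[4] = { 1, 2, 3, 4 };

	if (!present)
		return NULL;	/* *lenp intentionally left untouched */
	*lenp = sizeof(blob);
	return blob;
}

int main(void)
{
	int len;	/* uninitialized until get_blob() succeeds */
	const uint32_t *p = get_blob(1, &len);

	if (!p)
		return 1;
	len /= sizeof(*p);	/* safe: only reached when p != NULL */
	printf("%d entries\n", len);
	return 0;
}
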
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 0311b18319a458..47b1068bb98913 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -101,11 +101,30 @@ struct opp_table *_find_opp_table(struct device *dev)
+  * representation in the OPP table and manage the clock configuration themselves
+  * in an platform specific way.
+  */
+-static bool assert_single_clk(struct opp_table *opp_table)
++static bool assert_single_clk(struct opp_table *opp_table,
++			      unsigned int __always_unused index)
+ {
+ 	return !WARN_ON(opp_table->clk_count > 1);
+ }
+ 
++/*
++ * Returns true if clock table is large enough to contain the clock index.
++ */
++static bool assert_clk_index(struct opp_table *opp_table,
++			     unsigned int index)
++{
++	return opp_table->clk_count > index;
++}
++
++/*
++ * Returns true if bandwidth table is large enough to contain the bandwidth index.
++ */
++static bool assert_bandwidth_index(struct opp_table *opp_table,
++				   unsigned int index)
++{
++	return opp_table->path_count > index;
++}
++
+ /**
+  * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
+  * @opp:	opp for which voltage has to be returned for
+@@ -499,12 +518,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ 		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ 				unsigned long opp_key, unsigned long key),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ 
+ 	/* Assert that the requirement is met */
+-	if (assert && !assert(opp_table))
++	if (assert && !assert(opp_table, index))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	mutex_lock(&opp_table->lock);
+@@ -532,7 +551,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
+ 	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
+ 	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ 			  unsigned long opp_key, unsigned long key),
+-	  bool (*assert)(struct opp_table *opp_table))
++	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	struct opp_table *opp_table;
+ 	struct dev_pm_opp *opp;
+@@ -555,7 +574,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
+ static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ 		unsigned long key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	/*
+ 	 * The value of key will be updated here, but will be ignored as the
+@@ -568,7 +587,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev,
+ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ 		unsigned long *key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _opp_table_find_key(opp_table, key, index, available, read,
+ 				   _compare_ceil, assert);
+@@ -577,7 +596,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
+ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ 		int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _find_key(dev, key, index, available, read, _compare_ceil,
+ 			 assert);
+@@ -586,7 +605,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
+ static struct dev_pm_opp *_find_key_floor(struct device *dev,
+ 		unsigned long *key, int index, bool available,
+ 		unsigned long (*read)(struct dev_pm_opp *opp, int index),
+-		bool (*assert)(struct opp_table *opp_table))
++		bool (*assert)(struct opp_table *opp_table, unsigned int index))
+ {
+ 	return _find_key(dev, key, index, available, read, _compare_floor,
+ 			 assert);
+@@ -647,7 +666,8 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ 				   u32 index, bool available)
+ {
+-	return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
++	return _find_key_exact(dev, freq, index, available, _read_freq,
++			       assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
+ 
+@@ -707,7 +727,8 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
+ 				  u32 index)
+ {
+-	return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
++	return _find_key_ceil(dev, freq, index, true, _read_freq,
++			      assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
+ 
+@@ -760,7 +781,7 @@ struct dev_pm_opp *
+ dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
+ 				   u32 index)
+ {
+-	return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
++	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
+ }
+ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
+ 
+@@ -878,7 +899,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
+ 	unsigned long temp = *bw;
+ 	struct dev_pm_opp *opp;
+ 
+-	opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
++	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
++			     assert_bandwidth_index);
+ 	*bw = temp;
+ 	return opp;
+ }
+@@ -909,7 +931,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ 	unsigned long temp = *bw;
+ 	struct dev_pm_opp *opp;
+ 
+-	opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
++	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
++			      assert_bandwidth_index);
+ 	*bw = temp;
+ 	return opp;
+ }
+@@ -1702,7 +1725,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+ 	if (IS_ERR(opp_table))
+ 		return;
+ 
+-	if (!assert_single_clk(opp_table))
++	if (!assert_single_clk(opp_table, 0))
+ 		goto put_table;
+ 
+ 	mutex_lock(&opp_table->lock);
+@@ -2054,7 +2077,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
+ 	unsigned long tol, u_volt = data->u_volt;
+ 	int ret;
+ 
+-	if (!assert_single_clk(opp_table))
++	if (!assert_single_clk(opp_table, 0))
+ 		return -EINVAL;
+ 
+ 	new_opp = _opp_allocate(opp_table);
+@@ -2810,7 +2833,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
+ 		return r;
+ 	}
+ 
+-	if (!assert_single_clk(opp_table)) {
++	if (!assert_single_clk(opp_table, 0)) {
+ 		r = -EINVAL;
+ 		goto put_table;
+ 	}
+@@ -2886,7 +2909,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ 		return r;
+ 	}
+ 
+-	if (!assert_single_clk(opp_table)) {
++	if (!assert_single_clk(opp_table, 0)) {
+ 		r = -EINVAL;
+ 		goto put_table;
+ 	}
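
The OPP hunks widen the assert callbacks to take the index being looked up, so indexed lookups can be validated against the table size. A sketch of the widened signature, with a hypothetical stand-in for struct opp_table:

#include <stdbool.h>
#include <stdio.h>

struct table {
	unsigned int clk_count;
};

static bool assert_clk_index(struct table *t, unsigned int index)
{
	return t->clk_count > index;	/* index must fit in the table */
}

static int find_key(struct table *t, unsigned int index,
		    bool (*assert)(struct table *, unsigned int))
{
	if (assert && !assert(t, index))
		return -1;	/* -EINVAL in the kernel code */
	return 0;
}

int main(void)
{
	struct table t = { .clk_count = 2 };

	printf("%d\n", find_key(&t, 1, assert_clk_index));	/* 0 */
	printf("%d\n", find_key(&t, 2, assert_clk_index));	/* -1 */
	return 0;
}
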
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index fd5ed285825881..a24f76f5fd0172 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -926,7 +926,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 
+ 	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+ 	if (ret)
+-		goto free_opp;
++		goto put_node;
+ 
+ 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ 		new_opp->clock_latency_ns = val;
+@@ -976,6 +976,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 
+ free_required_opps:
+ 	_of_opp_free_required_opps(opp_table, new_opp);
++put_node:
++	of_node_put(np);
+ free_opp:
+ 	_opp_free(new_opp);
+ 
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index c8d5c90aa4d45b..ad3028b755d16a 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -598,10 +598,9 @@ static int imx_pcie_attach_pd(struct device *dev)
+ 
+ static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+-	if (enable)
+-		regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+-				  IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+-
++	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
++			   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
++			   enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ 	return 0;
+ }
+ 
+@@ -630,19 +629,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+ 	int offset = imx_pcie_grp_offset(imx_pcie);
+ 
+-	if (enable) {
+-		regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+-		regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+-	}
+-
++	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
++			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
++			   enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
++	regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
++			   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
++			   enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ 	return 0;
+ }
+ 
+ static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+ {
+-	if (!enable)
+-		regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+-				IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
++	regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
++			   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
++			   enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ 	return 0;
+ }
+ 
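
The imx6 hunks replace conditional set/clear calls with a single regmap_update_bits() whose value depends on enable, so the bit is written in both directions. A sketch of the read-modify-write pattern with the register simulated by a plain variable; the bit position is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define TEST_POWERDOWN (1u << 30)	/* hypothetical bit position */

static uint32_t reg;

static void update_bits(uint32_t *r, uint32_t mask, uint32_t val)
{
	*r = (*r & ~mask) | (val & mask);
}

static void enable_ref_clk(int enable)
{
	/* clear the powerdown bit on enable, set it on disable */
	update_bits(&reg, TEST_POWERDOWN, enable ? 0 : TEST_POWERDOWN);
}

int main(void)
{
	enable_ref_clk(0);
	printf("disabled: %08x\n", reg);	/* bit set */
	enable_ref_clk(1);
	printf("enabled:  %08x\n", reg);	/* bit cleared */
	return 0;
}
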
+@@ -775,6 +775,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+ static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+ {
+ 	reset_control_deassert(imx_pcie->pciephy_reset);
++	reset_control_deassert(imx_pcie->apps_reset);
+ 
+ 	if (imx_pcie->drvdata->core_reset)
+ 		imx_pcie->drvdata->core_reset(imx_pcie, false);
+@@ -966,7 +967,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
+ 			goto err_clk_disable;
+ 		}
+ 
+-		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
++		ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
++				       imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
++						PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ 		if (ret) {
+ 			dev_err(dev, "unable to set PCIe PHY mode\n");
+ 			goto err_phy_exit;
+@@ -1391,7 +1394,6 @@ static int imx_pcie_probe(struct platform_device *pdev)
+ 	switch (imx_pcie->drvdata->variant) {
+ 	case IMX8MQ:
+ 	case IMX8MQ_EP:
+-	case IMX7D:
+ 		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+ 			imx_pcie->controller_id = 1;
+ 		break;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index d2291c3ceb8bed..cf146ff6a3ea8a 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -946,6 +946,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+ 		return ret;
+ 	}
+ 
++	dw_pcie_stop_link(pci);
+ 	if (pci->pp.ops->deinit)
+ 		pci->pp.ops->deinit(&pci->pp);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index dc102d8bd58c69..91b7f9ec78bcc9 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+ 		pci_lock_rescan_remove();
+ 		pci_rescan_bus(pp->bridge->bus);
+ 		pci_unlock_rescan_remove();
++
++		qcom_pcie_icc_opp_update(pcie);
+ 	} else {
+ 		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ 			      status);
+diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
+index 047e2cef5afcd5..c5e0d025bc4359 100644
+--- a/drivers/pci/controller/pcie-rcar-ep.c
++++ b/drivers/pci/controller/pcie-rcar-ep.c
+@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
+ 		}
+ 		if (!devm_request_mem_region(&pdev->dev, res->start,
+ 					     resource_size(res),
+-					     outbound_name)) {
++					     res->name)) {
+ 			dev_err(pcie->dev, "Cannot request memory region %s.\n",
+ 				outbound_name);
+ 			return -EIO;
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index 1064b7b06cef64..85ea36df2f59ab 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -40,6 +40,10 @@
+  * @irq_pci_fn: the latest PCI function that has updated the mapping of
+  *		the MSI/INTX IRQ dedicated outbound region.
+  * @irq_pending: bitmask of asserted INTX IRQs.
++ * @perst_irq: IRQ used for the PERST# signal.
++ * @perst_asserted: True if the PERST# signal was asserted.
++ * @link_up: True if the PCI link is up.
++ * @link_training: Work item to execute PCI link training.
+  */
+ struct rockchip_pcie_ep {
+ 	struct rockchip_pcie	rockchip;
+@@ -784,6 +788,7 @@ static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
+ 						  SZ_1M);
+ 	if (!ep->irq_cpu_addr) {
+ 		dev_err(dev, "failed to reserve memory space for MSI\n");
++		err = -ENOMEM;
+ 		goto err_epc_mem_exit;
+ 	}
+ 
+diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
+index 6630cacef30105..3fdfffdf027001 100644
+--- a/drivers/pci/controller/plda/pcie-microchip-host.c
++++ b/drivers/pci/controller/plda/pcie-microchip-host.c
+@@ -7,20 +7,27 @@
+  * Author: Daire McNamara <daire.mcnamara@microchip.com>
+  */
+ 
++#include <linux/align.h>
++#include <linux/bits.h>
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
++#include <linux/log2.h>
+ #include <linux/module.h>
+ #include <linux/msi.h>
+ #include <linux/of_address.h>
+ #include <linux/of_pci.h>
+ #include <linux/pci-ecam.h>
+ #include <linux/platform_device.h>
++#include <linux/wordpart.h>
+ 
+ #include "../../pci.h"
+ #include "pcie-plda.h"
+ 
++#define MC_MAX_NUM_INBOUND_WINDOWS		8
++#define MPFS_NC_BOUNCE_ADDR			0x80000000
++
+ /* PCIe Bridge Phy and Controller Phy offsets */
+ #define MC_PCIE1_BRIDGE_ADDR			0x00008000u
+ #define MC_PCIE1_CTRL_ADDR			0x0000a000u
+@@ -607,6 +614,91 @@ static void mc_disable_interrupts(struct mc_pcie *port)
+ 	writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
+ }
+ 
++static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
++				      u64 axi_addr, u64 pcie_addr, u64 size)
++{
++	u32 table_offset = window_index * ATR_ENTRY_SIZE;
++	void __iomem *table_addr = port->bridge_base_addr + table_offset;
++	u32 atr_sz;
++	u32 val;
++
++	atr_sz = ilog2(size) - 1;
++
++	val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
++	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
++	val |= ATR_IMPL_ENABLE;
++
++	writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
++
++	writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
++
++	writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
++	writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
++
++	writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
++}
++
++static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
++					struct mc_pcie *port)
++{
++	struct device *dev = &pdev->dev;
++	struct device_node *dn = dev->of_node;
++	struct of_range_parser parser;
++	struct of_range range;
++	int atr_index = 0;
++
++	/*
++	 * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
++	 * Controller FPGA logic block which contains the AXI-S interface.
++	 *
++	 * From the point of view of the PCIe Root Port, there are only two
++	 * supported Root Port configurations:
++	 *
++	 * Configuration 1: for use with fully coherent designs; supports a
++	 * window from 0x0 (CPU space) to specified PCIe space.
++	 *
++	 * Configuration 2: for use with non-coherent designs; supports two
++	 * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
++	 * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
++	 * space 0xc0000000. This cfg needs two windows because of how the
++	 * MSI space is allocated in the AXI-S range on MPFS.
++	 *
++	 * The FIC interface outside the PCIe block *must* complete the
++	 * inbound address translation as per MCHP MPFS FPGA design
++	 * guidelines.
++	 */
++	if (device_property_read_bool(dev, "dma-noncoherent")) {
++		/*
++		 * Always need the same two tables in this case; two are
++		 * needed due to hardware interactions between address and size.
++		 */
++		mc_pcie_setup_inbound_atr(port, 0, 0,
++					  MPFS_NC_BOUNCE_ADDR, SZ_1G);
++		mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
++					  MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
++	} else {
++		/* Find any DMA ranges */
++		if (of_pci_dma_range_parser_init(&parser, dn)) {
++			/* No DMA range property - setup default */
++			mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
++			return 0;
++		}
++
++		for_each_of_range(&parser, &range) {
++			if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
++				dev_err(dev, "too many inbound ranges; %d available tables\n",
++					MC_MAX_NUM_INBOUND_WINDOWS);
++				return -EINVAL;
++			}
++			mc_pcie_setup_inbound_atr(port, atr_index, 0,
++						  range.pci_addr, range.size);
++			atr_index++;
++		}
++	}
++
++	return 0;
++}
++
+ static int mc_platform_init(struct pci_config_window *cfg)
+ {
+ 	struct device *dev = cfg->parent;
+@@ -627,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = mc_pcie_setup_inbound_ranges(pdev, port);
++	if (ret)
++		return ret;
++
+ 	port->plda.event_ops = &mc_event_ops;
+ 	port->plda.event_irq_chip = &mc_event_irq_chip;
+ 	port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
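
mc_pcie_setup_inbound_atr() above encodes the window size as ilog2(size) - 1 in bits 6:1 of the source parameter and keeps the low address bits 4K-aligned so they do not collide with the packed fields. A pure-C sketch of that encoding, with stand-ins for FIELD_PREP() and ALIGN_DOWN():

#include <stdint.h>
#include <stdio.h>

#define ATR_SIZE_SHIFT	1
#define ATR_IMPL_ENABLE	1u

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t pcie_addr = 0x80001234, size = 1ull << 30;	/* 1 GiB */
	uint32_t atr_sz = ilog2_u64(size) - 1;
	uint32_t val;

	val = (uint32_t)pcie_addr & ~0xfffu;	/* ALIGN_DOWN(addr, 4K) */
	val |= atr_sz << ATR_SIZE_SHIFT;	/* FIELD_PREP(GENMASK(6, 1), atr_sz) */
	val |= ATR_IMPL_ENABLE;

	printf("SRCADDR_PARAM = %08x\n", val);	/* 8000103b */
	return 0;
}
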
+diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
+index 8533dc618d45f0..4153214ca41038 100644
+--- a/drivers/pci/controller/plda/pcie-plda-host.c
++++ b/drivers/pci/controller/plda/pcie-plda-host.c
+@@ -8,11 +8,14 @@
+  * Author: Daire McNamara <daire.mcnamara@microchip.com>
+  */
+ 
++#include <linux/align.h>
++#include <linux/bitfield.h>
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
+ #include <linux/msi.h>
+ #include <linux/pci_regs.h>
+ #include <linux/pci-ecam.h>
++#include <linux/wordpart.h>
+ 
+ #include "pcie-plda.h"
+ 
+@@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_TRSL_PARAM);
+ 
+-	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
+-			    ATR_IMPL_ENABLE;
++	val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
++	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
++	val |= ATR_IMPL_ENABLE;
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_SRCADDR_PARAM);
+ 
+@@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 	val = upper_32_bits(pci_addr);
+ 	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ 	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
++}
++EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
++
++void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
++{
++	void __iomem *bridge_base_addr = port->bridge_addr;
++	u32 val;
+ 
+ 	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ 	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ 	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ 	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+ }
+-EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
++EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+ 
+ int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ 			   struct plda_pcie_rp *port)
+diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
+index 0e7dc0d8e5ba11..61ece26065ea09 100644
+--- a/drivers/pci/controller/plda/pcie-plda.h
++++ b/drivers/pci/controller/plda/pcie-plda.h
+@@ -89,14 +89,15 @@
+ 
+ /* PCIe AXI slave table init defines */
+ #define ATR0_AXI4_SLV0_SRCADDR_PARAM		0x800u
+-#define  ATR_SIZE_SHIFT				1
+-#define  ATR_IMPL_ENABLE			1
++#define  ATR_SIZE_MASK				GENMASK(6, 1)
++#define  ATR_IMPL_ENABLE			BIT(0)
+ #define ATR0_AXI4_SLV0_SRC_ADDR			0x804u
+ #define ATR0_AXI4_SLV0_TRSL_ADDR_LSB		0x808u
+ #define ATR0_AXI4_SLV0_TRSL_ADDR_UDW		0x80cu
+ #define ATR0_AXI4_SLV0_TRSL_PARAM		0x810u
+ #define  PCIE_TX_RX_INTERFACE			0x00000000u
+ #define  PCIE_CONFIG_INTERFACE			0x00000001u
++#define  TRSL_ID_AXI4_MASTER_0			0x00000004u
+ 
+ #define CONFIG_SPACE_ADDR_OFFSET		0x1000u
+ 
+@@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev,
+ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ 			    phys_addr_t axi_addr, phys_addr_t pci_addr,
+ 			    size_t size);
++void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+ int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ 			   struct plda_pcie_rp *port);
+ int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index ef6677f34116e1..b2fdd8c82c4387 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -251,7 +251,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
+ 
+ fail_back_rx:
+ 	dma_release_channel(epf_test->dma_chan_rx);
+-	epf_test->dma_chan_tx = NULL;
++	epf_test->dma_chan_rx = NULL;
+ 
+ fail_back_tx:
+ 	dma_cap_zero(mask);
+@@ -328,8 +328,8 @@ static void pci_epf_test_copy(struct pci_epf_test *epf_test,
+ 	void *copy_buf = NULL, *buf;
+ 
+ 	if (reg->flags & FLAG_USE_DMA) {
+-		if (epf_test->dma_private) {
+-			dev_err(dev, "Cannot transfer data using DMA\n");
++		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
++			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
+ 			ret = -EINVAL;
+ 			goto set_status;
+ 		}
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index bed7c7d1fe3c37..75c66882900343 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -942,7 +942,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+ {
+ 	int r;
+ 
+-	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
++	r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
+ 			   epc);
+ 	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+ }
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index f4f10c60c1d23b..dcc662be080004 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -438,9 +438,9 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
+  *  - Any spurious wake up event during switch sequence to be ignored and
+  *    cleared
+  */
+-static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
++static int nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+ {
+-	int i;
++	int i, j, ret;
+ 
+ 	for (i = 0; i < NMK_MAX_BANKS; i++) {
+ 		struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+@@ -449,11 +449,21 @@ static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+ 		if (!chip)
+ 			break;
+ 
+-		clk_enable(chip->clk);
++		ret = clk_enable(chip->clk);
++		if (ret) {
++			for (j = 0; j < i; j++) {
++				chip = nmk_gpio_chips[j];
++				clk_disable(chip->clk);
++			}
++
++			return ret;
++		}
+ 
+ 		slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
+ 		writel(temp, chip->addr + NMK_GPIO_SLPC);
+ 	}
++
++	return 0;
+ }
+ 
+ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
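
The hunk above makes clk_enable() failures unwind the clocks that were already enabled before returning. A sketch of the partial-enable rollback, where enable()/disable() are hypothetical stand-ins for clk_enable()/clk_disable():

#include <errno.h>
#include <stdio.h>

#define NBANKS 4

static int enable(int i)   { return i == 2 ? -EIO : 0; }	/* bank 2 fails */
static void disable(int i) { printf("disable bank %d\n", i); }

static int enable_all(void)
{
	int i, j, ret;

	for (i = 0; i < NBANKS; i++) {
		ret = enable(i);
		if (ret) {
			for (j = 0; j < i; j++)	/* roll back earlier banks */
				disable(j);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return enable_all() ? 1 : 0;
}
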
+@@ -923,7 +933,9 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ 
+ 			slpm[nmk_chip->bank] &= ~BIT(bit);
+ 		}
+-		nmk_gpio_glitch_slpm_init(slpm);
++		ret = nmk_gpio_glitch_slpm_init(slpm);
++		if (ret)
++			goto out_pre_slpm_init;
+ 	}
+ 
+ 	for (i = 0; i < g->grp.npins; i++) {
+@@ -940,7 +952,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned int function,
+ 		dev_dbg(npct->dev, "setting pin %d to altsetting %d\n",
+ 			g->grp.pins[i], g->altsetting);
+ 
+-		clk_enable(nmk_chip->clk);
++		ret = clk_enable(nmk_chip->clk);
++		if (ret)
++			goto out_glitch;
++
+ 		/*
+ 		 * If the pin is switching to altfunc, and there was an
+ 		 * interrupt installed on it which has been lazy disabled,
+@@ -988,6 +1003,7 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	struct nmk_gpio_chip *nmk_chip;
+ 	struct gpio_chip *chip;
+ 	unsigned int bit;
++	int ret;
+ 
+ 	if (!range) {
+ 		dev_err(npct->dev, "invalid range\n");
+@@ -1004,7 +1020,9 @@ static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 
+ 	find_nmk_gpio_from_pin(pin, &bit);
+ 
+-	clk_enable(nmk_chip->clk);
++	ret = clk_enable(nmk_chip->clk);
++	if (ret)
++		return ret;
+ 	/* There is no glitch when converting any pin to GPIO */
+ 	__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
+ 	clk_disable(nmk_chip->clk);
+@@ -1058,6 +1076,7 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 	unsigned long cfg;
+ 	int pull, slpm, output, val, i;
+ 	bool lowemi, gpiomode, sleep;
++	int ret;
+ 
+ 	nmk_chip = find_nmk_gpio_from_pin(pin, &bit);
+ 	if (!nmk_chip) {
+@@ -1116,7 +1135,9 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			output ? (val ? "high" : "low") : "",
+ 			lowemi ? "on" : "off");
+ 
+-		clk_enable(nmk_chip->clk);
++		ret = clk_enable(nmk_chip->clk);
++		if (ret)
++			return ret;
+ 		if (gpiomode)
+ 			/* No glitch when going to GPIO mode */
+ 			__nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index fff6d4209ad578..a03feb5a60dda8 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -908,12 +908,13 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+ 	return false;
+ }
+ 
+-static int amd_gpio_suspend(struct device *dev)
++static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
+ {
+ 	struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
+ 	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ 	unsigned long flags;
+ 	int i;
++	u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
+ 
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+@@ -925,11 +926,11 @@ static int amd_gpio_suspend(struct device *dev)
+ 		gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+ 
+ 		/* mask any interrupts not intended to be a wake source */
+-		if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
++		if (!(gpio_dev->saved_regs[i] & wake_mask)) {
+ 			writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
+ 			       gpio_dev->base + pin * 4);
+-			pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
+-				  pin);
++			pm_pr_dbg("Disabling GPIO #%d interrupt for %s.\n",
++				  pin, is_suspend ? "suspend" : "hibernate");
+ 		}
+ 
+ 		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+@@ -938,6 +939,16 @@ static int amd_gpio_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
++static int amd_gpio_suspend(struct device *dev)
++{
++	return amd_gpio_suspend_hibernate_common(dev, true);
++}
++
++static int amd_gpio_hibernate(struct device *dev)
++{
++	return amd_gpio_suspend_hibernate_common(dev, false);
++}
++
+ static int amd_gpio_resume(struct device *dev)
+ {
+ 	struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
+@@ -961,8 +972,12 @@ static int amd_gpio_resume(struct device *dev)
+ }
+ 
+ static const struct dev_pm_ops amd_gpio_pm_ops = {
+-	SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
+-				     amd_gpio_resume)
++	.suspend_late = amd_gpio_suspend,
++	.resume_early = amd_gpio_resume,
++	.freeze_late = amd_gpio_hibernate,
++	.thaw_early = amd_gpio_resume,
++	.poweroff_late = amd_gpio_hibernate,
++	.restore_early = amd_gpio_resume,
+ };
+ #endif
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index 667be49c3f48d2..3a1e5bffaf6e5f 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -80,10 +80,9 @@
+ #define FUNCTION_MASK		GENMASK(1, 0)
+ #define FUNCTION_INVALID	GENMASK(7, 0)
+ 
+-#define WAKE_SOURCE	(BIT(WAKE_CNTRL_OFF_S0I3) | \
+-			 BIT(WAKE_CNTRL_OFF_S3)   | \
+-			 BIT(WAKE_CNTRL_OFF_S4)   | \
+-			 BIT(WAKECNTRL_Z_OFF))
++#define WAKE_SOURCE_SUSPEND  (BIT(WAKE_CNTRL_OFF_S0I3) | \
++			      BIT(WAKE_CNTRL_OFF_S3))
++#define WAKE_SOURCE_HIBERNATE BIT(WAKE_CNTRL_OFF_S4)
+ 
+ struct amd_function {
+ 	const char *name;
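The pinctrl-amd change splits the old combined WAKE_SOURCE mask so that suspend keeps only the S0i3/S3 wake bits and hibernate keeps the S4 bit, and wires the dev_pm_ops callbacks individually because SET_LATE_SYSTEM_SLEEP_PM_OPS() cannot route suspend and hibernate to different handlers. A hedged sketch of that wiring (the foo_* names and stub bodies are illustrative):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_sleep(struct device *dev, bool is_suspend) { return 0; } /* stub */
static int foo_wake(struct device *dev) { return 0; }                   /* stub */

static int foo_suspend(struct device *dev)   { return foo_sleep(dev, true); }
static int foo_hibernate(struct device *dev) { return foo_sleep(dev, false); }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_late  = foo_suspend,    /* system suspend: S0i3/S3 wake mask */
	.resume_early  = foo_wake,
	.freeze_late   = foo_hibernate,  /* hibernation image creation */
	.thaw_early    = foo_wake,
	.poweroff_late = foo_hibernate,  /* final power-off before S4 */
	.restore_early = foo_wake,
};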
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index b79c211c037496..ac6dc22b37c98e 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -636,7 +636,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ 		if (clk_enable(b->drvdata->pclk)) {
+ 			dev_err(b->gpio_chip.parent,
+ 				"unable to enable clock for pending IRQs\n");
+-			return;
++			goto out;
+ 		}
+ 	}
+ 
+@@ -652,6 +652,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ 	if (eintd->nr_banks)
+ 		clk_disable(eintd->banks[0]->drvdata->pclk);
+ 
++out:
+ 	chained_irq_exit(chip, desc);
+ }
+ 
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 5b7fa77c118436..03f3f707d27555 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -86,7 +86,6 @@ struct stm32_pinctrl_group {
+ 
+ struct stm32_gpio_bank {
+ 	void __iomem *base;
+-	struct clk *clk;
+ 	struct reset_control *rstc;
+ 	spinlock_t lock;
+ 	struct gpio_chip gpio_chip;
+@@ -108,6 +107,7 @@ struct stm32_pinctrl {
+ 	unsigned ngroups;
+ 	const char **grp_names;
+ 	struct stm32_gpio_bank *banks;
++	struct clk_bulk_data *clks;
+ 	unsigned nbanks;
+ 	const struct stm32_pinctrl_match_data *match_data;
+ 	struct irq_domain	*domain;
+@@ -1308,12 +1308,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 	if (IS_ERR(bank->base))
+ 		return PTR_ERR(bank->base);
+ 
+-	err = clk_prepare_enable(bank->clk);
+-	if (err) {
+-		dev_err(dev, "failed to prepare_enable clk (%d)\n", err);
+-		return err;
+-	}
+-
+ 	bank->gpio_chip = stm32_gpio_template;
+ 
+ 	fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label);
+@@ -1360,26 +1354,20 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 							   bank->fwnode, &stm32_gpio_domain_ops,
+ 							   bank);
+ 
+-		if (!bank->domain) {
+-			err = -ENODEV;
+-			goto err_clk;
+-		}
++		if (!bank->domain)
++			return -ENODEV;
+ 	}
+ 
+ 	names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+-	if (!names) {
+-		err = -ENOMEM;
+-		goto err_clk;
+-	}
++	if (!names)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < npins; i++) {
+ 		stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
+ 		if (stm32_pin && stm32_pin->pin.name) {
+ 			names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name);
+-			if (!names[i]) {
+-				err = -ENOMEM;
+-				goto err_clk;
+-			}
++			if (!names[i])
++				return -ENOMEM;
+ 		} else {
+ 			names[i] = NULL;
+ 		}
+@@ -1390,15 +1378,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
+ 	err = gpiochip_add_data(&bank->gpio_chip, bank);
+ 	if (err) {
+ 		dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
+-		goto err_clk;
++		return err;
+ 	}
+ 
+ 	dev_info(dev, "%s bank added\n", bank->gpio_chip.label);
+ 	return 0;
+-
+-err_clk:
+-	clk_disable_unprepare(bank->clk);
+-	return err;
+ }
+ 
+ static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev)
+@@ -1621,6 +1605,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 	if (!pctl->banks)
+ 		return -ENOMEM;
+ 
++	pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks),
++				  GFP_KERNEL);
++	if (!pctl->clks)
++		return -ENOMEM;
++
+ 	i = 0;
+ 	for_each_gpiochip_node(dev, child) {
+ 		struct stm32_gpio_bank *bank = &pctl->banks[i];
+@@ -1632,24 +1621,27 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 			return -EPROBE_DEFER;
+ 		}
+ 
+-		bank->clk = of_clk_get_by_name(np, NULL);
+-		if (IS_ERR(bank->clk)) {
++		pctl->clks[i].clk = of_clk_get_by_name(np, NULL);
++		if (IS_ERR(pctl->clks[i].clk)) {
+ 			fwnode_handle_put(child);
+-			return dev_err_probe(dev, PTR_ERR(bank->clk),
++			return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk),
+ 					     "failed to get clk\n");
+ 		}
++		pctl->clks[i].id = "pctl";
+ 		i++;
+ 	}
+ 
++	ret = clk_bulk_prepare_enable(banks, pctl->clks);
++	if (ret) {
++		dev_err(dev, "failed to prepare_enable clk (%d)\n", ret);
++		return ret;
++	}
++
+ 	for_each_gpiochip_node(dev, child) {
+ 		ret = stm32_gpiolib_register_bank(pctl, child);
+ 		if (ret) {
+ 			fwnode_handle_put(child);
+-
+-			for (i = 0; i < pctl->nbanks; i++)
+-				clk_disable_unprepare(pctl->banks[i].clk);
+-
+-			return ret;
++			goto err_register;
+ 		}
+ 
+ 		pctl->nbanks++;
+@@ -1658,6 +1650,15 @@ int stm32_pctl_probe(struct platform_device *pdev)
+ 	dev_info(dev, "Pinctrl STM32 initialized\n");
+ 
+ 	return 0;
++err_register:
++	for (i = 0; i < pctl->nbanks; i++) {
++		struct stm32_gpio_bank *bank = &pctl->banks[i];
++
++		gpiochip_remove(&bank->gpio_chip);
++	}
++
++	clk_bulk_disable_unprepare(banks, pctl->clks);
++	return ret;
+ }
+ 
+ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
+@@ -1726,10 +1727,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
+ int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
+ {
+ 	struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
+-	int i;
+ 
+-	for (i = 0; i < pctl->nbanks; i++)
+-		clk_disable(pctl->banks[i].clk);
++	clk_bulk_disable(pctl->nbanks, pctl->clks);
+ 
+ 	return 0;
+ }
+@@ -1738,10 +1737,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
+ {
+ 	struct stm32_pinctrl *pctl = dev_get_drvdata(dev);
+ 	struct stm32_pinctrl_group *g = pctl->groups;
+-	int i;
++	int i, ret;
+ 
+-	for (i = 0; i < pctl->nbanks; i++)
+-		clk_enable(pctl->banks[i].clk);
++	ret = clk_bulk_enable(pctl->nbanks, pctl->clks);
++	if (ret)
++		return ret;
+ 
+ 	for (i = 0; i < pctl->ngroups; i++, g++)
+ 		stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
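The stm32 conversion replaces one struct clk per bank with a single clk_bulk_data array owned by the pinctrl device, which gives probe a one-call enable with built-in unwind and turns suspend/resume into clk_bulk_disable()/clk_bulk_enable() one-liners. Typical bulk-API usage, sketched under the assumption the array was already populated as in the patch:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_enable_bank_clocks(struct device *dev, int nbanks,
				  struct clk_bulk_data *clks)
{
	int ret;

	/* On failure, clocks enabled so far are disabled internally. */
	ret = clk_bulk_prepare_enable(nbanks, clks);
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable bank clocks\n");

	return 0;
}

Note that the error path in stm32_pctl_probe() still has to unwind the gpiochips registered before the failure, which is why err_register removes them before calling clk_bulk_disable_unprepare().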
+diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
+index 9d18dfca6a673b..9ff7b487dc4892 100644
+--- a/drivers/platform/mellanox/mlxbf-pmc.c
++++ b/drivers/platform/mellanox/mlxbf-pmc.c
+@@ -1168,7 +1168,7 @@ static int mlxbf_pmc_program_l3_counter(unsigned int blk_num, u32 cnt_num, u32 e
+ /* Method to handle crspace counter programming */
+ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num, u32 evt)
+ {
+-	void *addr;
++	void __iomem *addr;
+ 	u32 word;
+ 	int ret;
+ 
+@@ -1192,7 +1192,7 @@ static int mlxbf_pmc_program_crspace_counter(unsigned int blk_num, u32 cnt_num,
+ /* Method to clear crspace counter value */
+ static int mlxbf_pmc_clear_crspace_counter(unsigned int blk_num, u32 cnt_num)
+ {
+-	void *addr;
++	void __iomem *addr;
+ 
+ 	addr = pmc->block[blk_num].mmio_base +
+ 		MLXBF_PMC_CRSPACE_PERFMON_VAL0(pmc->block[blk_num].counters) +
+@@ -1405,7 +1405,7 @@ static int mlxbf_pmc_read_l3_event(unsigned int blk_num, u32 cnt_num, u64 *resul
+ static int mlxbf_pmc_read_crspace_event(unsigned int blk_num, u32 cnt_num, u64 *result)
+ {
+ 	u32 word, evt;
+-	void *addr;
++	void __iomem *addr;
+ 	int ret;
+ 
+ 	addr = pmc->block[blk_num].mmio_base +
+diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
+index 4218afcec0e9b7..40b5cd9b172f1c 100644
+--- a/drivers/platform/x86/x86-android-tablets/core.c
++++ b/drivers/platform/x86/x86-android-tablets/core.c
+@@ -157,7 +157,7 @@ static struct gpiod_lookup_table * const *gpiod_lookup_tables;
+ static const struct software_node *bat_swnode;
+ static void (*exit_handler)(void);
+ 
+-static struct i2c_adapter *
++static __init struct i2c_adapter *
+ get_i2c_adap_by_handle(const struct x86_i2c_client_info *client_info)
+ {
+ 	acpi_handle handle;
+@@ -177,7 +177,7 @@ static __init int match_parent(struct device *dev, const void *data)
+ 	return dev->parent == data;
+ }
+ 
+-static struct i2c_adapter *
++static __init struct i2c_adapter *
+ get_i2c_adap_by_pci_parent(const struct x86_i2c_client_info *client_info)
+ {
+ 	struct i2c_adapter *adap = NULL;
+diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
+index ae087f1471c174..a60efbaf4817fe 100644
+--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
++++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
+@@ -601,7 +601,7 @@ static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_
+ 	.num_consumer_supplies = 1,
+ };
+ 
+-struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
++static struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+ 	.regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
+ };
+ 
+@@ -726,7 +726,7 @@ static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initcon
+ 	},
+ };
+ 
+-const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
++static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+ 	"bq24190_charger",            /* For the Vbus regulator for lc824206xa */
+ 	NULL
+ };
+diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
+index 735df818f76bfe..3cd0db74c6c9ce 100644
+--- a/drivers/platform/x86/x86-android-tablets/other.c
++++ b/drivers/platform/x86/x86-android-tablets/other.c
+@@ -602,14 +602,14 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
+  * Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
+  * distributed to schools in the Spanish Andalucía region.
+  */
+-const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
++static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
+ 
+ static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
+ 	PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
+ 	{ }
+ };
+ 
+-const struct software_node vexia_edu_atla10_ulpmc_node = {
++static const struct software_node vexia_edu_atla10_ulpmc_node = {
+ 	.properties = vexia_edu_atla10_ulpmc_props,
+ };
+ 
+diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
+index 634c3b2f8c2654..f77b19884f051b 100644
+--- a/drivers/pps/clients/pps-gpio.c
++++ b/drivers/pps/clients/pps-gpio.c
+@@ -214,8 +214,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n",
+-		 data->irq);
++	dev_dbg(&data->pps->dev, "Registered IRQ %d as PPS source\n",
++		data->irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
+index d33106bd7a290f..2f465549b843f7 100644
+--- a/drivers/pps/clients/pps-ktimer.c
++++ b/drivers/pps/clients/pps-ktimer.c
+@@ -56,7 +56,7 @@ static struct pps_source_info pps_ktimer_info = {
+ 
+ static void __exit pps_ktimer_exit(void)
+ {
+-	dev_info(pps->dev, "ktimer PPS source unregistered\n");
++	dev_dbg(&pps->dev, "ktimer PPS source unregistered\n");
+ 
+ 	del_timer_sync(&ktimer);
+ 	pps_unregister_source(pps);
+@@ -74,7 +74,7 @@ static int __init pps_ktimer_init(void)
+ 	timer_setup(&ktimer, pps_ktimer_event, 0);
+ 	mod_timer(&ktimer, jiffies + HZ);
+ 
+-	dev_info(pps->dev, "ktimer PPS source registered\n");
++	dev_dbg(&pps->dev, "ktimer PPS source registered\n");
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
+index 443d6bae19d14d..fa5660f3c4b707 100644
+--- a/drivers/pps/clients/pps-ldisc.c
++++ b/drivers/pps/clients/pps-ldisc.c
+@@ -32,7 +32,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, bool active)
+ 	pps_event(pps, &ts, active ? PPS_CAPTUREASSERT :
+ 			PPS_CAPTURECLEAR, NULL);
+ 
+-	dev_dbg(pps->dev, "PPS %s at %lu\n",
++	dev_dbg(&pps->dev, "PPS %s at %lu\n",
+ 			active ? "assert" : "clear", jiffies);
+ }
+ 
+@@ -69,7 +69,7 @@ static int pps_tty_open(struct tty_struct *tty)
+ 		goto err_unregister;
+ 	}
+ 
+-	dev_info(pps->dev, "source \"%s\" added\n", info.path);
++	dev_dbg(&pps->dev, "source \"%s\" added\n", info.path);
+ 
+ 	return 0;
+ 
+@@ -89,7 +89,7 @@ static void pps_tty_close(struct tty_struct *tty)
+ 	if (WARN_ON(!pps))
+ 		return;
+ 
+-	dev_info(pps->dev, "removed\n");
++	dev_info(&pps->dev, "removed\n");
+ 	pps_unregister_source(pps);
+ }
+ 
+diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
+index abaffb4e1c1ce9..24db06750297d5 100644
+--- a/drivers/pps/clients/pps_parport.c
++++ b/drivers/pps/clients/pps_parport.c
+@@ -81,7 +81,7 @@ static void parport_irq(void *handle)
+ 	/* check the signal (no signal means the pulse is lost this time) */
+ 	if (!signal_is_set(port)) {
+ 		local_irq_restore(flags);
+-		dev_err(dev->pps->dev, "lost the signal\n");
++		dev_err(&dev->pps->dev, "lost the signal\n");
+ 		goto out_assert;
+ 	}
+ 
+@@ -98,7 +98,7 @@ static void parport_irq(void *handle)
+ 	/* timeout */
+ 	dev->cw_err++;
+ 	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+-		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
++		dev_err(&dev->pps->dev, "disabled clear edge capture after %d"
+ 				" timeouts\n", dev->cw_err);
+ 		dev->cw = 0;
+ 		dev->cw_err = 0;
+diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
+index d9d566f70ed199..92d1b62ea239d7 100644
+--- a/drivers/pps/kapi.c
++++ b/drivers/pps/kapi.c
+@@ -41,7 +41,7 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
+ static void pps_echo_client_default(struct pps_device *pps, int event,
+ 		void *data)
+ {
+-	dev_info(pps->dev, "echo %s %s\n",
++	dev_info(&pps->dev, "echo %s %s\n",
+ 		event & PPS_CAPTUREASSERT ? "assert" : "",
+ 		event & PPS_CAPTURECLEAR ? "clear" : "");
+ }
+@@ -112,7 +112,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
+ 		goto kfree_pps;
+ 	}
+ 
+-	dev_info(pps->dev, "new PPS source %s\n", info->name);
++	dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
+ 
+ 	return pps;
+ 
+@@ -166,7 +166,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 	/* check event type */
+ 	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
+ 
+-	dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
++	dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n",
+ 			(s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+ 
+ 	timespec_to_pps_ktime(&ts_real, ts->ts_real);
+@@ -188,7 +188,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 		/* Save the time stamp */
+ 		pps->assert_tu = ts_real;
+ 		pps->assert_sequence++;
+-		dev_dbg(pps->dev, "capture assert seq #%u\n",
++		dev_dbg(&pps->dev, "capture assert seq #%u\n",
+ 			pps->assert_sequence);
+ 
+ 		captured = ~0;
+@@ -202,7 +202,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+ 		/* Save the time stamp */
+ 		pps->clear_tu = ts_real;
+ 		pps->clear_sequence++;
+-		dev_dbg(pps->dev, "capture clear seq #%u\n",
++		dev_dbg(&pps->dev, "capture clear seq #%u\n",
+ 			pps->clear_sequence);
+ 
+ 		captured = ~0;
+diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
+index 50dc59af45be24..fbd23295afd7d9 100644
+--- a/drivers/pps/kc.c
++++ b/drivers/pps/kc.c
+@@ -43,11 +43,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+ 			pps_kc_hardpps_mode = 0;
+ 			pps_kc_hardpps_dev = NULL;
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_info(pps->dev, "unbound kernel"
++			dev_info(&pps->dev, "unbound kernel"
+ 					" consumer\n");
+ 		} else {
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_err(pps->dev, "selected kernel consumer"
++			dev_err(&pps->dev, "selected kernel consumer"
+ 					" is not bound\n");
+ 			return -EINVAL;
+ 		}
+@@ -57,11 +57,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+ 			pps_kc_hardpps_mode = bind_args->edge;
+ 			pps_kc_hardpps_dev = pps;
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_info(pps->dev, "bound kernel consumer: "
++			dev_info(&pps->dev, "bound kernel consumer: "
+ 				"edge=0x%x\n", bind_args->edge);
+ 		} else {
+ 			spin_unlock_irq(&pps_kc_hardpps_lock);
+-			dev_err(pps->dev, "another kernel consumer"
++			dev_err(&pps->dev, "another kernel consumer"
+ 					" is already bound\n");
+ 			return -EINVAL;
+ 		}
+@@ -83,7 +83,7 @@ void pps_kc_remove(struct pps_device *pps)
+ 		pps_kc_hardpps_mode = 0;
+ 		pps_kc_hardpps_dev = NULL;
+ 		spin_unlock_irq(&pps_kc_hardpps_lock);
+-		dev_info(pps->dev, "unbound kernel consumer"
++		dev_info(&pps->dev, "unbound kernel consumer"
+ 				" on device removal\n");
+ 	} else
+ 		spin_unlock_irq(&pps_kc_hardpps_lock);
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 25d47907db175e..6a02245ea35fec 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -25,7 +25,7 @@
+  * Local variables
+  */
+ 
+-static dev_t pps_devt;
++static int pps_major;
+ static struct class *pps_class;
+ 
+ static DEFINE_MUTEX(pps_idr_lock);
+@@ -62,7 +62,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
+ 	else {
+ 		unsigned long ticks;
+ 
+-		dev_dbg(pps->dev, "timeout %lld.%09d\n",
++		dev_dbg(&pps->dev, "timeout %lld.%09d\n",
+ 				(long long) fdata->timeout.sec,
+ 				fdata->timeout.nsec);
+ 		ticks = fdata->timeout.sec * HZ;
+@@ -80,7 +80,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
+ 
+ 	/* Check for pending signals */
+ 	if (err == -ERESTARTSYS) {
+-		dev_dbg(pps->dev, "pending signal caught\n");
++		dev_dbg(&pps->dev, "pending signal caught\n");
+ 		return -EINTR;
+ 	}
+ 
+@@ -98,7 +98,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 
+ 	switch (cmd) {
+ 	case PPS_GETPARAMS:
+-		dev_dbg(pps->dev, "PPS_GETPARAMS\n");
++		dev_dbg(&pps->dev, "PPS_GETPARAMS\n");
+ 
+ 		spin_lock_irq(&pps->lock);
+ 
+@@ -114,7 +114,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		break;
+ 
+ 	case PPS_SETPARAMS:
+-		dev_dbg(pps->dev, "PPS_SETPARAMS\n");
++		dev_dbg(&pps->dev, "PPS_SETPARAMS\n");
+ 
+ 		/* Check the capabilities */
+ 		if (!capable(CAP_SYS_TIME))
+@@ -124,14 +124,14 @@ static long pps_cdev_ioctl(struct file *file,
+ 		if (err)
+ 			return -EFAULT;
+ 		if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
+-			dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
++			dev_dbg(&pps->dev, "capture mode unspecified (%x)\n",
+ 								params.mode);
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* Check for supported capabilities */
+ 		if ((params.mode & ~pps->info.mode) != 0) {
+-			dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
++			dev_dbg(&pps->dev, "unsupported capabilities (%x)\n",
+ 								params.mode);
+ 			return -EINVAL;
+ 		}
+@@ -144,7 +144,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		/* Restore the read only parameters */
+ 		if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
+ 			/* section 3.3 of RFC 2783 interpreted */
+-			dev_dbg(pps->dev, "time format unspecified (%x)\n",
++			dev_dbg(&pps->dev, "time format unspecified (%x)\n",
+ 								params.mode);
+ 			pps->params.mode |= PPS_TSFMT_TSPEC;
+ 		}
+@@ -165,7 +165,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		break;
+ 
+ 	case PPS_GETCAP:
+-		dev_dbg(pps->dev, "PPS_GETCAP\n");
++		dev_dbg(&pps->dev, "PPS_GETCAP\n");
+ 
+ 		err = put_user(pps->info.mode, iuarg);
+ 		if (err)
+@@ -176,7 +176,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 	case PPS_FETCH: {
+ 		struct pps_fdata fdata;
+ 
+-		dev_dbg(pps->dev, "PPS_FETCH\n");
++		dev_dbg(&pps->dev, "PPS_FETCH\n");
+ 
+ 		err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
+ 		if (err)
+@@ -206,7 +206,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 	case PPS_KC_BIND: {
+ 		struct pps_bind_args bind_args;
+ 
+-		dev_dbg(pps->dev, "PPS_KC_BIND\n");
++		dev_dbg(&pps->dev, "PPS_KC_BIND\n");
+ 
+ 		/* Check the capabilities */
+ 		if (!capable(CAP_SYS_TIME))
+@@ -218,7 +218,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 
+ 		/* Check for supported capabilities */
+ 		if ((bind_args.edge & ~pps->info.mode) != 0) {
+-			dev_err(pps->dev, "unsupported capabilities (%x)\n",
++			dev_err(&pps->dev, "unsupported capabilities (%x)\n",
+ 					bind_args.edge);
+ 			return -EINVAL;
+ 		}
+@@ -227,7 +227,7 @@ static long pps_cdev_ioctl(struct file *file,
+ 		if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+ 				(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+ 				bind_args.consumer != PPS_KC_HARDPPS) {
+-			dev_err(pps->dev, "invalid kernel consumer bind"
++			dev_err(&pps->dev, "invalid kernel consumer bind"
+ 					" parameters (%x)\n", bind_args.edge);
+ 			return -EINVAL;
+ 		}
+@@ -259,7 +259,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
+ 		struct pps_fdata fdata;
+ 		int err;
+ 
+-		dev_dbg(pps->dev, "PPS_FETCH\n");
++		dev_dbg(&pps->dev, "PPS_FETCH\n");
+ 
+ 		err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
+ 		if (err)
+@@ -296,20 +296,36 @@ static long pps_cdev_compat_ioctl(struct file *file,
+ #define pps_cdev_compat_ioctl	NULL
+ #endif
+ 
++static struct pps_device *pps_idr_get(unsigned long id)
++{
++	struct pps_device *pps;
++
++	mutex_lock(&pps_idr_lock);
++	pps = idr_find(&pps_idr, id);
++	if (pps)
++		get_device(&pps->dev);
++
++	mutex_unlock(&pps_idr_lock);
++	return pps;
++}
++
+ static int pps_cdev_open(struct inode *inode, struct file *file)
+ {
+-	struct pps_device *pps = container_of(inode->i_cdev,
+-						struct pps_device, cdev);
++	struct pps_device *pps = pps_idr_get(iminor(inode));
++
++	if (!pps)
++		return -ENODEV;
++
+ 	file->private_data = pps;
+-	kobject_get(&pps->dev->kobj);
+ 	return 0;
+ }
+ 
+ static int pps_cdev_release(struct inode *inode, struct file *file)
+ {
+-	struct pps_device *pps = container_of(inode->i_cdev,
+-						struct pps_device, cdev);
+-	kobject_put(&pps->dev->kobj);
++	struct pps_device *pps = file->private_data;
++
++	WARN_ON(pps->id != iminor(inode));
++	put_device(&pps->dev);
+ 	return 0;
+ }
+ 
+@@ -331,22 +347,13 @@ static void pps_device_destruct(struct device *dev)
+ {
+ 	struct pps_device *pps = dev_get_drvdata(dev);
+ 
+-	cdev_del(&pps->cdev);
+-
+-	/* Now we can release the ID for re-use */
+ 	pr_debug("deallocating pps%d\n", pps->id);
+-	mutex_lock(&pps_idr_lock);
+-	idr_remove(&pps_idr, pps->id);
+-	mutex_unlock(&pps_idr_lock);
+-
+-	kfree(dev);
+ 	kfree(pps);
+ }
+ 
+ int pps_register_cdev(struct pps_device *pps)
+ {
+ 	int err;
+-	dev_t devt;
+ 
+ 	mutex_lock(&pps_idr_lock);
+ 	/*
+@@ -363,40 +370,29 @@ int pps_register_cdev(struct pps_device *pps)
+ 		goto out_unlock;
+ 	}
+ 	pps->id = err;
+-	mutex_unlock(&pps_idr_lock);
+-
+-	devt = MKDEV(MAJOR(pps_devt), pps->id);
+-
+-	cdev_init(&pps->cdev, &pps_cdev_fops);
+-	pps->cdev.owner = pps->info.owner;
+ 
+-	err = cdev_add(&pps->cdev, devt, 1);
+-	if (err) {
+-		pr_err("%s: failed to add char device %d:%d\n",
+-				pps->info.name, MAJOR(pps_devt), pps->id);
++	pps->dev.class = pps_class;
++	pps->dev.parent = pps->info.dev;
++	pps->dev.devt = MKDEV(pps_major, pps->id);
++	dev_set_drvdata(&pps->dev, pps);
++	dev_set_name(&pps->dev, "pps%d", pps->id);
++	err = device_register(&pps->dev);
++	if (err)
+ 		goto free_idr;
+-	}
+-	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
+-							"pps%d", pps->id);
+-	if (IS_ERR(pps->dev)) {
+-		err = PTR_ERR(pps->dev);
+-		goto del_cdev;
+-	}
+ 
+ 	/* Override the release function with our own */
+-	pps->dev->release = pps_device_destruct;
++	pps->dev.release = pps_device_destruct;
+ 
+-	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+-			MAJOR(pps_devt), pps->id);
++	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
++		 pps->id);
+ 
++	get_device(&pps->dev);
++	mutex_unlock(&pps_idr_lock);
+ 	return 0;
+ 
+-del_cdev:
+-	cdev_del(&pps->cdev);
+-
+ free_idr:
+-	mutex_lock(&pps_idr_lock);
+ 	idr_remove(&pps_idr, pps->id);
++	put_device(&pps->dev);
+ out_unlock:
+ 	mutex_unlock(&pps_idr_lock);
+ 	return err;
+@@ -406,7 +402,13 @@ void pps_unregister_cdev(struct pps_device *pps)
+ {
+ 	pr_debug("unregistering pps%d\n", pps->id);
+ 	pps->lookup_cookie = NULL;
+-	device_destroy(pps_class, pps->dev->devt);
++	device_destroy(pps_class, pps->dev.devt);
++
++	/* Now we can release the ID for re-use */
++	mutex_lock(&pps_idr_lock);
++	idr_remove(&pps_idr, pps->id);
++	put_device(&pps->dev);
++	mutex_unlock(&pps_idr_lock);
+ }
+ 
+ /*
+@@ -426,6 +428,11 @@ void pps_unregister_cdev(struct pps_device *pps)
+  * so that it will not be used again, even if the pps device cannot
+  * be removed from the idr due to pending references holding the minor
+  * number in use.
++ *
++ * Since pps_idr holds a reference to the device, the returned
++ * pps_device is guaranteed to be valid until pps_unregister_cdev() is
++ * called on it. But after calling pps_unregister_cdev(), it may be
++ * freed at any time.
+  */
+ struct pps_device *pps_lookup_dev(void const *cookie)
+ {
+@@ -448,13 +455,11 @@ EXPORT_SYMBOL(pps_lookup_dev);
+ static void __exit pps_exit(void)
+ {
+ 	class_destroy(pps_class);
+-	unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
++	__unregister_chrdev(pps_major, 0, PPS_MAX_SOURCES, "pps");
+ }
+ 
+ static int __init pps_init(void)
+ {
+-	int err;
+-
+ 	pps_class = class_create("pps");
+ 	if (IS_ERR(pps_class)) {
+ 		pr_err("failed to allocate class\n");
+@@ -462,8 +467,9 @@ static int __init pps_init(void)
+ 	}
+ 	pps_class->dev_groups = pps_groups;
+ 
+-	err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
+-	if (err < 0) {
++	pps_major = __register_chrdev(0, 0, PPS_MAX_SOURCES, "pps",
++				      &pps_cdev_fops);
++	if (pps_major < 0) {
+ 		pr_err("failed to allocate char device region\n");
+ 		goto remove_class;
+ 	}
+@@ -476,8 +482,7 @@ static int __init pps_init(void)
+ 
+ remove_class:
+ 	class_destroy(pps_class);
+-
+-	return err;
++	return pps_major;
+ }
+ 
+ subsys_initcall(pps_init);
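The pps rework embeds struct device in pps_device and retires the separate cdev/class-device pair: the idr now pins the device with get_device() at registration (dropped again in pps_unregister_cdev()), and every open pins it once more through pps_idr_get(), so the structure can only be freed from the dev.release callback after the last reference is gone. The lookup half of that pattern, restated generically (the foo_* names are hypothetical):

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(foo_idr);
static DEFINE_MUTEX(foo_idr_lock);

struct foo_device {
	struct device dev;	/* embedded; freed only from dev.release */
	int id;
};

/* Look up by minor and pin; the caller must put_device() when done. */
static struct foo_device *foo_idr_get(unsigned long id)
{
	struct foo_device *foo;

	mutex_lock(&foo_idr_lock);
	foo = idr_find(&foo_idr, id);
	if (foo)
		get_device(&foo->dev);	/* cannot be freed after unlock */
	mutex_unlock(&foo_idr_lock);

	return foo;
}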
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index ea96a14d72d141..bf6468c56419c5 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -4,6 +4,7 @@
+  *
+  * Copyright (C) 2010 OMICRON electronics GmbH
+  */
++#include <linux/compat.h>
+ #include <linux/module.h>
+ #include <linux/posix-clock.h>
+ #include <linux/poll.h>
+@@ -176,6 +177,9 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
+ 	struct timespec64 ts;
+ 	int enable, err = 0;
+ 
++	if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
++		arg = (unsigned long)compat_ptr(arg);
++
+ 	tsevq = pccontext->private_clkdata;
+ 
+ 	switch (cmd) {
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 5feecaadde8e05..120db96d9e95d6 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -4420,7 +4420,7 @@ ptp_ocp_complete(struct ptp_ocp *bp)
+ 
+ 	pps = pps_lookup_dev(bp->ptp);
+ 	if (pps)
+-		ptp_ocp_symlink(bp, pps->dev, "pps");
++		ptp_ocp_symlink(bp, &pps->dev, "pps");
+ 
+ 	ptp_ocp_debugfs_add_device(bp);
+ 
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 675b252d9c8ce7..99d0bc69331523 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -242,6 +242,9 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *
+ 
+ 	BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ 
++	if (!pwmchip_supports_waveform(chip))
++		return -EOPNOTSUPP;
++
+ 	if (!pwm_wf_valid(wf))
+ 		return -EINVAL;
+ 
+@@ -294,6 +297,9 @@ int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf
+ 
+ 	BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ 
++	if (!pwmchip_supports_waveform(chip) || !ops->read_waveform)
++		return -EOPNOTSUPP;
++
+ 	guard(pwmchip)(chip);
+ 
+ 	if (!chip->operational)
+@@ -320,6 +326,9 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
+ 
+ 	BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
+ 
++	if (!pwmchip_supports_waveform(chip))
++		return -EOPNOTSUPP;
++
+ 	if (!pwm_wf_valid(wf))
+ 		return -EINVAL;
+ 
+@@ -592,7 +601,7 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
+ 	    state->usage_power == pwm->state.usage_power)
+ 		return 0;
+ 
+-	if (ops->write_waveform) {
++	if (pwmchip_supports_waveform(chip)) {
+ 		struct pwm_waveform wf;
+ 		char wfhw[WFHWSIZE];
+ 
+@@ -746,7 +755,7 @@ int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
+ 	if (!chip->operational)
+ 		return -ENODEV;
+ 
+-	if (ops->read_waveform) {
++	if (pwmchip_supports_waveform(chip) && ops->read_waveform) {
+ 		char wfhw[WFHWSIZE];
+ 		struct pwm_waveform wf;
+ 
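All four pwm/core.c hunks funnel capability checking through pwmchip_supports_waveform() (plus an explicit ops->read_waveform test where a read is actually required), so every waveform entry point reports -EOPNOTSUPP consistently instead of each one probing ops pointers on its own. The helper's body is not visible in this hunk; presumably it is a one-line predicate over the chip's ops, roughly:

/* Sketch only -- which callback the real helper tests is assumed,
 * not shown in this diff. */
static bool foo_supports_waveform(const struct foo_ops *ops)
{
	return ops && ops->write_waveform;
}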
+diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
+index 989731256f5030..5832dce8ed9d58 100644
+--- a/drivers/pwm/pwm-stm32-lp.c
++++ b/drivers/pwm/pwm-stm32-lp.c
+@@ -167,8 +167,12 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
+ 	regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
+ 	state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ 	/* Keep PWM counter clock refcount in sync with PWM initial state */
+-	if (state->enabled)
+-		clk_enable(priv->clk);
++	if (state->enabled) {
++		int ret = clk_enable(priv->clk);
++
++		if (ret)
++			return ret;
++	}
+ 
+ 	regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
+ 	presc = FIELD_GET(STM32_LPTIM_PRESC, val);
+diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
+index 17e591f61efb60..a59de4de18b6e9 100644
+--- a/drivers/pwm/pwm-stm32.c
++++ b/drivers/pwm/pwm-stm32.c
+@@ -858,8 +858,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
+ 	chip->ops = &stm32pwm_ops;
+ 
+ 	/* Initialize clock refcount to number of enabled PWM channels. */
+-	for (i = 0; i < num_enabled; i++)
+-		clk_enable(priv->clk);
++	for (i = 0; i < num_enabled; i++) {
++		ret = clk_enable(priv->clk);
++		if (ret)
++			return ret;
++	}
+ 
+ 	ret = devm_pwmchip_add(dev, chip);
+ 	if (ret < 0)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 8cb948a91e60d9..13d9c3e349682c 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4908,7 +4908,7 @@ int _regulator_bulk_get(struct device *dev, int num_consumers,
+ 						       consumers[i].supply, get_type);
+ 		if (IS_ERR(consumers[i].consumer)) {
+ 			ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer),
+-					    "Failed to get supply '%s'",
++					    "Failed to get supply '%s'\n",
+ 					    consumers[i].supply);
+ 			consumers[i].consumer = NULL;
+ 			goto err;
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index e5b4b93c07e3f5..6af8411679c766 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -446,7 +446,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
+ 					"failed to parse DT for regulator %pOFn\n",
+ 					child);
+ 				of_node_put(child);
+-				return -EINVAL;
++				goto err_put;
+ 			}
+ 			match->of_node = of_node_get(child);
+ 			count++;
+@@ -455,6 +455,18 @@ int of_regulator_match(struct device *dev, struct device_node *node,
+ 	}
+ 
+ 	return count;
++
++err_put:
++	for (i = 0; i < num_matches; i++) {
++		struct of_regulator_match *match = &matches[i];
++
++		match->init_data = NULL;
++		if (match->of_node) {
++			of_node_put(match->of_node);
++			match->of_node = NULL;
++		}
++	}
++	return -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(of_regulator_match);
+ 
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index 0f4a7065d0bd9e..8206a17664818a 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -1326,6 +1326,11 @@ static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_clus
+ 	return ret;
+ }
+ 
++static const struct of_device_id scp_core_match[] = {
++	{ .compatible = "mediatek,scp-core" },
++	{}
++};
++
+ static int scp_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -1357,13 +1362,15 @@ static int scp_probe(struct platform_device *pdev)
+ 	INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
+ 	mutex_init(&scp_cluster->cluster_lock);
+ 
+-	ret = devm_of_platform_populate(dev);
++	ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
+ 
+ 	ret = scp_cluster_init(pdev, scp_cluster);
+-	if (ret)
++	if (ret) {
++		of_platform_depopulate(dev);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+@@ -1379,6 +1386,7 @@ static void scp_remove(struct platform_device *pdev)
+ 		rproc_del(scp->rproc);
+ 		scp_free(scp);
+ 	}
++	of_platform_depopulate(&pdev->dev);
+ 	mutex_destroy(&scp_cluster->cluster_lock);
+ }
+ 
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index eb66f78ec8b774..c2cf0d27772966 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -2486,6 +2486,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
+ 	rproc->dev.driver_data = rproc;
+ 	idr_init(&rproc->notifyids);
+ 
++	/* Assign a unique device index and name */
++	rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
++	if (rproc->index < 0) {
++		dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
++		goto put_device;
++	}
++
+ 	rproc->name = kstrdup_const(name, GFP_KERNEL);
+ 	if (!rproc->name)
+ 		goto put_device;
+@@ -2496,13 +2503,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
+ 	if (rproc_alloc_ops(rproc, ops))
+ 		goto put_device;
+ 
+-	/* Assign a unique device index and name */
+-	rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+-	if (rproc->index < 0) {
+-		dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+-		goto put_device;
+-	}
+-
+ 	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
+ 
+ 	atomic_set(&rproc->power, 0);
+diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
+index 8d713e563d7c0a..a0f7974e6a570a 100644
+--- a/drivers/rtc/rtc-loongson.c
++++ b/drivers/rtc/rtc-loongson.c
+@@ -114,6 +114,13 @@ static irqreturn_t loongson_rtc_isr(int irq, void *id)
+ 	struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+ 
+ 	rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
++
++	/*
++	 * The TOY_MATCH0_REG should be cleared 0 here,
++	 * otherwise the interrupt cannot be cleared.
++	 */
++	regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -131,11 +138,7 @@ static u32 loongson_rtc_handler(void *id)
+ 	writel(RTC_STS, priv->pm_base + PM1_STS_REG);
+ 	spin_unlock(&priv->lock);
+ 
+-	/*
+-	 * The TOY_MATCH0_REG should be cleared 0 here,
+-	 * otherwise the interrupt cannot be cleared.
+-	 */
+-	return regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
++	return ACPI_INTERRUPT_HANDLED;
+ }
+ 
+ static int loongson_rtc_set_enabled(struct device *dev)
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index fdbc07f14036af..905986c616559b 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -322,7 +322,16 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
+ static int pcf85063_nvmem_read(void *priv, unsigned int offset,
+ 			       void *val, size_t bytes)
+ {
+-	return regmap_read(priv, PCF85063_REG_RAM, val);
++	unsigned int tmp;
++	int ret;
++
++	ret = regmap_read(priv, PCF85063_REG_RAM, &tmp);
++	if (ret < 0)
++		return ret;
++
++	*(u8 *)val = tmp;
++
++	return 0;
+ }
+ 
+ static int pcf85063_nvmem_write(void *priv, unsigned int offset,
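The pcf85063 fix addresses a classic regmap pitfall: regmap_read() always stores a full unsigned int through its third argument, while the nvmem core hands the callback a buffer that is only `bytes` long (here a single byte), so reading straight into `val` clobbers adjacent memory. Reading into a local and narrowing afterwards, as the patch does, is the safe shape:

#include <linux/regmap.h>

static int foo_read_reg_byte(struct regmap *map, unsigned int reg, u8 *out)
{
	unsigned int tmp;
	int ret;

	ret = regmap_read(map, reg, &tmp);	/* writes sizeof(int) bytes */
	if (ret < 0)
		return ret;

	*out = tmp;	/* narrow explicitly to the byte the caller wants */
	return 0;
}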
+diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
+index e696676341378e..7c6246e3f02923 100644
+--- a/drivers/rtc/rtc-tps6594.c
++++ b/drivers/rtc/rtc-tps6594.c
+@@ -37,7 +37,7 @@
+ #define MAX_OFFSET (277774)
+ 
+ // Number of ticks per hour
+-#define TICKS_PER_HOUR (32768 * 3600)
++#define TICKS_PER_HOUR (32768 * 3600LL)
+ 
+ // Multiplier for ppb conversions
+ #define PPB_MULT NANO
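32768 * 3600 = 117,964,800 still fits in a 32-bit int, so the constant itself never overflowed; the trouble comes when it is multiplied by an offset — the driver caps offsets at MAX_OFFSET = 277,774, and 277,774 × 117,964,800 ≈ 3.3 × 10^13, far past INT_MAX ≈ 2.1 × 10^9. The LL suffix forces the whole product into 64-bit arithmetic. A sketch of the affected math (the exact expression in the driver is assumed, not shown in this hunk; PPB_MULT is NANO, i.e. 10^9):

#include <linux/math64.h>

#define TICKS_PER_HOUR (32768 * 3600LL)	/* 117964800, held as long long */

static s64 offset_ppb_to_ticks(s64 ppb)
{
	/* With a plain int constant this product would wrap. */
	return div64_s64(ppb * TICKS_PER_HOUR, 1000000000LL);
}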
+diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
+index fbffd451031fdb..45bd001206a2b8 100644
+--- a/drivers/s390/char/sclp.c
++++ b/drivers/s390/char/sclp.c
+@@ -245,7 +245,6 @@ static void sclp_request_timeout(bool force_restart);
+ static void sclp_process_queue(void);
+ static void __sclp_make_read_req(void);
+ static int sclp_init_mask(int calculate);
+-static int sclp_init(void);
+ 
+ static void
+ __sclp_queue_read_req(void)
+@@ -1251,8 +1250,7 @@ static struct platform_driver sclp_pdrv = {
+ 
+ /* Initialize SCLP driver. Return zero if driver is operational, non-zero
+  * otherwise. */
+-static int
+-sclp_init(void)
++int sclp_init(void)
+ {
+ 	unsigned long flags;
+ 	int rc = 0;
+@@ -1305,13 +1303,7 @@ sclp_init(void)
+ 
+ static __init int sclp_initcall(void)
+ {
+-	int rc;
+-
+-	rc = platform_driver_register(&sclp_pdrv);
+-	if (rc)
+-		return rc;
+-
+-	return sclp_init();
++	return platform_driver_register(&sclp_pdrv);
+ }
+ 
+ arch_initcall(sclp_initcall);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 10b8e4dc64f8b0..7589f48aebc80f 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -2951,6 +2951,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+ 		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
+ 		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
+ 	};
++	struct request_queue *q;
+ 
+ 	device_initialize(bsg_dev);
+ 
+@@ -2966,14 +2967,17 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+ 		return;
+ 	}
+ 
+-	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
++	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
+ 			mpi3mr_bsg_request, NULL, 0);
+-	if (IS_ERR(mrioc->bsg_queue)) {
++	if (IS_ERR(q)) {
+ 		ioc_err(mrioc, "%s: bsg registration failed\n",
+ 		    dev_name(bsg_dev));
+ 		device_del(bsg_dev);
+ 		put_device(bsg_dev);
++		return;
+ 	}
++
++	mrioc->bsg_queue = q;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 16ac2267c71e19..c1d8f2c91a5e51 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -5629,8 +5629,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+ 	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
+ 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ 		    ioc->name);
+-		ioc->manu_pg11.EEDPTagMode &= ~0x3;
+-		ioc->manu_pg11.EEDPTagMode |= 0x1;
++		ioc->manu_pg11.EEDPTagMode = 0x1;
+ 		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ 		    &ioc->manu_pg11);
+ 	}
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 8947dab132d789..af62a8ed862004 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -177,9 +177,8 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ 
+ 		lim = queue_limits_start_update(sdkp->disk->queue);
+ 		sd_set_flush_flag(sdkp, &lim);
+-		blk_mq_freeze_queue(sdkp->disk->queue);
+-		ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-		blk_mq_unfreeze_queue(sdkp->disk->queue);
++		ret = queue_limits_commit_update_frozen(sdkp->disk->queue,
++				&lim);
+ 		if (ret)
+ 			return ret;
+ 		return count;
+@@ -483,9 +482,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	lim = queue_limits_start_update(sdkp->disk->queue);
+ 	sd_config_discard(sdkp, &lim, mode);
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+@@ -594,9 +591,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	lim = queue_limits_start_update(sdkp->disk->queue);
+ 	sd_config_write_same(sdkp, &lim);
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 	return count;
+@@ -3803,9 +3798,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	sd_config_write_same(sdkp, &lim);
+ 	kfree(buffer);
+ 
+-	blk_mq_freeze_queue(sdkp->disk->queue);
+-	err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+-	blk_mq_unfreeze_queue(sdkp->disk->queue);
++	err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 198bec87bb8e7c..b17796d5ee6652 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -797,10 +797,7 @@ static int get_sectorsize(struct scsi_cd *cd)
+ 
+ 	lim = queue_limits_start_update(q);
+ 	lim.logical_block_size = sector_size;
+-	blk_mq_freeze_queue(q);
+-	err = queue_limits_commit_update(q, &lim);
+-	blk_mq_unfreeze_queue(q);
+-	return err;
++	return queue_limits_commit_update_frozen(q, &lim);
+ }
+ 
+ static int get_capabilities(struct scsi_cd *cd)
+diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
+index 2a42b28931c96d..298b542dd1c064 100644
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -399,7 +399,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ 
+ static int __init atmel_soc_device_init(void)
+ {
+-	struct device_node *np = of_find_node_by_path("/");
++	struct device_node *np __free(device_node) = of_find_node_by_path("/");
+ 
+ 	if (!of_match_node(at91_soc_allowed_list, np))
+ 		return 0;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index add6247d348190..29c616e2c408cf 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1561,10 +1561,15 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+-	if (IS_ERR(mcspi->ref_clk))
+-		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
+-	else
++	if (IS_ERR(mcspi->ref_clk)) {
++		status = PTR_ERR(mcspi->ref_clk);
++		dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
++		goto free_ctlr;
++	}
++	if (mcspi->ref_clk)
+ 		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
++	else
++		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
+ 	ctlr->max_speed_hz = mcspi->ref_clk_hz;
+ 	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
+ 
+diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
+index dee9c339a35e74..5caf0abf3763a8 100644
+--- a/drivers/spi/spi-zynq-qspi.c
++++ b/drivers/spi/spi-zynq-qspi.c
+@@ -379,12 +379,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi)
+ {
+ 	struct spi_controller *ctlr = spi->controller;
+ 	struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
++	int ret;
+ 
+ 	if (ctlr->busy)
+ 		return -EBUSY;
+ 
+-	clk_enable(qspi->refclk);
+-	clk_enable(qspi->pclk);
++	ret = clk_enable(qspi->refclk);
++	if (ret)
++		return ret;
++
++	ret = clk_enable(qspi->pclk);
++	if (ret) {
++		clk_disable(qspi->refclk);
++		return ret;
++	}
++
+ 	zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ 			ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+ 
+diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
+index 118bff988bc7e6..bb28daa4d71334 100644
+--- a/drivers/staging/media/imx/imx-media-of.c
++++ b/drivers/staging/media/imx/imx-media-of.c
+@@ -54,22 +54,18 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
+ 			break;
+ 
+ 		ret = imx_media_of_add_csi(imxmd, csi_np);
++		of_node_put(csi_np);
+ 		if (ret) {
+ 			/* unavailable or already added is not an error */
+ 			if (ret == -ENODEV || ret == -EEXIST) {
+-				of_node_put(csi_np);
+ 				continue;
+ 			}
+ 
+ 			/* other error, can't continue */
+-			goto err_out;
++			return ret;
+ 		}
+ 	}
+ 
+ 	return 0;
+-
+-err_out:
+-	of_node_put(csi_np);
+-	return ret;
+ }
+ EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
+diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
+index ede02e8c891cbb..0751b2e0489589 100644
+--- a/drivers/staging/media/max96712/max96712.c
++++ b/drivers/staging/media/max96712/max96712.c
+@@ -418,7 +418,6 @@ static int max96712_probe(struct i2c_client *client)
+ 	priv->info = of_device_get_match_data(&client->dev);
+ 
+ 	priv->client = client;
+-	i2c_set_clientdata(client, priv);
+ 
+ 	priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap);
+ 	if (IS_ERR(priv->regmap))
+@@ -448,7 +447,8 @@ static int max96712_probe(struct i2c_client *client)
+ 
+ static void max96712_remove(struct i2c_client *client)
+ {
+-	struct max96712_priv *priv = i2c_get_clientdata(client);
++	struct v4l2_subdev *sd = i2c_get_clientdata(client);
++	struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd);
+ 
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 
+diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
+index afbf7738c7c47c..58b28be63c79b1 100644
+--- a/drivers/tty/mips_ejtag_fdc.c
++++ b/drivers/tty/mips_ejtag_fdc.c
+@@ -1154,7 +1154,7 @@ static char kgdbfdc_rbuf[4];
+ 
+ /* write buffer to allow compaction */
+ static unsigned int kgdbfdc_wbuflen;
+-static char kgdbfdc_wbuf[4];
++static u8 kgdbfdc_wbuf[4];
+ 
+ static void __iomem *kgdbfdc_setup(void)
+ {
+@@ -1215,7 +1215,7 @@ static int kgdbfdc_read_char(void)
+ /* push an FDC word from write buffer to TX FIFO */
+ static void kgdbfdc_push_one(void)
+ {
+-	const char *bufs[1] = { kgdbfdc_wbuf };
++	const u8 *bufs[1] = { kgdbfdc_wbuf };
+ 	struct fdc_word word;
+ 	void __iomem *regs;
+ 	unsigned int i;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 649e74e9b52f6a..b8babbdec8f3f6 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2079,7 +2079,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
+ 	serial8250_rpm_put(up);
+ }
+ 
+-static void wait_for_lsr(struct uart_8250_port *up, int bits)
++/* Returns true if @bits were set, false on timeout */
++static bool wait_for_lsr(struct uart_8250_port *up, int bits)
+ {
+ 	unsigned int status, tmout = 10000;
+ 
+@@ -2094,11 +2095,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits)
+ 		udelay(1);
+ 		touch_nmi_watchdog();
+ 	}
++
++	return (tmout != 0);
+ }
+ 
+-/*
+- *	Wait for transmitter & holding register to empty
+- */
++/* Wait for transmitter and holding register to empty with timeout */
+ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+ {
+ 	unsigned int tmout;
+@@ -3317,6 +3318,16 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+ 	serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
+ }
+ 
++static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count)
++{
++	unsigned int i;
++
++	for (i = 0; i < count; i++) {
++		if (wait_for_lsr(up, UART_LSR_THRE))
++			return;
++	}
++}
++
+ /*
+  * Print a string to the serial port using the device FIFO
+  *
+@@ -3326,13 +3337,15 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+ static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ 					  const char *s, unsigned int count)
+ {
+-	int i;
+ 	const char *end = s + count;
+ 	unsigned int fifosize = up->tx_loadsz;
++	unsigned int tx_count = 0;
+ 	bool cr_sent = false;
++	unsigned int i;
+ 
+ 	while (s != end) {
+-		wait_for_lsr(up, UART_LSR_THRE);
++		/* Allow timeout for each byte of a possibly full FIFO */
++		fifo_wait_for_lsr(up, fifosize);
+ 
+ 		for (i = 0; i < fifosize && s != end; ++i) {
+ 			if (*s == '\n' && !cr_sent) {
+@@ -3343,7 +3356,14 @@ static void serial8250_console_fifo_write(struct uart_8250_port *up,
+ 				cr_sent = false;
+ 			}
+ 		}
++		tx_count = i;
+ 	}
++
++	/*
++	 * Allow timeout for each byte written since the caller will only wait
++	 * for UART_LSR_BOTH_EMPTY using the timeout of a single character
++	 */
++	fifo_wait_for_lsr(up, tx_count);
+ }
+ 
+ /*
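The 8250 console change makes wait_for_lsr() report whether the LSR bits actually appeared within its poll budget (about 10 ms), and fifo_wait_for_lsr() then grants one such budget per byte that might still be ahead in the FIFO — a full FIFO's worth on entry, and the bytes just queued (tx_count) on exit, since the caller's final wait only covers a single character time. The underlying bounded-poll shape (register access abstracted; illustrative only):

#include <linux/delay.h>

/* Poll status until @bits are set; true on success, false on timeout. */
static bool poll_bits(unsigned int (*read_status)(void), unsigned int bits,
		      unsigned int budget_us)
{
	while (budget_us--) {
		if ((read_status() & bits) == bits)
			return true;
		udelay(1);
	}
	return false;
}

Per-byte budgeting is then just calling this up to fifosize times and stopping at the first success, exactly what fifo_wait_for_lsr() does.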
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 6c09d97ae00658..58023f735c195f 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -257,6 +257,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
+ 			NULL, 0);
+ 	if (IS_ERR(q)) {
+ 		ret = PTR_ERR(q);
++		device_del(bsg_dev);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index f219c82e9619d9..0a631641a3b31b 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1664,8 +1664,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	u8			tx_thr_num_pkt_prd = 0;
+ 	u8			tx_max_burst_prd = 0;
+ 	u8			tx_fifo_resize_max_num;
+-	const char		*usb_psy_name;
+-	int			ret;
+ 
+ 	/* default to highest possible threshold */
+ 	lpm_nyet_threshold = 0xf;
+@@ -1700,13 +1698,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 
+ 	dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+ 
+-	ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
+-	if (ret >= 0) {
+-		dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
+-		if (!dwc->usb_psy)
+-			dev_err(dev, "couldn't get usb power supply\n");
+-	}
+-
+ 	dwc->has_lpm_erratum = device_property_read_bool(dev,
+ 				"snps,has-lpm-erratum");
+ 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
+@@ -2109,6 +2100,23 @@ static int dwc3_get_num_ports(struct dwc3 *dwc)
+ 	return 0;
+ }
+ 
++static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
++{
++	struct power_supply *usb_psy;
++	const char *usb_psy_name;
++	int ret;
++
++	ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
++	if (ret < 0)
++		return NULL;
++
++	usb_psy = power_supply_get_by_name(usb_psy_name);
++	if (!usb_psy)
++		return ERR_PTR(-EPROBE_DEFER);
++
++	return usb_psy;
++}
++
+ static int dwc3_probe(struct platform_device *pdev)
+ {
+ 	struct device		*dev = &pdev->dev;
+@@ -2165,6 +2173,10 @@ static int dwc3_probe(struct platform_device *pdev)
+ 
+ 	dwc3_get_software_properties(dwc);
+ 
++	dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
++	if (IS_ERR(dwc->usb_psy))
++		return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
++
+ 	dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ 	if (IS_ERR(dwc->reset)) {
+ 		ret = PTR_ERR(dwc->reset);
+@@ -2589,12 +2601,15 @@ static int dwc3_resume(struct device *dev)
+ 	pinctrl_pm_select_default_state(dev);
+ 
+ 	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
++	ret = pm_runtime_set_active(dev);
++	if (ret)
++		goto out;
+ 
+ 	ret = dwc3_resume_common(dwc, PMSG_RESUME);
+ 	if (ret)
+ 		pm_runtime_set_suspended(dev);
+ 
++out:
+ 	pm_runtime_enable(dev);
+ 
+ 	return ret;
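Moving the power-supply lookup out of dwc3_get_properties() and into probe turns a logged-and-ignored failure into a proper tri-state result: NULL when "usb-psy-name" is absent (optional resource not configured), ERR_PTR(-EPROBE_DEFER) when the named supply has not registered yet, and the supply itself on success; dev_err_probe() then handles logging and stays quiet for the defer case. The same helper shape, generically (mirrors the patch; the foo_ prefix is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/property.h>

static struct power_supply *foo_get_optional_psy(struct device *dev)
{
	struct power_supply *psy;
	const char *name;

	/* No property: the supply is simply not configured. */
	if (device_property_read_string(dev, "usb-psy-name", &name) < 0)
		return NULL;

	/* Named but not registered yet: ask the core to retry probe. */
	psy = power_supply_get_by_name(name);
	if (!psy)
		return ERR_PTR(-EPROBE_DEFER);

	return psy;
}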
+diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
+index 7d43da5f28973d..d102506f94713a 100644
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -166,6 +166,7 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
+ 	if (ret)
+ 		return ret;
+ 
++	of_node_put(args.np);
+ 	am62->offset = args.args[0];
+ 
+ 	/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 15bb3aa12aa8b4..48dee166e5d89c 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1066,7 +1066,6 @@ static void usbg_cmd_work(struct work_struct *work)
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+ 			TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+-	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+@@ -1195,7 +1194,6 @@ static void bot_cmd_work(struct work_struct *work)
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+ 				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+-	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+ static int bot_submit_command(struct f_uas *fu,
+@@ -2051,9 +2049,14 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
+ 
+ static int tcm_get_alt(struct usb_function *f, unsigned intf)
+ {
+-	if (intf == bot_intf_desc.bInterfaceNumber)
++	struct f_uas *fu = to_f_uas(f);
++
++	if (fu->iface != intf)
++		return -EOPNOTSUPP;
++
++	if (fu->flags & USBG_IS_BOT)
+ 		return USB_G_ALT_INT_BBB;
+-	if (intf == uasp_intf_desc.bInterfaceNumber)
++	else if (fu->flags & USBG_IS_UAS)
+ 		return USB_G_ALT_INT_UAS;
+ 
+ 	return -EOPNOTSUPP;
+@@ -2063,6 +2066,9 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+ 	struct f_uas *fu = to_f_uas(f);
+ 
++	if (fu->iface != intf)
++		return -EOPNOTSUPP;
++
+ 	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+ 		struct guas_setup_wq *work;
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 09b05a62375e01..dfe1a676d487c7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -422,7 +422,8 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+ 	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ 	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ 		xhci->current_cmd = cur_cmd;
+-		xhci_mod_cmd_timer(xhci);
++		if (cur_cmd)
++			xhci_mod_cmd_timer(xhci);
+ 		xhci_ring_cmd_db(xhci);
+ 	}
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 24a6a4354df8ba..b2c83f552da55d 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -27,6 +27,7 @@
+ #define	VPPS_NEW_MIN_PERCENT			95
+ #define	VPPS_VALID_MIN_MV			100
+ #define	VSINKDISCONNECT_PD_MIN_PERCENT		90
++#define	VPPS_SHUTDOWN_MIN_PERCENT		85
+ 
+ struct tcpci {
+ 	struct device *dev;
+@@ -366,7 +367,8 @@ static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
+ }
+ 
+ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+-						   bool pps_active, u32 requested_vbus_voltage_mv)
++						   bool pps_active, u32 requested_vbus_voltage_mv,
++						   u32 apdo_min_voltage_mv)
+ {
+ 	struct tcpci *tcpci = tcpc_to_tcpci(dev);
+ 	unsigned int pwr_ctrl, threshold = 0;
+@@ -388,9 +390,12 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty
+ 		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+ 	} else if (mode == TYPEC_PWR_MODE_PD) {
+ 		if (pps_active)
+-			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+-				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
+-				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
++			/*
++			 * To prevent disconnect when the source is in Current Limit Mode.
++			 * Set the threshold to the lowest possible voltage vPpsShutdown (min)
++			 */
++			threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
++				    VSINKPD_MIN_IR_DROP_MV;
+ 		else
+ 			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+ 				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
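
A note on the arithmetic above: the hunk switches the PPS auto-discharge
threshold from tracking the requested VBUS voltage to tracking the lowest
voltage the selected APDO can produce, so a source dropping into Current
Limit Mode no longer trips a false disconnect. A minimal user-space sketch
of the same computation (VPPS_SHUTDOWN_MIN_PERCENT matches the hunk above;
the IR-drop constant here is illustrative, the real value lives in tcpci.c):

    #include <stdio.h>
    #include <stdint.h>

    #define VPPS_SHUTDOWN_MIN_PERCENT 85
    #define VSINKPD_MIN_IR_DROP_MV    750  /* illustrative value */

    /* Threshold below which auto VBUS discharge triggers for a PPS contract. */
    static uint32_t pps_discharge_threshold_mv(uint32_t apdo_min_voltage_mv)
    {
            return VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
                   VSINKPD_MIN_IR_DROP_MV;
    }

    int main(void)
    {
            /* 3.3V APDO minimum: threshold sits just under vPpsShutdown(min). */
            printf("%u mV\n", pps_discharge_threshold_mv(3300));
            return 0;
    }
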
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 6021eeb903fec5..a22c1644d0f792 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2943,10 +2943,12 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
+ 		return 0;
+ 
+ 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
+-							    requested_vbus_voltage);
++							    requested_vbus_voltage,
++							    port->pps_data.min_volt);
+ 	tcpm_log_force(port,
+-		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
+-		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
++		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
++		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
++		       port->pps_data.min_volt, ret);
+ 
+ 	return ret;
+ }
+@@ -4772,7 +4774,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 			port->caps_count = 0;
+ 			port->pd_capable = true;
+ 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
+-					    PD_T_SEND_SOURCE_CAP);
++					    PD_T_SENDER_RESPONSE);
+ 		}
+ 		break;
+ 	case SRC_SEND_CAPABILITIES_TIMEOUT:
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+index c04cbe0ef173d6..7c636db7988256 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+@@ -36,6 +36,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
+ 		np = of_get_next_parent(np);
+ 	}
+ 
++	of_node_put(np);
+ 	return NULL;
+ }
+ 
+diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
+index 58c9445c0f885c..255ece133576b4 100644
+--- a/drivers/watchdog/rti_wdt.c
++++ b/drivers/watchdog/rti_wdt.c
+@@ -301,6 +301,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
+ 	node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ 	if (node) {
+ 		ret = of_address_to_resource(node, 0, &res);
++		of_node_put(node);
+ 		if (ret) {
+ 			dev_err(dev, "No memory address assigned to the region.\n");
+ 			goto err_iomap;
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index ada363af5aab8e..50edd1cae28ace 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1472,7 +1472,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+ 		op->file[1].vnode = vnode;
+ 	}
+ 
+-	return afs_do_sync_operation(op);
++	ret = afs_do_sync_operation(op);
++
++	/* Not all systems that can host afs servers have ENOTEMPTY. */
++	if (ret == -EEXIST)
++		ret = -ENOTEMPTY;
++	return ret;
+ 
+ error:
+ 	return afs_put_operation(op);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index c9d620175e80ca..d9760b2a8d8de4 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -1346,6 +1346,15 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
+ extern int afs_extract_data(struct afs_call *, bool);
+ extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
+ 
++static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why)
++{
++	int r = refcount_read(&call->ref);
++
++	trace_afs_call(call->debug_id, why, r,
++		       atomic_read(&call->net->nr_outstanding_calls),
++		       __builtin_return_address(0));
++}
++
+ static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
+ 				    gfp_t gfp)
+ {
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index 9f2a3bb56ec69e..a122c6366ce19f 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -430,11 +430,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
+ 	return;
+ 
+ error_do_abort:
+-	if (ret != -ECONNABORTED) {
++	if (ret != -ECONNABORTED)
+ 		rxrpc_kernel_abort_call(call->net->socket, rxcall,
+ 					RX_USER_ABORT, ret,
+ 					afs_abort_send_data_error);
+-	} else {
++	if (call->async) {
++		afs_see_call(call, afs_call_trace_async_abort);
++		return;
++	}
++
++	if (ret == -ECONNABORTED) {
+ 		len = 0;
+ 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
+ 		rxrpc_kernel_recv_data(call->net->socket, rxcall,
+@@ -445,6 +450,8 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
+ 	call->error = ret;
+ 	trace_afs_call_done(call);
+ error_kill_call:
++	if (call->async)
++		afs_see_call(call, afs_call_trace_async_kill);
+ 	if (call->type->done)
+ 		call->type->done(call);
+ 
+@@ -602,7 +609,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 	abort_code = 0;
+ call_complete:
+ 	afs_set_call_complete(call, ret, remote_abort);
+-	state = AFS_CALL_COMPLETE;
+ 	goto done;
+ }
+ 
+diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
+index 8ca8681645077d..cc5f143d21a347 100644
+--- a/fs/afs/xdr_fs.h
++++ b/fs/afs/xdr_fs.h
+@@ -88,7 +88,7 @@ union afs_xdr_dir_block {
+ 
+ 	struct {
+ 		struct afs_xdr_dir_hdr	hdr;
+-		u8			alloc_ctrs[AFS_DIR_MAX_BLOCKS];
++		u8			alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR];
+ 		__be16			hashtable[AFS_DIR_HASHTBL_SIZE];
+ 	} meta;
+ 
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 024227aba4cd5f..362845f9aaaefa 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -666,8 +666,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
+ static void yfs_done_fs_remove_file2(struct afs_call *call)
+ {
+ 	if (call->error == -ECONNABORTED &&
+-	    call->abort_code == RX_INVALID_OPERATION) {
+-		set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
++	    (call->abort_code == RX_INVALID_OPERATION ||
++	     call->abort_code == RXGEN_OPCODE)) {
++		set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags);
+ 		call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ 	}
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 27b2fe7f735d5b..d1c8f6730a5687 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1372,6 +1372,17 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 
+ 	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
+ 
++	/*
++	 * We're not doing compressed IO, don't unlock the first page (which
++	 * the caller expects to stay locked), don't clear any dirty bits and
++	 * don't set any writeback bits.
++	 *
++	 * Do set the Ordered (Private2) bit so we know this page was properly
++	 * setup for writepage.
++	 */
++	page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
++	page_ops |= PAGE_SET_ORDERED;
++
+ 	/*
+ 	 * Relocation relies on the relocated extents to have exactly the same
+ 	 * size as the original extents. Normally writeback for relocation data
+@@ -1431,6 +1442,10 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		file_extent.offset = 0;
+ 		file_extent.compression = BTRFS_COMPRESS_NONE;
+ 
++		/*
++		 * The locked range will be released either during error
++		 * cleanup or after the whole range is finished.
++		 */
+ 		lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+ 			    &cached);
+ 
+@@ -1476,21 +1491,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 
+ 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ 
+-		/*
+-		 * We're not doing compressed IO, don't unlock the first page
+-		 * (which the caller expects to stay locked), don't clear any
+-		 * dirty bits and don't set any writeback bits
+-		 *
+-		 * Do set the Ordered flag so we know this page was
+-		 * properly setup for writepage.
+-		 */
+-		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
+-		page_ops |= PAGE_SET_ORDERED;
+-
+-		extent_clear_unlock_delalloc(inode, start, start + cur_alloc_size - 1,
+-					     locked_folio, &cached,
+-					     EXTENT_LOCKED | EXTENT_DELALLOC,
+-					     page_ops);
+ 		if (num_bytes < cur_alloc_size)
+ 			num_bytes = 0;
+ 		else
+@@ -1507,6 +1507,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		if (ret)
+ 			goto out_unlock;
+ 	}
++	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
++				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
+ done:
+ 	if (done_offset)
+ 		*done_offset = end;
+@@ -1527,35 +1529,30 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 	 * We process each region below.
+ 	 */
+ 
+-	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+-		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+-	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+-
+ 	/*
+ 	 * For the range (1). We have already instantiated the ordered extents
+ 	 * for this region. They are cleaned up by
+ 	 * btrfs_cleanup_ordered_extents() in e.g,
+-	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
+-	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
+-	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
+-	 * function.
++	 * btrfs_run_delalloc_range().
++	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
++	 * are also handled by the cleanup function.
+ 	 *
+-	 * However, in case of @keep_locked, we still need to unlock the pages
+-	 * (except @locked_folio) to ensure all the pages are unlocked.
++	 * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags, and
++	 * finish the writeback of the involved folios, which will never be submitted.
+ 	 */
+-	if (keep_locked && orig_start < start) {
++	if (orig_start < start) {
++		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
++		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
++
+ 		if (!locked_folio)
+ 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
+ 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
+-					     locked_folio, NULL, 0, page_ops);
++					     locked_folio, NULL, clear_bits, page_ops);
+ 	}
+ 
+-	/*
+-	 * At this point we're unlocked, we want to make sure we're only
+-	 * clearing these flags under the extent lock, so lock the rest of the
+-	 * range and clear everything up.
+-	 */
+-	lock_extent(&inode->io_tree, start, end, NULL);
++	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
++	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
+ 
+ 	/*
+ 	 * For the range (2). If we reserved an extent for our delalloc range
+@@ -1970,6 +1967,53 @@ static int can_nocow_file_extent(struct btrfs_path *path,
+ 	return ret < 0 ? ret : can_nocow;
+ }
+ 
++/*
++ * Cleanup the dirty folios which will never be submitted due to error.
++ *
++ * When running a delalloc range, we may need to split the ranges (due to
++ * fragmentation or NOCOW). If we hit an error in a later part, we error out
++ * and the previously successful range will never be submitted, so we have to
++ * clean up those folios by clearing their dirty flag, then starting and
++ * finishing the writeback.
++ */
++static void cleanup_dirty_folios(struct btrfs_inode *inode,
++				 struct folio *locked_folio,
++				 u64 start, u64 end, int error)
++{
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++	struct address_space *mapping = inode->vfs_inode.i_mapping;
++	pgoff_t start_index = start >> PAGE_SHIFT;
++	pgoff_t end_index = end >> PAGE_SHIFT;
++	u32 len;
++
++	ASSERT(end + 1 - start < U32_MAX);
++	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
++	       IS_ALIGNED(end + 1, fs_info->sectorsize));
++	len = end + 1 - start;
++
++	/*
++	 * Handle the locked folio first.
++	 * The btrfs_folio_clamp_*() helpers can handle range out of the folio case.
++	 * The btrfs_folio_clamp_*() helpers can handle a range outside the folio.
++	btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
++
++	for (pgoff_t index = start_index; index <= end_index; index++) {
++		struct folio *folio;
++
++		/* Already handled at the beginning. */
++		if (index == locked_folio->index)
++			continue;
++		folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS);
++		/* Cache already dropped, no need to do any cleanup. */
++		if (IS_ERR(folio))
++			continue;
++		btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
++		folio_unlock(folio);
++		folio_put(folio);
++	}
++	mapping_set_error(mapping, error);
++}
++
+ /*
+  * when nocow writeback calls back.  This checks for snapshots or COW copies
+  * of the extents that exist in the file, and COWs the file as required.
+@@ -1985,6 +2029,11 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 	struct btrfs_root *root = inode->root;
+ 	struct btrfs_path *path;
+ 	u64 cow_start = (u64)-1;
++	/*
++	 * If not 0, represents the inclusive end of the last fallback_to_cow()
++	 * range. Only for error handling.
++	 */
++	u64 cow_end = 0;
+ 	u64 cur_offset = start;
+ 	int ret;
+ 	bool check_prev = true;
+@@ -2145,6 +2194,7 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 					      found_key.offset - 1);
+ 			cow_start = (u64)-1;
+ 			if (ret) {
++				cow_end = found_key.offset - 1;
+ 				btrfs_dec_nocow_writers(nocow_bg);
+ 				goto error;
+ 			}
+@@ -2218,24 +2268,54 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
+ 		cow_start = cur_offset;
+ 
+ 	if (cow_start != (u64)-1) {
+-		cur_offset = end;
+ 		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
+ 		cow_start = (u64)-1;
+-		if (ret)
++		if (ret) {
++			cow_end = end;
+ 			goto error;
++		}
+ 	}
+ 
+ 	btrfs_free_path(path);
+ 	return 0;
+ 
+ error:
++	/*
++	 * There are several error cases:
++	 *
++	 * 1) Failed without falling back to COW
++	 *    start         cur_offset             end
++	 *    |/////////////|                      |
++	 *
++	 *    For range [start, cur_offset) the folios are already unlocked (except
++	 *    @locked_folio), EXTENT_DELALLOC already removed.
++	 *    Only need to clear the dirty flag as they will never be submitted.
++	 *    Ordered extent and extent maps are handled by
++	 *    btrfs_mark_ordered_io_finished() inside run_delalloc_range().
++	 *
++	 * 2) Failed with error from fallback_to_cow()
++	 *    start         cur_offset  cow_end    end
++	 *    |/////////////|-----------|          |
++	 *
++	 *    For range [start, cur_offset) it's the same as case 1).
++	 *    But for range [cur_offset, cow_end), the folios have dirty flag
++	 *    cleared and unlocked, EXTENT_DELALLOC cleared by cow_file_range().
++	 *
++	 *    Thus we should not call extent_clear_unlock_delalloc() on range
++	 *    [cur_offset, cow_end), as the folios are already unlocked.
++	 *
++	 * So clear the folio dirty flags for [start, cur_offset) first.
++	 */
++	if (cur_offset > start)
++		cleanup_dirty_folios(inode, locked_folio, start, cur_offset - 1, ret);
++
+ 	/*
+ 	 * If an error happened while a COW region is outstanding, cur_offset
+-	 * needs to be reset to cow_start to ensure the COW region is unlocked
+-	 * as well.
++	 * needs to be reset to @cow_end + 1 to skip the COW range, as
++	 * cow_file_range() will do the proper cleanup on error.
+ 	 */
+-	if (cow_start != (u64)-1)
+-		cur_offset = cow_start;
++	if (cow_end)
++		cur_offset = cow_end + 1;
+ 
+ 	/*
+ 	 * We need to lock the extent here because we're clearing DELALLOC and
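
The error diagrams documented above split a failed delalloc range into
regions with different cleanup owners. A toy model of that split, showing
which subrange each path is responsible for after a run_delalloc_nocow()
failure (purely illustrative; offsets and labels are made up):

    #include <stdio.h>
    #include <stdint.h>

    static void report_error_regions(uint64_t start, uint64_t cur_offset,
                                     uint64_t cow_end, uint64_t end)
    {
            /* [start, cur_offset): folios already unlocked, clear dirty only. */
            if (cur_offset > start)
                    printf("dirty-flag cleanup:  [%llu, %llu]\n",
                           (unsigned long long)start,
                           (unsigned long long)(cur_offset - 1));

            /* [cur_offset, cow_end]: cow_file_range() already cleaned it up. */
            if (cow_end)
                    cur_offset = cow_end + 1;

            /* Remainder: unlock and clear EXTENT_LOCKED | EXTENT_DELALLOC. */
            if (cur_offset <= end)
                    printf("unlock + clear bits: [%llu, %llu]\n",
                           (unsigned long long)cur_offset,
                           (unsigned long long)end);
    }

    int main(void)
    {
            report_error_regions(0, 4096, 8191, 16383);
            return 0;
    }
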
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f9b214992212dc..993b5e803699ec 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1838,9 +1838,19 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	 * Thus its reserved space should all be zero, no matter if qgroup
+ 	 * is consistent or the mode.
+ 	 */
+-	WARN_ON(qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
+-		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
+-		qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
++	if (qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] ||
++	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] ||
++	    qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]) {
++		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
++		btrfs_warn_rl(fs_info,
++"to be deleted qgroup %u/%llu has non-zero numbers, data %llu meta prealloc %llu meta pertrans %llu",
++			      btrfs_qgroup_level(qgroup->qgroupid),
++			      btrfs_qgroup_subvolid(qgroup->qgroupid),
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA],
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC],
++			      qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS]);
++
++	}
+ 	/*
+ 	 * The same for rfer/excl numbers, but that's only if our qgroup is
+ 	 * consistent and if it's in regular qgroup mode.
+@@ -1849,8 +1859,9 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	 */
+ 	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL &&
+ 	    !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)) {
+-		if (WARN_ON(qgroup->rfer || qgroup->excl ||
+-			    qgroup->rfer_cmpr || qgroup->excl_cmpr)) {
++		if (qgroup->rfer || qgroup->excl ||
++		    qgroup->rfer_cmpr || qgroup->excl_cmpr) {
++			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ 			btrfs_warn_rl(fs_info,
+ "to be deleted qgroup %u/%llu has non-zero numbers, rfer %llu rfer_cmpr %llu excl %llu excl_cmpr %llu",
+ 				      btrfs_qgroup_level(qgroup->qgroupid),
+diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
+index 8c68059ac1b0c1..03d7bfc042e2ae 100644
+--- a/fs/btrfs/subpage.c
++++ b/fs/btrfs/subpage.c
+@@ -716,6 +716,7 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ 	unsigned long writeback_bitmap;
+ 	unsigned long ordered_bitmap;
+ 	unsigned long checked_bitmap;
++	unsigned long locked_bitmap;
+ 	unsigned long flags;
+ 
+ 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
+@@ -728,15 +729,16 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
+ 	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
+-	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &checked_bitmap);
++	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
+ 	spin_unlock_irqrestore(&subpage->lock, flags);
+ 
+ 	dump_page(folio_page(folio, 0), "btrfs subpage dump");
+ 	btrfs_warn(fs_info,
+-"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
++"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
+ 		    start, len, folio_pos(folio),
+ 		    sectors_per_page, &uptodate_bitmap,
+ 		    sectors_per_page, &dirty_bitmap,
++		    sectors_per_page, &locked_bitmap,
+ 		    sectors_per_page, &writeback_bitmap,
+ 		    sectors_per_page, &ordered_bitmap,
+ 		    sectors_per_page, &checked_bitmap);
+diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
+index 428fa9389fd49e..44fff1f4eac482 100644
+--- a/fs/btrfs/subpage.h
++++ b/fs/btrfs/subpage.h
+@@ -137,6 +137,19 @@ DECLARE_BTRFS_SUBPAGE_OPS(writeback);
+ DECLARE_BTRFS_SUBPAGE_OPS(ordered);
+ DECLARE_BTRFS_SUBPAGE_OPS(checked);
+ 
++/*
++ * Helper for error cleanup, where a folio will have its dirty flag cleared,
++ * with writeback started and finished.
++ */
++static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
++					       struct folio *locked_folio,
++					       u64 start, u32 len)
++{
++	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
++	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
++	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
++}
++
+ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
+ 					struct folio *folio, u64 start, u32 len);
+ 
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 7dfe5005129a1d..f6eaaf20229d84 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -971,7 +971,7 @@ static int btrfs_fill_super(struct super_block *sb,
+ 
+ 	err = open_ctree(sb, fs_devices);
+ 	if (err) {
+-		btrfs_err(fs_info, "open_ctree failed");
++		btrfs_err(fs_info, "open_ctree failed: %d", err);
+ 		return err;
+ 	}
+ 
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index fc1d710166e922..c8ff88f1cdcf2c 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -824,9 +824,12 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
+ 		r->res_first_lkid = 0;
+ 	}
+ 
+-	/* A dir record will not be on the scan list. */
+-	if (r->res_dir_nodeid != our_nodeid)
+-		del_scan(ls, r);
++	/* We always deactivate the scan timer for the rsb when
++	 * we move it out of the inactive state, as rsb state
++	 * can change and scan timers are only for inactive
++	 * rsbs.
++	 */
++	del_scan(ls, r);
+ 	list_move(&r->res_slow_list, &ls->ls_slow_active);
+ 	rsb_clear_flag(r, RSB_INACTIVE);
+ 	kref_init(&r->res_ref); /* ref is now used in active state */
+@@ -989,10 +992,10 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
+ 		r->res_nodeid = 0;
+ 	}
+ 
++	del_scan(ls, r);
+ 	list_move(&r->res_slow_list, &ls->ls_slow_active);
+ 	rsb_clear_flag(r, RSB_INACTIVE);
+ 	kref_init(&r->res_ref);
+-	del_scan(ls, r);
+ 	write_unlock_bh(&ls->ls_rsbtbl_lock);
+ 
+ 	goto out;
+@@ -1337,9 +1340,13 @@ static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *na
+ 	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
+ 			    r_nodeid, result);
+ 
+-	/* A dir record rsb should never be on scan list. */
+-	/* Try to fix this with del_scan? */
+-	WARN_ON(!list_empty(&r->res_scan_list));
++	/* A dir record rsb should never be on the scan list,
++	 * except when we are both the dir and master node.
++	 * This function should only be called by the dir
++	 * node.
++	 */
++	WARN_ON(!list_empty(&r->res_scan_list) &&
++		r->res_master_nodeid != our_nodeid);
+ 
+ 	write_unlock_bh(&ls->ls_rsbtbl_lock);
+ 
+@@ -1430,16 +1437,23 @@ static void deactivate_rsb(struct kref *kref)
+ 	list_move(&r->res_slow_list, &ls->ls_slow_inactive);
+ 
+ 	/*
+-	 * When the rsb becomes unused:
+-	 * - If it's not a dir record for a remote master rsb,
+-	 *   then it is put on the scan list to be freed.
+-	 * - If it's a dir record for a remote master rsb,
+-	 *   then it is kept in the inactive state until
+-	 *   receive_remove() from the master node.
++	 * When the rsb becomes unused, there are two possibilities:
++	 * 1. Leave the inactive rsb in place (don't remove it).
++	 * 2. Add it to the scan list to be removed.
++	 *
++	 * 1 is done when the rsb is acting as the dir record
++	 * for a remotely mastered rsb.  The rsb must be left
++	 * in place as an inactive rsb to act as the dir record.
++	 *
++	 * 2 is done when a) the rsb is not the master and not the
++	 * dir record, b) when the rsb is both the master and the
++	 * dir record, c) when the rsb is master but not dir record.
++	 *
++	 * (If no directory is used, the rsb can always be removed.)
+ 	 */
+-	if (!dlm_no_directory(ls) &&
+-	    (r->res_master_nodeid != our_nodeid) &&
+-	    (dlm_dir_nodeid(r) != our_nodeid))
++	if (dlm_no_directory(ls) ||
++	    (r->res_master_nodeid == our_nodeid ||
++	     dlm_dir_nodeid(r) != our_nodeid))
+ 		add_scan(ls, r);
+ 
+ 	if (r->res_lvbptr) {
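
The deactivate_rsb() comment above boils down to a simple predicate. A
sketch with hypothetical field names (dlm_no_directory(), res_master_nodeid
and dlm_dir_nodeid() are modeled as plain struct fields here):

    #include <stdbool.h>

    struct rsb_info {
            bool no_directory;   /* lockspace runs without a directory */
            int  master_nodeid;
            int  dir_nodeid;
    };

    /* Keep an inactive rsb in place only when it is the dir record for a
     * remotely mastered rsb; everything else goes on the scan list. */
    static bool should_add_to_scan(const struct rsb_info *r, int our_nodeid)
    {
            if (r->no_directory)
                    return true;  /* no dir records at all: always removable */

            return r->master_nodeid == our_nodeid ||
                   r->dir_nodeid != our_nodeid;
    }
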
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index df40c3fd10702c..d28141829c051b 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -462,7 +462,8 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con,
+ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
+ {
+ 	struct connection *con;
+-	bool ret, idx;
++	bool ret;
++	int idx;
+ 
+ 	idx = srcu_read_lock(&connections_srcu);
+ 	con = nodeid2con(nodeid, GFP_NOFS);
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 19ef4ff2a1345d..f416b73e0ca31a 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -927,8 +927,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
+ 				  unsigned long nr_shrink)
+ {
+ 	struct z_erofs_pcluster *pcl;
+-	unsigned int freed = 0;
+-	unsigned long index;
++	unsigned long index, freed = 0;
+ 
+ 	xa_lock(&sbi->managed_pslots);
+ 	xa_for_each(&sbi->managed_pslots, index, pcl) {
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 47a5c806cf1628..54dd52de7269da 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -175,7 +175,8 @@ static unsigned long dir_block_index(unsigned int level,
+ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
+ 				struct page *dentry_page,
+ 				const struct f2fs_filename *fname,
+-				int *max_slots)
++				int *max_slots,
++				bool use_hash)
+ {
+ 	struct f2fs_dentry_block *dentry_blk;
+ 	struct f2fs_dentry_ptr d;
+@@ -183,7 +184,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
+ 	dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
+ 
+ 	make_dentry_ptr_block(dir, &d, dentry_blk);
+-	return f2fs_find_target_dentry(&d, fname, max_slots);
++	return f2fs_find_target_dentry(&d, fname, max_slots, use_hash);
+ }
+ 
+ static inline int f2fs_match_name(const struct inode *dir,
+@@ -208,7 +209,8 @@ static inline int f2fs_match_name(const struct inode *dir,
+ }
+ 
+ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+-			const struct f2fs_filename *fname, int *max_slots)
++			const struct f2fs_filename *fname, int *max_slots,
++			bool use_hash)
+ {
+ 	struct f2fs_dir_entry *de;
+ 	unsigned long bit_pos = 0;
+@@ -231,7 +233,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ 			continue;
+ 		}
+ 
+-		if (de->hash_code == fname->hash) {
++		if (!use_hash || de->hash_code == fname->hash) {
+ 			res = f2fs_match_name(d->inode, fname,
+ 					      d->filename[bit_pos],
+ 					      le16_to_cpu(de->name_len));
+@@ -258,11 +260,12 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 					unsigned int level,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page)
++					struct page **res_page,
++					bool use_hash)
+ {
+ 	int s = GET_DENTRY_SLOTS(fname->disk_name.len);
+ 	unsigned int nbucket, nblock;
+-	unsigned int bidx, end_block;
++	unsigned int bidx, end_block, bucket_no;
+ 	struct page *dentry_page;
+ 	struct f2fs_dir_entry *de = NULL;
+ 	pgoff_t next_pgofs;
+@@ -272,8 +275,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
+ 	nblock = bucket_blocks(level);
+ 
++	bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0;
++
++start_find_bucket:
+ 	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
+-			       le32_to_cpu(fname->hash) % nbucket);
++			       bucket_no);
+ 	end_block = bidx + nblock;
+ 
+ 	while (bidx < end_block) {
+@@ -290,7 +296,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 			}
+ 		}
+ 
+-		de = find_in_block(dir, dentry_page, fname, &max_slots);
++		de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash);
+ 		if (IS_ERR(de)) {
+ 			*res_page = ERR_CAST(de);
+ 			de = NULL;
+@@ -307,12 +313,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
+ 		bidx++;
+ 	}
+ 
+-	if (!de && room && F2FS_I(dir)->chash != fname->hash) {
+-		F2FS_I(dir)->chash = fname->hash;
+-		F2FS_I(dir)->clevel = level;
+-	}
++	if (de)
++		return de;
+ 
+-	return de;
++	if (likely(use_hash)) {
++		if (room && F2FS_I(dir)->chash != fname->hash) {
++			F2FS_I(dir)->chash = fname->hash;
++			F2FS_I(dir)->clevel = level;
++		}
++	} else if (++bucket_no < nbucket) {
++		goto start_find_bucket;
++	}
++	return NULL;
+ }
+ 
+ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+@@ -323,11 +335,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	struct f2fs_dir_entry *de = NULL;
+ 	unsigned int max_depth;
+ 	unsigned int level;
++	bool use_hash = true;
+ 
+ 	*res_page = NULL;
+ 
++#if IS_ENABLED(CONFIG_UNICODE)
++start_find_entry:
++#endif
+ 	if (f2fs_has_inline_dentry(dir)) {
+-		de = f2fs_find_in_inline_dir(dir, fname, res_page);
++		de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash);
+ 		goto out;
+ 	}
+ 
+@@ -343,11 +359,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	}
+ 
+ 	for (level = 0; level < max_depth; level++) {
+-		de = find_in_level(dir, level, fname, res_page);
++		de = find_in_level(dir, level, fname, res_page, use_hash);
+ 		if (de || IS_ERR(*res_page))
+ 			break;
+ 	}
++
+ out:
++#if IS_ENABLED(CONFIG_UNICODE)
++	if (IS_CASEFOLDED(dir) && !de && use_hash) {
++		use_hash = false;
++		goto start_find_entry;
++	}
++#endif
+ 	/* This is to increase the speed of f2fs_create */
+ 	if (!de)
+ 		F2FS_I(dir)->task = current;
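
The casefold fallback above amounts to: try the hash-selected bucket first,
and if a casefolded directory misses, rescan every bucket linearly, since
the stored hash may derive from a different case variant of the name. A
self-contained sketch of that strategy (hypothetical in-memory layout, one
entry per bucket, not the on-disk f2fs format):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct entry { const char *name; };
    struct dir   { struct entry *buckets[16]; size_t nbucket; };

    static unsigned long name_hash(const char *s)
    {
            unsigned long h = 5381;

            while (*s)
                    h = h * 33 + (unsigned char)*s++;
            return h;
    }

    static struct entry *find_entry(struct dir *d, const char *name, bool use_hash)
    {
            size_t bucket_no = use_hash ? name_hash(name) % d->nbucket : 0;

            do {
                    struct entry *e = d->buckets[bucket_no];

                    if (e && e->name && strcmp(e->name, name) == 0)
                            return e;
            } while (!use_hash && ++bucket_no < d->nbucket);

            return NULL;
    }

A caller mirrors __f2fs_find_entry(): find_entry(d, name, true) first, then
only on a miss in a casefolded directory retry with use_hash = false.
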
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 6f2cbf4c57402b..da199fdfebfc6b 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3583,7 +3583,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ 			struct f2fs_filename *fname);
+ void f2fs_free_filename(struct f2fs_filename *fname);
+ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+-			const struct f2fs_filename *fname, int *max_slots);
++			const struct f2fs_filename *fname, int *max_slots,
++			bool use_hash);
+ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ 			unsigned int start_pos, struct fscrypt_str *fstr);
+ void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
+@@ -4219,7 +4220,8 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio);
+ int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page);
++					struct page **res_page,
++					bool use_hash);
+ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
+ 			struct page *ipage);
+ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 005babf1bed1e3..3b91a95d42764f 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -352,7 +352,8 @@ int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+ 
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 					const struct f2fs_filename *fname,
+-					struct page **res_page)
++					struct page **res_page,
++					bool use_hash)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+ 	struct f2fs_dir_entry *de;
+@@ -369,7 +370,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ 	inline_dentry = inline_data_addr(dir, ipage);
+ 
+ 	make_dentry_ptr_inline(dir, &d, inline_dentry);
+-	de = f2fs_find_target_dentry(&d, fname, NULL);
++	de = f2fs_find_target_dentry(&d, fname, NULL, use_hash);
+ 	unlock_page(ipage);
+ 	if (IS_ERR(de)) {
+ 		*res_page = ERR_CAST(de);
+diff --git a/fs/file_table.c b/fs/file_table.c
+index 976736be47cb66..502b81f614d9bb 100644
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -128,7 +128,7 @@ static struct ctl_table fs_stat_sysctls[] = {
+ 		.data		= &sysctl_nr_open,
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec_minmax,
++		.proc_handler	= proc_douintvec_minmax,
+ 		.extra1		= &sysctl_nr_open_min,
+ 		.extra2		= &sysctl_nr_open_max,
+ 	},
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 7e51d2cec64b48..bd6503b7314264 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -95,32 +95,17 @@ __uml_setup("hostfs=", hostfs_args,
+ static char *__dentry_name(struct dentry *dentry, char *name)
+ {
+ 	char *p = dentry_path_raw(dentry, name, PATH_MAX);
+-	char *root;
+-	size_t len;
+-	struct hostfs_fs_info *fsi;
+-
+-	fsi = dentry->d_sb->s_fs_info;
+-	root = fsi->host_root_path;
+-	len = strlen(root);
+-	if (IS_ERR(p)) {
+-		__putname(name);
+-		return NULL;
+-	}
+-
+-	/*
+-	 * This function relies on the fact that dentry_path_raw() will place
+-	 * the path name at the end of the provided buffer.
+-	 */
+-	BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
++	struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info;
++	char *root = fsi->host_root_path;
++	size_t len = strlen(root);
+ 
+-	strscpy(name, root, PATH_MAX);
+-	if (len > p - name) {
++	if (IS_ERR(p) || len > p - name) {
+ 		__putname(name);
+ 		return NULL;
+ 	}
+ 
+-	if (p > name + len)
+-		strcpy(name + len, p);
++	memcpy(name, root, len);
++	memmove(name + len, p, name + PATH_MAX - p);
+ 
+ 	return name;
+ }
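
The rewrite above leans on dentry_path_raw() leaving the relative path at
the *end* of the buffer, so the root prefix can be copied to the front and
the tail shifted down with memmove(), which tolerates the overlap that
strcpy()/memcpy() would not. A user-space sketch of the same buffer reuse:

    #include <stdio.h>
    #include <string.h>

    #define PATH_MAX 4096

    static char *join_root(char *name, char *p, const char *root)
    {
            size_t len = strlen(root);

            if (len > (size_t)(p - name))   /* no room for the prefix */
                    return NULL;

            memcpy(name, root, len);
            /* Overlapping move; the count includes the NUL terminator. */
            memmove(name + len, p, name + PATH_MAX - p);
            return name;
    }

    int main(void)
    {
            char buf[PATH_MAX];
            char *p = buf + PATH_MAX - sizeof("/sub/file");

            memcpy(p, "/sub/file", sizeof("/sub/file"));
            printf("%s\n", join_root(buf, p, "/host/root"));
            return 0;
    }
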
+diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
+index 4b8618cf114caf..17b0ae5cb2efdc 100644
+--- a/fs/nfs/localio.c
++++ b/fs/nfs/localio.c
+@@ -328,7 +328,7 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status)
+ 		hdr->res.op_status = NFS4_OK;
+ 		hdr->task.tk_status = 0;
+ 	} else {
+-		hdr->res.op_status = nfs4_stat_to_errno(status);
++		hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ 		hdr->task.tk_status = status;
+ 	}
+ }
+@@ -668,7 +668,7 @@ nfs_local_commit_done(struct nfs_commit_data *data, int status)
+ 		data->task.tk_status = 0;
+ 	} else {
+ 		nfs_reset_boot_verifier(data->inode);
+-		data->res.op_status = nfs4_stat_to_errno(status);
++		data->res.op_status = nfs_localio_errno_to_nfs4_stat(status);
+ 		data->task.tk_status = status;
+ 	}
+ }
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 531c9c20ef1d1b..9f0d69e6526443 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -552,7 +552,7 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
+ 		.rpc_message = &msg,
+ 		.callback_ops = &nfs42_offload_cancel_ops,
+ 		.workqueue = nfsiod_workqueue,
+-		.flags = RPC_TASK_ASYNC,
++		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
+ 	};
+ 	int status;
+ 
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 9e3ae53e220583..becc3149aa9e5c 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -144,9 +144,11 @@
+ 					 decode_putfh_maxsz + \
+ 					 decode_offload_cancel_maxsz)
+ #define NFS4_enc_copy_notify_sz		(compound_encode_hdr_maxsz + \
++					 encode_sequence_maxsz + \
+ 					 encode_putfh_maxsz + \
+ 					 encode_copy_notify_maxsz)
+ #define NFS4_dec_copy_notify_sz		(compound_decode_hdr_maxsz + \
++					 decode_sequence_maxsz + \
+ 					 decode_putfh_maxsz + \
+ 					 decode_copy_notify_maxsz)
+ #define NFS4_enc_deallocate_sz		(compound_encode_hdr_maxsz + \
+diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c
+index 34a115176f97eb..af09aed09fd279 100644
+--- a/fs/nfs_common/common.c
++++ b/fs/nfs_common/common.c
+@@ -15,7 +15,7 @@ static const struct {
+ 	{ NFS_OK,		0		},
+ 	{ NFSERR_PERM,		-EPERM		},
+ 	{ NFSERR_NOENT,		-ENOENT		},
+-	{ NFSERR_IO,		-errno_NFSERR_IO},
++	{ NFSERR_IO,		-EIO		},
+ 	{ NFSERR_NXIO,		-ENXIO		},
+ /*	{ NFSERR_EAGAIN,	-EAGAIN		}, */
+ 	{ NFSERR_ACCES,		-EACCES		},
+@@ -45,7 +45,6 @@ static const struct {
+ 	{ NFSERR_SERVERFAULT,	-EREMOTEIO	},
+ 	{ NFSERR_BADTYPE,	-EBADTYPE	},
+ 	{ NFSERR_JUKEBOX,	-EJUKEBOX	},
+-	{ -1,			-EIO		}
+ };
+ 
+ /**
+@@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status)
+ {
+ 	int i;
+ 
+-	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
++	for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
+ 		if (nfs_errtbl[i].stat == (int)status)
+ 			return nfs_errtbl[i].errno;
+ 	}
+-	return nfs_errtbl[i].errno;
++	return -EIO;
+ }
+ EXPORT_SYMBOL_GPL(nfs_stat_to_errno);
+ 
+ /*
+  * We need to translate between nfs v4 status return values and
+  * the local errno values which may not be the same.
++ *
++ * nfs4_errtbl_common[] is consulted before the more specialized
++ * mappings in nfs4_errtbl[] or nfs4_errtbl_localio[].
+  */
+ static const struct {
+ 	int stat;
+ 	int errno;
+-} nfs4_errtbl[] = {
++} nfs4_errtbl_common[] = {
+ 	{ NFS4_OK,		0		},
+ 	{ NFS4ERR_PERM,		-EPERM		},
+ 	{ NFS4ERR_NOENT,	-ENOENT		},
+-	{ NFS4ERR_IO,		-errno_NFSERR_IO},
++	{ NFS4ERR_IO,		-EIO		},
+ 	{ NFS4ERR_NXIO,		-ENXIO		},
+ 	{ NFS4ERR_ACCESS,	-EACCES		},
+ 	{ NFS4ERR_EXIST,	-EEXIST		},
+@@ -98,15 +100,20 @@ static const struct {
+ 	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
+ 	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
+ 	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
+-	{ NFS4ERR_SERVERFAULT,	-EREMOTEIO	},
+ 	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
+-	{ NFS4ERR_LOCKED,	-EAGAIN		},
+ 	{ NFS4ERR_SYMLINK,	-ELOOP		},
+-	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+ 	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
++};
++
++static const struct {
++	int stat;
++	int errno;
++} nfs4_errtbl[] = {
++	{ NFS4ERR_SERVERFAULT,	-EREMOTEIO	},
++	{ NFS4ERR_LOCKED,	-EAGAIN		},
++	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+ 	{ NFS4ERR_NOXATTR,	-ENODATA	},
+ 	{ NFS4ERR_XATTR2BIG,	-E2BIG		},
+-	{ -1,			-EIO		}
+ };
+ 
+ /*
+@@ -116,7 +123,14 @@ static const struct {
+ int nfs4_stat_to_errno(int stat)
+ {
+ 	int i;
+-	for (i = 0; nfs4_errtbl[i].stat != -1; i++) {
++
++	/* First check nfs4_errtbl_common */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
++		if (nfs4_errtbl_common[i].stat == stat)
++			return nfs4_errtbl_common[i].errno;
++	}
++	/* Then check nfs4_errtbl */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) {
+ 		if (nfs4_errtbl[i].stat == stat)
+ 			return nfs4_errtbl[i].errno;
+ 	}
+@@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat)
+ 	return -stat;
+ }
+ EXPORT_SYMBOL_GPL(nfs4_stat_to_errno);
++
++/*
++ * This table is useful for conversion from local errno to NFS error.
++ * It provides more logically correct mappings for use with LOCALIO
++ * (which is focused on converting from errno to NFS status).
++ */
++static const struct {
++	int stat;
++	int errno;
++} nfs4_errtbl_localio[] = {
++	/* Map errors differently than nfs4_errtbl */
++	{ NFS4ERR_IO,		-EREMOTEIO	},
++	{ NFS4ERR_DELAY,	-EAGAIN		},
++	{ NFS4ERR_FBIG,		-E2BIG		},
++	/* Map errors not handled by nfs4_errtbl */
++	{ NFS4ERR_STALE,	-EBADF		},
++	{ NFS4ERR_STALE,	-EOPENSTALE	},
++	{ NFS4ERR_DELAY,	-ETIMEDOUT	},
++	{ NFS4ERR_DELAY,	-ERESTARTSYS	},
++	{ NFS4ERR_DELAY,	-ENOMEM		},
++	{ NFS4ERR_IO,		-ETXTBSY	},
++	{ NFS4ERR_IO,		-EBUSY		},
++	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
++	{ NFS4ERR_SERVERFAULT,	-ENFILE		},
++	{ NFS4ERR_IO,		-EUCLEAN	},
++	{ NFS4ERR_PERM,		-ENOKEY		},
++};
++
++/*
++ * Convert an errno to an NFS error code for LOCALIO.
++ */
++__u32 nfs_localio_errno_to_nfs4_stat(int errno)
++{
++	int i;
++
++	/* First check nfs4_errtbl_common */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) {
++		if (nfs4_errtbl_common[i].errno == errno)
++			return nfs4_errtbl_common[i].stat;
++	}
++	/* Then check nfs4_errtbl_localio */
++	for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) {
++		if (nfs4_errtbl_localio[i].errno == errno)
++			return nfs4_errtbl_localio[i].stat;
++	}
++	/* If we cannot translate the error, the recovery routines should
++	 * handle it.
++	 * Note: the remaining NFSv4 error codes have values > 10000, so they
++	 * should not conflict with native Linux error codes.
++	 */
++	return NFS4ERR_SERVERFAULT;
++}
++EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat);
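
The table restructuring above replaces the old { -1, -EIO } sentinel row
with bounded ARRAY_SIZE() iteration, and splits the shared mappings into a
common table that is consulted first. A minimal sketch of the pattern with
made-up status values (the real tables live in fs/nfs_common/common.c):

    #include <errno.h>
    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct stat_map { int stat; int errno_val; };

    static const struct stat_map tbl_common[] = {
            { 0, 0      },
            { 1, -EPERM },
            { 5, -EIO   },
    };

    static const struct stat_map tbl_specific[] = {
            { 10, -EAGAIN },
    };

    static int stat_to_errno(int stat)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(tbl_common); i++)
                    if (tbl_common[i].stat == stat)
                            return tbl_common[i].errno_val;
            for (i = 0; i < ARRAY_SIZE(tbl_specific); i++)
                    if (tbl_specific[i].stat == stat)
                            return tbl_specific[i].errno_val;
            return -EIO;  /* unknown status: fall back, as the patch does */
    }
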
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index 0a3aea6c416bc6..9b7f8e9655a277 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
+ 	return 0;
+ }
+ 
+-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ 		    struct folio *folio, struct inode *inode)
+ {
+ 	size_t from = offset_in_folio(folio, de);
+@@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ 
+ 	folio_lock(folio);
+ 	err = nilfs_prepare_chunk(folio, from, to);
+-	BUG_ON(err);
++	if (unlikely(err)) {
++		folio_unlock(folio);
++		return err;
++	}
+ 	de->inode = cpu_to_le64(inode->i_ino);
+ 	de->file_type = fs_umode_to_ftype(inode->i_mode);
+ 	nilfs_commit_chunk(folio, mapping, from, to);
+ 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
++	return 0;
+ }
+ 
+ /*
+@@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
+ 		from = (char *)pde - kaddr;
+ 	folio_lock(folio);
+ 	err = nilfs_prepare_chunk(folio, from, to);
+-	BUG_ON(err);
++	if (unlikely(err)) {
++		folio_unlock(folio);
++		goto out;
++	}
+ 	if (pde)
+ 		pde->rec_len = nilfs_rec_len_to_disk(to - from);
+ 	dir->inode = 0;
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 1d836a5540f3b1..e02fae6757f126 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -406,8 +406,10 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ 			err = PTR_ERR(new_de);
+ 			goto out_dir;
+ 		}
+-		nilfs_set_link(new_dir, new_de, new_folio, old_inode);
++		err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);
+ 		folio_release_kmap(new_folio, new_de);
++		if (unlikely(err))
++			goto out_dir;
+ 		nilfs_mark_inode_dirty(new_dir);
+ 		inode_set_ctime_current(new_inode);
+ 		if (dir_de)
+@@ -430,28 +432,27 @@ static int nilfs_rename(struct mnt_idmap *idmap,
+ 	 */
+ 	inode_set_ctime_current(old_inode);
+ 
+-	nilfs_delete_entry(old_de, old_folio);
+-
+-	if (dir_de) {
+-		nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
+-		folio_release_kmap(dir_folio, dir_de);
+-		drop_nlink(old_dir);
++	err = nilfs_delete_entry(old_de, old_folio);
++	if (likely(!err)) {
++		if (dir_de) {
++			err = nilfs_set_link(old_inode, dir_de, dir_folio,
++					     new_dir);
++			drop_nlink(old_dir);
++		}
++		nilfs_mark_inode_dirty(old_dir);
+ 	}
+-	folio_release_kmap(old_folio, old_de);
+-
+-	nilfs_mark_inode_dirty(old_dir);
+ 	nilfs_mark_inode_dirty(old_inode);
+ 
+-	err = nilfs_transaction_commit(old_dir->i_sb);
+-	return err;
+-
+ out_dir:
+ 	if (dir_de)
+ 		folio_release_kmap(dir_folio, dir_de);
+ out_old:
+ 	folio_release_kmap(old_folio, old_de);
+ out:
+-	nilfs_transaction_abort(old_dir->i_sb);
++	if (likely(!err))
++		err = nilfs_transaction_commit(old_dir->i_sb);
++	else
++		nilfs_transaction_abort(old_dir->i_sb);
+ 	return err;
+ }
+ 
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index dff241c53fc583..cb6ed54accd7ba 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
+ int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
+ int nilfs_empty_dir(struct inode *);
+ struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
+-void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+-			   struct folio *, struct inode *);
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++		   struct folio *folio, struct inode *inode);
+ 
+ /* file.c */
+ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 9de2a494a06942..899686d2e5f71d 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -392,6 +392,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
+ /**
+  * nilfs_clear_folio_dirty - discard dirty folio
+  * @folio: dirty folio that will be discarded
++ *
++ * nilfs_clear_folio_dirty() clears working states including dirty state for
++ * the folio and its buffers.  If the folio has buffers, clear only if it is
++ * confirmed that none of the buffer heads are busy (none have valid
++ * references and none are locked).
+  */
+ void nilfs_clear_folio_dirty(struct folio *folio)
+ {
+@@ -399,10 +404,6 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 
+ 	BUG_ON(!folio_test_locked(folio));
+ 
+-	folio_clear_uptodate(folio);
+-	folio_clear_mappedtodisk(folio);
+-	folio_clear_checked(folio);
+-
+ 	head = folio_buffers(folio);
+ 	if (head) {
+ 		const unsigned long clear_bits =
+@@ -410,6 +411,25 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ 			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
+ 			 BIT(BH_Delay));
++		bool busy, invalidated = false;
++
++recheck_buffers:
++		busy = false;
++		bh = head;
++		do {
++			if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
++				busy = true;
++				break;
++			}
++		} while (bh = bh->b_this_page, bh != head);
++
++		if (busy) {
++			if (invalidated)
++				return;
++			invalidate_bh_lrus();
++			invalidated = true;
++			goto recheck_buffers;
++		}
+ 
+ 		bh = head;
+ 		do {
+@@ -419,6 +439,9 @@ void nilfs_clear_folio_dirty(struct folio *folio)
+ 		} while (bh = bh->b_this_page, bh != head);
+ 	}
+ 
++	folio_clear_uptodate(folio);
++	folio_clear_mappedtodisk(folio);
++	folio_clear_checked(folio);
+ 	__nilfs_clear_folio_dirty(folio);
+ }
+ 
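
The busy-buffer handling added above follows a check / flush-once / recheck
shape: if any buffer head still holds a reference or is locked, drop the
per-CPU BH LRU caches a single time and rescan before giving up. A generic
sketch of that retry pattern (the flush_caches() stub stands in for
invalidate_bh_lrus()):

    #include <stdbool.h>

    struct item { int refcount; bool locked; };

    static void flush_caches(void)
    {
            /* In the kernel this would be invalidate_bh_lrus(). */
    }

    static bool all_idle(struct item *items, int n)
    {
            bool flushed = false;
            int i;

    again:
            for (i = 0; i < n; i++) {
                    if (items[i].refcount || items[i].locked) {
                            if (flushed)
                                    return false;  /* genuinely busy */
                            flush_caches();
                            flushed = true;
                            goto again;
                    }
            }
            return true;
    }
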
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 58725183089733..58a598b548fa28 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -734,7 +734,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+ 		if (!head)
+ 			head = create_empty_buffers(folio,
+ 					i_blocksize(inode), 0);
+-		folio_unlock(folio);
+ 
+ 		bh = head;
+ 		do {
+@@ -744,11 +743,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+ 			list_add_tail(&bh->b_assoc_buffers, listp);
+ 			ndirties++;
+ 			if (unlikely(ndirties >= nlimit)) {
++				folio_unlock(folio);
+ 				folio_batch_release(&fbatch);
+ 				cond_resched();
+ 				return ndirties;
+ 			}
+ 		} while (bh = bh->b_this_page, bh != head);
++
++		folio_unlock(folio);
+ 	}
+ 	folio_batch_release(&fbatch);
+ 	cond_resched();
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index 3404e7a30c330c..15d9acd456ecce 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ 	handle = ocfs2_start_trans(osb,
+ 		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
+ 	if (IS_ERR(handle)) {
++		/*
++		 * Mark dquot as inactive to avoid endless cycle in
++		 * quota_release_workfn().
++		 */
++		clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ 		status = PTR_ERR(handle);
+ 		mlog_errno(status);
+ 		goto out_ilock;
+diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
+index 65b2473e22ff9c..fa6b8cb788a1f2 100644
+--- a/fs/pstore/blk.c
++++ b/fs/pstore/blk.c
+@@ -89,7 +89,7 @@ static struct pstore_device_info *pstore_device_info;
+ 		_##name_ = check_size(name, alignsize);		\
+ 	else							\
+ 		_##name_ = 0;					\
+-	/* Synchronize module parameters with resuls. */	\
++	/* Synchronize module parameters with results. */	\
+ 	name = _##name_ / 1024;					\
+ 	dev->zone.name = _##name_;				\
+ }
+@@ -121,7 +121,7 @@ static int __register_pstore_device(struct pstore_device_info *dev)
+ 	if (pstore_device_info)
+ 		return -EBUSY;
+ 
+-	/* zero means not limit on which backends to attempt to store. */
++	/* zero means no limit on which backends attempt to store. */
+ 	if (!dev->flags)
+ 		dev->flags = UINT_MAX;
+ 
+diff --git a/fs/select.c b/fs/select.c
+index e223d1fe9d5541..7da531b1cf6bec 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -786,7 +786,7 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
+ 	}
+ 	return 0;
+ Efault:
+-	user_access_end();
++	user_read_access_end();
+ 	return -EFAULT;
+ }
+ 
+@@ -1355,7 +1355,7 @@ static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
+ 	}
+ 	return 0;
+ Efault:
+-	user_access_end();
++	user_read_access_end();
+ 	return -EFAULT;
+ }
+ 
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index ba79aa2107cc9f..699a3f76d08346 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -1395,7 +1395,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ 				      const struct cifs_fid *cifsfid, u32 *pacllen,
+-				      u32 __maybe_unused unused)
++				      u32 info)
+ {
+ 	struct smb_ntsd *pntsd = NULL;
+ 	unsigned int xid;
+@@ -1407,7 +1407,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ 
+ 	xid = get_xid();
+ 	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+-				pacllen);
++				pacllen, info);
+ 	free_xid(xid);
+ 
+ 	cifs_put_tlink(tlink);
+@@ -1419,7 +1419,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ }
+ 
+ static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+-		const char *path, u32 *pacllen)
++		const char *path, u32 *pacllen, u32 info)
+ {
+ 	struct smb_ntsd *pntsd = NULL;
+ 	int oplock = 0;
+@@ -1446,9 +1446,12 @@ static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		.fid = &fid,
+ 	};
+ 
++	if (info & SACL_SECINFO)
++		oparms.desired_access |= SYSTEM_SECURITY;
++
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (!rc) {
+-		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
++		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen, info);
+ 		CIFSSMBClose(xid, tcon, fid.netfid);
+ 	}
+ 
+@@ -1472,7 +1475,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+ 	if (inode)
+ 		open_file = find_readable_file(CIFS_I(inode), true);
+ 	if (!open_file)
+-		return get_cifs_acl_by_path(cifs_sb, path, pacllen);
++		return get_cifs_acl_by_path(cifs_sb, path, pacllen, info);
+ 
+ 	pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
+ 	cifsFileInfo_put(open_file);
+@@ -1485,7 +1488,7 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ {
+ 	int oplock = 0;
+ 	unsigned int xid;
+-	int rc, access_flags;
++	int rc, access_flags = 0;
+ 	struct cifs_tcon *tcon;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+@@ -1498,10 +1501,12 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ 	tcon = tlink_tcon(tlink);
+ 	xid = get_xid();
+ 
+-	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
+-		access_flags = WRITE_OWNER;
+-	else
+-		access_flags = WRITE_DAC;
++	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
++		access_flags |= WRITE_OWNER;
++	if (aclflag & CIFS_ACL_SACL)
++		access_flags |= SYSTEM_SECURITY;
++	if (aclflag & CIFS_ACL_DACL)
++		access_flags |= WRITE_DAC;
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index d26f9bbb53829a..2d3b7b68f7360d 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -560,7 +560,7 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+ 		const struct nls_table *nls_codepage,
+ 		struct cifs_sb_info *cifs_sb);
+ extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
+-			__u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen);
++			__u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen, __u32 info);
+ extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
+ 			struct smb_ntsd *pntsd, __u32 len, int aclflag);
+ extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon,
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index ab0b949924d77d..a993d4ac584117 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -3375,7 +3375,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
+ /* Get Security Descriptor (by handle) from remote server for a file or dir */
+ int
+ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+-		  struct smb_ntsd **acl_inf, __u32 *pbuflen)
++		  struct smb_ntsd **acl_inf, __u32 *pbuflen, __u32 info)
+ {
+ 	int rc = 0;
+ 	int buf_type = 0;
+@@ -3398,7 +3398,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+ 	pSMB->MaxSetupCount = 0;
+ 	pSMB->Fid = fid; /* file handle always le */
+ 	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
+-				     CIFS_ACL_DACL);
++				     CIFS_ACL_DACL | info);
+ 	pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
+ 	inc_rfc1001_len(pSMB, 11);
+ 	iov[0].iov_base = (char *)pSMB;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 273358d20a46c9..50f96259d9adc2 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -413,7 +413,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ 		cifsFile->invalidHandle = false;
+ 	} else if ((rc == -EOPNOTSUPP) &&
+ 		   (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+-		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++		cifs_autodisable_serverino(cifs_sb);
+ 		goto ffirst_retry;
+ 	}
+ error_exit:
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index d88b41133e00c6..b387dfbaf16b05 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -747,11 +747,12 @@ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
+ 	return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data);
+ }
+ 
+-static void wsl_to_fattr(struct cifs_open_info_data *data,
++static bool wsl_to_fattr(struct cifs_open_info_data *data,
+ 			 struct cifs_sb_info *cifs_sb,
+ 			 u32 tag, struct cifs_fattr *fattr)
+ {
+ 	struct smb2_file_full_ea_info *ea;
++	bool have_xattr_dev = false;
+ 	u32 next = 0;
+ 
+ 	switch (tag) {
+@@ -794,13 +795,24 @@ static void wsl_to_fattr(struct cifs_open_info_data *data,
+ 			fattr->cf_uid = wsl_make_kuid(cifs_sb, v);
+ 		else if (!strncmp(name, SMB2_WSL_XATTR_GID, nlen))
+ 			fattr->cf_gid = wsl_make_kgid(cifs_sb, v);
+-		else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen))
++		else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen)) {
++			/* File type in reparse point tag and in xattr mode must match. */
++			if (S_DT(fattr->cf_mode) != S_DT(le32_to_cpu(*(__le32 *)v)))
++				return false;
+ 			fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v);
+-		else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen))
++		} else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen)) {
+ 			fattr->cf_rdev = reparse_mkdev(v);
++			have_xattr_dev = true;
++		}
+ 	} while (next);
+ out:
++
++	/* Major and minor numbers for char and block devices are mandatory. */
++	if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
++		return false;
++
+ 	fattr->cf_dtype = S_DT(fattr->cf_mode);
++	return true;
+ }
+ 
+ static bool posix_reparse_to_fattr(struct cifs_sb_info *cifs_sb,
+@@ -874,7 +886,9 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
+ 	case IO_REPARSE_TAG_AF_UNIX:
+ 	case IO_REPARSE_TAG_LX_CHR:
+ 	case IO_REPARSE_TAG_LX_BLK:
+-		wsl_to_fattr(data, cifs_sb, tag, fattr);
++		ok = wsl_to_fattr(data, cifs_sb, tag, fattr);
++		if (!ok)
++			return false;
+ 		break;
+ 	case IO_REPARSE_TAG_NFS:
+ 		ok = posix_reparse_to_fattr(cifs_sb, fattr, data);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 87cb1872db28b0..9790ff2cc5b32d 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -658,7 +658,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 
+ 	while (bytes_left >= (ssize_t)sizeof(*p)) {
+ 		memset(&tmp_iface, 0, sizeof(tmp_iface));
+-		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
++		/* default to 1Gbps when link speed is unset */
++		tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
+ 		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+ 		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+ 
+diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
+index 5cc69beaa62ecf..10a86c02a8b328 100644
+--- a/fs/ubifs/debug.c
++++ b/fs/ubifs/debug.c
+@@ -946,16 +946,20 @@ void ubifs_dump_tnc(struct ubifs_info *c)
+ 
+ 	pr_err("\n");
+ 	pr_err("(pid %d) start dumping TNC tree\n", current->pid);
+-	znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
+-	level = znode->level;
+-	pr_err("== Level %d ==\n", level);
+-	while (znode) {
+-		if (level != znode->level) {
+-			level = znode->level;
+-			pr_err("== Level %d ==\n", level);
++	if (c->zroot.znode) {
++		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
++		level = znode->level;
++		pr_err("== Level %d ==\n", level);
++		while (znode) {
++			if (level != znode->level) {
++				level = znode->level;
++				pr_err("== Level %d ==\n", level);
++			}
++			ubifs_dump_znode(c, znode);
++			znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
+ 		}
+-		ubifs_dump_znode(c, znode);
+-		znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
++	} else {
++		pr_err("empty TNC tree in memory\n");
+ 	}
+ 	pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
+ }
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index aa63b8efd78228..b9444ff5c8a586 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -663,9 +663,8 @@ xfs_buf_find_insert(
+ 		spin_unlock(&bch->bc_lock);
+ 		goto out_free_buf;
+ 	}
+-	if (bp) {
++	if (bp && atomic_inc_not_zero(&bp->b_hold)) {
+ 		/* found an existing buffer */
+-		atomic_inc(&bp->b_hold);
+ 		spin_unlock(&bch->bc_lock);
+ 		error = xfs_buf_find_lock(bp, flags);
+ 		if (error)
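
The xfs_buf change above closes a lookup/teardown race: a found buffer is
reused only if its hold count can be raised from a non-zero value, so a
buffer whose last reference has already been dropped is never resurrected.
A C11-atomics sketch of the inc-not-zero primitive it relies on:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only while the object is still alive (count > 0). */
    static bool hold_if_alive(atomic_uint *hold)
    {
            unsigned int old = atomic_load(hold);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(hold, &old, old + 1))
                            return true;   /* reference taken */
                    /* CAS failure reloaded 'old'; retry. */
            }
            return false;  /* being freed; caller must retry the lookup */
    }
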
+diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
+index 3d0c6402cb3634..6b10390ad3d2c0 100644
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -1079,7 +1079,7 @@ xlog_recover_buf_commit_pass2(
+ 		error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f,
+ 				current_lsn);
+ 		if (error)
+-			goto out_release;
++			goto out_writebuf;
+ 
+ 		/* Update the rt superblock if we have one. */
+ 		if (xfs_has_rtsb(mp) && mp->m_rtsb_bp) {
+@@ -1096,6 +1096,15 @@ xlog_recover_buf_commit_pass2(
+ 		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
+ 	}
+ 
++	/*
++	 * A buffer held by a buf log item during 'normal' buffer recovery must
++	 * be committed through the buffer I/O submission path to ensure proper
++	 * release. When an error occurs during sb buffer recovery, the log
++	 * shutdown will be done before submitting the buffer list so that
++	 * buffers can be released correctly through the ioend failure path.
++	 */
++out_writebuf:
++
+ 	/*
+ 	 * Perform delayed write on the buffer.  Asynchronous writes will be
+ 	 * slower when taking into account all the buffers to be flushed.
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 201c26322edea4..84b69f686ba82e 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1316,7 +1316,8 @@ xfs_dquot_read_buf(
+ 
+ /*
+  * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
+- * dqflush, since dqflush can be called from reclaim context.
++ * dqflush, since dqflush can be called from reclaim context.  Caller must hold
++ * the dqlock.
+  */
+ int
+ xfs_dquot_attach_buf(
+@@ -1337,13 +1338,16 @@ xfs_dquot_attach_buf(
+ 			return error;
+ 
+ 		/*
+-		 * Attach the dquot to the buffer so that the AIL does not have
+-		 * to read the dquot buffer to push this item.
++		 * Hold the dquot buffer so that we retain our ref to it after
++		 * detaching it from the transaction, then give that ref to the
++		 * dquot log item so that the AIL does not have to read the
++		 * dquot buffer to push this item.
+ 		 */
+ 		xfs_buf_hold(bp);
++		xfs_trans_brelse(tp, bp);
++
+ 		spin_lock(&qlip->qli_lock);
+ 		lip->li_buf = bp;
+-		xfs_trans_brelse(tp, bp);
+ 	}
+ 	qlip->qli_dirty = true;
+ 	spin_unlock(&qlip->qli_lock);
+diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
+index fa50e5308292d3..0b0b0f31aca274 100644
+--- a/fs/xfs/xfs_notify_failure.c
++++ b/fs/xfs/xfs_notify_failure.c
+@@ -153,6 +153,79 @@ xfs_dax_notify_failure_thaw(
+ 	thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ }
+ 
++static int
++xfs_dax_translate_range(
++	struct xfs_buftarg	*btp,
++	u64			offset,
++	u64			len,
++	xfs_daddr_t		*daddr,
++	uint64_t		*bblen)
++{
++	u64			dev_start = btp->bt_dax_part_off;
++	u64			dev_len = bdev_nr_bytes(btp->bt_bdev);
++	u64			dev_end = dev_start + dev_len - 1;
++
++	/* Notify failure on the whole device. */
++	if (offset == 0 && len == U64_MAX) {
++		offset = dev_start;
++		len = dev_len;
++	}
++
++	/* Ignore the range out of filesystem area */
++	if (offset + len - 1 < dev_start)
++		return -ENXIO;
++	if (offset > dev_end)
++		return -ENXIO;
++
++	/* Calculate the real range when it touches the boundary */
++	if (offset > dev_start)
++		offset -= dev_start;
++	else {
++		len -= dev_start - offset;
++		offset = 0;
++	}
++	if (offset + len - 1 > dev_end)
++		len = dev_end - offset + 1;
++
++	*daddr = BTOBB(offset);
++	*bblen = BTOBB(len);
++	return 0;
++}
++
++static int
++xfs_dax_notify_logdev_failure(
++	struct xfs_mount	*mp,
++	u64			offset,
++	u64			len,
++	int			mf_flags)
++{
++	xfs_daddr_t		daddr;
++	uint64_t		bblen;
++	int			error;
++
++	/*
++	 * Return ENXIO instead of shutting down the filesystem if the failed
++	 * region is beyond the end of the log.
++	 */
++	error = xfs_dax_translate_range(mp->m_logdev_targp,
++			offset, len, &daddr, &bblen);
++	if (error)
++		return error;
++
++	/*
++	 * In the pre-remove case the failure notification is attempting to
++	 * trigger a force unmount.  The expectation is that the device is
++	 * still present, but its removal is in progress and cannot be
++	 * cancelled, so proceed with accessing the log device.
++	 */
++	if (mf_flags & MF_MEM_PRE_REMOVE)
++		return 0;
++
++	xfs_err(mp, "ondisk log corrupt, shutting down fs!");
++	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
++	return -EFSCORRUPTED;
++}
++
+ static int
+ xfs_dax_notify_ddev_failure(
+ 	struct xfs_mount	*mp,
+@@ -263,8 +336,9 @@ xfs_dax_notify_failure(
+ 	int			mf_flags)
+ {
+ 	struct xfs_mount	*mp = dax_holder(dax_dev);
+-	u64			ddev_start;
+-	u64			ddev_end;
++	xfs_daddr_t		daddr;
++	uint64_t		bblen;
++	int			error;
+ 
+ 	if (!(mp->m_super->s_flags & SB_BORN)) {
+ 		xfs_warn(mp, "filesystem is not ready for notify_failure()!");
+@@ -279,17 +353,7 @@ xfs_dax_notify_failure(
+ 
+ 	if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
+ 	    mp->m_logdev_targp != mp->m_ddev_targp) {
+-		/*
+-		 * In the pre-remove case the failure notification is attempting
+-		 * to trigger a force unmount.  The expectation is that the
+-		 * device is still present, but its removal is in progress and
+-		 * can not be cancelled, proceed with accessing the log device.
+-		 */
+-		if (mf_flags & MF_MEM_PRE_REMOVE)
+-			return 0;
+-		xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+-		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+-		return -EFSCORRUPTED;
++		return xfs_dax_notify_logdev_failure(mp, offset, len, mf_flags);
+ 	}
+ 
+ 	if (!xfs_has_rmapbt(mp)) {
+@@ -297,33 +361,12 @@ xfs_dax_notify_failure(
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	ddev_start = mp->m_ddev_targp->bt_dax_part_off;
+-	ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+-
+-	/* Notify failure on the whole device. */
+-	if (offset == 0 && len == U64_MAX) {
+-		offset = ddev_start;
+-		len = bdev_nr_bytes(mp->m_ddev_targp->bt_bdev);
+-	}
+-
+-	/* Ignore the range out of filesystem area */
+-	if (offset + len - 1 < ddev_start)
+-		return -ENXIO;
+-	if (offset > ddev_end)
+-		return -ENXIO;
+-
+-	/* Calculate the real range when it touches the boundary */
+-	if (offset > ddev_start)
+-		offset -= ddev_start;
+-	else {
+-		len -= ddev_start - offset;
+-		offset = 0;
+-	}
+-	if (offset + len - 1 > ddev_end)
+-		len = ddev_end - offset + 1;
++	error = xfs_dax_translate_range(mp->m_ddev_targp, offset, len, &daddr,
++			&bblen);
++	if (error)
++		return error;
+ 
+-	return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
+-			mf_flags);
++	return xfs_dax_notify_ddev_failure(mp, daddr, bblen, mf_flags);
+ }
+ 
+ const struct dax_holder_operations xfs_dax_holder_operations = {
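
xfs_dax_translate_range() above clamps the notified byte range to the
partition backing the filesystem and converts it to 512-byte basic blocks
(BTOBB() rounds a byte count up to basic blocks). A standalone sketch of
the same arithmetic, assuming the 512-byte basic-block size; the
offset==0/len==U64_MAX whole-device case is omitted:

#include <stdint.h>
#include <stdio.h>

#define BBSHIFT		9	/* 512-byte basic blocks */
#define BTOBB(b)	(((uint64_t)(b) + (1 << BBSHIFT) - 1) >> BBSHIFT)

static int translate(uint64_t dev_start, uint64_t dev_len,
		     uint64_t offset, uint64_t len,
		     uint64_t *daddr, uint64_t *bblen)
{
	uint64_t dev_end = dev_start + dev_len - 1;

	if (offset + len - 1 < dev_start || offset > dev_end)
		return -1;	/* range misses the fs entirely */

	if (offset > dev_start) {
		offset -= dev_start;
	} else {
		len -= dev_start - offset;
		offset = 0;
	}
	if (offset + len - 1 > dev_end)
		len = dev_end - offset + 1;

	*daddr = BTOBB(offset);
	*bblen = BTOBB(len);
	return 0;
}

int main(void)
{
	uint64_t daddr, bblen;

	/* partition starts 1 MiB into the device; failure spans its start */
	if (!translate(1 << 20, 8 << 20, 0, 2 << 20, &daddr, &bblen))
		printf("daddr=%llu bblen=%llu\n",
		       (unsigned long long)daddr, (unsigned long long)bblen);
	return 0;
}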
+diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
+index 847ba29630e9d8..db5b8afd9d1b97 100644
+--- a/fs/xfs/xfs_qm_bhv.c
++++ b/fs/xfs/xfs_qm_bhv.c
+@@ -32,21 +32,28 @@ xfs_fill_statvfs_from_dquot(
+ 	limit = blkres->softlimit ?
+ 		blkres->softlimit :
+ 		blkres->hardlimit;
+-	if (limit && statp->f_blocks > limit) {
+-		statp->f_blocks = limit;
+-		statp->f_bfree = statp->f_bavail =
+-			(statp->f_blocks > blkres->reserved) ?
+-			 (statp->f_blocks - blkres->reserved) : 0;
++	if (limit) {
++		uint64_t	remaining = 0;
++
++		if (limit > blkres->reserved)
++			remaining = limit - blkres->reserved;
++
++		statp->f_blocks = min(statp->f_blocks, limit);
++		statp->f_bfree = min(statp->f_bfree, remaining);
++		statp->f_bavail = min(statp->f_bavail, remaining);
+ 	}
+ 
+ 	limit = dqp->q_ino.softlimit ?
+ 		dqp->q_ino.softlimit :
+ 		dqp->q_ino.hardlimit;
+-	if (limit && statp->f_files > limit) {
+-		statp->f_files = limit;
+-		statp->f_ffree =
+-			(statp->f_files > dqp->q_ino.reserved) ?
+-			 (statp->f_files - dqp->q_ino.reserved) : 0;
++	if (limit) {
++		uint64_t	remaining = 0;
++
++		if (limit > dqp->q_ino.reserved)
++			remaining = limit - dqp->q_ino.reserved;
++
++		statp->f_files = min(statp->f_files, limit);
++		statp->f_ffree = min(statp->f_ffree, remaining);
+ 	}
+ }
+ 
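
The rewritten clamping above handles over-reservation: the headroom below
the quota limit is computed first, saturating at zero, and each statvfs
field is then clamped with min(), so reserved > limit can no longer
underflow the free counts. A small standalone sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t limit = 100, reserved = 120;	/* over-reserved quota */
	uint64_t f_blocks = 1000, f_bfree = 500, f_bavail = 500;
	uint64_t remaining = limit > reserved ? limit - reserved : 0;

	f_blocks = MIN(f_blocks, limit);
	f_bfree = MIN(f_bfree, remaining);
	f_bavail = MIN(f_bavail, remaining);

	/* prints 100 0 0 -- no wraparound despite reserved > limit */
	printf("%llu %llu %llu\n", (unsigned long long)f_blocks,
	       (unsigned long long)f_bfree, (unsigned long long)f_bavail);
	return 0;
}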
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index d076ebd19a61e8..78b24b09048885 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -763,6 +763,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ 						     *event_status))
+ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
++ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
+index 6c685067288b54..c393fad3a3469c 100644
+--- a/include/dt-bindings/clock/imx93-clock.h
++++ b/include/dt-bindings/clock/imx93-clock.h
+@@ -209,5 +209,6 @@
+ #define IMX91_CLK_ENET2_REGULAR     204
+ #define IMX91_CLK_ENET2_REGULAR_GATE		205
+ #define IMX91_CLK_ENET1_QOS_TSN_GATE		206
++#define IMX93_CLK_SPDIF_IPG		207
+ 
+ #endif
+diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
+index 0bbbe537c5f9fb..a946e0203e6d60 100644
+--- a/include/linux/alloc_tag.h
++++ b/include/linux/alloc_tag.h
+@@ -224,9 +224,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+ 
+ #define alloc_hooks_tag(_tag, _do_alloc)				\
+ ({									\
+-	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
+-	typeof(_do_alloc) _res = _do_alloc;				\
+-	alloc_tag_restore(_tag, _old);					\
++	typeof(_do_alloc) _res;						\
++	if (mem_alloc_profiling_enabled()) {				\
++		struct alloc_tag * __maybe_unused _old;			\
++		_old = alloc_tag_save(_tag);				\
++		_res = _do_alloc;					\
++		alloc_tag_restore(_tag, _old);				\
++	} else								\
++		_res = _do_alloc;					\
+ 	_res;								\
+ })
+ 
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 378d3a1a22fca6..495813277597fb 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -947,6 +947,8 @@ queue_limits_start_update(struct request_queue *q)
+ 	mutex_lock(&q->limits_lock);
+ 	return q->limits;
+ }
++int queue_limits_commit_update_frozen(struct request_queue *q,
++		struct queue_limits *lim);
+ int queue_limits_commit_update(struct request_queue *q,
+ 		struct queue_limits *lim);
+ int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+@@ -1699,6 +1701,15 @@ struct io_comp_batch {
+ 	void (*complete)(struct io_comp_batch *);
+ };
+ 
++static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
++						struct queue_limits *limits)
++{
++	unsigned int alignment = max(limits->atomic_write_hw_unit_min,
++				limits->atomic_write_hw_boundary);
++
++	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
++}
++
+ static inline bool bdev_can_atomic_write(struct block_device *bdev)
+ {
+ 	struct request_queue *bd_queue = bdev->bd_queue;
+@@ -1707,15 +1718,9 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
+ 	if (!limits->atomic_write_unit_min)
+ 		return false;
+ 
+-	if (bdev_is_partition(bdev)) {
+-		sector_t bd_start_sect = bdev->bd_start_sect;
+-		unsigned int alignment =
+-			max(limits->atomic_write_unit_min,
+-			    limits->atomic_write_hw_boundary);
+-
+-		if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
+-			return false;
+-	}
++	if (bdev_is_partition(bdev))
++		return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
++							limits);
+ 
+ 	return true;
+ }
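
blk_atomic_write_start_sect_aligned() makes the partition rule reusable:
an atomic write may only start on a sector aligned to the larger of the
minimum atomic write unit and the hardware boundary, with the byte value
converted to sectors by the 512-byte sector shift. A standalone sketch,
assuming both limits are powers of two as IS_ALIGNED() requires:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT		9
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)	/* a: power of 2 */
#define MAX(a, b)		((a) > (b) ? (a) : (b))

static bool start_sect_aligned(uint64_t sector, uint32_t unit_min,
			       uint32_t hw_boundary)
{
	uint32_t alignment = MAX(unit_min, hw_boundary);	/* bytes */

	return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}

int main(void)
{
	/* a 64 KiB boundary means a 128-sector start alignment */
	printf("%d\n", start_sect_aligned(256, 4096, 65536));	/* 1 */
	printf("%d\n", start_sect_aligned(100, 4096, 65536));	/* 0 */
	return 0;
}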
+diff --git a/include/linux/btf.h b/include/linux/btf.h
+index 4214e76c916861..2a08a2b55592ee 100644
+--- a/include/linux/btf.h
++++ b/include/linux/btf.h
+@@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
+ 	return btf_type_is_int(t) || btf_type_is_enum(t);
+ }
+ 
++static inline bool btf_type_is_fwd(const struct btf_type *t)
++{
++	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
++}
++
+ static inline bool btf_type_is_typedef(const struct btf_type *t)
+ {
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+diff --git a/include/linux/coredump.h b/include/linux/coredump.h
+index 45e598fe34766f..77e6e195d1d687 100644
+--- a/include/linux/coredump.h
++++ b/include/linux/coredump.h
+@@ -52,8 +52,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo);
+ #define __COREDUMP_PRINTK(Level, Format, ...) \
+ 	do {	\
+ 		char comm[TASK_COMM_LEN];	\
+-	\
+-		get_task_comm(comm, current);	\
++		/* This will always be NUL terminated. */ \
++		memcpy(comm, current->comm, sizeof(comm)); \
+ 		printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n",	\
+ 			task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__);	\
+ 	} while (0)	\
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index d11e9c9a5f1592..cdc0dc13c87fed 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -218,6 +218,7 @@ struct hid_item {
+ #define HID_GD_DOWN		0x00010091
+ #define HID_GD_RIGHT		0x00010092
+ #define HID_GD_LEFT		0x00010093
++#define HID_GD_DO_NOT_DISTURB	0x0001009b
+ /* Microsoft Win8 Wireless Radio Controls CA usage codes */
+ #define HID_GD_RFKILL_BTN	0x000100c6
+ #define HID_GD_RFKILL_LED	0x000100c7
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 05dedc45505ce1..a96db2915aabea 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -5055,28 +5055,24 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
+ {
+ 	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ 	u16 control = le16_to_cpu(mle->control);
+-	u8 common = 0;
+ 
+ 	switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ 	case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ 	case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ 	case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ 	case IEEE80211_ML_CONTROL_TYPE_RECONF:
++	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ 		/*
+ 		 * The length is the first octet pointed by mle->variable so no
+ 		 * need to add anything
+ 		 */
+ 		break;
+-	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+-		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+-			common += ETH_ALEN;
+-		return common;
+ 	default:
+ 		WARN_ON(1);
+ 		return 0;
+ 	}
+ 
+-	return sizeof(*mle) + common + mle->variable[0];
++	return sizeof(*mle) + mle->variable[0];
+ }
+ 
+ /**
+@@ -5314,8 +5310,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+ 		check_common_len = true;
+ 		break;
+ 	case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+-		if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+-			common += ETH_ALEN;
++		common = ETH_ALEN + 1;
+ 		break;
+ 	default:
+ 		/* we don't know this type */
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index c3f075e8f60cb6..1c6a6c1704d8d0 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -57,10 +57,10 @@ static inline void *dereference_symbol_descriptor(void *ptr)
+ 
+ 	preempt_disable();
+ 	mod = __module_address((unsigned long)ptr);
+-	preempt_enable();
+ 
+ 	if (mod)
+ 		ptr = dereference_module_function_descriptor(mod, ptr);
++	preempt_enable();
+ #endif
+ 	return ptr;
+ }
+diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
+index 9dd4bf1572553f..58a2401e4b551b 100644
+--- a/include/linux/mroute_base.h
++++ b/include/linux/mroute_base.h
+@@ -146,9 +146,9 @@ struct mr_mfc {
+ 			unsigned long last_assert;
+ 			int minvif;
+ 			int maxvif;
+-			unsigned long bytes;
+-			unsigned long pkt;
+-			unsigned long wrong_if;
++			atomic_long_t bytes;
++			atomic_long_t pkt;
++			atomic_long_t wrong_if;
+ 			unsigned long lastuse;
+ 			unsigned char ttls[MAXVIFS];
+ 			refcount_t refcount;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ecc686409161ea..3928e91bb5905b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2259,7 +2259,7 @@ struct net_device {
+ 	void 			*atalk_ptr;
+ #endif
+ #if IS_ENABLED(CONFIG_AX25)
+-	void			*ax25_ptr;
++	struct ax25_dev	__rcu	*ax25_ptr;
+ #endif
+ #if IS_ENABLED(CONFIG_CFG80211)
+ 	struct wireless_dev	*ieee80211_ptr;
+diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
+index 5fc02df882521e..a541c3a0288750 100644
+--- a/include/linux/nfs_common.h
++++ b/include/linux/nfs_common.h
+@@ -9,9 +9,10 @@
+ #include <uapi/linux/nfs.h>
+ 
+ /* Mapping from NFS error code to "errno" error code. */
+-#define errno_NFSERR_IO EIO
+ 
+ int nfs_stat_to_errno(enum nfs_stat status);
+ int nfs4_stat_to_errno(int stat);
+ 
++__u32 nfs_localio_errno_to_nfs4_stat(int errno);
++
+ #endif /* _LINUX_NFS_COMMON_H */
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index cb99ec8c9e96f6..f7c0a3f2f502d0 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1287,12 +1287,18 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
+ }
+ 
+ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
++					     struct perf_event *event,
+ 					     struct perf_raw_record *raw)
+ {
+ 	struct perf_raw_frag *frag = &raw->frag;
+ 	u32 sum = 0;
+ 	int size;
+ 
++	if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
++		return;
++	if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
++		return;
++
+ 	do {
+ 		sum += frag->size;
+ 		if (perf_raw_frag_last(frag))
+diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
+index 78c8ac4951b581..c7abce28ed2995 100644
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -56,8 +56,7 @@ struct pps_device {
+ 
+ 	unsigned int id;			/* PPS source unique ID */
+ 	void const *lookup_cookie;		/* For pps_lookup_dev() only */
+-	struct cdev cdev;
+-	struct device *dev;
++	struct device dev;
+ 	struct fasync_struct *async_queue;	/* fasync method */
+ 	spinlock_t lock;
+ };
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index fd037c127bb071..551329220e4f34 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -615,15 +615,14 @@ static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp
+ /*
+  * Note: producer lock is nested within consumer lock, so if you
+  * resize you must make sure all uses nest correctly.
+- * In particular if you consume ring in interrupt or BH context, you must
+- * disable interrupts/BH when doing so.
++ * In particular if you consume ring in BH context, you must
++ * disable BH when doing so.
+  */
+-static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+-						  unsigned int nrings,
+-						  int size,
+-						  gfp_t gfp, void (*destroy)(void *))
++static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
++						     unsigned int nrings,
++						     int size, gfp_t gfp,
++						     void (*destroy)(void *))
+ {
+-	unsigned long flags;
+ 	void ***queues;
+ 	int i;
+ 
+@@ -638,12 +637,12 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+ 	}
+ 
+ 	for (i = 0; i < nrings; ++i) {
+-		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
++		spin_lock_bh(&(rings[i])->consumer_lock);
+ 		spin_lock(&(rings[i])->producer_lock);
+ 		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
+ 						  size, gfp, destroy);
+ 		spin_unlock(&(rings[i])->producer_lock);
+-		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
++		spin_unlock_bh(&(rings[i])->consumer_lock);
+ 	}
+ 
+ 	for (i = 0; i < nrings; ++i)
+@@ -662,8 +661,8 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
+ noqueues:
+ 	return -ENOMEM;
+ }
+-#define ptr_ring_resize_multiple(...) \
+-		alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__))
++#define ptr_ring_resize_multiple_bh(...) \
++		alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
+ 
+ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
+ {
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 6853e29d967413..a2df509056ac7a 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -347,6 +347,23 @@ struct pwm_chip {
+ 	struct pwm_device pwms[] __counted_by(npwm);
+ };
+ 
++/**
++ * pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
++ * @chip: The pwm_chip to test
++ *
++ * Returns true iff the pwm chip support the waveform functions like
++ * pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
++ */
++static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
++{
++	/*
++	 * Only check for .write_waveform(). If that is available,
++	 * .round_waveform_tohw() and .round_waveform_fromhw() are asserted
++	 * to be available, too, in pwmchip_add().
++	 */
++	return chip->ops->write_waveform != NULL;
++}
++
+ static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
+ {
+ 	return chip->dev.parent;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 64934e0830af34..949b53e0accf23 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -944,6 +944,7 @@ struct task_struct {
+ 	unsigned			sched_reset_on_fork:1;
+ 	unsigned			sched_contributes_to_load:1;
+ 	unsigned			sched_migrated:1;
++	unsigned			sched_task_hot:1;
+ 
+ 	/* Force alignment to the next boundary: */
+ 	unsigned			:0;
+diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
+index 926496c9cc9c3b..bf178238a3083d 100644
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -199,17 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+ 	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
+ }
+ 
+-static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,
+-						   int nrings, unsigned int size,
+-						   gfp_t gfp)
++static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
++						      int nrings,
++						      unsigned int size,
++						      gfp_t gfp)
+ {
+ 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
+-	return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,
+-					       nrings, size, gfp,
+-					       __skb_array_destroy_skb);
++	return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
++					          nrings, size, gfp,
++					          __skb_array_destroy_skb);
+ }
+-#define skb_array_resize_multiple(...)	\
+-		alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__))
++#define skb_array_resize_multiple_bh(...)	\
++		alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
+ 
+ static inline void skb_array_cleanup(struct skb_array *a)
+ {
+diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
+index 061da9546a8131..b22e659f81ba54 100644
+--- a/include/linux/usb/tcpm.h
++++ b/include/linux/usb/tcpm.h
+@@ -163,7 +163,8 @@ struct tcpc_dev {
+ 	void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ 	int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ 	int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+-						 bool pps_active, u32 requested_vbus_voltage);
++						 bool pps_active, u32 requested_vbus_voltage,
++						 u32 pps_apdo_min_voltage);
+ 	bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ 	void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
+ 	void (*check_contaminant)(struct tcpc_dev *dev);
+diff --git a/include/net/ax25.h b/include/net/ax25.h
+index cb622d84cd0cc4..4ee141aae0a29d 100644
+--- a/include/net/ax25.h
++++ b/include/net/ax25.h
+@@ -231,6 +231,7 @@ typedef struct ax25_dev {
+ #endif
+ 	refcount_t		refcount;
+ 	bool device_up;
++	struct rcu_head		rcu;
+ } ax25_dev;
+ 
+ typedef struct ax25_cb {
+@@ -290,9 +291,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+ 
+ static inline void ax25_dev_put(ax25_dev *ax25_dev)
+ {
+-	if (refcount_dec_and_test(&ax25_dev->refcount)) {
+-		kfree(ax25_dev);
+-	}
++	if (refcount_dec_and_test(&ax25_dev->refcount))
++		kfree_rcu(ax25_dev, rcu);
+ }
+ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -335,9 +335,9 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+ extern spinlock_t ax25_dev_lock;
+ 
+ #if IS_ENABLED(CONFIG_AX25)
+-static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
++static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev)
+ {
+-	return dev->ax25_ptr;
++	return rcu_dereference_rtnl(dev->ax25_ptr);
+ }
+ #endif
+ 
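
With ax25_ptr marked __rcu and the struct freed through kfree_rcu(),
readers no longer need the refcount just to look at the device:
rcu_dereference_rtnl() is satisfied by either an RCU read-side section or
holding RTNL, and the grace period keeps the object alive for any reader
that saw the old pointer. A generic reader-side sketch (use() is a
placeholder):

rcu_read_lock();
ax25_dev = rcu_dereference_rtnl(dev->ax25_ptr);
if (ax25_dev)
	use(ax25_dev);	/* valid until rcu_read_unlock() */
rcu_read_unlock();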
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 74ff688568a0c6..f475757daafba9 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
+ 
+ /* can be called with or without local BH being disabled */
+ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+-			       const struct inetpeer_addr *daddr,
+-			       int create);
++			       const struct inetpeer_addr *daddr);
+ 
+ static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
+ 						__be32 v4daddr,
+-						int vif, int create)
++						int vif)
+ {
+ 	struct inetpeer_addr daddr;
+ 
+ 	daddr.a4.addr = v4daddr;
+ 	daddr.a4.vif = vif;
+ 	daddr.family = AF_INET;
+-	return inet_getpeer(base, &daddr, create);
++	return inet_getpeer(base, &daddr);
+ }
+ 
+ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
+-						const struct in6_addr *v6daddr,
+-						int create)
++						const struct in6_addr *v6daddr)
+ {
+ 	struct inetpeer_addr daddr;
+ 
+ 	daddr.a6 = *v6daddr;
+ 	daddr.family = AF_INET6;
+-	return inet_getpeer(base, &daddr, create);
++	return inet_getpeer(base, &daddr);
+ }
+ 
+ static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 0027beca5cd503..f6958118986ac4 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -442,6 +442,9 @@ struct nft_set_ext;
+  *	@remove: remove element from set
+  *	@walk: iterate over all set elements
+  *	@get: get set elements
++ *	@ksize: kernel set size
++ *	@usize: userspace set size
++ *	@adjust_maxsize: delta to adjust maximum set size
+  *	@commit: commit set elements
+  *	@abort: abort set elements
+  *	@privsize: function to return size of set private data
+@@ -495,6 +498,9 @@ struct nft_set_ops {
+ 					       const struct nft_set *set,
+ 					       const struct nft_set_elem *elem,
+ 					       unsigned int flags);
++	u32				(*ksize)(u32 size);
++	u32				(*usize)(u32 size);
++	u32				(*adjust_maxsize)(const struct nft_set *set);
+ 	void				(*commit)(struct nft_set *set);
+ 	void				(*abort)(const struct nft_set *set);
+ 	u64				(*privsize)(const struct nlattr * const nla[],
+diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
+index c022c410abe39d..386efddd2aac08 100644
+--- a/include/net/page_pool/types.h
++++ b/include/net/page_pool/types.h
+@@ -236,7 +236,6 @@ struct page_pool {
+ 	struct {
+ 		struct hlist_node list;
+ 		u64 detach_time;
+-		u32 napi_id;
+ 		u32 id;
+ 	} user;
+ };
+diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
+index cf199af85c52e2..4eb0ebb9e76c7b 100644
+--- a/include/net/pkt_cls.h
++++ b/include/net/pkt_cls.h
+@@ -75,11 +75,11 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
+ }
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+-DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
++DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
+ 
+ static inline bool tcf_block_bypass_sw(struct tcf_block *block)
+ {
+-	return block && block->bypass_wanted;
++	return block && !atomic_read(&block->useswcnt);
+ }
+ #endif
+ 
+@@ -760,6 +760,15 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
+ 		cls_common->extack = extack;
+ }
+ 
++static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
++{
++	if (tp->usesw)
++		return;
++	if (tc_skip_sw(flags) && tc_in_hw(flags))
++		return;
++	tp->usesw = true;
++}
++
+ #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+ {
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 5d74fa7e694cc8..1e6324f0d4efda 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -425,6 +425,7 @@ struct tcf_proto {
+ 	spinlock_t		lock;
+ 	bool			deleting;
+ 	bool			counted;
++	bool			usesw;
+ 	refcount_t		refcnt;
+ 	struct rcu_head		rcu;
+ 	struct hlist_node	destroy_ht_node;
+@@ -474,9 +475,7 @@ struct tcf_block {
+ 	struct flow_block flow_block;
+ 	struct list_head owner_list;
+ 	bool keep_dst;
+-	bool bypass_wanted;
+-	atomic_t filtercnt; /* Number of filters */
+-	atomic_t skipswcnt; /* Number of skip_sw filters */
++	atomic_t useswcnt;
+ 	atomic_t offloadcnt; /* Number of oddloaded filters */
+ 	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ 	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 32c09e85a64ce2..2c4eda6a859664 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1224,9 +1224,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ 
+ 	if (xo) {
+ 		x = xfrm_input_state(skb);
+-		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+-			return (xo->flags & CRYPTO_DONE) &&
+-			       (xo->status & CRYPTO_SUCCESS);
++		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
++			bool check = (xo->flags & CRYPTO_DONE) &&
++				     (xo->status & CRYPTO_SUCCESS);
++
++			/* The packets here are plain ones and secpath was
++			 * needed to indicate that hardware already handled
++			 * them and there is no need to do anything in addition.
++			 *
++			 * Consume secpath which was set by drivers.
++			 */
++			secpath_reset(skb);
++			return check;
++		}
+ 	}
+ 
+ 	return __xfrm_check_nopolicy(net, skb, dir) ||
+diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
+index 957295364a5e3c..4c7a40e149a594 100644
+--- a/include/sound/hdaudio_ext.h
++++ b/include/sound/hdaudio_ext.h
+@@ -2,8 +2,6 @@
+ #ifndef __SOUND_HDAUDIO_EXT_H
+ #define __SOUND_HDAUDIO_EXT_H
+ 
+-#include <linux/io-64-nonatomic-lo-hi.h>
+-#include <linux/iopoll.h>
+ #include <sound/hdaudio.h>
+ 
+ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
+@@ -119,49 +117,6 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink)
+ 
+ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
+ 
+-#define snd_hdac_adsp_writeb(chip, reg, value) \
+-	snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readb(chip, reg) \
+-	snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writew(chip, reg, value) \
+-	snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readw(chip, reg) \
+-	snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writel(chip, reg, value) \
+-	snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readl(chip, reg) \
+-	snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
+-#define snd_hdac_adsp_writeq(chip, reg, value) \
+-	snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
+-#define snd_hdac_adsp_readq(chip, reg) \
+-	snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
+-
+-#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
+-	snd_hdac_adsp_writeb(chip, reg, \
+-			(snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
+-	snd_hdac_adsp_writew(chip, reg, \
+-			(snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
+-	snd_hdac_adsp_writel(chip, reg, \
+-			(snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
+-#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
+-	snd_hdac_adsp_writeq(chip, reg, \
+-			(snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
+-
+-#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
+-	readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
+-			   delay_us, timeout_us)
+-
+ struct hdac_ext_device;
+ 
+ /* ops common to all codec drivers */
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index a0aed1a428a183..9a75590227f262 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -118,6 +118,8 @@ enum yfs_cm_operation {
+  */
+ #define afs_call_traces \
+ 	EM(afs_call_trace_alloc,		"ALLOC") \
++	EM(afs_call_trace_async_abort,		"ASYAB") \
++	EM(afs_call_trace_async_kill,		"ASYKL") \
+ 	EM(afs_call_trace_free,			"FREE ") \
+ 	EM(afs_call_trace_get,			"GET  ") \
+ 	EM(afs_call_trace_put,			"PUT  ") \
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index d03e0bd8c028b5..27c23873c88115 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -117,6 +117,7 @@
+ #define rxrpc_call_poke_traces \
+ 	EM(rxrpc_call_poke_abort,		"Abort")	\
+ 	EM(rxrpc_call_poke_complete,		"Compl")	\
++	EM(rxrpc_call_poke_conn_abort,		"Conn-abort")	\
+ 	EM(rxrpc_call_poke_error,		"Error")	\
+ 	EM(rxrpc_call_poke_idle,		"Idle")		\
+ 	EM(rxrpc_call_poke_set_timeout,		"Set-timo")	\
+@@ -282,6 +283,7 @@
+ 	EM(rxrpc_call_see_activate_client,	"SEE act-clnt") \
+ 	EM(rxrpc_call_see_connect_failed,	"SEE con-fail") \
+ 	EM(rxrpc_call_see_connected,		"SEE connect ") \
++	EM(rxrpc_call_see_conn_abort,		"SEE conn-abt") \
+ 	EM(rxrpc_call_see_disconnected,		"SEE disconn ") \
+ 	EM(rxrpc_call_see_distribute_error,	"SEE dist-err") \
+ 	EM(rxrpc_call_see_input,		"SEE input   ") \
+@@ -981,6 +983,29 @@ TRACE_EVENT(rxrpc_rx_abort,
+ 		      __entry->abort_code)
+ 	    );
+ 
++TRACE_EVENT(rxrpc_rx_conn_abort,
++	    TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb),
++
++	    TP_ARGS(conn, skb),
++
++	    TP_STRUCT__entry(
++		    __field(unsigned int,	conn)
++		    __field(rxrpc_serial_t,	serial)
++		    __field(u32,		abort_code)
++			     ),
++
++	    TP_fast_assign(
++		    __entry->conn = conn->debug_id;
++		    __entry->serial = rxrpc_skb(skb)->hdr.serial;
++		    __entry->abort_code = skb->priority;
++			   ),
++
++	    TP_printk("C=%08x ABORT %08x ac=%d",
++		      __entry->conn,
++		      __entry->serial,
++		      __entry->abort_code)
++	    );
++
+ TRACE_EVENT(rxrpc_rx_challenge,
+ 	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ 		     u32 version, u32 nonce, u32 min_level),
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 4758f1ba902b94..d062c5c69211ba 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -3233,6 +3233,7 @@ static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
+ 		     end > ctx->cq_wait_size))
+ 		return ERR_PTR(-EFAULT);
+ 
++	offset = array_index_nospec(offset, ctx->cq_wait_size - size);
+ 	return ctx->cq_wait_arg + offset;
+ }
+ 
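
array_index_nospec() complements the architectural bounds check above: a
mispredicted branch can still run the out-of-range offset speculatively,
so the value is clamped to the validated bound before it feeds the
pointer arithmetic. The general pattern, with an illustrative table and
bound:

#include <linux/nospec.h>

/* idx was range-checked, but clamp it so speculation past the
 * branch cannot index beyond the array. */
static u32 read_entry(const u32 *table, u32 idx, u32 size)
{
	if (idx >= size)
		return 0;
	idx = array_index_nospec(idx, size);
	return table[idx];
}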
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 333c220d322a90..800cd48001e6ef 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -89,8 +89,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
+ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 			      int res, u32 cflags, u64 user_data)
+ {
+-	req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
+-	if (!req->tctx) {
++	if (!READ_ONCE(ctx->submitter_task)) {
+ 		kmem_cache_free(req_cachep, req);
+ 		return -EOWNERDEAD;
+ 	}
+@@ -98,6 +97,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	io_req_set_res(req, res, cflags);
+ 	percpu_ref_get(&ctx->refs);
+ 	req->ctx = ctx;
++	req->tctx = NULL;
+ 	req->io_task_work.func = io_msg_tw_complete;
+ 	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+ 	return 0;
+diff --git a/io_uring/register.c b/io_uring/register.c
+index 371aec87e078c8..14ece7754e4cac 100644
+--- a/io_uring/register.c
++++ b/io_uring/register.c
+@@ -553,7 +553,7 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
+ 	ctx->cqe_cached = ctx->cqe_sentinel = NULL;
+ 
+ 	WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+-	WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
++	atomic_set(&n.rings->sq_flags, atomic_read(&o.rings->sq_flags));
+ 	WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+ 	WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
+ 
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index ce7726a048834a..25cae9f5575be2 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -362,7 +362,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	if (!prot || !prot->ioctl)
+ 		return -EOPNOTSUPP;
+ 
+-	switch (cmd->sqe->cmd_op) {
++	switch (cmd->cmd_op) {
+ 	case SOCKET_URING_OP_SIOCINQ:
+ 		ret = prot->ioctl(sk, SIOCINQ, &arg);
+ 		if (ret)
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index 945a5680f6a548..8caf56a308d964 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -218,7 +218,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
+ struct vma_list {
+ 	struct vm_area_struct *vma;
+ 	struct list_head head;
+-	atomic_t mmap_count;
++	refcount_t mmap_count;
+ };
+ 
+ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+@@ -228,7 +228,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
+ 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
+ 	if (!vml)
+ 		return -ENOMEM;
+-	atomic_set(&vml->mmap_count, 1);
++	refcount_set(&vml->mmap_count, 1);
+ 	vma->vm_private_data = vml;
+ 	vml->vma = vma;
+ 	list_add(&vml->head, &arena->vma_list);
+@@ -239,7 +239,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
+ {
+ 	struct vma_list *vml = vma->vm_private_data;
+ 
+-	atomic_inc(&vml->mmap_count);
++	refcount_inc(&vml->mmap_count);
+ }
+ 
+ static void arena_vm_close(struct vm_area_struct *vma)
+@@ -248,7 +248,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
+ 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ 	struct vma_list *vml = vma->vm_private_data;
+ 
+-	if (!atomic_dec_and_test(&vml->mmap_count))
++	if (!refcount_dec_and_test(&vml->mmap_count))
+ 		return;
+ 	guard(mutex)(&arena->lock);
+ 	/* update link list under lock */
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index 7e6a0af0afc168..e94820f6b0cd38 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -841,8 +841,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
+ 	smap->elem_size = offsetof(struct bpf_local_storage_elem,
+ 				   sdata.data[attr->value_size]);
+ 
+-	smap->bpf_ma = bpf_ma;
+-	if (bpf_ma) {
++	/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in a
++	 * non-preemptible context. Thus, force all storages to use
++	 * bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
++	 */
++	smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
++	if (smap->bpf_ma) {
+ 		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
+ 		if (err)
+ 			goto free_smap;
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index 606efe32485a98..040fb1cd840b65 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -310,6 +310,20 @@ void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+ 	kfree(arg_info);
+ }
+ 
++static bool is_module_member(const struct btf *btf, u32 id)
++{
++	const struct btf_type *t;
++
++	t = btf_type_resolve_ptr(btf, id, NULL);
++	if (!t)
++		return false;
++
++	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
++		return false;
++
++	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
++}
++
+ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			     struct btf *btf,
+ 			     struct bpf_verifier_log *log)
+@@ -389,6 +403,13 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ 			goto errout;
+ 		}
+ 
++		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
++			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
++				st_ops->name);
++			err = -EOPNOTSUPP;
++			goto errout;
++		}
++
+ 		func_proto = btf_type_resolve_func_ptr(btf,
+ 						       member->type,
+ 						       NULL);
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index e5a5f023cedd5c..10d0975deadabe 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -498,11 +498,6 @@ bool btf_type_is_void(const struct btf_type *t)
+ 	return t == &btf_void;
+ }
+ 
+-static bool btf_type_is_fwd(const struct btf_type *t)
+-{
+-	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+-}
+-
+ static bool btf_type_is_datasec(const struct btf_type *t)
+ {
+ 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 751c150f9e1cd7..46a1faf9ffd5da 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1593,10 +1593,24 @@ void bpf_timer_cancel_and_free(void *val)
+ 	 * To avoid these issues, punt to workqueue context when we are in a
+ 	 * timer callback.
+ 	 */
+-	if (this_cpu_read(hrtimer_running))
++	if (this_cpu_read(hrtimer_running)) {
+ 		queue_work(system_unbound_wq, &t->cb.delete_work);
+-	else
++		return;
++	}
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++		/* If the timer is running on another CPU, also use a kworker to
++		 * wait for the completion of the timer instead of trying to
++		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
++		 * completion.
++		 */
++		if (hrtimer_try_to_cancel(&t->timer) >= 0)
++			kfree_rcu(t, cb.rcu);
++		else
++			queue_work(system_unbound_wq, &t->cb.delete_work);
++	} else {
+ 		bpf_timer_delete_work(&t->cb.delete_work);
++	}
+ }
+ 
+ /* This function is called by map_delete/update_elem for individual element and
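
The ">= 0" test above leans on hrtimer_try_to_cancel()'s return
convention: 0 means the timer was not queued, 1 means it was dequeued
before firing, and -1 means its callback is executing right now. Only the
-1 case needs the kworker, because waiting for the callback would take a
sleeping lock on PREEMPT_RT. The same decision, spelled out (t as in the
hunk above):

switch (hrtimer_try_to_cancel(&t->timer)) {
case 0:		/* timer was not queued */
case 1:		/* timer dequeued before it fired */
	kfree_rcu(t, cb.rcu);
	break;
case -1:	/* callback running: let a kworker wait it out */
	queue_work(system_unbound_wq, &t->cb.delete_work);
	break;
}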
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 065f9188b44a0d..e9f698c08dc179 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10425,9 +10425,9 @@ static struct pmu perf_tracepoint = {
+ };
+ 
+ static int perf_tp_filter_match(struct perf_event *event,
+-				struct perf_sample_data *data)
++				struct perf_raw_record *raw)
+ {
+-	void *record = data->raw->frag.data;
++	void *record = raw->frag.data;
+ 
+ 	/* only top level events have filters set */
+ 	if (event->parent)
+@@ -10439,7 +10439,7 @@ static int perf_tp_filter_match(struct perf_event *event,
+ }
+ 
+ static int perf_tp_event_match(struct perf_event *event,
+-				struct perf_sample_data *data,
++				struct perf_raw_record *raw,
+ 				struct pt_regs *regs)
+ {
+ 	if (event->hw.state & PERF_HES_STOPPED)
+@@ -10450,7 +10450,7 @@ static int perf_tp_event_match(struct perf_event *event,
+ 	if (event->attr.exclude_kernel && !user_mode(regs))
+ 		return 0;
+ 
+-	if (!perf_tp_filter_match(event, data))
++	if (!perf_tp_filter_match(event, raw))
+ 		return 0;
+ 
+ 	return 1;
+@@ -10476,6 +10476,7 @@ EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+ static void __perf_tp_event_target_task(u64 count, void *record,
+ 					struct pt_regs *regs,
+ 					struct perf_sample_data *data,
++					struct perf_raw_record *raw,
+ 					struct perf_event *event)
+ {
+ 	struct trace_entry *entry = record;
+@@ -10485,13 +10486,17 @@ static void __perf_tp_event_target_task(u64 count, void *record,
+ 	/* Cannot deliver synchronous signal to other task. */
+ 	if (event->attr.sigtrap)
+ 		return;
+-	if (perf_tp_event_match(event, data, regs))
++	if (perf_tp_event_match(event, raw, regs)) {
++		perf_sample_data_init(data, 0, 0);
++		perf_sample_save_raw_data(data, event, raw);
+ 		perf_swevent_event(event, count, data, regs);
++	}
+ }
+ 
+ static void perf_tp_event_target_task(u64 count, void *record,
+ 				      struct pt_regs *regs,
+ 				      struct perf_sample_data *data,
++				      struct perf_raw_record *raw,
+ 				      struct perf_event_context *ctx)
+ {
+ 	unsigned int cpu = smp_processor_id();
+@@ -10499,15 +10504,15 @@ static void perf_tp_event_target_task(u64 count, void *record,
+ 	struct perf_event *event, *sibling;
+ 
+ 	perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
+-		__perf_tp_event_target_task(count, record, regs, data, event);
++		__perf_tp_event_target_task(count, record, regs, data, raw, event);
+ 		for_each_sibling_event(sibling, event)
+-			__perf_tp_event_target_task(count, record, regs, data, sibling);
++			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
+ 	}
+ 
+ 	perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
+-		__perf_tp_event_target_task(count, record, regs, data, event);
++		__perf_tp_event_target_task(count, record, regs, data, raw, event);
+ 		for_each_sibling_event(sibling, event)
+-			__perf_tp_event_target_task(count, record, regs, data, sibling);
++			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
+ 	}
+ }
+ 
+@@ -10525,15 +10530,10 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 		},
+ 	};
+ 
+-	perf_sample_data_init(&data, 0, 0);
+-	perf_sample_save_raw_data(&data, &raw);
+-
+ 	perf_trace_buf_update(record, event_type);
+ 
+ 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
+-		if (perf_tp_event_match(event, &data, regs)) {
+-			perf_swevent_event(event, count, &data, regs);
+-
++		if (perf_tp_event_match(event, &raw, regs)) {
+ 			/*
+ 			 * Here use the same on-stack perf_sample_data,
+ 			 * some members in data are event-specific and
+@@ -10543,7 +10543,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 			 * because data->sample_flags is set.
+ 			 */
+ 			perf_sample_data_init(&data, 0, 0);
+-			perf_sample_save_raw_data(&data, &raw);
++			perf_sample_save_raw_data(&data, event, &raw);
++			perf_swevent_event(event, count, &data, regs);
+ 		}
+ 	}
+ 
+@@ -10560,7 +10561,7 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
+ 			goto unlock;
+ 
+ 		raw_spin_lock(&ctx->lock);
+-		perf_tp_event_target_task(count, record, regs, &data, ctx);
++		perf_tp_event_target_task(count, record, regs, &data, &raw, ctx);
+ 		raw_spin_unlock(&ctx->lock);
+ unlock:
+ 		rcu_read_unlock();
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 5d71ef85420c51..7f1a95b4f14de8 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -28,6 +28,7 @@
+ #include <linux/rcupdate_trace.h>
+ #include <linux/workqueue.h>
+ #include <linux/srcu.h>
++#include <linux/oom.h>          /* check_stable_address_space */
+ 
+ #include <linux/uprobes.h>
+ 
+@@ -1260,6 +1261,9 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
+ 		 * returns NULL in find_active_uprobe_rcu().
+ 		 */
+ 		mmap_write_lock(mm);
++		if (check_stable_address_space(mm))
++			goto unlock;
++
+ 		vma = find_vma(mm, info->vaddr);
+ 		if (!vma || !valid_vma(vma, is_register) ||
+ 		    file_inode(vma->vm_file) != uprobe->inode)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 9b301180fd4162..9da032802e347f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -760,7 +760,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		mt_set_in_rcu(vmi.mas.tree);
+ 		ksm_fork(mm, oldmm);
+ 		khugepaged_fork(mm, oldmm);
+-	} else if (mpnt) {
++	} else {
++
+ 		/*
+ 		 * The entire maple tree has already been duplicated. If the
+ 		 * mmap duplication fails, mark the failure point with
+@@ -768,8 +769,18 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 		 * stop releasing VMAs that have not been duplicated after this
+ 		 * point.
+ 		 */
+-		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+-		mas_store(&vmi.mas, XA_ZERO_ENTRY);
++		if (mpnt) {
++			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
++			mas_store(&vmi.mas, XA_ZERO_ENTRY);
++			/* Avoid OOM iterating a broken tree */
++			set_bit(MMF_OOM_SKIP, &mm->flags);
++		}
++		/*
++		 * The mm_struct is going to exit, but the locks will be dropped
++		 * first.  Setting the mm_struct as unstable is advisable as it is
++		 * not fully initialised.
++		 */
++		set_bit(MMF_UNSTABLE, &mm->flags);
+ 	}
+ out:
+ 	mmap_write_unlock(mm);
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index fe0272cd84a51a..a29df4b02a2ed9 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -441,10 +441,6 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+ {
+ 	return desc->pending_mask;
+ }
+-static inline bool handle_enforce_irqctx(struct irq_data *data)
+-{
+-	return irqd_is_handle_enforce_irqctx(data);
+-}
+ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
+ #else /* CONFIG_GENERIC_PENDING_IRQ */
+ static inline bool irq_can_move_pcntxt(struct irq_data *data)
+@@ -471,11 +467,12 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
+ {
+ 	return false;
+ }
++#endif /* !CONFIG_GENERIC_PENDING_IRQ */
++
+ static inline bool handle_enforce_irqctx(struct irq_data *data)
+ {
+-	return false;
++	return irqd_is_handle_enforce_irqctx(data);
+ }
+-#endif /* !CONFIG_GENERIC_PENDING_IRQ */
+ 
+ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
+ static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 5399c182b3cbed..c740d208b52aa3 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -2950,7 +2950,10 @@ static noinline int do_init_module(struct module *mod)
+ #endif
+ 	ret = module_enable_rodata_ro(mod, true);
+ 	if (ret)
+-		goto fail_mutex_unlock;
++		pr_warn("%s: module_enable_rodata_ro_after_init() returned %d, "
++			"ro_after_init data might still be writable\n",
++			mod->name, ret);
++
+ 	mod_tree_remove_init(mod);
+ 	module_arch_freeing_init(mod);
+ 	for_class_mod_mem_type(type, init) {
+@@ -2989,8 +2992,6 @@ static noinline int do_init_module(struct module *mod)
+ 
+ 	return 0;
+ 
+-fail_mutex_unlock:
+-	mutex_unlock(&module_mutex);
+ fail_free_freeinit:
+ 	kfree(freeinit);
+ fail:
+diff --git a/kernel/padata.c b/kernel/padata.c
+index d51bbc76b2279c..418987056340ea 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -47,6 +47,22 @@ struct padata_mt_job_state {
+ static void padata_free_pd(struct parallel_data *pd);
+ static void __init padata_mt_helper(struct work_struct *work);
+ 
++static inline void padata_get_pd(struct parallel_data *pd)
++{
++	refcount_inc(&pd->refcnt);
++}
++
++static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
++{
++	if (refcount_sub_and_test(cnt, &pd->refcnt))
++		padata_free_pd(pd);
++}
++
++static inline void padata_put_pd(struct parallel_data *pd)
++{
++	padata_put_pd_cnt(pd, 1);
++}
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+ 	int cpu, target_cpu;
+@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
+ 	if ((pinst->flags & PADATA_RESET))
+ 		goto out;
+ 
+-	refcount_inc(&pd->refcnt);
++	padata_get_pd(pd);
+ 	padata->pd = pd;
+ 	padata->cb_cpu = *cb_cpu;
+ 
+@@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
+ 	smp_mb();
+ 
+ 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+-	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
++	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
++		/*
++		 * Other contexts (e.g. the padata_serial_worker) can finish the request.
++		 * To avoid a UAF issue, take a pd ref here and put it after the reorder_work finishes.
++		 */
++		padata_get_pd(pd);
+ 		queue_work(pinst->serial_wq, &pd->reorder_work);
++	}
+ }
+ 
+ static void invoke_padata_reorder(struct work_struct *work)
+@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
+ 	pd = container_of(work, struct parallel_data, reorder_work);
+ 	padata_reorder(pd);
+ 	local_bh_enable();
++	/* Pairs with the pd ref taken when queueing the reorder_work */
++	padata_put_pd(pd);
+ }
+ 
+ static void padata_serial_worker(struct work_struct *serial_work)
+@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
+ 	}
+ 	local_bh_enable();
+ 
+-	if (refcount_sub_and_test(cnt, &pd->refcnt))
+-		padata_free_pd(pd);
++	padata_put_pd_cnt(pd, cnt);
+ }
+ 
+ /**
+@@ -681,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
+ 	synchronize_rcu();
+ 
+ 	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
+-		if (refcount_dec_and_test(&ps->opd->refcnt))
+-			padata_free_pd(ps->opd);
++		padata_put_pd(ps->opd);
+ 
+ 	pinst->flags &= ~PADATA_RESET;
+ 
+@@ -970,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ 
+ 	pinst = kobj2pinst(kobj);
+ 	pentry = attr2pentry(attr);
+-	if (pentry->show)
++	if (pentry->store)
+ 		ret = pentry->store(pinst, attr, buf, count);
+ 
+ 	return ret;
+@@ -1121,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
+ 	if (!ps)
+ 		return;
+ 
++	/*
++	 * Wait for all _do_serial calls to finish to avoid touching
++	 * freed pd's and ps's.
++	 */
++	synchronize_rcu();
++
+ 	mutex_lock(&ps->pinst->lock);
+ 	list_del(&ps->list);
+ 	pd = rcu_dereference_protected(ps->pd, 1);
+-	if (refcount_dec_and_test(&pd->refcnt))
+-		padata_free_pd(pd);
++	padata_put_pd(pd);
+ 	mutex_unlock(&ps->pinst->lock);
+ 
+ 	kfree(ps);
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 1f87aa01ba44f0..10a01af63a8079 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -608,7 +608,11 @@ int hibernation_platform_enter(void)
+ 
+ 	local_irq_disable();
+ 	system_state = SYSTEM_SUSPEND;
+-	syscore_suspend();
++
++	error = syscore_suspend();
++	if (error)
++		goto Enable_irqs;
++
+ 	if (pm_wakeup_pending()) {
+ 		error = -EAGAIN;
+ 		goto Power_up;
+@@ -620,6 +624,7 @@ int hibernation_platform_enter(void)
+ 
+  Power_up:
+ 	syscore_resume();
++ Enable_irqs:
+ 	system_state = SYSTEM_RUNNING;
+ 	local_irq_enable();
+ 
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index c6bb47666aef67..a91bdf80296716 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -338,3 +338,9 @@ bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
+ void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped);
+ void console_prepend_replay(struct printk_message *pmsg);
+ #endif
++
++#ifdef CONFIG_SMP
++bool is_printk_cpu_sync_owner(void);
++#else
++static inline bool is_printk_cpu_sync_owner(void) { return false; }
++#endif
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 80910bc3470c23..f446a06b4da8ca 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -4922,6 +4922,11 @@ void console_try_replay_all(void)
+ static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
+ static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
+ 
++bool is_printk_cpu_sync_owner(void)
++{
++	return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id());
++}
++
+ /**
+  * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
+  *                            spinning lock is not owned by any CPU.
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 6f94418d53ffba..6bc40ac8847b56 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -61,10 +61,15 @@ bool is_printk_legacy_deferred(void)
+ 	/*
+ 	 * The per-CPU variable @printk_context can be read safely in any
+ 	 * context. CPU migration is always disabled when set.
++	 *
++	 * A context holding the printk_cpu_sync must not spin waiting for
++	 * another CPU. For legacy printing, it could be the console_lock
++	 * or the port lock.
+ 	 */
+ 	return (force_legacy_kthread() ||
+ 		this_cpu_read(printk_context) ||
+-		in_nmi());
++		in_nmi() ||
++		is_printk_cpu_sync_owner());
+ }
+ 
+ asmlinkage int vprintk(const char *fmt, va_list args)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3e5a6bf587f911..e0fd8069c60e64 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6641,7 +6641,6 @@ static void __sched notrace __schedule(int sched_mode)
+ 	 * as a preemption by schedule_debug() and RCU.
+ 	 */
+ 	bool preempt = sched_mode > SM_NONE;
+-	bool block = false;
+ 	unsigned long *switch_count;
+ 	unsigned long prev_state;
+ 	struct rq_flags rf;
+@@ -6702,7 +6701,7 @@ static void __sched notrace __schedule(int sched_mode)
+ 			goto picked;
+ 		}
+ 	} else if (!preempt && prev_state) {
+-		block = try_to_block_task(rq, prev, prev_state);
++		try_to_block_task(rq, prev, prev_state);
+ 		switch_count = &prev->nvcsw;
+ 	}
+ 
+@@ -6748,7 +6747,8 @@ static void __sched notrace __schedule(int sched_mode)
+ 
+ 		migrate_disable_switch(rq, prev);
+ 		psi_account_irqtime(rq, prev, next);
+-		psi_sched_switch(prev, next, block);
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
++					     prev->se.sched_delayed);
+ 
+ 		trace_sched_switch(preempt, prev, next, prev_state);
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 28c77904ea749f..e51d5ce730be15 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -83,7 +83,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ 
+ 	if (unlikely(sg_policy->limits_changed)) {
+ 		sg_policy->limits_changed = false;
+-		sg_policy->need_freq_update = true;
++		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+ 		return true;
+ 	}
+ 
+@@ -96,7 +96,7 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+ 				   unsigned int next_freq)
+ {
+ 	if (sg_policy->need_freq_update)
+-		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
++		sg_policy->need_freq_update = false;
+ 	else if (sg_policy->next_freq == next_freq)
+ 		return false;
+ 
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 19813b387ef98e..76030e54a3f596 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3219,6 +3219,74 @@ static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
+ 		goto retry;
+ }
+ 
++/*
++ * Return the number of CPUs in the same LLC domain as @cpu (or zero if the
++ * LLC domain is not defined).
++ */
++static unsigned int llc_weight(s32 cpu)
++{
++	struct sched_domain *sd;
++
++	sd = rcu_dereference(per_cpu(sd_llc, cpu));
++	if (!sd)
++		return 0;
++
++	return sd->span_weight;
++}
++
++/*
++ * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
++ * domain is not defined).
++ */
++static struct cpumask *llc_span(s32 cpu)
++{
++	struct sched_domain *sd;
++
++	sd = rcu_dereference(per_cpu(sd_llc, cpu));
++	if (!sd)
++		return NULL;
++
++	return sched_domain_span(sd);
++}
++
++/*
++ * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
++ * NUMA domain is not defined).
++ */
++static unsigned int numa_weight(s32 cpu)
++{
++	struct sched_domain *sd;
++	struct sched_group *sg;
++
++	sd = rcu_dereference(per_cpu(sd_numa, cpu));
++	if (!sd)
++		return 0;
++	sg = sd->groups;
++	if (!sg)
++		return 0;
++
++	return sg->group_weight;
++}
++
++/*
++ * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
++ * domain is not defined).
++ */
++static struct cpumask *numa_span(s32 cpu)
++{
++	struct sched_domain *sd;
++	struct sched_group *sg;
++
++	sd = rcu_dereference(per_cpu(sd_numa, cpu));
++	if (!sd)
++		return NULL;
++	sg = sd->groups;
++	if (!sg)
++		return NULL;
++
++	return sched_group_span(sg);
++}
++
+ /*
+  * Return true if the LLC domains do not perfectly overlap with the NUMA
+  * domains, false otherwise.
+@@ -3250,19 +3318,10 @@ static bool llc_numa_mismatch(void)
+ 	 * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
+ 	 * domains).
+ 	 */
+-	for_each_online_cpu(cpu) {
+-		const struct cpumask *numa_cpus;
+-		struct sched_domain *sd;
+-
+-		sd = rcu_dereference(per_cpu(sd_llc, cpu));
+-		if (!sd)
++	for_each_online_cpu(cpu)
++		if (llc_weight(cpu) != numa_weight(cpu))
+ 			return true;
+ 
+-		numa_cpus = cpumask_of_node(cpu_to_node(cpu));
+-		if (sd->span_weight != cpumask_weight(numa_cpus))
+-			return true;
+-	}
+-
+ 	return false;
+ }
+ 
+@@ -3280,8 +3339,7 @@ static bool llc_numa_mismatch(void)
+ static void update_selcpu_topology(void)
+ {
+ 	bool enable_llc = false, enable_numa = false;
+-	struct sched_domain *sd;
+-	const struct cpumask *cpus;
++	unsigned int nr_cpus;
+ 	s32 cpu = cpumask_first(cpu_online_mask);
+ 
+ 	/*
+@@ -3295,10 +3353,12 @@ static void update_selcpu_topology(void)
+ 	 * CPUs.
+ 	 */
+ 	rcu_read_lock();
+-	sd = rcu_dereference(per_cpu(sd_llc, cpu));
+-	if (sd) {
+-		if (sd->span_weight < num_online_cpus())
++	nr_cpus = llc_weight(cpu);
++	if (nr_cpus > 0) {
++		if (nr_cpus < num_online_cpus())
+ 			enable_llc = true;
++		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
++			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
+ 	}
+ 
+ 	/*
+@@ -3310,9 +3370,13 @@ static void update_selcpu_topology(void)
+ 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
+ 	 * for an idle CPU in the same domain twice is redundant.
+ 	 */
+-	cpus = cpumask_of_node(cpu_to_node(cpu));
+-	if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch())
+-		enable_numa = true;
++	nr_cpus = numa_weight(cpu);
++	if (nr_cpus > 0) {
++		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
++			enable_numa = true;
++		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
++			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
++	}
+ 	rcu_read_unlock();
+ 
+ 	pr_debug("sched_ext: LLC idle selection %s\n",
+@@ -3364,7 +3428,6 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ 
+ 	*found = false;
+ 
+-
+ 	/*
+ 	 * This is necessary to protect llc_cpus.
+ 	 */
+@@ -3383,15 +3446,10 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ 	 */
+ 	if (p->nr_cpus_allowed >= num_possible_cpus()) {
+ 		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
+-			numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
+-
+-		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
+-			struct sched_domain *sd;
++			numa_cpus = numa_span(prev_cpu);
+ 
+-			sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
+-			if (sd)
+-				llc_cpus = sched_domain_span(sd);
+-		}
++		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
++			llc_cpus = llc_span(prev_cpu);
+ 	}
+ 
+ 	/*
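
The sched_ext refactor above replaces the open-coded sd_llc/sd_numa
dereferences with small weight/span helpers, so llc_numa_mismatch() reduces
to comparing two per-CPU sizes. A toy model of that comparison; the topology
arrays are made up, not anything the kernel exposes this way.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static const unsigned int llc_size[NR_CPUS]  = { 2, 2, 2, 2 };
static const unsigned int numa_size[NR_CPUS] = { 4, 4, 4, 4 };

static unsigned int llc_weight(int cpu)  { return llc_size[cpu]; }
static unsigned int numa_weight(int cpu) { return numa_size[cpu]; }

/* domains overlap perfectly only if the sizes agree on every CPU */
static bool llc_numa_mismatch(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (llc_weight(cpu) != numa_weight(cpu))
			return true;
	return false;
}

int main(void)
{
	printf("mismatch: %s\n", llc_numa_mismatch() ? "yes" : "no");
	return 0;
}
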
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 26958431deb7a8..8800679b508d9f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5538,9 +5538,9 @@ static struct sched_entity *
+ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
+ {
+ 	/*
+-	 * Enabling NEXT_BUDDY will affect latency but not fairness.
++	 * Picking the ->next buddy will affect latency but not fairness.
+ 	 */
+-	if (sched_feat(NEXT_BUDDY) &&
++	if (sched_feat(PICK_BUDDY) &&
+ 	    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) {
+ 		/* ->next will never be delayed */
+ 		SCHED_WARN_ON(cfs_rq->next->sched_delayed);
+@@ -9303,6 +9303,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 	int tsk_cache_hot;
+ 
+ 	lockdep_assert_rq_held(env->src_rq);
++	if (p->sched_task_hot)
++		p->sched_task_hot = 0;
+ 
+ 	/*
+ 	 * We do not migrate tasks that are:
+@@ -9375,10 +9377,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 
+ 	if (tsk_cache_hot <= 0 ||
+ 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
+-		if (tsk_cache_hot == 1) {
+-			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+-			schedstat_inc(p->stats.nr_forced_migrations);
+-		}
++		if (tsk_cache_hot == 1)
++			p->sched_task_hot = 1;
+ 		return 1;
+ 	}
+ 
+@@ -9393,6 +9393,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
+ {
+ 	lockdep_assert_rq_held(env->src_rq);
+ 
++	if (p->sched_task_hot) {
++		p->sched_task_hot = 0;
++		schedstat_inc(env->sd->lb_hot_gained[env->idle]);
++		schedstat_inc(p->stats.nr_forced_migrations);
++	}
++
+ 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+ 	set_task_cpu(p, env->dst_cpu);
+ }
+@@ -9553,6 +9559,9 @@ static int detach_tasks(struct lb_env *env)
+ 
+ 		continue;
+ next:
++		if (p->sched_task_hot)
++			schedstat_inc(p->stats.nr_failed_migrations_hot);
++
+ 		list_move(&p->se.group_node, tasks);
+ 	}
+ 
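
The fair.c hunks defer the "forced migration" statistics: can_migrate_task()
only marks the task hot, and the counters are bumped in detach_task() once
the migration is committed, or a failure counter when it is skipped. A
plain-C sketch of that defer-until-commit pattern, with invented fields in
place of the scheduler's types:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool hot;
	unsigned long forced_migrations;
	unsigned long failed_hot_migrations;
};

static bool can_migrate(struct task *t, bool cache_hot)
{
	t->hot = false;
	if (cache_hot)
		t->hot = true;	/* candidate only; don't count yet */
	return true;
}

static void detach(struct task *t)
{
	if (t->hot) {		/* migration committed: account now */
		t->hot = false;
		t->forced_migrations++;
	}
}

static void skip(struct task *t)
{
	if (t->hot)		/* candidate rejected later in the loop */
		t->failed_hot_migrations++;
}

int main(void)
{
	struct task t = { 0 };

	if (can_migrate(&t, true))
		detach(&t);
	can_migrate(&t, true);
	skip(&t);
	printf("forced=%lu failed=%lu\n",
	       t.forced_migrations, t.failed_hot_migrations);
	return 0;
}
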
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index a3d331dd2d8ff4..3c12d9f93331d6 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -31,6 +31,15 @@ SCHED_FEAT(PREEMPT_SHORT, true)
+  */
+ SCHED_FEAT(NEXT_BUDDY, false)
+ 
++/*
++ * Allow completely ignoring cfs_rq->next, which can be set from various
++ * places:
++ *   - NEXT_BUDDY (wakeup preemption)
++ *   - yield_to_task()
++ *   - cgroup dequeue / pick
++ */
++SCHED_FEAT(PICK_BUDDY, true)
++
+ /*
+  * Consider buddies to be cache hot, decreases the likeliness of a
+  * cache buddy being migrated away, increases cache locality.
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 8ee0add5a48a80..6ade91bce63ee3 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -138,6 +138,10 @@ static inline void psi_enqueue(struct task_struct *p, int flags)
+ 	if (flags & ENQUEUE_RESTORE)
+ 		return;
+ 
++	/* psi_sched_switch() will handle the flags */
++	if (task_on_cpu(task_rq(p), p))
++		return;
++
+ 	if (p->se.sched_delayed) {
+ 		/* CPU migration of "sleeping" task */
+ 		SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED));
+diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
+index ff0e5ab4e37cb1..943406c4ee8650 100644
+--- a/kernel/sched/syscalls.c
++++ b/kernel/sched/syscalls.c
+@@ -1433,7 +1433,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
+ 	struct rq *rq, *p_rq;
+ 	int yielded = 0;
+ 
+-	scoped_guard (irqsave) {
++	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
+ 		rq = this_rq();
+ 
+ again:
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 1b8db5aee9d38c..2c2205e91fee96 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -619,7 +619,8 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
+ 
+ static __always_inline u64
+ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+-			u64 flags, struct perf_sample_data *sd)
++			u64 flags, struct perf_raw_record *raw,
++			struct perf_sample_data *sd)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 	unsigned int cpu = smp_processor_id();
+@@ -644,6 +645,8 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+ 	if (unlikely(event->oncpu != cpu))
+ 		return -EOPNOTSUPP;
+ 
++	perf_sample_save_raw_data(sd, event, raw);
++
+ 	return perf_event_output(event, sd, regs);
+ }
+ 
+@@ -687,9 +690,8 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ 	}
+ 
+ 	perf_sample_data_init(sd, 0, 0);
+-	perf_sample_save_raw_data(sd, &raw);
+ 
+-	err = __bpf_perf_event_output(regs, map, flags, sd);
++	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
+ out:
+ 	this_cpu_dec(bpf_trace_nest_level);
+ 	preempt_enable();
+@@ -748,9 +750,8 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+ 
+ 	perf_fetch_caller_regs(regs);
+ 	perf_sample_data_init(sd, 0, 0);
+-	perf_sample_save_raw_data(sd, &raw);
+ 
+-	ret = __bpf_perf_event_output(regs, map, flags, sd);
++	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
+ out:
+ 	this_cpu_dec(bpf_event_output_nest_level);
+ 	preempt_enable();
+@@ -853,7 +854,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struc
+ 	if (unlikely(is_global_init(task)))
+ 		return -EPERM;
+ 
+-	if (irqs_disabled()) {
++	if (!preemptible()) {
+ 		/* Do an early check on signal validity. Otherwise,
+ 		 * the error is lost in deferred irq_work.
+ 		 */
+diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
+index 65e706e1bc199c..4e5d7af3eaa22f 100644
+--- a/lib/alloc_tag.c
++++ b/lib/alloc_tag.c
+@@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag);
+ 
+ DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ 			mem_alloc_profiling_key);
++EXPORT_SYMBOL(mem_alloc_profiling_key);
++
+ DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
+ 
+ struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 6c902639728b76..0e9a1d4cf89be0 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
+ 	 */
+ 	rht_assign_locked(bkt, obj);
+ 
+-	atomic_inc(&ht->nelems);
+-	if (rht_grow_above_75(ht, tbl))
+-		schedule_work(&ht->run_work);
+-
+ 	return NULL;
+ }
+ 
+@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+ 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ 			data = ERR_PTR(-EAGAIN);
+ 		} else {
++			bool inserted;
++
+ 			flags = rht_lock(tbl, bkt);
+ 			data = rhashtable_lookup_one(ht, bkt, tbl,
+ 						     hash, key, obj);
+ 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+ 							hash, obj, data);
++			inserted = data && !new_tbl;
++			if (inserted)
++				atomic_inc(&ht->nelems);
+ 			if (PTR_ERR(new_tbl) != -EEXIST)
+ 				data = ERR_CAST(new_tbl);
+ 
+ 			rht_unlock(tbl, bkt, flags);
++
++			if (inserted && rht_grow_above_75(ht, tbl))
++				schedule_work(&ht->run_work);
+ 		}
+ 	} while (!IS_ERR_OR_NULL(new_tbl));
+ 
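
The rhashtable change has two parts: count the element only when the insert
actually happened, and run the grow-above-75% check after the bucket lock is
dropped. A single-bucket toy showing the same ordering, with the load-factor
math simplified:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nelems, table_size = 4;

static bool insert_one(bool already_present)
{
	bool inserted;

	pthread_mutex_lock(&bucket_lock);
	inserted = !already_present;	/* lookup + insert under the lock */
	if (inserted)
		nelems++;
	pthread_mutex_unlock(&bucket_lock);

	/* growth check outside the lock, and only for real inserts */
	if (inserted && nelems * 4 > table_size * 3)
		printf("schedule grow at %u/%u\n", nelems, table_size);
	return inserted;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		insert_one(false);
	insert_one(true);	/* duplicate: no count, no grow check */
	return 0;
}
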
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 7b3503d12aaf1a..f7c1d9f4f58d93 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1161,6 +1161,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ {
+ 	struct mem_cgroup *iter;
+ 	int ret = 0;
++	int i = 0;
+ 
+ 	BUG_ON(mem_cgroup_is_root(memcg));
+ 
+@@ -1169,8 +1170,12 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ 		struct task_struct *task;
+ 
+ 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
+-		while (!ret && (task = css_task_iter_next(&it)))
++		while (!ret && (task = css_task_iter_next(&it))) {
++			/* Avoid potential softlockup warning */
++			if ((++i & 1023) == 0)
++				cond_resched();
+ 			ret = fn(task, arg);
++		}
+ 		css_task_iter_end(&it);
+ 		if (ret) {
+ 			mem_cgroup_iter_break(memcg, iter);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 1c485beb0b934b..044ebab2c9411b 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -44,6 +44,7 @@
+ #include <linux/init.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/cred.h>
++#include <linux/nmi.h>
+ 
+ #include <asm/tlb.h>
+ #include "internal.h"
+@@ -430,10 +431,15 @@ static void dump_tasks(struct oom_control *oc)
+ 		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+ 	else {
+ 		struct task_struct *p;
++		int i = 0;
+ 
+ 		rcu_read_lock();
+-		for_each_process(p)
++		for_each_process(p) {
++			/* Avoid potential softlockup warning */
++			if ((++i & 1023) == 0)
++				touch_softlockup_watchdog();
+ 			dump_task(p, oc);
++		}
+ 		rcu_read_unlock();
+ 	}
+ }
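
Both hunks above use the same periodic-yield idiom: (++i & 1023) == 0 is a
power-of-two modulo, so the loop pays one cheap AND per iteration and yields
(or pets the watchdog) on every 1024th pass. A standalone illustration, with
sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdio.h>

int main(void)
{
	unsigned long yields = 0;
	int i = 0;

	for (unsigned long n = 0; n < 10000; n++) {
		if ((++i & 1023) == 0) {	/* same as i % 1024 == 0 */
			sched_yield();
			yields++;
		}
	}
	printf("yielded %lu times\n", yields);
	return 0;
}
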
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index d6f9fae06a9d81..aa6c714892ec9d 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -467,7 +467,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
+ 	goto out_put;
+ }
+ 
+-static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
++static void ax25_fillin_cb_from_dev(ax25_cb *ax25, const ax25_dev *ax25_dev)
+ {
+ 	ax25->rtt     = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
+ 	ax25->t1      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
+@@ -677,22 +677,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		rtnl_lock();
+-		dev = __dev_get_by_name(&init_net, devname);
++		rcu_read_lock();
++		dev = dev_get_by_name_rcu(&init_net, devname);
+ 		if (!dev) {
+-			rtnl_unlock();
++			rcu_read_unlock();
+ 			res = -ENODEV;
+ 			break;
+ 		}
+ 
+ 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ 		if (!ax25->ax25_dev) {
+-			rtnl_unlock();
++			rcu_read_unlock();
+ 			res = -ENODEV;
+ 			break;
+ 		}
+ 		ax25_fillin_cb(ax25, ax25->ax25_dev);
+-		rtnl_unlock();
++		rcu_read_unlock();
+ 		break;
+ 
+ 	default:
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index 9efd6690b34436..3733c0254a5084 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -90,7 +90,7 @@ void ax25_dev_device_up(struct net_device *dev)
+ 
+ 	spin_lock_bh(&ax25_dev_lock);
+ 	list_add(&ax25_dev->list, &ax25_dev_list);
+-	dev->ax25_ptr     = ax25_dev;
++	rcu_assign_pointer(dev->ax25_ptr, ax25_dev);
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 
+ 	ax25_register_dev_sysctl(ax25_dev);
+@@ -125,7 +125,7 @@ void ax25_dev_device_down(struct net_device *dev)
+ 		}
+ 	}
+ 
+-	dev->ax25_ptr = NULL;
++	RCU_INIT_POINTER(dev->ax25_ptr, NULL);
+ 	spin_unlock_bh(&ax25_dev_lock);
+ 	netdev_put(dev, &ax25_dev->dev_tracker);
+ 	ax25_dev_put(ax25_dev);
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index 36249776c021e7..215d4ccf12b913 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -122,6 +122,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ 	if (dev == NULL)
+ 		dev = skb->dev;
+ 
++	rcu_read_lock();
+ 	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
+ 		kfree_skb(skb);
+ 		goto put;
+@@ -202,7 +203,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ 	ax25_queue_xmit(skb, dev);
+ 
+ put:
+-
++	rcu_read_unlock();
+ 	ax25_route_lock_unuse();
+ 	return NETDEV_TX_OK;
+ }
+diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
+index 3db76d2470e954..8bca2ace98e51b 100644
+--- a/net/ax25/ax25_out.c
++++ b/net/ax25/ax25_out.c
+@@ -39,10 +39,14 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
+ 	 * specified.
+ 	 */
+ 	if (paclen == 0) {
+-		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
++		rcu_read_lock();
++		ax25_dev = ax25_dev_ax25dev(dev);
++		if (!ax25_dev) {
++			rcu_read_unlock();
+ 			return NULL;
+-
++		}
+ 		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
++		rcu_read_unlock();
+ 	}
+ 
+ 	/*
+@@ -53,13 +57,19 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr
+ 		return ax25;		/* It already existed */
+ 	}
+ 
+-	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
++	rcu_read_lock();
++	ax25_dev = ax25_dev_ax25dev(dev);
++	if (!ax25_dev) {
++		rcu_read_unlock();
+ 		return NULL;
++	}
+ 
+-	if ((ax25 = ax25_create_cb()) == NULL)
++	if ((ax25 = ax25_create_cb()) == NULL) {
++		rcu_read_unlock();
+ 		return NULL;
+-
++	}
+ 	ax25_fillin_cb(ax25, ax25_dev);
++	rcu_read_unlock();
+ 
+ 	ax25->source_addr = *src;
+ 	ax25->dest_addr   = *dest;
+@@ -358,7 +368,9 @@ void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	unsigned char *ptr;
+ 
++	rcu_read_lock();
+ 	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
++	rcu_read_unlock();
+ 
+ 	ptr  = skb_push(skb, 1);
+ 	*ptr = 0x00;			/* KISS */
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index b7c4d656a94b71..69de75db0c9c21 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -406,6 +406,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ 		ax25_route_lock_unuse();
+ 		return -EHOSTUNREACH;
+ 	}
++	rcu_read_lock();
+ 	if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
+ 		err = -EHOSTUNREACH;
+ 		goto put;
+@@ -442,6 +443,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ 	}
+ 
+ put:
++	rcu_read_unlock();
+ 	ax25_route_lock_unuse();
+ 	return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a9f62f5aeb8406..fbb796375aa0ef 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1279,7 +1279,9 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ rollback:
+ 	ret = device_rename(&dev->dev, dev->name);
+ 	if (ret) {
++		write_seqlock_bh(&netdev_rename_lock);
+ 		memcpy(dev->name, oldname, IFNAMSIZ);
++		write_sequnlock_bh(&netdev_rename_lock);
+ 		WRITE_ONCE(dev->name_assign_type, old_assign_type);
+ 		up_write(&devnet_rename_sem);
+ 		return ret;
+@@ -2134,8 +2136,8 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+ #endif
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+-DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
+-EXPORT_SYMBOL(tcf_bypass_check_needed_key);
++DEFINE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
++EXPORT_SYMBOL(tcf_sw_enabled_key);
+ #endif
+ 
+ DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
+@@ -4030,10 +4032,13 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
+ 	if (!miniq)
+ 		return ret;
+ 
+-	if (static_branch_unlikely(&tcf_bypass_check_needed_key)) {
+-		if (tcf_block_bypass_sw(miniq->block))
+-			return ret;
+-	}
++	/* Global bypass */
++	if (!static_branch_likely(&tcf_sw_enabled_key))
++		return ret;
++
++	/* Block-wise bypass */
++	if (tcf_block_bypass_sw(miniq->block))
++		return ret;
+ 
+ 	tc_skb_cb(skb)->mru = 0;
+ 	tc_skb_cb(skb)->post_ct = false;
+@@ -6568,7 +6573,7 @@ void napi_resume_irqs(unsigned int napi_id)
+ static void __napi_hash_add_with_id(struct napi_struct *napi,
+ 				    unsigned int napi_id)
+ {
+-	napi->napi_id = napi_id;
++	WRITE_ONCE(napi->napi_id, napi_id);
+ 	hlist_add_head_rcu(&napi->napi_hash_node,
+ 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+ }
+@@ -9694,6 +9699,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
+ 			NL_SET_ERR_MSG(extack, "Program bound to different device");
+ 			return -EINVAL;
+ 		}
++		if (bpf_prog_is_dev_bound(new_prog->aux) && mode == XDP_MODE_SKB) {
++			NL_SET_ERR_MSG(extack, "Can't attach device-bound programs in generic mode");
++			return -EINVAL;
++		}
+ 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
+ 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
+ 			return -EINVAL;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 2fb45a86f3ddbf..d59a7ea646cadf 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7652,7 +7652,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
+ 	.gpl_only	= false,
+ 	.ret_type	= RET_INTEGER,
+ 	.arg1_type	= ARG_PTR_TO_CTX,
+-	.arg2_type	= ARG_PTR_TO_MEM,
++	.arg2_type	= ARG_PTR_TO_MEM | MEM_WRITE,
+ 	.arg3_type	= ARG_CONST_SIZE,
+ 	.arg4_type	= ARG_ANYTHING,
+ };
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index f89cf93f6eb45a..32570333068d87 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -1108,7 +1108,9 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
+ 	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
+ 	WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
+ 
++	mutex_lock(&page_pools_lock);
+ 	WRITE_ONCE(pool->p.napi, NULL);
++	mutex_unlock(&page_pools_lock);
+ }
+ EXPORT_SYMBOL(page_pool_disable_direct_recycling);
+ 
+diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
+index 57439787b9c2b2..2fb06d5f6d5599 100644
+--- a/net/core/page_pool_priv.h
++++ b/net/core/page_pool_priv.h
+@@ -7,6 +7,8 @@
+ 
+ #include "netmem_priv.h"
+ 
++extern struct mutex page_pools_lock;
++
+ s32 page_pool_inflight(const struct page_pool *pool, bool strict);
+ 
+ int page_pool_list(struct page_pool *pool);
+diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
+index 48335766c1bfd6..6677e0c2e25650 100644
+--- a/net/core/page_pool_user.c
++++ b/net/core/page_pool_user.c
+@@ -3,6 +3,7 @@
+ #include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/xarray.h>
++#include <net/busy_poll.h>
+ #include <net/net_debug.h>
+ #include <net/netdev_rx_queue.h>
+ #include <net/page_pool/helpers.h>
+@@ -14,10 +15,11 @@
+ #include "netdev-genl-gen.h"
+ 
+ static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
+-/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
++/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
++ *	pool->user.
+  * Ordering: inside rtnl_lock
+  */
+-static DEFINE_MUTEX(page_pools_lock);
++DEFINE_MUTEX(page_pools_lock);
+ 
+ /* Page pools are only reachable from user space (via netlink) if they are
+  * linked to a netdev at creation time. Following page pool "visibility"
+@@ -216,6 +218,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
+ {
+ 	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+ 	size_t inflight, refsz;
++	unsigned int napi_id;
+ 	void *hdr;
+ 
+ 	hdr = genlmsg_iput(rsp, info);
+@@ -229,8 +232,10 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
+ 	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
+ 			pool->slow.netdev->ifindex))
+ 		goto err_cancel;
+-	if (pool->user.napi_id &&
+-	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
++
++	napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
++	if (napi_id >= MIN_NAPI_ID &&
++	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
+ 		goto err_cancel;
+ 
+ 	inflight = page_pool_inflight(pool, false);
+@@ -319,8 +324,6 @@ int page_pool_list(struct page_pool *pool)
+ 	if (pool->slow.netdev) {
+ 		hlist_add_head(&pool->user.list,
+ 			       &pool->slow.netdev->page_pools);
+-		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
+-
+ 		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
+ 	}
+ 
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index cb8d32e5c14e67..ad2741f1346af2 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -319,7 +319,7 @@ static int proc_do_dev_weight(const struct ctl_table *table, int write,
+ 	int ret, weight;
+ 
+ 	mutex_lock(&dev_weight_mutex);
+-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 	if (!ret && write) {
+ 		weight = READ_ONCE(weight_p);
+ 		WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+@@ -412,6 +412,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "dev_weight_rx_bias",
+@@ -419,6 +420,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "dev_weight_tx_bias",
+@@ -426,6 +428,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_do_dev_weight,
++		.extra1         = SYSCTL_ONE,
+ 	},
+ 	{
+ 		.procname	= "netdev_max_backlog",
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 7bb94875a7ec87..34bee42e12470c 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -998,7 +998,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+ 	    ethtool_get_flow_spec_ring(info.fs.ring_cookie))
+ 		return -EINVAL;
+ 
+-	if (ops->get_rxfh) {
++	if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
+ 		struct ethtool_rxfh_param rxfh = {};
+ 
+ 		rc = ops->get_rxfh(dev, &rxfh);
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index e3f0ef6b851bb4..4d18dc29b30438 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -90,7 +90,7 @@ int ethnl_ops_begin(struct net_device *dev)
+ 		pm_runtime_get_sync(dev->dev.parent);
+ 
+ 	if (!netif_device_present(dev) ||
+-	    dev->reg_state == NETREG_UNREGISTERING) {
++	    dev->reg_state >= NETREG_UNREGISTERING) {
+ 		ret = -ENODEV;
+ 		goto err;
+ 	}
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index 87bb3a91598ee9..a4bacf1985558a 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -700,9 +700,12 @@ static int fill_frame_info(struct hsr_frame_info *frame,
+ 		frame->is_vlan = true;
+ 
+ 	if (frame->is_vlan) {
+-		if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
++		/* Note: skb->mac_len might be wrong here. */
++		if (!pskb_may_pull(skb,
++				   skb_mac_offset(skb) +
++				   offsetofend(struct hsr_vlan_ethhdr, vlanhdr)))
+ 			return -EINVAL;
+-		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
++		vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb);
+ 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ 	}
+ 
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 963a89ae9c26e8..094084b61bff8a 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -312,7 +312,6 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ 	struct dst_entry *dst = &rt->dst;
+ 	struct inet_peer *peer;
+ 	bool rc = true;
+-	int vif;
+ 
+ 	if (!apply_ratelimit)
+ 		return true;
+@@ -321,12 +320,12 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ 	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
+ 		goto out;
+ 
+-	vif = l3mdev_master_ifindex(dst->dev);
+-	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
++	rcu_read_lock();
++	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
++			       l3mdev_master_ifindex_rcu(dst->dev));
+ 	rc = inet_peer_xrlim_allow(peer,
+ 				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
+-	if (peer)
+-		inet_putpeer(peer);
++	rcu_read_unlock();
+ out:
+ 	if (!rc)
+ 		__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 5ab56f4cb52976..e02484f4d22b8e 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -95,6 +95,7 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
+ {
+ 	struct rb_node **pp, *parent, *next;
+ 	struct inet_peer *p;
++	u32 now;
+ 
+ 	pp = &base->rb_root.rb_node;
+ 	parent = NULL;
+@@ -108,8 +109,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
+ 		p = rb_entry(parent, struct inet_peer, rb_node);
+ 		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
+ 		if (cmp == 0) {
+-			if (!refcount_inc_not_zero(&p->refcnt))
+-				break;
++			now = jiffies;
++			if (READ_ONCE(p->dtime) != now)
++				WRITE_ONCE(p->dtime, now);
+ 			return p;
+ 		}
+ 		if (gc_stack) {
+@@ -150,9 +152,6 @@ static void inet_peer_gc(struct inet_peer_base *base,
+ 	for (i = 0; i < gc_cnt; i++) {
+ 		p = gc_stack[i];
+ 
+-		/* The READ_ONCE() pairs with the WRITE_ONCE()
+-		 * in inet_putpeer()
+-		 */
+ 		delta = (__u32)jiffies - READ_ONCE(p->dtime);
+ 
+ 		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
+@@ -168,31 +167,23 @@ static void inet_peer_gc(struct inet_peer_base *base,
+ 	}
+ }
+ 
++/* Must be called under RCU : No refcount change is done here. */
+ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+-			       const struct inetpeer_addr *daddr,
+-			       int create)
++			       const struct inetpeer_addr *daddr)
+ {
+ 	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
+ 	struct rb_node **pp, *parent;
+ 	unsigned int gc_cnt, seq;
+-	int invalidated;
+ 
+ 	/* Attempt a lockless lookup first.
+ 	 * Because of a concurrent writer, we might not find an existing entry.
+ 	 */
+-	rcu_read_lock();
+ 	seq = read_seqbegin(&base->lock);
+ 	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
+-	invalidated = read_seqretry(&base->lock, seq);
+-	rcu_read_unlock();
+ 
+ 	if (p)
+ 		return p;
+ 
+-	/* If no writer did a change during our lookup, we can return early. */
+-	if (!create && !invalidated)
+-		return NULL;
+-
+ 	/* retry an exact lookup, taking the lock before.
+ 	 * At least, nodes should be hot in our cache.
+ 	 */
+@@ -201,12 +192,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ 
+ 	gc_cnt = 0;
+ 	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
+-	if (!p && create) {
++	if (!p) {
+ 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+ 		if (p) {
+ 			p->daddr = *daddr;
+ 			p->dtime = (__u32)jiffies;
+-			refcount_set(&p->refcnt, 2);
++			refcount_set(&p->refcnt, 1);
+ 			atomic_set(&p->rid, 0);
+ 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ 			p->rate_tokens = 0;
+@@ -231,15 +222,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
+ 
+ void inet_putpeer(struct inet_peer *p)
+ {
+-	/* The WRITE_ONCE() pairs with itself (we run lockless)
+-	 * and the READ_ONCE() in inet_peer_gc()
+-	 */
+-	WRITE_ONCE(p->dtime, (__u32)jiffies);
+-
+ 	if (refcount_dec_and_test(&p->refcnt))
+ 		kfree_rcu(p, rcu);
+ }
+-EXPORT_SYMBOL_GPL(inet_putpeer);
+ 
+ /*
+  *	Check transmit rate limitation for given message.
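
After the inetpeer conversion, lookups run entirely under RCU and only
refresh a last-use stamp; reclaim becomes time-based, freeing entries idle
past a TTL whose refcount can be dropped from exactly one. A loose
single-threaded sketch of that lifetime scheme, with fake jiffies and
hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct peer {
	atomic_int refcnt;
	unsigned long dtime;	/* last-use stamp, in fake jiffies */
};

static unsigned long jiffies;

static void lookup_touch(struct peer *p)
{
	if (p->dtime != jiffies)	/* avoid dirtying the line each hit */
		p->dtime = jiffies;
}

static bool gc_try_free(struct peer *p, unsigned long ttl)
{
	int one = 1;

	if (jiffies - p->dtime < ttl)
		return false;		/* recently used: keep */
	/* refcount_dec_if_one(): only the base reference remains */
	return atomic_compare_exchange_strong(&p->refcnt, &one, 0);
}

int main(void)
{
	struct peer p = { .refcnt = 1, .dtime = 0 };

	jiffies = 5;
	lookup_touch(&p);
	jiffies = 10;
	printf("freed: %d\n", gc_try_free(&p, 60));	/* too fresh */
	jiffies = 100;
	printf("freed: %d\n", gc_try_free(&p, 60));	/* idle: freed */
	return 0;
}
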
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 07036a2943c19f..7a435746a22dee 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -82,15 +82,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
+ {
+ 	struct ipq *qp = container_of(q, struct ipq, q);
+-	struct net *net = q->fqdir->net;
+-
+ 	const struct frag_v4_compare_key *key = a;
++	struct net *net = q->fqdir->net;
++	struct inet_peer *p = NULL;
+ 
+ 	q->key.v4 = *key;
+ 	qp->ecn = 0;
+-	qp->peer = q->fqdir->max_dist ?
+-		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
+-		NULL;
++	if (q->fqdir->max_dist) {
++		rcu_read_lock();
++		p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif);
++		if (p && !refcount_inc_not_zero(&p->refcnt))
++			p = NULL;
++		rcu_read_unlock();
++	}
++	qp->peer = p;
+ }
+ 
+ static void ip4_frag_free(struct inet_frag_queue *q)
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 99d8faa508e532..21ae7594a8525a 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -831,7 +831,7 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
+ 				cache->mfc_un.res.maxvif = vifi + 1;
+ 		}
+ 	}
+-	cache->mfc_un.res.lastuse = jiffies;
++	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
+ }
+ 
+ static int vif_add(struct net *net, struct mr_table *mrt,
+@@ -1681,9 +1681,9 @@ int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
+ 		rcu_read_lock();
+ 		c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
+ 		if (c) {
+-			sr->pktcnt = c->_c.mfc_un.res.pkt;
+-			sr->bytecnt = c->_c.mfc_un.res.bytes;
+-			sr->wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 			return 0;
+ 		}
+@@ -1753,9 +1753,9 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 		rcu_read_lock();
+ 		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+ 		if (c) {
+-			sr.pktcnt = c->_c.mfc_un.res.pkt;
+-			sr.bytecnt = c->_c.mfc_un.res.bytes;
+-			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 
+ 			if (copy_to_user(arg, &sr, sizeof(sr)))
+@@ -1988,9 +1988,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ 	int vif, ct;
+ 
+ 	vif = c->_c.mfc_parent;
+-	c->_c.mfc_un.res.pkt++;
+-	c->_c.mfc_un.res.bytes += skb->len;
+-	c->_c.mfc_un.res.lastuse = jiffies;
++	atomic_long_inc(&c->_c.mfc_un.res.pkt);
++	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
++	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+ 
+ 	if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
+ 		struct mfc_cache *cache_proxy;
+@@ -2021,7 +2021,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ 			goto dont_forward;
+ 		}
+ 
+-		c->_c.mfc_un.res.wrong_if++;
++		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
+ 
+ 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
+ 		    /* pimsm uses asserts, when switching from RPT to SPT,
+@@ -3029,9 +3029,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
+ 
+ 		if (it->cache != &mrt->mfc_unres_queue) {
+ 			seq_printf(seq, " %8lu %8lu %8lu",
+-				   mfc->_c.mfc_un.res.pkt,
+-				   mfc->_c.mfc_un.res.bytes,
+-				   mfc->_c.mfc_un.res.wrong_if);
++				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
++				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
++				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
+ 			for (n = mfc->_c.mfc_un.res.minvif;
+ 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
+ 				if (VIF_EXISTS(mrt, n) &&
+diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
+index f0af12a2f70bcd..28d77d454d442e 100644
+--- a/net/ipv4/ipmr_base.c
++++ b/net/ipv4/ipmr_base.c
+@@ -263,9 +263,9 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+ 	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+ 
+-	mfcs.mfcs_packets = c->mfc_un.res.pkt;
+-	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
+-	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
++	mfcs.mfcs_packets = atomic_long_read(&c->mfc_un.res.pkt);
++	mfcs.mfcs_bytes = atomic_long_read(&c->mfc_un.res.bytes);
++	mfcs.mfcs_wrong_if = atomic_long_read(&c->mfc_un.res.wrong_if);
+ 	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
+ 	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
+ 			      RTA_PAD))
+@@ -330,9 +330,6 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ 	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+ 		if (e < s_e)
+ 			goto next_entry2;
+-		if (filter->dev &&
+-		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
+-			goto next_entry2;
+ 
+ 		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+ 			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
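
The ipmr hunks above (and the matching ip6mr ones further down) convert the
per-route pkt/bytes/wrong_if counters to atomic longs so the forwarding path
can bump them locklessly while readers snapshot them. The equivalent pattern
in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

struct mfc_stats {
	atomic_ulong pkt;
	atomic_ulong bytes;
	atomic_ulong wrong_if;
};

static void forward(struct mfc_stats *s, unsigned long len)
{
	/* relaxed is enough: these are statistics, not synchronization */
	atomic_fetch_add_explicit(&s->pkt, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&s->bytes, len, memory_order_relaxed);
}

int main(void)
{
	struct mfc_stats s = { 0 };

	forward(&s, 1500);
	forward(&s, 60);
	printf("pkt=%lu bytes=%lu wrong_if=%lu\n",
	       atomic_load(&s.pkt), atomic_load(&s.bytes),
	       atomic_load(&s.wrong_if));
	return 0;
}
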
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index e1564b95fab095..3a1467f2d553f3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -870,11 +870,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	}
+ 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+ 	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
+-	rcu_read_unlock();
+ 
+ 	net = dev_net(rt->dst.dev);
+-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
++	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif);
+ 	if (!peer) {
++		rcu_read_unlock();
+ 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
+ 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
+ 		return;
+@@ -893,7 +893,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 	 */
+ 	if (peer->n_redirects >= ip_rt_redirect_number) {
+ 		peer->rate_last = jiffies;
+-		goto out_put_peer;
++		goto out_unlock;
+ 	}
+ 
+ 	/* Check for load limit; set rate_last to the latest sent
+@@ -914,8 +914,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ 					     &ip_hdr(skb)->saddr, inet_iif(skb),
+ 					     &ip_hdr(skb)->daddr, &gw);
+ 	}
+-out_put_peer:
+-	inet_putpeer(peer);
++out_unlock:
++	rcu_read_unlock();
+ }
+ 
+ static int ip_error(struct sk_buff *skb)
+@@ -975,9 +975,9 @@ static int ip_error(struct sk_buff *skb)
+ 		break;
+ 	}
+ 
++	rcu_read_lock();
+ 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
+-			       l3mdev_master_ifindex(skb->dev), 1);
+-
++			       l3mdev_master_ifindex_rcu(skb->dev));
+ 	send = true;
+ 	if (peer) {
+ 		now = jiffies;
+@@ -989,8 +989,9 @@ static int ip_error(struct sk_buff *skb)
+ 			peer->rate_tokens -= ip_rt_error_cost;
+ 		else
+ 			send = false;
+-		inet_putpeer(peer);
+ 	}
++	rcu_read_unlock();
++
+ 	if (send)
+ 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+ 
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index 5dbed91c617825..76c23675ae50ab 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -392,6 +392,10 @@ static void hystart_update(struct sock *sk, u32 delay)
+ 	if (after(tp->snd_una, ca->end_seq))
+ 		bictcp_hystart_reset(sk);
+ 
++	/* hystart triggers when cwnd is larger than some threshold */
++	if (tcp_snd_cwnd(tp) < hystart_low_window)
++		return;
++
+ 	if (hystart_detect & HYSTART_ACK_TRAIN) {
+ 		u32 now = bictcp_clock_us(sk);
+ 
+@@ -467,9 +471,7 @@ __bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample
+ 	if (ca->delay_min == 0 || ca->delay_min > delay)
+ 		ca->delay_min = delay;
+ 
+-	/* hystart triggers when cwnd is larger than some threshold */
+-	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
+-	    tcp_snd_cwnd(tp) >= hystart_low_window)
++	if (!ca->found && tcp_in_slow_start(tp) && hystart)
+ 		hystart_update(sk, delay);
+ }
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0e5b9a654254b3..bc95d2a5924fdc 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -265,11 +265,14 @@ static u16 tcp_select_window(struct sock *sk)
+ 	u32 cur_win, new_win;
+ 
+ 	/* Make the window 0 if we failed to queue the data because we
+-	 * are out of memory. The window is temporary, so we don't store
+-	 * it on the socket.
++	 * are out of memory.
+ 	 */
+-	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
++	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
++		tp->pred_flags = 0;
++		tp->rcv_wnd = 0;
++		tp->rcv_wup = tp->rcv_nxt;
+ 		return 0;
++	}
+ 
+ 	cur_win = tcp_receive_window(tp);
+ 	new_win = __tcp_select_window(sk);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 86d28261851506..c472c9a57cf688 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -420,6 +420,49 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
+ }
+ EXPORT_SYMBOL(udp_ehashfn);
+ 
++/**
++ * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
++ * @net:	Network namespace
++ * @saddr:	Source address, network order
++ * @sport:	Source port, network order
++ * @daddr:	Destination address, network order
++ * @hnum:	Destination port, host order
++ * @dif:	Destination interface index
++ * @sdif:	Destination bridge port index, if relevant
++ * @udptable:	Set of UDP hash tables
++ *
++ * Simplified lookup to be used as fallback if no sockets are found due to a
++ * potential race between (receive) address change, and lookup happening before
++ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
++ * result sockets, because if we have one, we don't need the fallback at all.
++ *
++ * Called under rcu_read_lock().
++ *
++ * Return: socket with highest matching score if any, NULL if none
++ */
++static struct sock *udp4_lib_lookup1(const struct net *net,
++				     __be32 saddr, __be16 sport,
++				     __be32 daddr, unsigned int hnum,
++				     int dif, int sdif,
++				     const struct udp_table *udptable)
++{
++	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
++	struct udp_hslot *hslot = &udptable->hash[slot];
++	struct sock *sk, *result = NULL;
++	int score, badness = 0;
++
++	sk_for_each_rcu(sk, &hslot->head) {
++		score = compute_score(sk, net,
++				      saddr, sport, daddr, hnum, dif, sdif);
++		if (score > badness) {
++			result = sk;
++			badness = score;
++		}
++	}
++
++	return result;
++}
++
+ /* called with rcu_read_lock() */
+ static struct sock *udp4_lib_lookup2(const struct net *net,
+ 				     __be32 saddr, __be16 sport,
+@@ -681,6 +724,19 @@ struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
+ 	result = udp4_lib_lookup2(net, saddr, sport,
+ 				  htonl(INADDR_ANY), hnum, dif, sdif,
+ 				  hslot2, skb);
++	if (!IS_ERR_OR_NULL(result))
++		goto done;
++
++	/* Primary hash (destination port) lookup as fallback for this race:
++	 *   1. __ip4_datagram_connect() sets sk_rcv_saddr
++	 *   2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
++	 *   3. rehash operation updating _secondary and four-tuple_ hashes
++	 * The primary hash doesn't need an update after 1., so, thanks to this
++	 * further step, 1. and 3. don't need to be atomic against the lookup.
++	 */
++	result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
++				  udptable);
++
+ done:
+ 	if (IS_ERR(result))
+ 		return NULL;
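
A sketch of the fallback the UDP lookup gains: the fast path keys on a
secondary (address + port) hash that can be momentarily stale while a socket
rehashes, so a miss retries the slot derived from the primary key, the
destination port, which never changes. A toy two-table model, not the
kernel's actual hash layout:

#include <stdio.h>

#define SLOTS 8

struct sk { int port; int addr; };

static struct sk *secondary[SLOTS];	/* keyed on (addr, port) */
static struct sk *primary[SLOTS];	/* keyed on port only */

static struct sk *lookup(int addr, int port)
{
	struct sk *s = secondary[(addr ^ port) % SLOTS];

	if (s && s->addr == addr && s->port == port)
		return s;
	/* fallback: the primary hash cannot be stale across an addr change */
	s = primary[port % SLOTS];
	if (s && s->port == port)
		return s;
	return NULL;
}

int main(void)
{
	struct sk sock = { .port = 53, .addr = 7 };

	primary[sock.port % SLOTS] = &sock;
	/* secondary slot not yet rehashed for the new address */
	printf("found: %s\n", lookup(7, 53) ? "yes" : "no");
	return 0;
}
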
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 071b0bc1179d81..a6984a29fdb9dd 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -222,10 +222,10 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ 		if (rt->rt6i_dst.plen < 128)
+ 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
+ 
+-		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
++		rcu_read_lock();
++		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr);
+ 		res = inet_peer_xrlim_allow(peer, tmo);
+-		if (peer)
+-			inet_putpeer(peer);
++		rcu_read_unlock();
+ 	}
+ 	if (!res)
+ 		__ICMP6_INC_STATS(net, ip6_dst_idev(dst),
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f7b4608bb316ed..5a364b35211533 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -613,15 +613,15 @@ int ip6_forward(struct sk_buff *skb)
+ 		else
+ 			target = &hdr->daddr;
+ 
+-		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
++		rcu_read_lock();
++		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr);
+ 
+ 		/* Limit redirects both by destination (here)
+ 		   and by source (inside ndisc_send_redirect)
+ 		 */
+ 		if (inet_peer_xrlim_allow(peer, 1*HZ))
+ 			ndisc_send_redirect(skb, target);
+-		if (peer)
+-			inet_putpeer(peer);
++		rcu_read_unlock();
+ 	} else {
+ 		int addrtype = ipv6_addr_type(&hdr->saddr);
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 578ff1336afeff..535e9f72514c06 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -520,9 +520,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
+ 
+ 		if (it->cache != &mrt->mfc_unres_queue) {
+ 			seq_printf(seq, " %8lu %8lu %8lu",
+-				   mfc->_c.mfc_un.res.pkt,
+-				   mfc->_c.mfc_un.res.bytes,
+-				   mfc->_c.mfc_un.res.wrong_if);
++				   atomic_long_read(&mfc->_c.mfc_un.res.pkt),
++				   atomic_long_read(&mfc->_c.mfc_un.res.bytes),
++				   atomic_long_read(&mfc->_c.mfc_un.res.wrong_if));
+ 			for (n = mfc->_c.mfc_un.res.minvif;
+ 			     n < mfc->_c.mfc_un.res.maxvif; n++) {
+ 				if (VIF_EXISTS(mrt, n) &&
+@@ -884,7 +884,7 @@ static void ip6mr_update_thresholds(struct mr_table *mrt,
+ 				cache->mfc_un.res.maxvif = vifi + 1;
+ 		}
+ 	}
+-	cache->mfc_un.res.lastuse = jiffies;
++	WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies);
+ }
+ 
+ static int mif6_add(struct net *net, struct mr_table *mrt,
+@@ -1945,9 +1945,9 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
+ 		c = ip6mr_cache_find(mrt, &sr->src.sin6_addr,
+ 				     &sr->grp.sin6_addr);
+ 		if (c) {
+-			sr->pktcnt = c->_c.mfc_un.res.pkt;
+-			sr->bytecnt = c->_c.mfc_un.res.bytes;
+-			sr->wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 			return 0;
+ 		}
+@@ -2017,9 +2017,9 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 		rcu_read_lock();
+ 		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+ 		if (c) {
+-			sr.pktcnt = c->_c.mfc_un.res.pkt;
+-			sr.bytecnt = c->_c.mfc_un.res.bytes;
+-			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
++			sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt);
++			sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes);
++			sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if);
+ 			rcu_read_unlock();
+ 
+ 			if (copy_to_user(arg, &sr, sizeof(sr)))
+@@ -2142,9 +2142,9 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
+ 	int true_vifi = ip6mr_find_vif(mrt, dev);
+ 
+ 	vif = c->_c.mfc_parent;
+-	c->_c.mfc_un.res.pkt++;
+-	c->_c.mfc_un.res.bytes += skb->len;
+-	c->_c.mfc_un.res.lastuse = jiffies;
++	atomic_long_inc(&c->_c.mfc_un.res.pkt);
++	atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
++	WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+ 
+ 	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
+ 		struct mfc6_cache *cache_proxy;
+@@ -2162,7 +2162,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
+ 	 * Wrong interface: drop packet and (maybe) send PIM assert.
+ 	 */
+ 	if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
+-		c->_c.mfc_un.res.wrong_if++;
++		atomic_long_inc(&c->_c.mfc_un.res.wrong_if);
+ 
+ 		if (true_vifi >= 0 && mrt->mroute_do_assert &&
+ 		    /* pimsm uses asserts, when switching from RPT to SPT,
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index aba94a34867379..d044c67019de6d 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1731,10 +1731,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 			  "Redirect: destination is not a neighbour\n");
+ 		goto release;
+ 	}
+-	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
++
++	rcu_read_lock();
++	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr);
+ 	ret = inet_peer_xrlim_allow(peer, 1*HZ);
+-	if (peer)
+-		inet_putpeer(peer);
++	rcu_read_unlock();
++
+ 	if (!ret)
+ 		goto release;
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index d766fd798ecf99..b974116152dd3f 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -170,6 +170,49 @@ static int compute_score(struct sock *sk, const struct net *net,
+ 	return score;
+ }
+ 
++/**
++ * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
++ * @net:	Network namespace
++ * @saddr:	Source address, network order
++ * @sport:	Source port, network order
++ * @daddr:	Destination address, network order
++ * @hnum:	Destination port, host order
++ * @dif:	Destination interface index
++ * @sdif:	Destination bridge port index, if relevant
++ * @udptable:	Set of UDP hash tables
++ *
++ * Simplified lookup to be used as fallback if no sockets are found due to a
++ * potential race between a (receive) address change and a lookup happening before
++ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
++ * result sockets, because if we have one, we don't need the fallback at all.
++ *
++ * Called under rcu_read_lock().
++ *
++ * Return: socket with highest matching score if any, NULL if none
++ */
++static struct sock *udp6_lib_lookup1(const struct net *net,
++				     const struct in6_addr *saddr, __be16 sport,
++				     const struct in6_addr *daddr,
++				     unsigned int hnum, int dif, int sdif,
++				     const struct udp_table *udptable)
++{
++	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
++	struct udp_hslot *hslot = &udptable->hash[slot];
++	struct sock *sk, *result = NULL;
++	int score, badness = 0;
++
++	sk_for_each_rcu(sk, &hslot->head) {
++		score = compute_score(sk, net,
++				      saddr, sport, daddr, hnum, dif, sdif);
++		if (score > badness) {
++			result = sk;
++			badness = score;
++		}
++	}
++
++	return result;
++}
++
+ /* called with rcu_read_lock() */
+ static struct sock *udp6_lib_lookup2(const struct net *net,
+ 		const struct in6_addr *saddr, __be16 sport,
+@@ -347,6 +390,13 @@ struct sock *__udp6_lib_lookup(const struct net *net,
+ 	result = udp6_lib_lookup2(net, saddr, sport,
+ 				  &in6addr_any, hnum, dif, sdif,
+ 				  hslot2, skb);
++	if (!IS_ERR_OR_NULL(result))
++		goto done;
++
++	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
++	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
++				  udptable);
++
+ done:
+ 	if (IS_ERR(result))
+ 		return NULL;
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index a9bc2fd59f55ad..e7687a7b16835a 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -727,7 +727,7 @@ static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sda
+ {
+ 	u16 active_links;
+ 
+-	if (kstrtou16(buf, 0, &active_links))
++	if (kstrtou16(buf, 0, &active_links) || !active_links)
+ 		return -EINVAL;
+ 
+ 	return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen;
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index edd1e4d4ad9d2a..ca04f2ff9f44e9 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -724,6 +724,9 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
+ 	if (sdata && !check_sdata_in_driver(sdata))
+ 		return;
+ 
++	if (!sta->uploaded)
++		return;
++
+ 	trace_drv_flush_sta(local, sdata, &sta->sta);
+ 	if (local->ops->flush_sta)
+ 		local->ops->flush_sta(&local->hw, &sdata->vif, &sta->sta);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 2bec18fc1b035b..c4a28ccbd0647f 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3001,6 +3001,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
+ 	}
+ 
+ 	IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
++	ieee80211_set_qos_hdr(sdata, fwd_skb);
+ 	ieee80211_add_pending_skb(local, fwd_skb);
+ 
+ rx_accept:
+diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
+index b0dd008e2114bc..dd595d9b5e50c7 100644
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -405,9 +405,9 @@ void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
+ 			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDROP);
+ 			subflow->mpc_drop = 1;
+ 			mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+-		} else {
+-			subflow->mpc_drop = 0;
+ 		}
++	} else if (ssk->sk_state == TCP_SYN_SENT) {
++		subflow->mpc_drop = 0;
+ 	}
+ }
+ 
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 123f3f2972841a..fd2de185bc939f 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -108,7 +108,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->suboptions |= OPTION_MPTCP_DSS;
+ 			mp_opt->use_map = 1;
+ 			mp_opt->mpc_map = 1;
+-			mp_opt->use_ack = 0;
+ 			mp_opt->data_len = get_unaligned_be16(ptr);
+ 			ptr += 2;
+ 		}
+@@ -157,11 +156,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		pr_debug("DSS\n");
+ 		ptr++;
+ 
+-		/* we must clear 'mpc_map' be able to detect MP_CAPABLE
+-		 * map vs DSS map in mptcp_incoming_options(), and reconstruct
+-		 * map info accordingly
+-		 */
+-		mp_opt->mpc_map = 0;
+ 		flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
+ 		mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
+ 		mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
+@@ -369,8 +363,11 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 	const unsigned char *ptr;
+ 	int length;
+ 
+-	/* initialize option status */
+-	mp_opt->suboptions = 0;
++	/* Ensure that casting the whole status to u32 is efficient and safe */
++	BUILD_BUG_ON(sizeof_field(struct mptcp_options_received, status) != sizeof(u32));
++	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct mptcp_options_received, status),
++				 sizeof(u32)));
++	*(u32 *)&mp_opt->status = 0;
+ 
+ 	length = (th->doff * 4) - sizeof(struct tcphdr);
+ 	ptr = (const unsigned char *)(th + 1);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 7a0f7998376a5b..cc7db93e745c7c 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2050,7 +2050,8 @@ int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 	}
+ 	if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+-	    (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
++	    (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL |
++			     MPTCP_PM_ADDR_FLAG_IMPLICIT))) {
+ 		spin_unlock_bh(&pernet->lock);
+ 		GENL_SET_ERR_MSG(info, "invalid addr flags");
+ 		return -EINVAL;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 1b2e7cbb577fc2..f910e840aa8f1e 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1767,8 +1767,10 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+ 		 * see mptcp_disconnect().
+ 		 * Attempt it again outside the problematic scope.
+ 		 */
+-		if (!mptcp_disconnect(sk, 0))
++		if (!mptcp_disconnect(sk, 0)) {
++			sk->sk_disconnects++;
+ 			sk->sk_socket->state = SS_UNCONNECTED;
++		}
+ 	}
+ 	inet_clear_bit(DEFER_CONNECT, sk);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 73526f1d768fcb..b70a303e082878 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -149,22 +149,24 @@ struct mptcp_options_received {
+ 	u32	subflow_seq;
+ 	u16	data_len;
+ 	__sum16	csum;
+-	u16	suboptions;
++	struct_group(status,
++		u16 suboptions;
++		u16 use_map:1,
++		    dsn64:1,
++		    data_fin:1,
++		    use_ack:1,
++		    ack64:1,
++		    mpc_map:1,
++		    reset_reason:4,
++		    reset_transient:1,
++		    echo:1,
++		    backup:1,
++		    deny_join_id0:1,
++		    __unused:2;
++	);
++	u8	join_id;
+ 	u32	token;
+ 	u32	nonce;
+-	u16	use_map:1,
+-		dsn64:1,
+-		data_fin:1,
+-		use_ack:1,
+-		ack64:1,
+-		mpc_map:1,
+-		reset_reason:4,
+-		reset_transient:1,
+-		echo:1,
+-		backup:1,
+-		deny_join_id0:1,
+-		__unused:2;
+-	u8	join_id;
+ 	u64	thmac;
+ 	u8	hmac[MPTCPOPT_HMAC_LEN];
+ 	struct mptcp_addr_info addr;
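
The struct_group() change packs the MPTCP parse-state flags into one 32-bit
block, which the BUILD_BUG_ON()s in options.c verify, so the parser can reset
everything with a single aligned store. A portable C sketch of the same idea
using an anonymous union; the field layout is illustrative only, and bitfield
ordering is implementation-defined.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct opts {
	union {
		uint32_t status;	/* one store clears the group */
		struct {
			uint16_t suboptions;
			uint16_t use_map:1,
				 mpc_map:1,
				 use_ack:1;
		};
	};
	uint32_t token;
};

/* mirror of the kernel's BUILD_BUG_ON(): the group must span 32 bits */
static_assert(offsetof(struct opts, token) == sizeof(uint32_t),
	      "status group must span exactly 32 bits");

int main(void)
{
	struct opts o = { .token = 42 };

	o.suboptions = 0xffff;
	o.use_map = 1;
	o.status = 0;		/* resets suboptions and all flag bits */
	printf("suboptions=%u use_map=%u token=%u\n",
	       o.suboptions, o.use_map, o.token);
	return 0;
}
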
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 14bd66909ca455..4a8ce2949faeac 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -1089,14 +1089,12 @@ static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
+ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
+ {
+ 	struct ncsi_dev_priv *ndp = nr->ndp;
++	struct sockaddr *saddr = &ndp->pending_mac;
+ 	struct net_device *ndev = ndp->ndev.dev;
+ 	struct ncsi_rsp_gmcma_pkt *rsp;
+-	struct sockaddr saddr;
+-	int ret = -1;
+ 	int i;
+ 
+ 	rsp = (struct ncsi_rsp_gmcma_pkt *)skb_network_header(nr->rsp);
+-	saddr.sa_family = ndev->type;
+ 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 
+ 	netdev_info(ndev, "NCSI: Received %d provisioned MAC addresses\n",
+@@ -1108,20 +1106,20 @@ static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr)
+ 			    rsp->addresses[i][4], rsp->addresses[i][5]);
+ 	}
+ 
++	saddr->sa_family = ndev->type;
+ 	for (i = 0; i < rsp->address_count; i++) {
+-		memcpy(saddr.sa_data, &rsp->addresses[i], ETH_ALEN);
+-		ret = ndev->netdev_ops->ndo_set_mac_address(ndev, &saddr);
+-		if (ret < 0) {
++		if (!is_valid_ether_addr(rsp->addresses[i])) {
+ 			netdev_warn(ndev, "NCSI: Unable to assign %pM to device\n",
+-				    saddr.sa_data);
++				    rsp->addresses[i]);
+ 			continue;
+ 		}
+-		netdev_warn(ndev, "NCSI: Set MAC address to %pM\n", saddr.sa_data);
++		memcpy(saddr->sa_data, rsp->addresses[i], ETH_ALEN);
++		netdev_warn(ndev, "NCSI: Will set MAC address to %pM\n", saddr->sa_data);
+ 		break;
+ 	}
+ 
+-	ndp->gma_flag = ret == 0;
+-	return ret;
++	ndp->gma_flag = 1;
++	return 0;
+ }
+ 
+ static struct ncsi_rsp_handler {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index c4af283356e741..73e37861ff11fc 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4753,6 +4753,14 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
+ 	return 0;
+ }
+ 
++static u32 nft_set_userspace_size(const struct nft_set_ops *ops, u32 size)
++{
++	if (ops->usize)
++		return ops->usize(size);
++
++	return size;
++}
++
+ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 			      const struct nft_set *set, u16 event, u16 flags)
+ {
+@@ -4823,7 +4831,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+ 	if (!nest)
+ 		goto nla_put_failure;
+ 	if (set->size &&
+-	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
++	    nla_put_be32(skb, NFTA_SET_DESC_SIZE,
++			 htonl(nft_set_userspace_size(set->ops, set->size))))
+ 		goto nla_put_failure;
+ 
+ 	if (set->field_count > 1 &&
+@@ -5065,7 +5074,7 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 			       const struct nlattr *nla)
+ {
+-	u32 num_regs = 0, key_num_regs = 0;
++	u32 len = 0, num_regs;
+ 	struct nlattr *attr;
+ 	int rem, err, i;
+ 
+@@ -5079,12 +5088,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ 	}
+ 
+ 	for (i = 0; i < desc->field_count; i++)
+-		num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++		len += round_up(desc->field_len[i], sizeof(u32));
+ 
+-	key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+-	if (key_num_regs != num_regs)
++	if (len != desc->klen)
+ 		return -EINVAL;
+ 
++	num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+ 	if (num_regs > NFT_REG32_COUNT)
+ 		return -E2BIG;
+ 
+@@ -5191,6 +5200,15 @@ static bool nft_set_is_same(const struct nft_set *set,
+ 	return true;
+ }
+ 
++static u32 nft_set_kernel_size(const struct nft_set_ops *ops,
++			       const struct nft_set_desc *desc)
++{
++	if (ops->ksize)
++		return ops->ksize(desc->size);
++
++	return desc->size;
++}
++
+ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 			    const struct nlattr * const nla[])
+ {
+@@ -5373,6 +5391,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 		if (err < 0)
+ 			return err;
+ 
++		if (desc.size)
++			desc.size = nft_set_kernel_size(set->ops, &desc);
++
+ 		err = 0;
+ 		if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
+ 			NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
+@@ -5395,6 +5416,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
+ 	if (IS_ERR(ops))
+ 		return PTR_ERR(ops);
+ 
++	if (desc.size)
++		desc.size = nft_set_kernel_size(ops, &desc);
++
+ 	udlen = 0;
+ 	if (nla[NFTA_SET_USERDATA])
+ 		udlen = nla_len(nla[NFTA_SET_USERDATA]);
+@@ -7051,6 +7075,27 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
+ 	return true;
+ }
+ 
++static u32 nft_set_maxsize(const struct nft_set *set)
++{
++	u32 maxsize, delta;
++
++	if (!set->size)
++		return UINT_MAX;
++
++	if (set->ops->adjust_maxsize)
++		delta = set->ops->adjust_maxsize(set);
++	else
++		delta = 0;
++
++	if (check_add_overflow(set->size, set->ndeact, &maxsize))
++		return UINT_MAX;
++
++	if (check_add_overflow(maxsize, delta, &maxsize))
++		return UINT_MAX;
++
++	return maxsize;
++}
++
+ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 			    const struct nlattr *attr, u32 nlmsg_flags)
+ {
+@@ -7423,7 +7468,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ 
+ 	if (!(flags & NFT_SET_ELEM_CATCHALL)) {
+-		unsigned int max = set->size ? set->size + set->ndeact : UINT_MAX;
++		unsigned int max = nft_set_maxsize(set);
+ 
+ 		if (!atomic_add_unless(&set->nelems, 1, max)) {
+ 			err = -ENFILE;
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 3b474d23566388..221d5022301810 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -289,6 +289,15 @@ static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
+ 	return false;
+ }
+ 
++static void flow_offload_ct_tcp(struct nf_conn *ct)
++{
++	/* conntrack will not see all packets, so disable tcp window validation. */
++	spin_lock_bh(&ct->lock);
++	ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++	ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++	spin_unlock_bh(&ct->lock);
++}
++
+ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 				  struct nft_regs *regs,
+ 				  const struct nft_pktinfo *pkt)
+@@ -356,11 +365,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ 		goto err_flow_alloc;
+ 
+ 	flow_offload_route_init(flow, &route);
+-
+-	if (tcph) {
+-		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+-		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+-	}
++	if (tcph)
++		flow_offload_ct_tcp(ct);
+ 
+ 	__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
+ 	ret = flow_offload_add(flowtable, flow);
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index b7ea21327549b3..2e8ef16ff191d4 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -750,6 +750,46 @@ static void nft_rbtree_gc_init(const struct nft_set *set)
+ 	priv->last_gc = jiffies;
+ }
+ 
++/* rbtree stores ranges as singleton elements; each range is composed of two
++ * elements ...
++ */
++static u32 nft_rbtree_ksize(u32 size)
++{
++	return size * 2;
++}
++
++/* ... hide this detail from userspace. */
++static u32 nft_rbtree_usize(u32 size)
++{
++	if (!size)
++		return 0;
++
++	return size / 2;
++}
++
++static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
++{
++	struct nft_rbtree *priv = nft_set_priv(set);
++	struct nft_rbtree_elem *rbe;
++	struct rb_node *node;
++	const void *key;
++
++	node = rb_last(&priv->root);
++	if (!node)
++		return 0;
++
++	rbe = rb_entry(node, struct nft_rbtree_elem, node);
++	if (!nft_rbtree_interval_end(rbe))
++		return 0;
++
++	key = nft_set_ext_key(&rbe->ext);
++	if (memchr(key, 1, set->klen))
++		return 0;
++
++	/* this is the all-zero no-match element. */
++	return 1;
++}
++
+ const struct nft_set_type nft_set_rbtree_type = {
+ 	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
+ 	.ops		= {
+@@ -768,5 +808,8 @@ const struct nft_set_type nft_set_rbtree_type = {
+ 		.lookup		= nft_rbtree_lookup,
+ 		.walk		= nft_rbtree_walk,
+ 		.get		= nft_rbtree_get,
++		.ksize		= nft_rbtree_ksize,
++		.usize		= nft_rbtree_usize,
++		.adjust_maxsize = nft_rbtree_adjust_maxsize,
+ 	},
+ };
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 59050caab65c8b..72c65d938a150e 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -397,15 +397,15 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct rose_sock *rose = rose_sk(sk);
+-	int opt;
++	unsigned int opt;
+ 
+ 	if (level != SOL_ROSE)
+ 		return -ENOPROTOOPT;
+ 
+-	if (optlen < sizeof(int))
++	if (optlen < sizeof(unsigned int))
+ 		return -EINVAL;
+ 
+-	if (copy_from_sockptr(&opt, optval, sizeof(int)))
++	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+ 		return -EFAULT;
+ 
+ 	switch (optname) {
+@@ -414,31 +414,31 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
+ 		return 0;
+ 
+ 	case ROSE_T1:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t1 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_T2:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t2 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_T3:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->t3 = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_HOLDBACK:
+-		if (opt < 1)
++		if (opt < 1 || opt > UINT_MAX / HZ)
+ 			return -EINVAL;
+ 		rose->hb = opt * HZ;
+ 		return 0;
+ 
+ 	case ROSE_IDLE:
+-		if (opt < 0)
++		if (opt > UINT_MAX / (60 * HZ))
+ 			return -EINVAL;
+ 		rose->idle = opt * 60 * HZ;
+ 		return 0;
+diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
+index f06ddbed3fed63..1525773e94aa17 100644
+--- a/net/rose/rose_timer.c
++++ b/net/rose/rose_timer.c
+@@ -122,6 +122,10 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+ 	struct rose_sock *rose = rose_sk(sk);
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ/20);
++		goto out;
++	}
+ 	switch (rose->state) {
+ 	case ROSE_STATE_0:
+ 		/* Magic here: If we listen() and a new link dies before it
+@@ -152,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+ 	}
+ 
+ 	rose_start_heartbeat(sk);
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+@@ -162,6 +167,10 @@ static void rose_timer_expiry(struct timer_list *t)
+ 	struct sock *sk = &rose->sock;
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &rose->timer, jiffies + HZ/20);
++		goto out;
++	}
+ 	switch (rose->state) {
+ 	case ROSE_STATE_1:	/* T1 */
+ 	case ROSE_STATE_4:	/* T2 */
+@@ -182,6 +191,7 @@ static void rose_timer_expiry(struct timer_list *t)
+ 		}
+ 		break;
+ 	}
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+@@ -192,6 +202,10 @@ static void rose_idletimer_expiry(struct timer_list *t)
+ 	struct sock *sk = &rose->sock;
+ 
+ 	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
++		sk_reset_timer(sk, &rose->idletimer, jiffies + HZ/20);
++		goto out;
++	}
+ 	rose_clear_queues(sk);
+ 
+ 	rose_write_internal(sk, ROSE_CLEAR_REQUEST);
+@@ -207,6 +221,7 @@ static void rose_idletimer_expiry(struct timer_list *t)
+ 		sk->sk_state_change(sk);
+ 		sock_set_flag(sk, SOCK_DEAD);
+ 	}
++out:
+ 	bh_unlock_sock(sk);
+ 	sock_put(sk);
+ }
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 598b4ee389fc1e..2a1396cd892f30 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -63,11 +63,12 @@ int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+ /*
+  * Mark a connection as being remotely aborted.
+  */
+-static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
++static void rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+ 				   struct sk_buff *skb)
+ {
+-	return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+-				      RXRPC_CALL_REMOTELY_ABORTED);
++	trace_rxrpc_rx_conn_abort(conn, skb);
++	rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
++			       RXRPC_CALL_REMOTELY_ABORTED);
+ }
+ 
+ /*
+@@ -202,11 +203,14 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
+ 
+ 	for (i = 0; i < RXRPC_MAXCALLS; i++) {
+ 		call = conn->channels[i].call;
+-		if (call)
++		if (call) {
++			rxrpc_see_call(call, rxrpc_call_see_conn_abort);
+ 			rxrpc_set_call_completion(call,
+ 						  conn->completion,
+ 						  conn->abort_code,
+ 						  conn->error);
++			rxrpc_poke_call(call, rxrpc_call_poke_conn_abort);
++		}
+ 	}
+ 
+ 	_leave("");
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index 552ba84a255c43..5d0842efde69ff 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -238,7 +238,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 	bool use;
+ 	int slot;
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 
+ 	while (!list_empty(collector)) {
+ 		peer = list_entry(collector->next,
+@@ -249,7 +249,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 			continue;
+ 
+ 		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
+-		spin_unlock(&rxnet->peer_hash_lock);
++		spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 		if (use) {
+ 			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+@@ -269,17 +269,17 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ 			 */
+ 			slot += cursor;
+ 			slot &= mask;
+-			spin_lock(&rxnet->peer_hash_lock);
++			spin_lock_bh(&rxnet->peer_hash_lock);
+ 			list_add_tail(&peer->keepalive_link,
+ 				      &rxnet->peer_keepalive[slot & mask]);
+-			spin_unlock(&rxnet->peer_hash_lock);
++			spin_unlock_bh(&rxnet->peer_hash_lock);
+ 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
+ 		}
+ 		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
+-		spin_lock(&rxnet->peer_hash_lock);
++		spin_lock_bh(&rxnet->peer_hash_lock);
+ 	}
+ 
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ }
+ 
+ /*
+@@ -309,7 +309,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
+ 	 * second; the bucket at cursor + 1 goes at now + 1s and so
+ 	 * on...
+ 	 */
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	list_splice_init(&rxnet->peer_keepalive_new, &collector);
+ 
+ 	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+@@ -321,7 +321,7 @@ void rxrpc_peer_keepalive_worker(struct work_struct *work)
+ 	}
+ 
+ 	base = now;
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 	rxnet->peer_keepalive_base = base;
+ 	rxnet->peer_keepalive_cursor = cursor;
+diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
+index 49dcda67a0d591..956fc7ea4b7346 100644
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -313,10 +313,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
+ 	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+ 	rxrpc_init_peer(local, peer, hash_key);
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ 	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ }
+ 
+ /*
+@@ -348,7 +348,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+ 			return NULL;
+ 		}
+ 
+-		spin_lock(&rxnet->peer_hash_lock);
++		spin_lock_bh(&rxnet->peer_hash_lock);
+ 
+ 		/* Need to check that we aren't racing with someone else */
+ 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
+@@ -361,7 +361,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+ 				      &rxnet->peer_keepalive_new);
+ 		}
+ 
+-		spin_unlock(&rxnet->peer_hash_lock);
++		spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 		if (peer)
+ 			rxrpc_free_peer(candidate);
+@@ -411,10 +411,10 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
+ 
+ 	ASSERT(hlist_empty(&peer->error_targets));
+ 
+-	spin_lock(&rxnet->peer_hash_lock);
++	spin_lock_bh(&rxnet->peer_hash_lock);
+ 	hash_del_rcu(&peer->hash_link);
+ 	list_del_init(&peer->keepalive_link);
+-	spin_unlock(&rxnet->peer_hash_lock);
++	spin_unlock_bh(&rxnet->peer_hash_lock);
+ 
+ 	rxrpc_free_peer(peer);
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 7578e27260c9bb..8e47e5355be613 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
+ 	tp->protocol = protocol;
+ 	tp->prio = prio;
+ 	tp->chain = chain;
++	tp->usesw = !tp->ops->reoffload;
+ 	spin_lock_init(&tp->lock);
+ 	refcount_set(&tp->refcnt, 1);
+ 
+@@ -410,39 +411,31 @@ static void tcf_proto_get(struct tcf_proto *tp)
+ 	refcount_inc(&tp->refcnt);
+ }
+ 
+-static void tcf_maintain_bypass(struct tcf_block *block)
++static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
+ {
+-	int filtercnt = atomic_read(&block->filtercnt);
+-	int skipswcnt = atomic_read(&block->skipswcnt);
+-	bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
+-
+-	if (bypass_wanted != block->bypass_wanted) {
+ #ifdef CONFIG_NET_CLS_ACT
+-		if (bypass_wanted)
+-			static_branch_inc(&tcf_bypass_check_needed_key);
+-		else
+-			static_branch_dec(&tcf_bypass_check_needed_key);
+-#endif
+-		block->bypass_wanted = bypass_wanted;
++	struct tcf_block *block = tp->chain->block;
++	bool counted = false;
++
++	if (!add) {
++		if (tp->usesw && tp->counted) {
++			if (!atomic_dec_return(&block->useswcnt))
++				static_branch_dec(&tcf_sw_enabled_key);
++			tp->counted = false;
++		}
++		return;
+ 	}
+-}
+-
+-static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
+-{
+-	lockdep_assert_not_held(&block->cb_lock);
+ 
+-	down_write(&block->cb_lock);
+-	if (*counted != add) {
+-		if (add) {
+-			atomic_inc(&block->filtercnt);
+-			*counted = true;
+-		} else {
+-			atomic_dec(&block->filtercnt);
+-			*counted = false;
+-		}
++	spin_lock(&tp->lock);
++	if (tp->usesw && !tp->counted) {
++		counted = true;
++		tp->counted = true;
+ 	}
+-	tcf_maintain_bypass(block);
+-	up_write(&block->cb_lock);
++	spin_unlock(&tp->lock);
++
++	if (counted && atomic_inc_return(&block->useswcnt) == 1)
++		static_branch_inc(&tcf_sw_enabled_key);
++#endif
+ }
+ 
+ static void tcf_chain_put(struct tcf_chain *chain);
+@@ -451,7 +444,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
+ 			      bool sig_destroy, struct netlink_ext_ack *extack)
+ {
+ 	tp->ops->destroy(tp, rtnl_held, extack);
+-	tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
++	tcf_proto_count_usesw(tp, false);
+ 	if (sig_destroy)
+ 		tcf_proto_signal_destroyed(tp->chain, tp);
+ 	tcf_chain_put(tp->chain);
+@@ -2409,7 +2402,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
+ 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+ 			       RTM_NEWTFILTER, false, rtnl_held, extack);
+ 		tfilter_put(tp, fh);
+-		tcf_block_filter_cnt_update(block, &tp->counted, true);
++		tcf_proto_count_usesw(tp, true);
+ 		/* q pointer is NULL for shared blocks */
+ 		if (q)
+ 			q->flags &= ~TCQ_F_CAN_BYPASS;
+@@ -3532,8 +3525,6 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+ 	if (*flags & TCA_CLS_FLAGS_IN_HW)
+ 		return;
+ 	*flags |= TCA_CLS_FLAGS_IN_HW;
+-	if (tc_skip_sw(*flags))
+-		atomic_inc(&block->skipswcnt);
+ 	atomic_inc(&block->offloadcnt);
+ }
+ 
+@@ -3542,8 +3533,6 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+ 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+ 		return;
+ 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
+-	if (tc_skip_sw(*flags))
+-		atomic_dec(&block->skipswcnt);
+ 	atomic_dec(&block->offloadcnt);
+ }
+ 
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index 1941ebec23ff9c..7fbe42f0e5c2b7 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(prog->gen_flags))
+ 		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, prog->gen_flags);
++
+ 	if (oldprog) {
+ 		idr_replace(&head->handle_idr, prog, handle);
+ 		list_replace_rcu(&oldprog->link, &prog->link);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 1008ec8a464c93..03505673d5234d 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2503,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(fnew->flags))
+ 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, fnew->flags);
++
+ 	spin_lock(&tp->lock);
+ 
+ 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 9f1e62ca508d04..f03bf5da39ee83 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!tc_in_hw(new->flags))
+ 		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++	tcf_proto_update_usesw(tp, new->flags);
++
+ 	*arg = head;
+ 	rcu_assign_pointer(tp->root, new);
+ 	return 0;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index d3a03c57545bcc..2a1c00048fd6f4 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -951,6 +951,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (!tc_in_hw(new->flags))
+ 			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++		tcf_proto_update_usesw(tp, new->flags);
++
+ 		u32_replace_knode(tp, tp_c, new);
+ 		tcf_unbind_filter(tp, &n->res);
+ 		tcf_exts_get_net(&n->exts);
+@@ -1164,6 +1166,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (!tc_in_hw(n->flags))
+ 			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ 
++		tcf_proto_update_usesw(tp, n->flags);
++
+ 		ins = &ht->ht[TC_U32_HASH(handle)];
+ 		for (pins = rtnl_dereference(*ins); pins;
+ 		     ins = &pins->next, pins = rtnl_dereference(*ins))
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 300430b8c4d22f..fac9c946a4c75a 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1664,6 +1664,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ 				q = qdisc_lookup(dev, tcm->tcm_handle);
+ 				if (!q)
+ 					goto create_n_graft;
++				if (q->parent != tcm->tcm_parent) {
++					NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
++					return -EINVAL;
++				}
+ 				if (n->nlmsg_flags & NLM_F_EXCL) {
+ 					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
+ 					return -EEXIST;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 38ec18f73de43a..8874ae6680952a 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -911,8 +911,8 @@ static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
+ 		bands[prio] = q;
+ 	}
+ 
+-	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
+-					 GFP_KERNEL);
++	return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
++					    GFP_KERNEL);
+ }
+ 
+ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index a4b8296a2fa1ca..65d5b59da58303 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -652,6 +652,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ 		if (!p)
+ 			return -ENOMEM;
+ 	}
++	if (ctl->limit == 1) {
++		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
++		return -EINVAL;
++	}
+ 	sch_tree_lock(sch);
+ 	if (ctl->quantum)
+ 		q->quantum = ctl->quantum;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 6cc7b846cff1bb..ebc41a7b13dbec 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2738,7 +2738,7 @@ int smc_accept(struct socket *sock, struct socket *new_sock,
+ 			release_sock(clcsk);
+ 		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
+ 			lock_sock(nsk);
+-			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
++			smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available);
+ 			release_sock(nsk);
+ 		}
+ 	}
+diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
+index f0cbe77a80b440..79047721df5110 100644
+--- a/net/smc/smc_rx.c
++++ b/net/smc/smc_rx.c
+@@ -238,22 +238,23 @@ static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
+ 	return -ENOMEM;
+ }
+ 
+-static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
++static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn, size_t peeked)
+ {
+-	return atomic_read(&conn->bytes_to_rcv) &&
++	return smc_rx_data_available(conn, peeked) &&
+ 	       !atomic_read(&conn->splice_pending);
+ }
+ 
+ /* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
+  *   @smc    smc socket
+  *   @timeo  pointer to max seconds to wait, pointer to value 0 for no timeout
++ *   @peeked  number of bytes already peeked
+  *   @fcrit  add'l criterion to evaluate as function pointer
+  * Returns:
+  * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
+  * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
+  */
+-int smc_rx_wait(struct smc_sock *smc, long *timeo,
+-		int (*fcrit)(struct smc_connection *conn))
++int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
++		int (*fcrit)(struct smc_connection *conn, size_t baseline))
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 	struct smc_connection *conn = &smc->conn;
+@@ -262,7 +263,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ 	struct sock *sk = &smc->sk;
+ 	int rc;
+ 
+-	if (fcrit(conn))
++	if (fcrit(conn, peeked))
+ 		return 1;
+ 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ 	add_wait_queue(sk_sleep(sk), &wait);
+@@ -271,7 +272,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ 			   cflags->peer_conn_abort ||
+ 			   READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
+ 			   conn->killed ||
+-			   fcrit(conn),
++			   fcrit(conn, peeked),
+ 			   &wait);
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+@@ -322,11 +323,11 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
+ 	return -EAGAIN;
+ }
+ 
+-static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
++static bool smc_rx_recvmsg_data_available(struct smc_sock *smc, size_t peeked)
+ {
+ 	struct smc_connection *conn = &smc->conn;
+ 
+-	if (smc_rx_data_available(conn))
++	if (smc_rx_data_available(conn, peeked))
+ 		return true;
+ 	else if (conn->urg_state == SMC_URG_VALID)
+ 		/* we received a single urgent Byte - skip */
+@@ -344,10 +345,10 @@ static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		   struct pipe_inode_info *pipe, size_t len, int flags)
+ {
+-	size_t copylen, read_done = 0, read_remaining = len;
++	size_t copylen, read_done = 0, read_remaining = len, peeked_bytes = 0;
+ 	size_t chunk_len, chunk_off, chunk_len_sum;
+ 	struct smc_connection *conn = &smc->conn;
+-	int (*func)(struct smc_connection *conn);
++	int (*func)(struct smc_connection *conn, size_t baseline);
+ 	union smc_host_cursor cons;
+ 	int readable, chunk;
+ 	char *rcvbuf_base;
+@@ -384,14 +385,14 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		if (conn->killed)
+ 			break;
+ 
+-		if (smc_rx_recvmsg_data_available(smc))
++		if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
+ 			goto copy;
+ 
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ 			/* smc_cdc_msg_recv_action() could have run after
+ 			 * above smc_rx_recvmsg_data_available()
+ 			 */
+-			if (smc_rx_recvmsg_data_available(smc))
++			if (smc_rx_recvmsg_data_available(smc, peeked_bytes))
+ 				goto copy;
+ 			break;
+ 		}
+@@ -425,26 +426,28 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 			}
+ 		}
+ 
+-		if (!smc_rx_data_available(conn)) {
+-			smc_rx_wait(smc, &timeo, smc_rx_data_available);
++		if (!smc_rx_data_available(conn, peeked_bytes)) {
++			smc_rx_wait(smc, &timeo, peeked_bytes, smc_rx_data_available);
+ 			continue;
+ 		}
+ 
+ copy:
+ 		/* initialize variables for 1st iteration of subsequent loop */
+ 		/* could be just 1 byte, even after waiting on data above */
+-		readable = atomic_read(&conn->bytes_to_rcv);
++		readable = smc_rx_data_available(conn, peeked_bytes);
+ 		splbytes = atomic_read(&conn->splice_pending);
+ 		if (!readable || (msg && splbytes)) {
+ 			if (splbytes)
+ 				func = smc_rx_data_available_and_no_splice_pend;
+ 			else
+ 				func = smc_rx_data_available;
+-			smc_rx_wait(smc, &timeo, func);
++			smc_rx_wait(smc, &timeo, peeked_bytes, func);
+ 			continue;
+ 		}
+ 
+ 		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
++		if ((flags & MSG_PEEK) && peeked_bytes)
++			smc_curs_add(conn->rmb_desc->len, &cons, peeked_bytes);
+ 		/* subsequent splice() calls pick up where previous left */
+ 		if (splbytes)
+ 			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
+@@ -480,6 +483,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 			}
+ 			read_remaining -= chunk_len;
+ 			read_done += chunk_len;
++			if (flags & MSG_PEEK)
++				peeked_bytes += chunk_len;
+ 
+ 			if (chunk_len_sum == copylen)
+ 				break; /* either on 1st or 2nd iteration */
+diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
+index db823c97d824ea..994f5e42d1ba26 100644
+--- a/net/smc/smc_rx.h
++++ b/net/smc/smc_rx.h
+@@ -21,11 +21,11 @@ void smc_rx_init(struct smc_sock *smc);
+ 
+ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ 		   struct pipe_inode_info *pipe, size_t len, int flags);
+-int smc_rx_wait(struct smc_sock *smc, long *timeo,
+-		int (*fcrit)(struct smc_connection *conn));
+-static inline int smc_rx_data_available(struct smc_connection *conn)
++int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked,
++		int (*fcrit)(struct smc_connection *conn, size_t baseline));
++static inline int smc_rx_data_available(struct smc_connection *conn, size_t peeked)
+ {
+-	return atomic_read(&conn->bytes_to_rcv);
++	return atomic_read(&conn->bytes_to_rcv) - peeked;
+ }
+ 
+ #endif /* SMC_RX_H */
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 95397677673bb0..cb3bd12f5818ba 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1083,9 +1083,6 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
+ 	/* If we have more data, signal svc_xprt_enqueue() to try again */
+ 	svsk->sk_tcplen = 0;
+ 	svsk->sk_marker = xdr_zero;
+-
+-	smp_wmb();
+-	tcp_set_rcvlowat(svsk->sk_sk, 1);
+ }
+ 
+ /**
+@@ -1175,17 +1172,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ 		goto err_delete;
+ 	if (len == want)
+ 		svc_tcp_fragment_received(svsk);
+-	else {
+-		/* Avoid more ->sk_data_ready() calls until the rest
+-		 * of the message has arrived. This reduces service
+-		 * thread wake-ups on large incoming messages. */
+-		tcp_set_rcvlowat(svsk->sk_sk,
+-				 svc_sock_reclen(svsk) - svsk->sk_tcplen);
+-
++	else
+ 		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
+ 				svc_sock_reclen(svsk),
+ 				svsk->sk_tcplen - sizeof(rpc_fraghdr));
+-	}
+ 	goto err_noclose;
+ error:
+ 	if (len != -EAGAIN)
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index fa9d1b49599bf2..141697e7a833bd 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1519,6 +1519,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
+ 		if (err < 0)
+ 			goto out;
+ 
++		/* sk_err might have been set as a result of an earlier
++		 * (failed) connect attempt.
++		 */
++		sk->sk_err = 0;
++
+ 		/* Mark sock as connecting and set the error code to in
+ 		 * progress in case this is a non-blocking connect.
+ 		 */
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 1c6fd45aa8093f..abca3d7ff56c90 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -763,12 +763,11 @@ static  void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
+ 		}
+ 	}
+ 
++	request->n_channels++;
+ 	request->channels[n_channels] = chan;
+ 	if (add_to_6ghz)
+ 		request->scan_6ghz_params[request->n_6ghz_params].channel_idx =
+ 			n_channels;
+-
+-	request->n_channels++;
+ }
+ 
+ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
+@@ -858,9 +857,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+ 			if (ret)
+ 				continue;
+ 
+-			entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
+-					GFP_ATOMIC);
+-
++			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ 			if (!entry)
+ 				continue;
+ 
+diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
+index e12f620b5f4241..b1a9c1466d6cbd 100644
+--- a/net/wireless/tests/scan.c
++++ b/net/wireless/tests/scan.c
+@@ -810,6 +810,8 @@ static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+ 		skb_put_data(input, "123", 3);
+ 
+ 	ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
++	KUNIT_ASSERT_NOT_NULL(test, ies);
++
+ 	ies->len = input->len;
+ 	memcpy(ies->data, input->data, input->len);
+ 
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index bc56c630572527..235bbefc2abae2 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -714,10 +714,12 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
+ 			oseq += skb_shinfo(skb)->gso_segs;
+ 		}
+ 
+-		if (unlikely(xo->seq.low < replay_esn->oseq)) {
+-			XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
+-			xo->seq.hi = oseq_hi;
+-			replay_esn->oseq_hi = oseq_hi;
++		if (unlikely(oseq < replay_esn->oseq)) {
++			replay_esn->oseq_hi = ++oseq_hi;
++			if (xo->seq.low < replay_esn->oseq) {
++				XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
++				xo->seq.hi = oseq_hi;
++			}
+ 			if (replay_esn->oseq_hi == 0) {
+ 				replay_esn->oseq--;
+ 				replay_esn->oseq_hi--;
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 67ca7ac955a376..711e816fc4041e 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -34,6 +34,8 @@
+ 
+ #define xfrm_state_deref_prot(table, net) \
+ 	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
++#define xfrm_state_deref_check(table, net) \
++	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+ 
+ static void xfrm_state_gc_task(struct work_struct *work);
+ 
+@@ -62,6 +64,8 @@ static inline unsigned int xfrm_dst_hash(struct net *net,
+ 					 u32 reqid,
+ 					 unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
+ }
+ 
+@@ -70,6 +74,8 @@ static inline unsigned int xfrm_src_hash(struct net *net,
+ 					 const xfrm_address_t *saddr,
+ 					 unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
+ }
+ 
+@@ -77,11 +83,15 @@ static inline unsigned int
+ xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
+ 	      __be32 spi, u8 proto, unsigned short family)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
+ }
+ 
+ static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
+ {
++	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
++
+ 	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
+ }
+ 
+@@ -1041,16 +1051,38 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
+ 	x->props.family = tmpl->encap_family;
+ }
+ 
+-static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
++struct xfrm_hash_state_ptrs {
++	const struct hlist_head *bydst;
++	const struct hlist_head *bysrc;
++	const struct hlist_head *byspi;
++	unsigned int hmask;
++};
++
++static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
++{
++	unsigned int sequence;
++
++	do {
++		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
++
++		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
++		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
++		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
++		ptrs->hmask = net->xfrm.state_hmask;
++	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
++}
++
++static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
++						  u32 mark,
+ 						  const xfrm_address_t *daddr,
+ 						  __be32 spi, u8 proto,
+ 						  unsigned short family,
+ 						  struct xfrm_dev_offload *xdo)
+ {
+-	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
++	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
++	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1084,15 +1116,16 @@ static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
+ 	return NULL;
+ }
+ 
+-static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
++static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
++					      u32 mark,
+ 					      const xfrm_address_t *daddr,
+ 					      __be32 spi, u8 proto,
+ 					      unsigned short family)
+ {
+-	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
++	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
++	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
+ 		if (x->props.family != family ||
+ 		    x->id.spi       != spi ||
+ 		    x->id.proto     != proto ||
+@@ -1114,11 +1147,11 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ 					   __be32 spi, u8 proto,
+ 					   unsigned short family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct hlist_head *state_cache_input;
+ 	struct xfrm_state *x = NULL;
+-	int cpu = get_cpu();
+ 
+-	state_cache_input =  per_cpu_ptr(net->xfrm.state_cache_input, cpu);
++	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
+ 
+ 	rcu_read_lock();
+ 	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
+@@ -1135,7 +1168,9 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ 		goto out;
+ 	}
+ 
+-	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
+ 
+ 	if (x && x->km.state == XFRM_STATE_VALID) {
+ 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+@@ -1150,20 +1185,20 @@ struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ 
+ out:
+ 	rcu_read_unlock();
+-	put_cpu();
+ 	return x;
+ }
+ EXPORT_SYMBOL(xfrm_input_state_lookup);
+ 
+-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
++static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
++						     u32 mark,
+ 						     const xfrm_address_t *daddr,
+ 						     const xfrm_address_t *saddr,
+ 						     u8 proto, unsigned short family)
+ {
+-	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
++	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
+ 	struct xfrm_state *x;
+ 
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
++	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
+ 		if (x->props.family != family ||
+ 		    x->id.proto     != proto ||
+ 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
+@@ -1183,14 +1218,17 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ static inline struct xfrm_state *
+ __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct net *net = xs_net(x);
+ 	u32 mark = x->mark.v & x->mark.m;
+ 
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
+ 	if (use_spi)
+-		return __xfrm_state_lookup(net, mark, &x->id.daddr,
++		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
+ 					   x->id.spi, x->id.proto, family);
+ 	else
+-		return __xfrm_state_lookup_byaddr(net, mark,
++		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
+ 						  &x->id.daddr,
+ 						  &x->props.saddr,
+ 						  x->id.proto, family);
+@@ -1264,6 +1302,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 		unsigned short family, u32 if_id)
+ {
+ 	static xfrm_address_t saddr_wildcard = { };
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct net *net = xp_net(pol);
+ 	unsigned int h, h_wildcard;
+ 	struct xfrm_state *x, *x0, *to_put;
+@@ -1328,8 +1367,10 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
+ 		WARN_ON(1);
+ 
+-	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
++	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1362,8 +1403,9 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 	if (best || acquire_in_progress)
+ 		goto found;
+ 
+-	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
+-	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
++	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
++				     encap_family, state_ptrs.hmask);
++	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+@@ -1401,7 +1443,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 
+ 	if (!x && !error && !acquire_in_progress) {
+ 		if (tmpl->id.spi &&
+-		    (x0 = __xfrm_state_lookup_all(net, mark, daddr,
++		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
+ 						  tmpl->id.spi, tmpl->id.proto,
+ 						  encap_family,
+ 						  &pol->xdo)) != NULL) {
+@@ -2180,10 +2222,13 @@ struct xfrm_state *
+ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
+ 		  u8 proto, unsigned short family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct xfrm_state *x;
+ 
+ 	rcu_read_lock();
+-	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
+ 	rcu_read_unlock();
+ 	return x;
+ }
+@@ -2194,10 +2239,14 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+ 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 			 u8 proto, unsigned short family)
+ {
++	struct xfrm_hash_state_ptrs state_ptrs;
+ 	struct xfrm_state *x;
+ 
+ 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-	x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
++
++	xfrm_hash_ptrs_get(net, &state_ptrs);
++
++	x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
+ 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 	return x;
+ }
+diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
+index 57565dfd74a260..07fab2ef534e8d 100644
+--- a/samples/landlock/sandboxer.c
++++ b/samples/landlock/sandboxer.c
+@@ -91,6 +91,9 @@ static int parse_path(char *env_path, const char ***const path_list)
+ 		}
+ 	}
+ 	*path_list = malloc(num_paths * sizeof(**path_list));
++	if (!*path_list)
++		return -1;
++
+ 	for (i = 0; i < num_paths; i++)
+ 		(*path_list)[i] = strsep(&env_path, ENV_DELIMITER);
+ 
+@@ -127,6 +130,10 @@ static int populate_ruleset_fs(const char *const env_var, const int ruleset_fd,
+ 	env_path_name = strdup(env_path_name);
+ 	unsetenv(env_var);
+ 	num_paths = parse_path(env_path_name, &path_list);
++	if (num_paths < 0) {
++		fprintf(stderr, "Failed to allocate memory\n");
++		goto out_free_name;
++	}
+ 	if (num_paths == 1 && path_list[0][0] == '\0') {
+ 		/*
+ 		 * Allows to not use all possible restrictions (e.g. use
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index c16e4cf54d770f..0b85bf27598a82 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -183,7 +183,9 @@ endif # CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT
+ 
+ is-standard-object = $(if $(filter-out y%, $(OBJECT_FILES_NON_STANDARD_$(target-stem).o)$(OBJECT_FILES_NON_STANDARD)n),$(is-kernel-object))
+ 
++ifdef CONFIG_OBJTOOL
+ $(obj)/%.o: private objtool-enabled = $(if $(is-standard-object),$(if $(delay-objtool),$(is-single-obj-m),y))
++endif
+ 
+ ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),)
+ cmd_warn_shared_object = $(if $(word 2, $(modname-multi)),$(warning $(kbuild-file): $*.o is added to multiple modules: $(modname-multi)))
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 7395200538da89..2e280a02e9e652 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -287,6 +287,8 @@ delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT))
+ cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool-args) $@)
+ cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+ 
++objtool-enabled := y
++
+ endif # CONFIG_OBJTOOL
+ 
+ # Useful for describing the dependency of composite objects
+@@ -302,11 +304,11 @@ endef
+ # ===========================================================================
+ # These are shared by some Makefile.* files.
+ 
+-objtool-enabled := y
+-
+ ifdef CONFIG_LTO_CLANG
+-# objtool cannot process LLVM IR. Make $(LD) covert LLVM IR to ELF here.
+-cmd_ld_single = $(if $(objtool-enabled), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
++# Run $(LD) here to convert LLVM IR to ELF in the following cases:
++#  - when this object needs objtool processing, as objtool cannot process LLVM IR
++#  - when this is a single-object module, as modpost cannot process LLVM IR
++cmd_ld_single = $(if $(objtool-enabled)$(is-single-obj-m), ; $(LD) $(ld_flags) -r -o $(tmp-target) $@; mv $(tmp-target) $@)
+ endif
+ 
+ quiet_cmd_cc_o_c = CC $(quiet_modtag)  $@
+diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
+index f97c9926ed31b2..1628198f3e8309 100644
+--- a/scripts/Makefile.modinst
++++ b/scripts/Makefile.modinst
+@@ -105,7 +105,7 @@ else
+ sig-key := $(CONFIG_MODULE_SIG_KEY)
+ endif
+ quiet_cmd_sign = SIGN    $@
+-      cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" certs/signing_key.x509 $@ \
++      cmd_sign = $(objtree)/scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" $(objtree)/certs/signing_key.x509 $@ \
+                  $(if $(KBUILD_EXTMOD),|| true)
+ 
+ ifeq ($(sign-only),)
+diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
+index 07f9b8cfb23370..c5e8e0e0f94906 100644
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -239,6 +239,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 						"unchanged\n");
+ 				}
+ 				sym->is_declared = 1;
++				free_list(defn, NULL);
+ 				return sym;
+ 			} else if (!sym->is_declared) {
+ 				if (sym->is_override && flag_preserve) {
+@@ -247,6 +248,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 					print_type_name(type, name);
+ 					fprintf(stderr, " modversion change\n");
+ 					sym->is_declared = 1;
++					free_list(defn, NULL);
+ 					return sym;
+ 				} else {
+ 					status = is_unknown_symbol(sym) ?
+@@ -254,6 +256,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 				}
+ 			} else {
+ 				error_with_pos("redefinition of %s", name);
++				free_list(defn, NULL);
+ 				return sym;
+ 			}
+ 			break;
+@@ -269,11 +272,15 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type,
+ 				break;
+ 			}
+ 		}
++
++		free_list(sym->defn, NULL);
++		free(sym->name);
++		free(sym);
+ 		--nsyms;
+ 	}
+ 
+ 	sym = xmalloc(sizeof(*sym));
+-	sym->name = name;
++	sym->name = xstrdup(name);
+ 	sym->type = type;
+ 	sym->defn = defn;
+ 	sym->expansion_trail = NULL;
+@@ -480,7 +487,7 @@ static void read_reference(FILE *f)
+ 			defn = def;
+ 			def = read_node(f);
+ 		}
+-		subsym = add_reference_symbol(xstrdup(sym->string), sym->tag,
++		subsym = add_reference_symbol(sym->string, sym->tag,
+ 					      defn, is_extern);
+ 		subsym->is_override = is_override;
+ 		free_node(sym);
+diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
+index 21ed2ec2d98ca8..5621533dcb8e43 100644
+--- a/scripts/genksyms/genksyms.h
++++ b/scripts/genksyms/genksyms.h
+@@ -32,7 +32,7 @@ struct string_list {
+ 
+ struct symbol {
+ 	struct symbol *hash_next;
+-	const char *name;
++	char *name;
+ 	enum symbol_type type;
+ 	struct string_list *defn;
+ 	struct symbol *expansion_trail;
+diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
+index 8e9b5e69e8f01d..689cb6bb40b657 100644
+--- a/scripts/genksyms/parse.y
++++ b/scripts/genksyms/parse.y
+@@ -152,14 +152,19 @@ simple_declaration:
+ 	;
+ 
+ init_declarator_list_opt:
+-	/* empty */				{ $$ = NULL; }
+-	| init_declarator_list
++	/* empty */			{ $$ = NULL; }
++	| init_declarator_list		{ free_list(decl_spec, NULL); $$ = $1; }
+ 	;
+ 
+ init_declarator_list:
+ 	init_declarator
+ 		{ struct string_list *decl = *$1;
+ 		  *$1 = NULL;
++
++		  /* avoid sharing among multiple init_declarators */
++		  if (decl_spec)
++		    decl_spec = copy_list_range(decl_spec, NULL);
++
+ 		  add_symbol(current_name,
+ 			     is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
+ 		  current_name = NULL;
+@@ -170,6 +175,11 @@ init_declarator_list:
+ 		  *$3 = NULL;
+ 		  free_list(*$2, NULL);
+ 		  *$2 = decl_spec;
++
++		  /* avoid sharing among multiple init_declarators */
++		  if (decl_spec)
++		    decl_spec = copy_list_range(decl_spec, NULL);
++
+ 		  add_symbol(current_name,
+ 			     is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
+ 		  current_name = NULL;
+@@ -472,12 +482,12 @@ enumerator_list:
+ enumerator:
+ 	IDENT
+ 		{
+-			const char *name = strdup((*$1)->string);
++			const char *name = (*$1)->string;
+ 			add_symbol(name, SYM_ENUM_CONST, NULL, 0);
+ 		}
+ 	| IDENT '=' EXPRESSION_PHRASE
+ 		{
+-			const char *name = strdup((*$1)->string);
++			const char *name = (*$1)->string;
+ 			struct string_list *expr = copy_list_range(*$3, *$2);
+ 			add_symbol(name, SYM_ENUM_CONST, expr, 0);
+ 		}
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 4286d5e7f95dc1..3b55e7a4131d9a 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -360,10 +360,12 @@ int conf_read_simple(const char *name, int def)
+ 
+ 			*p = '\0';
+ 
+-			in = zconf_fopen(env);
++			name = env;
++
++			in = zconf_fopen(name);
+ 			if (in) {
+ 				conf_message("using defaults found in %s",
+-					     env);
++					     name);
+ 				goto load;
+ 			}
+ 
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 89b84bf8e21fa6..7beb59dec5a081 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -388,6 +388,7 @@ static void sym_warn_unmet_dep(const struct symbol *sym)
+ 			       "  Selected by [m]:\n");
+ 
+ 	fputs(str_get(&gs), stderr);
++	str_free(&gs);
+ 	sym_warnings++;
+ }
+ 
+diff --git a/security/landlock/fs.c b/security/landlock/fs.c
+index e31b97a9f175aa..7adb25150488fc 100644
+--- a/security/landlock/fs.c
++++ b/security/landlock/fs.c
+@@ -937,10 +937,6 @@ static access_mask_t get_mode_access(const umode_t mode)
+ 	switch (mode & S_IFMT) {
+ 	case S_IFLNK:
+ 		return LANDLOCK_ACCESS_FS_MAKE_SYM;
+-	case 0:
+-		/* A zero mode translates to S_IFREG. */
+-	case S_IFREG:
+-		return LANDLOCK_ACCESS_FS_MAKE_REG;
+ 	case S_IFDIR:
+ 		return LANDLOCK_ACCESS_FS_MAKE_DIR;
+ 	case S_IFCHR:
+@@ -951,9 +947,12 @@ static access_mask_t get_mode_access(const umode_t mode)
+ 		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
+ 	case S_IFSOCK:
+ 		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
++	case S_IFREG:
++	case 0:
++		/* A zero mode translates to S_IFREG. */
+ 	default:
+-		WARN_ON_ONCE(1);
+-		return 0;
++		/* Treats weird files as regular files. */
++		return LANDLOCK_ACCESS_FS_MAKE_REG;
+ 	}
+ }
+ 
+diff --git a/sound/core/seq/Kconfig b/sound/core/seq/Kconfig
+index 0374bbf51cd4d3..e4f58cb985d47c 100644
+--- a/sound/core/seq/Kconfig
++++ b/sound/core/seq/Kconfig
+@@ -62,7 +62,7 @@ config SND_SEQ_VIRMIDI
+ 
+ config SND_SEQ_UMP
+ 	bool "Support for UMP events"
+-	default y if SND_SEQ_UMP_CLIENT
++	default SND_UMP
+ 	help
+ 	  Say Y here to enable the support for handling UMP (Universal MIDI
+ 	  Packet) events via ALSA sequencer infrastructure, which is an
+@@ -71,6 +71,6 @@ config SND_SEQ_UMP
+ 	  among legacy and UMP clients.
+ 
+ config SND_SEQ_UMP_CLIENT
+-	def_tristate SND_UMP
++	def_tristate SND_UMP && SND_SEQ_UMP
+ 
+ endif # SND_SEQUENCER
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ad66378d7321aa..2d523b53b3d731 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10158,6 +10158,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x1360, "Acer Aspire A115", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c
+index 1f59ee248771cc..89e99ed4275a22 100644
+--- a/sound/soc/amd/acp/acp-i2s.c
++++ b/sound/soc/amd/acp/acp-i2s.c
+@@ -181,6 +181,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas
+ 			break;
+ 		default:
+ 			dev_err(dev, "Unknown chip revision %d\n", chip->acp_rev);
++			spin_unlock_irq(&adata->acp_lock);
+ 			return -EINVAL;
+ 		}
+ 	}
+diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
+index f37e82ddb7a104..d7ad795603c17c 100644
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -80,7 +80,7 @@ snd-soc-cs35l56-shared-y := cs35l56-shared.o
+ snd-soc-cs35l56-i2c-y := cs35l56-i2c.o
+ snd-soc-cs35l56-spi-y := cs35l56-spi.o
+ snd-soc-cs35l56-sdw-y := cs35l56-sdw.o
+-snd-soc-cs40l50-objs := cs40l50-codec.o
++snd-soc-cs40l50-y := cs40l50-codec.o
+ snd-soc-cs42l42-y := cs42l42.o
+ snd-soc-cs42l42-i2c-y := cs42l42-i2c.o
+ snd-soc-cs42l42-sdw-y := cs42l42-sdw.o
+@@ -92,7 +92,7 @@ snd-soc-cs42l52-y := cs42l52.o
+ snd-soc-cs42l56-y := cs42l56.o
+ snd-soc-cs42l73-y := cs42l73.o
+ snd-soc-cs42l83-i2c-y := cs42l83-i2c.o
+-snd-soc-cs42l84-objs := cs42l84.o
++snd-soc-cs42l84-y := cs42l84.o
+ snd-soc-cs4234-y := cs4234.o
+ snd-soc-cs4265-y := cs4265.o
+ snd-soc-cs4270-y := cs4270.o
+@@ -334,8 +334,8 @@ snd-soc-wcd-classh-y := wcd-clsh-v2.o
+ snd-soc-wcd-mbhc-y := wcd-mbhc-v2.o
+ snd-soc-wcd9335-y := wcd9335.o
+ snd-soc-wcd934x-y := wcd934x.o
+-snd-soc-wcd937x-objs := wcd937x.o
+-snd-soc-wcd937x-sdw-objs := wcd937x-sdw.o
++snd-soc-wcd937x-y := wcd937x.o
++snd-soc-wcd937x-sdw-y := wcd937x-sdw.o
+ snd-soc-wcd938x-y := wcd938x.o
+ snd-soc-wcd938x-sdw-y := wcd938x-sdw.o
+ snd-soc-wcd939x-y := wcd939x.o
+diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
+index ca4cc954efa8e6..eb97ac73ec0624 100644
+--- a/sound/soc/codecs/da7213.c
++++ b/sound/soc/codecs/da7213.c
+@@ -2203,6 +2203,8 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
+ 		return ret;
+ 	}
+ 
++	mutex_init(&da7213->ctrl_lock);
++
+ 	pm_runtime_set_autosuspend_delay(&i2c->dev, 100);
+ 	pm_runtime_use_autosuspend(&i2c->dev);
+ 	pm_runtime_set_active(&i2c->dev);
+diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c
+index 27516ef5718591..3dccf0a57a3a11 100644
+--- a/sound/soc/intel/avs/apl.c
++++ b/sound/soc/intel/avs/apl.c
+@@ -12,6 +12,7 @@
+ #include "avs.h"
+ #include "messages.h"
+ #include "path.h"
++#include "registers.h"
+ #include "topology.h"
+ 
+ static irqreturn_t avs_apl_dsp_interrupt(struct avs_dev *adev)
+@@ -125,7 +126,7 @@ int avs_apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
+ 	struct avs_apl_log_buffer_layout layout;
+ 	void __iomem *addr, *buf;
+ 	size_t dump_size;
+-	u16 offset = 0;
++	u32 offset = 0;
+ 	u8 *dump, *pos;
+ 
+ 	dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size;
+diff --git a/sound/soc/intel/avs/cnl.c b/sound/soc/intel/avs/cnl.c
+index bd3c4bb8bf5a17..03f8fb0dc187f5 100644
+--- a/sound/soc/intel/avs/cnl.c
++++ b/sound/soc/intel/avs/cnl.c
+@@ -9,6 +9,7 @@
+ #include <sound/hdaudio_ext.h>
+ #include "avs.h"
+ #include "messages.h"
++#include "registers.h"
+ 
+ static void avs_cnl_ipc_interrupt(struct avs_dev *adev)
+ {
+diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
+index 73d4bde9b2f788..82839d0994ee3e 100644
+--- a/sound/soc/intel/avs/core.c
++++ b/sound/soc/intel/avs/core.c
+@@ -829,10 +829,10 @@ static const struct avs_spec jsl_desc = {
+ 	.hipc = &cnl_hipc_spec,
+ };
+ 
+-#define AVS_TGL_BASED_SPEC(sname)		\
++#define AVS_TGL_BASED_SPEC(sname, min)		\
+ static const struct avs_spec sname##_desc = {	\
+ 	.name = #sname,				\
+-	.min_fw_version = { 10,	29, 0, 5646 },	\
++	.min_fw_version = { 10,	min, 0, 5646 },	\
+ 	.dsp_ops = &avs_tgl_dsp_ops,		\
+ 	.core_init_mask = 1,			\
+ 	.attributes = AVS_PLATATTR_IMR,		\
+@@ -840,11 +840,11 @@ static const struct avs_spec sname##_desc = {	\
+ 	.hipc = &cnl_hipc_spec,			\
+ }
+ 
+-AVS_TGL_BASED_SPEC(lkf);
+-AVS_TGL_BASED_SPEC(tgl);
+-AVS_TGL_BASED_SPEC(ehl);
+-AVS_TGL_BASED_SPEC(adl);
+-AVS_TGL_BASED_SPEC(adl_n);
++AVS_TGL_BASED_SPEC(lkf, 28);
++AVS_TGL_BASED_SPEC(tgl, 29);
++AVS_TGL_BASED_SPEC(ehl, 30);
++AVS_TGL_BASED_SPEC(adl, 35);
++AVS_TGL_BASED_SPEC(adl_n, 35);
+ 
+ static const struct pci_device_id avs_ids[] = {
+ 	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
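
The AVS_TGL_BASED_SPEC rework hoists the one field that differs per platform into a macro parameter, so each descriptor states its own minimum firmware minor version. A standalone miniature of the struct-generating technique (toy types, not the avs driver's):

    #include <stdio.h>

    struct spec {
        const char *name;
        int min_fw_minor;
    };

    /* One macro, one struct per platform; the varying field is a parameter. */
    #define BASED_SPEC(sname, min)                  \
    static const struct spec sname##_desc = {       \
        .name = #sname,                             \
        .min_fw_minor = (min),                      \
    }

    BASED_SPEC(tgl, 29);
    BASED_SPEC(adl, 35);

    int main(void)
    {
        printf("%s: min minor %d\n", tgl_desc.name, tgl_desc.min_fw_minor);
        printf("%s: min minor %d\n", adl_desc.name, adl_desc.min_fw_minor);
        return 0;
    }
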
+diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
+index 890efd2f1feabe..37de077a998386 100644
+--- a/sound/soc/intel/avs/loader.c
++++ b/sound/soc/intel/avs/loader.c
+@@ -308,7 +308,7 @@ avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
+ 	}
+ 
+ 	/* await ROM init */
+-	ret = snd_hdac_adsp_readq_poll(adev, spec->sram->rom_status_offset, reg,
++	ret = snd_hdac_adsp_readl_poll(adev, spec->sram->rom_status_offset, reg,
+ 				       (reg & 0xF) == AVS_ROM_INIT_DONE ||
+ 				       (reg & 0xF) == APL_ROM_FW_ENTERED,
+ 				       AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
+diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h
+index f76e91cff2a9a6..5b6d60eb3c18bd 100644
+--- a/sound/soc/intel/avs/registers.h
++++ b/sound/soc/intel/avs/registers.h
+@@ -9,6 +9,8 @@
+ #ifndef __SOUND_SOC_INTEL_AVS_REGS_H
+ #define __SOUND_SOC_INTEL_AVS_REGS_H
+ 
++#include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/iopoll.h>
+ #include <linux/sizes.h>
+ 
+ #define AZX_PCIREG_PGCTL		0x44
+@@ -98,4 +100,47 @@
+ #define avs_downlink_addr(adev) \
+ 	avs_sram_addr(adev, AVS_DOWNLINK_WINDOW)
+ 
++#define snd_hdac_adsp_writeb(adev, reg, value) \
++	snd_hdac_reg_writeb(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readb(adev, reg) \
++	snd_hdac_reg_readb(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writew(adev, reg, value) \
++	snd_hdac_reg_writew(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readw(adev, reg) \
++	snd_hdac_reg_readw(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writel(adev, reg, value) \
++	snd_hdac_reg_writel(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readl(adev, reg) \
++	snd_hdac_reg_readl(&(adev)->base.core, (adev)->dsp_ba + (reg))
++#define snd_hdac_adsp_writeq(adev, reg, value) \
++	snd_hdac_reg_writeq(&(adev)->base.core, (adev)->dsp_ba + (reg), value)
++#define snd_hdac_adsp_readq(adev, reg) \
++	snd_hdac_reg_readq(&(adev)->base.core, (adev)->dsp_ba + (reg))
++
++#define snd_hdac_adsp_updateb(adev, reg, mask, val) \
++	snd_hdac_adsp_writeb(adev, reg, \
++			(snd_hdac_adsp_readb(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updatew(adev, reg, mask, val) \
++	snd_hdac_adsp_writew(adev, reg, \
++			(snd_hdac_adsp_readw(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updatel(adev, reg, mask, val) \
++	snd_hdac_adsp_writel(adev, reg, \
++			(snd_hdac_adsp_readl(adev, reg) & ~(mask)) | (val))
++#define snd_hdac_adsp_updateq(adev, reg, mask, val) \
++	snd_hdac_adsp_writeq(adev, reg, \
++			(snd_hdac_adsp_readq(adev, reg) & ~(mask)) | (val))
++
++#define snd_hdac_adsp_readb_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readb_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readw_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readw_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readl_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readl_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++#define snd_hdac_adsp_readq_poll(adev, reg, val, cond, delay_us, timeout_us) \
++	readq_poll_timeout((adev)->dsp_ba + (reg), val, cond, \
++			   delay_us, timeout_us)
++
+ #endif /* __SOUND_SOC_INTEL_AVS_REGS_H */
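
The new snd_hdac_adsp_*_poll wrappers build on the read{b,w,l,q}_poll_timeout helpers from <linux/iopoll.h>: reread a register until a condition holds or a timeout expires. The related loader.c change switches the ROM status poll from a 64-bit to a 32-bit read to match the register width. A rough userspace approximation of the polling macro (clock_gettime in place of the kernel's timekeeping, no sleep between reads, GNU statement expression; a sketch, not the kernel implementation):

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_us(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000;
    }

    /* Poll `read_expr` into `val` until `cond` holds or `timeout_us` elapses.
     * Evaluates to 0 on success and -1 on timeout (stand-in for -ETIMEDOUT). */
    #define read_poll_timeout_us(read_expr, val, cond, timeout_us) ({   \
        uint64_t __deadline = now_us() + (timeout_us);                  \
        int __ret = 0;                                                  \
        for (;;) {                                                      \
            (val) = (read_expr);                                        \
            if (cond)                                                   \
                break;                                                  \
            if (now_us() > __deadline) { __ret = -1; break; }           \
        }                                                               \
        __ret;                                                          \
    })

    static volatile uint32_t fake_rom_status;

    int main(void)
    {
        uint32_t reg = 0;
        fake_rom_status = 0x5;   /* pretend the ROM reported "init done" */
        int ret = read_poll_timeout_us(fake_rom_status, reg,
                                       (reg & 0xF) == 0x5, 1000);
        printf("poll returned %d, reg=0x%x\n", ret, (unsigned)reg);
        return 0;
    }
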
+diff --git a/sound/soc/intel/avs/skl.c b/sound/soc/intel/avs/skl.c
+index 34f859d6e5a49a..d66ef000de9ee7 100644
+--- a/sound/soc/intel/avs/skl.c
++++ b/sound/soc/intel/avs/skl.c
+@@ -12,6 +12,7 @@
+ #include "avs.h"
+ #include "cldma.h"
+ #include "messages.h"
++#include "registers.h"
+ 
+ void avs_skl_ipc_interrupt(struct avs_dev *adev)
+ {
+diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
+index 5cda527020c7bf..d612f20ed98937 100644
+--- a/sound/soc/intel/avs/topology.c
++++ b/sound/soc/intel/avs/topology.c
+@@ -1466,7 +1466,7 @@ avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *o
+ 
+ static const struct avs_tplg_token_parser mod_init_config_parsers[] = {
+ 	{
+-		.token = AVS_TKN_MOD_INIT_CONFIG_ID_U32,
++		.token = AVS_TKN_INIT_CONFIG_ID_U32,
+ 		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ 		.offset = offsetof(struct avs_tplg_init_config, id),
+ 		.parse = avs_parse_word_token,
+@@ -1519,7 +1519,7 @@ static int avs_tplg_parse_initial_configs(struct snd_soc_component *comp,
+ 		esize = le32_to_cpu(tuples->size) + le32_to_cpu(tmp->size);
+ 
+ 		ret = parse_dictionary_entries(comp, tuples, esize, config, 1, sizeof(*config),
+-					       AVS_TKN_MOD_INIT_CONFIG_ID_U32,
++					       AVS_TKN_INIT_CONFIG_ID_U32,
+ 					       mod_init_config_parsers,
+ 					       ARRAY_SIZE(mod_init_config_parsers));
+ 
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index c9f9c9b0de9b64..5554ad4e7c7877 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -22,6 +22,8 @@ static int quirk_override = -1;
+ module_param_named(quirk, quirk_override, int, 0444);
+ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+ 
++#define DMIC_DEFAULT_CHANNELS 2
++
+ static void log_quirks(struct device *dev)
+ {
+ 	if (SOC_SDW_JACK_JDSRC(sof_sdw_quirk))
+@@ -608,17 +610,32 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3838")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83JX")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "3832")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83LC")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83MC")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
++	},
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83NM")
++		},
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+@@ -1127,17 +1144,19 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
+ 		hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
+ 
+ 	/* enable dmic01 & dmic16k */
+-	if (sof_sdw_quirk & SOC_SDW_PCH_DMIC || mach_params->dmic_num) {
+-		if (ctx->ignore_internal_dmic)
+-			dev_warn(dev, "Ignoring PCH DMIC\n");
+-		else
+-			dmic_num = 2;
++	if (ctx->ignore_internal_dmic) {
++		dev_warn(dev, "Ignoring internal DMIC\n");
++		mach_params->dmic_num = 0;
++	} else if (mach_params->dmic_num) {
++		dmic_num = 2;
++	} else if (sof_sdw_quirk & SOC_SDW_PCH_DMIC) {
++		dmic_num = 2;
++		/*
++		 * mach_params->dmic_num will be used to set the cfg-mics value of
++		 * the card->components string. Set it to the default value.
++		 */
++		mach_params->dmic_num = DMIC_DEFAULT_CHANNELS;
+ 	}
+-	/*
+-	 * mach_params->dmic_num will be used to set the cfg-mics value of card->components
+-	 * string. Overwrite it to the actual number of PCH DMICs used in the device.
+-	 */
+-	mach_params->dmic_num = dmic_num;
+ 
+ 	if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
+ 		bt_num = 1;
+diff --git a/sound/soc/mediatek/mt8365/Makefile b/sound/soc/mediatek/mt8365/Makefile
+index 52ba45a8498a20..b197025e34bb80 100644
+--- a/sound/soc/mediatek/mt8365/Makefile
++++ b/sound/soc/mediatek/mt8365/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+ # MTK Platform driver
+-snd-soc-mt8365-pcm-objs := \
++snd-soc-mt8365-pcm-y := \
+ 	mt8365-afe-clk.o \
+ 	mt8365-afe-pcm.o \
+ 	mt8365-dai-adda.o \
+diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
+index 6efd017aaa7fce..6b442b1014155f 100644
+--- a/sound/soc/renesas/rz-ssi.c
++++ b/sound/soc/renesas/rz-ssi.c
+@@ -258,8 +258,7 @@ static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi,
+ static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
+ 			    unsigned int channels)
+ {
+-	static s8 ckdv[16] = { 1,  2,  4,  8, 16, 32, 64, 128,
+-			       6, 12, 24, 48, 96, -1, -1, -1 };
++	static u8 ckdv[] = { 1,  2,  4,  8, 16, 32, 64, 128, 6, 12, 24, 48, 96 };
+ 	unsigned int channel_bits = 32;	/* System Word Length */
+ 	unsigned long bclk_rate = rate * channels * channel_bits;
+ 	unsigned int div;
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index d1f28699652fe3..acd75e48851fcf 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -22,7 +22,6 @@
+ 
+ #define DRV_NAME "rockchip-i2s-tdm"
+ 
+-#define DEFAULT_MCLK_FS				256
+ #define CH_GRP_MAX				4  /* The max channel 8 / 2 */
+ #define MULTIPLEX_CH_MAX			10
+ 
+@@ -70,6 +69,8 @@ struct rk_i2s_tdm_dev {
+ 	bool has_playback;
+ 	bool has_capture;
+ 	struct snd_soc_dai_driver *dai;
++	unsigned int mclk_rx_freq;
++	unsigned int mclk_tx_freq;
+ };
+ 
+ static int to_ch_num(unsigned int val)
+@@ -645,6 +646,27 @@ static int rockchip_i2s_trcm_mode(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
++static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
++				       unsigned int freq, int dir)
++{
++	struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
++
++	if (i2s_tdm->clk_trcm) {
++		i2s_tdm->mclk_tx_freq = freq;
++		i2s_tdm->mclk_rx_freq = freq;
++	} else {
++		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
++			i2s_tdm->mclk_tx_freq = freq;
++		else
++			i2s_tdm->mclk_rx_freq = freq;
++	}
++
++	dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
++		stream ? "rx" : "tx", freq);
++
++	return 0;
++}
++
+ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 				      struct snd_pcm_hw_params *params,
+ 				      struct snd_soc_dai *dai)
+@@ -659,15 +681,19 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
+ 
+ 		if (i2s_tdm->clk_trcm == TRCM_TX) {
+ 			mclk = i2s_tdm->mclk_tx;
++			mclk_rate = i2s_tdm->mclk_tx_freq;
+ 		} else if (i2s_tdm->clk_trcm == TRCM_RX) {
+ 			mclk = i2s_tdm->mclk_rx;
++			mclk_rate = i2s_tdm->mclk_rx_freq;
+ 		} else if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 			mclk = i2s_tdm->mclk_tx;
++			mclk_rate = i2s_tdm->mclk_tx_freq;
+ 		} else {
+ 			mclk = i2s_tdm->mclk_rx;
++			mclk_rate = i2s_tdm->mclk_rx_freq;
+ 		}
+ 
+-		err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
++		err = clk_set_rate(mclk, mclk_rate);
+ 		if (err)
+ 			return err;
+ 
+@@ -827,6 +853,7 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
+ 	.hw_params = rockchip_i2s_tdm_hw_params,
+ 	.set_bclk_ratio	= rockchip_i2s_tdm_set_bclk_ratio,
+ 	.set_fmt = rockchip_i2s_tdm_set_fmt,
++	.set_sysclk = rockchip_i2s_tdm_set_sysclk,
+ 	.set_tdm_slot = rockchip_dai_tdm_slot,
+ 	.trigger = rockchip_i2s_tdm_trigger,
+ };
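
With the new set_sysclk callback the rockchip driver records the machine-requested MCLK rate and hw_params consumes it, instead of hard-coding 256 * fs. The store-then-consume flow, reduced to plain C with toy types (illustrative, not the driver code):

    #include <stdio.h>

    struct i2s {
        unsigned int mclk_tx_freq;
        unsigned int mclk_rx_freq;
        int clk_trcm;            /* nonzero: tx and rx share one clock */
    };

    /* set_sysclk: just record what the machine driver asked for */
    static void set_sysclk(struct i2s *i2s, int playback, unsigned int freq)
    {
        if (i2s->clk_trcm) {
            i2s->mclk_tx_freq = freq;
            i2s->mclk_rx_freq = freq;
        } else if (playback) {
            i2s->mclk_tx_freq = freq;
        } else {
            i2s->mclk_rx_freq = freq;
        }
    }

    /* hw_params: consume the stored rate instead of hard-coding 256 * fs */
    static unsigned int hw_params_rate(const struct i2s *i2s, int playback)
    {
        return playback ? i2s->mclk_tx_freq : i2s->mclk_rx_freq;
    }

    int main(void)
    {
        struct i2s i2s = { .clk_trcm = 0 };
        set_sysclk(&i2s, 1, 12288000);   /* 256 * 48000, chosen by the machine driver */
        printf("tx mclk = %u Hz\n", hw_params_rate(&i2s, 1));
        return 0;
    }
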
+diff --git a/sound/soc/sdca/Makefile b/sound/soc/sdca/Makefile
+index c296bd5a0a7cfa..5d1ddbbfbf62b5 100644
+--- a/sound/soc/sdca/Makefile
++++ b/sound/soc/sdca/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
+-snd-soc-sdca-objs	:= sdca_functions.o sdca_device.o
++snd-soc-sdca-y	:= sdca_functions.o sdca_device.o
+ 
+ obj-$(CONFIG_SND_SOC_SDCA)	+= snd-soc-sdca.o
+diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
+index 0aa4164232464e..7cf623cbe9ed4b 100644
+--- a/sound/soc/sunxi/sun4i-spdif.c
++++ b/sound/soc/sunxi/sun4i-spdif.c
+@@ -176,6 +176,7 @@ struct sun4i_spdif_quirks {
+ 	unsigned int reg_dac_txdata;
+ 	bool has_reset;
+ 	unsigned int val_fctl_ftx;
++	unsigned int mclk_multiplier;
+ };
+ 
+ struct sun4i_spdif_dev {
+@@ -313,6 +314,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
+ 	default:
+ 		return -EINVAL;
+ 	}
++	mclk *= host->quirks->mclk_multiplier;
+ 
+ 	ret = clk_set_rate(host->spdif_clk, mclk);
+ 	if (ret < 0) {
+@@ -347,6 +349,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
+ 	default:
+ 		return -EINVAL;
+ 	}
++	mclk_div *= host->quirks->mclk_multiplier;
+ 
+ 	reg_val = 0;
+ 	reg_val |= SUN4I_SPDIF_TXCFG_ASS;
+@@ -540,24 +543,28 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = {
+ static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN4I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN4I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
+ 	.has_reset	= true,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = {
+ 	.reg_dac_txdata	= SUN8I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN4I_SPDIF_FCTL_FTX,
+ 	.has_reset	= true,
++	.mclk_multiplier = 4,
+ };
+ 
+ static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = {
+ 	.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
+ 	.val_fctl_ftx   = SUN50I_H6_SPDIF_FCTL_FTX,
+ 	.has_reset      = true,
++	.mclk_multiplier = 1,
+ };
+ 
+ static const struct of_device_id sun4i_spdif_of_match[] = {
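
Note that every sun4i-spdif quirk entry now spells out mclk_multiplier, including the identity value 1: the multiplier is applied unconditionally in hw_params, so a zero-initialized field would request a 0 Hz clock. A tiny demonstration of why the identity value must be explicit (toy struct, not driver code):

    #include <stdio.h>

    struct quirks {
        unsigned int mclk_multiplier;
    };

    static unsigned long pick_rate(const struct quirks *q, unsigned long mclk)
    {
        return mclk * q->mclk_multiplier;   /* applied unconditionally */
    }

    int main(void)
    {
        struct quirks h3  = { .mclk_multiplier = 4 };
        struct quirks bad = { 0 };          /* field left out: silently zero */

        printf("h3:  %lu\n", pick_rate(&h3, 24576000));   /* 4x */
        printf("bad: %lu\n", pick_rate(&bad, 24576000));  /* 0 Hz, broken */
        return 0;
    }
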
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 7968d6a2f592ac..a97efb7b131ea2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2343,6 +2343,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
+ 		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
++	DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */
++		   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ 	DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ 		   QUIRK_FLAG_IGNORE_CTL_ERROR),
+ 	DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
+diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
+index 156b62a163c5a6..8a48cc2536f566 100644
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -226,7 +226,7 @@ static int load_xbc_from_initrd(int fd, char **buf)
+ 	/* Wrong Checksum */
+ 	rcsum = xbc_calc_checksum(*buf, size);
+ 	if (csum != rcsum) {
+-		pr_err("checksum error: %d != %d\n", csum, rcsum);
++		pr_err("checksum error: %u != %u\n", csum, rcsum);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -395,7 +395,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
+ 	xbc_get_info(&ret, NULL);
+ 	printf("\tNumber of nodes: %d\n", ret);
+ 	printf("\tSize: %u bytes\n", (unsigned int)size);
+-	printf("\tChecksum: %d\n", (unsigned int)csum);
++	printf("\tChecksum: %u\n", (unsigned int)csum);
+ 
+ 	/* TODO: Check the options by schema */
+ 	xbc_exit();
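
The bootconfig changes are plain printf-signedness fixes: the checksums are unsigned, and %d would render large values as negative. For example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int csum = 0xFFFFFFF0u;

        printf("with %%d: %d\n", csum);   /* -16: misleading */
        printf("with %%u: %u\n", csum);   /* 4294967280: correct */
        return 0;
    }
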
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index bca47d136f058f..80563154318601 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -89,13 +89,6 @@ FEATURE_TESTS_EXTRA :=                  \
+          libbfd-liberty                 \
+          libbfd-liberty-z               \
+          libopencsd                     \
+-         libunwind-x86                  \
+-         libunwind-x86_64               \
+-         libunwind-arm                  \
+-         libunwind-aarch64              \
+-         libunwind-debug-frame          \
+-         libunwind-debug-frame-arm      \
+-         libunwind-debug-frame-aarch64  \
+          cxx                            \
+          llvm                           \
+          clang                          \
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index 59ef3d7fe6a4e7..80ac297f819671 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -58,10 +58,6 @@
+ # include "test-libelf-getshdrstrndx.c"
+ #undef main
+ 
+-#define main main_test_libunwind
+-# include "test-libunwind.c"
+-#undef main
+-
+ #define main main_test_libslang
+ # include "test-libslang.c"
+ #undef main
+@@ -184,7 +180,6 @@ int main(int argc, char *argv[])
+ 	main_test_libelf_getphdrnum();
+ 	main_test_libelf_gelf_getnote();
+ 	main_test_libelf_getshdrstrndx();
+-	main_test_libunwind();
+ 	main_test_libslang();
+ 	main_test_libbfd();
+ 	main_test_libbfd_buildid();
+diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
+index 2f082b01ff2284..42ec5ddaab8dc8 100644
+--- a/tools/include/uapi/linux/if_xdp.h
++++ b/tools/include/uapi/linux/if_xdp.h
+@@ -117,12 +117,12 @@ struct xdp_options {
+ 	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+ 
+ /* Request transmit timestamp. Upon completion, put it into tx_timestamp
+- * field of union xsk_tx_metadata.
++ * field of struct xsk_tx_metadata.
+  */
+ #define XDP_TXMD_FLAGS_TIMESTAMP		(1 << 0)
+ 
+ /* Request transmit checksum offload. Checksum start position and offset
+- * are communicated via csum_start and csum_offset fields of union
++ * are communicated via csum_start and csum_offset fields of struct
+  * xsk_tx_metadata.
+  */
+ #define XDP_TXMD_FLAGS_CHECKSUM			(1 << 1)
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index 12468ae0d573d7..7e810fa468eaa3 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -1186,6 +1186,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
+ 
+ 	elf = elf_begin(fd, ELF_C_READ, NULL);
+ 	if (!elf) {
++		err = -LIBBPF_ERRNO__FORMAT;
+ 		pr_warn("failed to open %s as ELF file\n", path);
+ 		goto done;
+ 	}
+diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
+index b72f83e15156a6..53d1f3541bce69 100644
+--- a/tools/lib/bpf/btf_relocate.c
++++ b/tools/lib/bpf/btf_relocate.c
+@@ -212,7 +212,7 @@ static int btf_relocate_map_distilled_base(struct btf_relocate *r)
+ 	 * need to match both name and size, otherwise embedding the base
+ 	 * struct/union in the split type is invalid.
+ 	 */
+-	for (id = r->nr_dist_base_types; id < r->nr_split_types; id++) {
++	for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) {
+ 		err = btf_mark_embedded_composite_type_ids(r, id);
+ 		if (err)
+ 			goto done;
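
The btf_relocate.c fix corrects a loop bound: split type IDs occupy the range [nr_dist_base_types, nr_dist_base_types + nr_split_types), so comparing against nr_split_types alone walks the wrong range, often an empty one. In miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_base = 100, nr_split = 20;
        unsigned int visited = 0;

        /* split IDs are appended after the base IDs */
        for (unsigned int id = nr_base; id < nr_base + nr_split; id++)
            visited++;    /* ids 100..119: the split types */

        /* the buggy bound `id < nr_split` (20) never iterates when base > split */
        printf("visited %u split ids in [%u, %u)\n",
               visited, nr_base, nr_base + nr_split);
        return 0;
    }
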
+diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
+index cf71d149fe26a2..e56ba6e67451d6 100644
+--- a/tools/lib/bpf/linker.c
++++ b/tools/lib/bpf/linker.c
+@@ -566,17 +566,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 	}
+ 	obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
+ 	if (!obj->elf) {
+-		err = -errno;
+ 		pr_warn_elf("failed to parse ELF file '%s'", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Sanity check ELF file high-level properties */
+ 	ehdr = elf64_getehdr(obj->elf);
+ 	if (!ehdr) {
+-		err = -errno;
+ 		pr_warn_elf("failed to get ELF header for %s", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Linker output endianness set by first input object */
+@@ -606,9 +604,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 	}
+ 
+ 	if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
+-		err = -errno;
+ 		pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
+-		return err;
++		return -EINVAL;
+ 	}
+ 
+ 	scn = NULL;
+@@ -618,26 +615,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ 
+ 		shdr = elf64_getshdr(scn);
+ 		if (!shdr) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu header for %s",
+ 				    sec_idx, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
+ 		if (!sec_name) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu name for %s",
+ 				    sec_idx, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		data = elf_getdata(scn, 0);
+ 		if (!data) {
+-			err = -errno;
+ 			pr_warn_elf("failed to get section #%zu (%s) data from %s",
+ 				    sec_idx, sec_name, filename);
+-			return err;
++			return -EINVAL;
+ 		}
+ 
+ 		sec = add_src_sec(obj, sec_name);
+@@ -2680,14 +2674,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)
+ 
+ 	/* Finalize ELF layout */
+ 	if (elf_update(linker->elf, ELF_C_NULL) < 0) {
+-		err = -errno;
++		err = -EINVAL;
+ 		pr_warn_elf("failed to finalize ELF layout");
+ 		return libbpf_err(err);
+ 	}
+ 
+ 	/* Write out final ELF contents */
+ 	if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
+-		err = -errno;
++		err = -EINVAL;
+ 		pr_warn_elf("failed to write ELF contents");
+ 		return libbpf_err(err);
+ 	}
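
The linker.c hunks stop deriving return codes from errno after libelf calls: libelf reports failures through its own elf_errno()/elf_errmsg() channel and does not reliably set errno, so `err = -errno` could end up as 0, a bogus success. A hedged sketch of the safer pattern, assuming a system with libelf installed (build with cc x.c -lelf):

    /* Minimal sketch: report libelf failures via elf_errno() and return a
     * fixed error code rather than trusting errno. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <libelf.h>

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        if (elf_version(EV_CURRENT) == EV_NONE)
            return 1;

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
        if (!elf) {
            /* errno may still be 0 here; use libelf's own error channel */
            fprintf(stderr, "elf_begin: %s\n", elf_errmsg(elf_errno()));
            close(fd);
            return 1;    /* a fixed failure code, like the patch's -EINVAL */
        }
        elf_end(elf);
        close(fd);
        return 0;
    }
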
+diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
+index 5f085736c6c45d..4e4a52742b01c8 100644
+--- a/tools/lib/bpf/usdt.c
++++ b/tools/lib/bpf/usdt.c
+@@ -661,7 +661,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
+ 		 *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ 		 */
+ 		usdt_abs_ip = note.loc_addr;
+-		if (base_addr)
++		if (base_addr && note.base_addr)
+ 			usdt_abs_ip += base_addr - note.base_addr;
+ 
+ 		/* When attaching uprobes (which is what USDTs basically are)
+diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
+index e16cef160bc2cb..ce32cb35007d6f 100644
+--- a/tools/net/ynl/lib/ynl.c
++++ b/tools/net/ynl/lib/ynl.c
+@@ -95,7 +95,7 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
+ 
+ 	ynl_attr_for_each_payload(start, data_len, attr) {
+ 		astart_off = (char *)attr - (char *)start;
+-		aend_off = astart_off + ynl_attr_data_len(attr);
++		aend_off = (char *)ynl_attr_data_end(attr) - (char *)start;
+ 		if (aend_off <= off)
+ 			continue;
+ 
+diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
+index dc42de1785cee7..908165fcec7de3 100644
+--- a/tools/perf/MANIFEST
++++ b/tools/perf/MANIFEST
+@@ -1,5 +1,6 @@
+ arch/arm64/tools/gen-sysreg.awk
+ arch/arm64/tools/sysreg
++arch/*/include/uapi/asm/bpf_perf_event.h
+ tools/perf
+ tools/arch
+ tools/scripts
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 2916d59c88cd08..0e4f6a860ae253 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -43,7 +43,9 @@ endif
+ # Additional ARCH settings for ppc
+ ifeq ($(SRCARCH),powerpc)
+   CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated
+-  LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
++  ifndef NO_LIBUNWIND
++    LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
++  endif
+ endif
+ 
+ # Additional ARCH settings for x86
+@@ -53,25 +55,35 @@ ifeq ($(SRCARCH),x86)
+   ifeq (${IS_64_BIT}, 1)
+     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
+     ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
+-    LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
++    ifndef NO_LIBUNWIND
++      LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
++    endif
+     $(call detected,CONFIG_X86_64)
+   else
+-    LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
++    ifndef NO_LIBUNWIND
++      LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
++    endif
+   endif
+ endif
+ 
+ ifeq ($(SRCARCH),arm)
+-  LIBUNWIND_LIBS = -lunwind -lunwind-arm
++  ifndef NO_LIBUNWIND
++    LIBUNWIND_LIBS = -lunwind -lunwind-arm
++  endif
+ endif
+ 
+ ifeq ($(SRCARCH),arm64)
+   CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
+-  LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
++  ifndef NO_LIBUNWIND
++    LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
++  endif
+ endif
+ 
+ ifeq ($(SRCARCH),loongarch)
+   CFLAGS += -I$(OUTPUT)arch/loongarch/include/generated
+-  LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64
++  ifndef NO_LIBUNWIND
++    LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64
++  endif
+ endif
+ 
+ ifeq ($(ARCH),s390)
+@@ -80,7 +92,9 @@ endif
+ 
+ ifeq ($(ARCH),mips)
+   CFLAGS += -I$(OUTPUT)arch/mips/include/generated
+-  LIBUNWIND_LIBS = -lunwind -lunwind-mips
++  ifndef NO_LIBUNWIND
++    LIBUNWIND_LIBS = -lunwind -lunwind-mips
++  endif
+ endif
+ 
+ ifeq ($(ARCH),riscv)
+@@ -121,16 +135,18 @@ ifdef LIBUNWIND_DIR
+   $(foreach libunwind_arch,$(LIBUNWIND_ARCHS),$(call libunwind_arch_set_flags,$(libunwind_arch)))
+ endif
+ 
+-# Set per-feature check compilation flags
+-FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
+-FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+-FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+-FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+-
+-FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
+-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
+-FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
+-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
++ifndef NO_LIBUNWIND
++  # Set per-feature check compilation flags
++  FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
++  FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
++  FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
++  FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
++
++  FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
++  FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
++  FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
++  FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
++endif
+ 
+ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+ 
+@@ -734,26 +750,25 @@ ifeq ($(dwarf-post-unwind),1)
+   $(call detected,CONFIG_DWARF_UNWIND)
+ endif
+ 
+-ifndef NO_LOCAL_LIBUNWIND
+-  ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
+-    $(call feature_check,libunwind-debug-frame)
+-    ifneq ($(feature-libunwind-debug-frame), 1)
+-      $(warning No debug_frame support found in libunwind)
++ifndef NO_LIBUNWIND
++  ifndef NO_LOCAL_LIBUNWIND
++    ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
++      $(call feature_check,libunwind-debug-frame)
++      ifneq ($(feature-libunwind-debug-frame), 1)
++        $(warning No debug_frame support found in libunwind)
++        CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
++      endif
++    else
++      # non-ARM has no dwarf_find_debug_frame() function:
+       CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+     endif
+-  else
+-    # non-ARM has no dwarf_find_debug_frame() function:
+-    CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
++    EXTLIBS += $(LIBUNWIND_LIBS)
++    LDFLAGS += $(LIBUNWIND_LIBS)
++  endif
++  ifeq ($(findstring -static,${LDFLAGS}),-static)
++    # gcc -static links libgcc_eh which contains a piece of libunwind
++    LIBUNWIND_LDFLAGS += -Wl,--allow-multiple-definition
+   endif
+-  EXTLIBS += $(LIBUNWIND_LIBS)
+-  LDFLAGS += $(LIBUNWIND_LIBS)
+-endif
+-ifeq ($(findstring -static,${LDFLAGS}),-static)
+-  # gcc -static links libgcc_eh which contans piece of libunwind
+-  LIBUNWIND_LDFLAGS += -Wl,--allow-multiple-definition
+-endif
+-
+-ifndef NO_LIBUNWIND
+   CFLAGS  += -DHAVE_LIBUNWIND_SUPPORT
+   CFLAGS  += $(LIBUNWIND_CFLAGS)
+   LDFLAGS += $(LIBUNWIND_LDFLAGS)
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index d6989195a061ff..11e49cafa3af9d 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -2367,10 +2367,10 @@ int cmd_inject(int argc, const char **argv)
+ 	};
+ 	int ret;
+ 	const char *known_build_ids = NULL;
+-	bool build_ids;
+-	bool build_id_all;
+-	bool mmap2_build_ids;
+-	bool mmap2_build_id_all;
++	bool build_ids = false;
++	bool build_id_all = false;
++	bool mmap2_build_ids = false;
++	bool mmap2_build_id_all = false;
+ 
+ 	struct option options[] = {
+ 		OPT_BOOLEAN('b', "build-ids", &build_ids,
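
The builtin-inject fix initializes the option-backed booleans: OPT_BOOLEAN only writes a variable when its flag actually appears on the command line, so the untouched variables were read uninitialized. The general rule in a generic sketch (a hand-rolled flag loop, not perf's option parser):

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        /* Parser-backed flags must start from a known state, because the
         * parser only assigns them when the option is present. */
        int build_ids = 0;            /* was: uninitialized */

        for (int i = 1; i < argc; i++)
            if (!strcmp(argv[i], "--build-ids"))
                build_ids = 1;

        printf("build_ids = %d\n", build_ids);
        return 0;
    }
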
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index 062e2b56a2ab57..33a456980664a0 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -1591,8 +1591,8 @@ static const struct {
+ 	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W",	"percpu-rwsem" },
+ 	{ LCB_F_MUTEX,			"mutex",	"mutex" },
+ 	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex",	"mutex" },
+-	/* alias for get_type_flag() */
+-	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex-spin",	"mutex" },
++	/* alias for optimistic spinning only */
++	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex:spin",	"mutex-spin" },
+ };
+ 
+ static const char *get_type_str(unsigned int flags)
+@@ -1617,19 +1617,6 @@ static const char *get_type_name(unsigned int flags)
+ 	return "unknown";
+ }
+ 
+-static unsigned int get_type_flag(const char *str)
+-{
+-	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+-		if (!strcmp(lock_type_table[i].name, str))
+-			return lock_type_table[i].flags;
+-	}
+-	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+-		if (!strcmp(lock_type_table[i].str, str))
+-			return lock_type_table[i].flags;
+-	}
+-	return UINT_MAX;
+-}
+-
+ static void lock_filter_finish(void)
+ {
+ 	zfree(&filters.types);
+@@ -2350,29 +2337,58 @@ static int parse_lock_type(const struct option *opt __maybe_unused, const char *
+ 			   int unset __maybe_unused)
+ {
+ 	char *s, *tmp, *tok;
+-	int ret = 0;
+ 
+ 	s = strdup(str);
+ 	if (s == NULL)
+ 		return -1;
+ 
+ 	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+-		unsigned int flags = get_type_flag(tok);
++		bool found = false;
+ 
+-		if (flags == -1U) {
+-			pr_err("Unknown lock flags: %s\n", tok);
+-			ret = -1;
+-			break;
++		/* `tok` is `str` in `lock_type_table` if it contains ':'. */
++		if (strchr(tok, ':')) {
++			for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
++				if (!strcmp(lock_type_table[i].str, tok) &&
++				    add_lock_type(lock_type_table[i].flags)) {
++					found = true;
++					break;
++				}
++			}
++
++			if (!found) {
++				pr_err("Unknown lock flags name: %s\n", tok);
++				free(s);
++				return -1;
++			}
++
++			continue;
+ 		}
+ 
+-		if (!add_lock_type(flags)) {
+-			ret = -1;
+-			break;
++		/*
++		 * Otherwise `tok` is `name` in `lock_type_table`.
++		 * A single lock name can map to multiple flag combinations.
++		 */
++		for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
++			if (!strcmp(lock_type_table[i].name, tok)) {
++				if (add_lock_type(lock_type_table[i].flags)) {
++					found = true;
++				} else {
++					free(s);
++					return -1;
++				}
++			}
+ 		}
++
++		if (!found) {
++			pr_err("Unknown lock name: %s\n", tok);
++			free(s);
++			return -1;
++		}
+ 	}
+ 
+ 	free(s);
+-	return ret;
++	return 0;
+ }
+ 
+ static bool add_lock_addr(unsigned long addr)
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 048c91960ba91c..a5672749f78191 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -1422,7 +1422,7 @@ int cmd_report(int argc, const char **argv)
+ 	OPT_STRING(0, "addr2line", &addr2line_path, "path",
+ 		   "addr2line binary to use for line numbers"),
+ 	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+-		    "Disable symbol demangling"),
++		    "Symbol demangling. Enabled by default, use --no-demangle to disable."),
+ 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ 		    "Enable kernel symbol demangling"),
+ 	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 724a7938632126..ca3e8eca6610e8 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -809,7 +809,7 @@ static void perf_event__process_sample(const struct perf_tool *tool,
+ 		 * invalid --vmlinux ;-)
+ 		 */
+ 		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
+-		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
++		    __map__is_kernel(al.map) && !map__has_symbols(al.map)) {
+ 			if (symbol_conf.vmlinux_name) {
+ 				char serr[256];
+ 
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 6a1a128fe64501..2756c4f5b5dad7 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -2122,8 +2122,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ 		return PTR_ERR(sc->tp_format);
+ 	}
+ 
++	/*
++	 * The tracepoint format contains a __syscall_nr field, so its field
++	 * count is one more than the actual number of syscall arguments.
++	 */
+ 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+-					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
++					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields - 1))
+ 		return -ENOMEM;
+ 
+ 	sc->args = sc->tp_format->format.fields;
+diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
+index 8ddb8558613195..b066d721f89735 100644
+--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
++++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
+@@ -69,16 +69,16 @@ def check_json_output(expected_items):
+   for item in json.loads(input):
+     if expected_items != -1:
+       count = len(item)
+-      if count != expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
++      if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
+         # Events that generate >1 metric may have isolated metric
+         # values and possibly other prefixes like interval, core,
+         # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+         pass
+-      elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
++      elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+         pass
+-      elif count == expected_items + 1 and 'metric-threshold' in item:
++      elif count - 1 in expected_items and 'metric-threshold' in item:
+           pass
+-      elif count != expected_items:
++      elif count not in expected_items:
+         raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+                            f' in \'{item}\'')
+     for key, value in item.items():
+@@ -90,11 +90,11 @@ def check_json_output(expected_items):
+ 
+ try:
+   if args.no_args or args.system_wide or args.event:
+-    expected_items = 7
++    expected_items = [5, 7]
+   elif args.interval or args.per_thread or args.system_wide_no_aggr:
+-    expected_items = 8
++    expected_items = [6, 8]
+   elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
+-    expected_items = 9
++    expected_items = [7, 9]
+   else:
+     # If no option is specified, don't check the number of items.
+     expected_items = -1
+diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
+index 7a8adf81e4b393..68323d636fb772 100755
+--- a/tools/perf/tests/shell/stat.sh
++++ b/tools/perf/tests/shell/stat.sh
+@@ -187,7 +187,11 @@ test_hybrid() {
+   # Run default Perf stat
+   cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*|  cycles[:uH]*  " -c)
+ 
+-  if [ "$pmus" -ne "$cycles_events" ]
++  # The expectation is that the default output will have a cycles event on
++  # each hybrid PMU. In situations with no cycles PMU events, such as
++  # virtualized environments, this can fall back to task-clock, so the end
++  # count may be 0. Fail if neither condition holds.
++  if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
+   then
+     echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
+     err=1
+diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
+index 5a3b8a5a9b5cf2..8d1e6bbeac9068 100755
+--- a/tools/perf/tests/shell/trace_btf_enum.sh
++++ b/tools/perf/tests/shell/trace_btf_enum.sh
+@@ -26,8 +26,12 @@ check_vmlinux() {
+ trace_landlock() {
+   echo "Tracing syscall ${syscall}"
+ 
+-  # test flight just to see if landlock_add_rule and libbpf are available
+-  $TESTPROG
++  # test flight just to see if landlock_add_rule is available
++  if ! perf trace $TESTPROG 2>&1 | grep -q landlock
++  then
++    echo "No landlock system call found, skipping to non-syscall tracing."
++    return
++  fi
+ 
+   if perf trace -e $syscall $TESTPROG 2>&1 | \
+      grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index 32e15c9f53f3c0..31dce9b87bffd1 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -2102,6 +2102,57 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
+ 	return 0;
+ }
+ 
++const char * const perf_disassembler__strs[] = {
++	[PERF_DISASM_UNKNOWN]  = "unknown",
++	[PERF_DISASM_LLVM]     = "llvm",
++	[PERF_DISASM_CAPSTONE] = "capstone",
++	[PERF_DISASM_OBJDUMP]  = "objdump",
++};
++
++static void annotation_options__add_disassembler(struct annotation_options *options,
++						 enum perf_disassembler dis)
++{
++	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
++		if (options->disassemblers[i] == dis) {
++			/* Disassembler is already present then don't add again. */
++			return;
++		}
++		if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
++			/* Found a free slot. */
++			options->disassemblers[i] = dis;
++			return;
++		}
++	}
++	pr_err("Failed to add disassembler %d\n", dis);
++}
++
++static int annotation_options__add_disassemblers_str(struct annotation_options *options,
++						const char *str)
++{
++	while (str && *str != '\0') {
++		const char *comma = strchr(str, ',');
++		int len = comma ? comma - str : (int)strlen(str);
++		bool match = false;
++
++		for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
++			const char *dis_str = perf_disassembler__strs[i];
++
++			if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
++				annotation_options__add_disassembler(options, i);
++				match = true;
++				break;
++			}
++		}
++		if (!match) {
++			pr_err("Invalid disassembler '%.*s'\n", len, str);
++			return -1;
++		}
++		str = comma ? comma + 1 : NULL;
++	}
++	return 0;
++}
++
+ static int annotation__config(const char *var, const char *value, void *data)
+ {
+ 	struct annotation_options *opt = data;
+@@ -2117,11 +2168,10 @@ static int annotation__config(const char *var, const char *value, void *data)
+ 		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
+ 			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+ 	} else if (!strcmp(var, "annotate.disassemblers")) {
+-		opt->disassemblers_str = strdup(value);
+-		if (!opt->disassemblers_str) {
+-			pr_err("Not enough memory for annotate.disassemblers\n");
+-			return -1;
+-		}
++		int err = annotation_options__add_disassemblers_str(opt, value);
++
++		if (err)
++			return err;
+ 	} else if (!strcmp(var, "annotate.hide_src_code")) {
+ 		opt->hide_src_code = perf_config_bool("hide_src_code", value);
+ 	} else if (!strcmp(var, "annotate.jump_arrows")) {
+@@ -2187,9 +2237,25 @@ void annotation_options__exit(void)
+ 	zfree(&annotate_opts.objdump_path);
+ }
+ 
++static void annotation_options__default_init_disassemblers(struct annotation_options *options)
++{
++	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
++		/* Already initialized. */
++		return;
++	}
++#ifdef HAVE_LIBLLVM_SUPPORT
++	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
++#endif
++#ifdef HAVE_LIBCAPSTONE_SUPPORT
++	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
++#endif
++	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
++}
++
+ void annotation_config__init(void)
+ {
+ 	perf_config(annotation__config, &annotate_opts);
++	annotation_options__default_init_disassemblers(&annotate_opts);
+ }
+ 
+ static unsigned int parse_percent_type(char *str1, char *str2)
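
The annotate rework replaces a heap-allocated comma-separated string with a fixed array of enum slots, filled in priority order and deduplicated on insert. The add-with-dedup helper, condensed into a standalone program (same shape, toy names):

    #include <stdio.h>

    enum dis { DIS_UNKNOWN = 0, DIS_LLVM, DIS_CAPSTONE, DIS_OBJDUMP };
    #define MAX_DIS (DIS_OBJDUMP + 1)

    static enum dis slots[MAX_DIS];  /* zero == DIS_UNKNOWN == free slot */

    static void add_dis(enum dis d)
    {
        for (int i = 0; i < MAX_DIS; i++) {
            if (slots[i] == d)
                return;               /* already present: keep first position */
            if (slots[i] == DIS_UNKNOWN) {
                slots[i] = d;         /* first free slot */
                return;
            }
        }
    }

    int main(void)
    {
        add_dis(DIS_LLVM);
        add_dis(DIS_OBJDUMP);
        add_dis(DIS_LLVM);            /* duplicate: ignored */
        for (int i = 0; i < MAX_DIS && slots[i]; i++)
            printf("slot %d = %d\n", i, slots[i]);
        return 0;
    }
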
+diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
+index 194a05cbc506e4..858912157e0191 100644
+--- a/tools/perf/util/annotate.h
++++ b/tools/perf/util/annotate.h
+@@ -34,8 +34,13 @@ struct annotated_data_type;
+ #define ANNOTATION__BR_CNTR_WIDTH 30
+ #define ANNOTATION_DUMMY_LEN	256
+ 
+-// llvm, capstone, objdump
+-#define MAX_DISASSEMBLERS 3
++enum perf_disassembler {
++	PERF_DISASM_UNKNOWN = 0,
++	PERF_DISASM_LLVM,
++	PERF_DISASM_CAPSTONE,
++	PERF_DISASM_OBJDUMP,
++};
++#define MAX_DISASSEMBLERS (PERF_DISASM_OBJDUMP + 1)
+ 
+ struct annotation_options {
+ 	bool hide_src_code,
+@@ -52,14 +57,12 @@ struct annotation_options {
+ 	     annotate_src,
+ 	     full_addr;
+ 	u8   offset_level;
+-	u8   nr_disassemblers;
++	u8   disassemblers[MAX_DISASSEMBLERS];
+ 	int  min_pcnt;
+ 	int  max_lines;
+ 	int  context;
+ 	char *objdump_path;
+ 	char *disassembler_style;
+-	const char *disassemblers_str;
+-	const char *disassemblers[MAX_DISASSEMBLERS];
+ 	const char *prefix;
+ 	const char *prefix_strip;
+ 	unsigned int percent_type;
+@@ -134,6 +137,8 @@ struct disasm_line {
+ 	struct annotation_line	 al;
+ };
+ 
++extern const char * const perf_disassembler__strs[];
++
+ void annotation_line__add(struct annotation_line *al, struct list_head *head);
+ 
+ static inline double annotation_data__percent(struct annotation_data *data,
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index 13608237c50e05..c81444059ad077 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -289,7 +289,10 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
+ 		}
+ 
+ 		info_node->info_linear = info_linear;
+-		perf_env__insert_bpf_prog_info(env, info_node);
++		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 		info_linear = NULL;
+ 
+ 		/*
+@@ -480,7 +483,10 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+ 	info_node = malloc(sizeof(struct bpf_prog_info_node));
+ 	if (info_node) {
+ 		info_node->info_linear = info_linear;
+-		perf_env__insert_bpf_prog_info(env, info_node);
++		if (!perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 	} else
+ 		free(info_linear);
+ 
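
Both bpf-event.c call sites now check the insert result and free the node when the insert is refused as a duplicate, where it was previously leaked. The ownership rule: an insert that can refuse must report it, and the caller keeps ownership on refusal. A trivial fixed-size table makes the point (a sketch, not the rbtree code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { unsigned id; };

    static struct node *table[16];

    /* Returns false (and takes no ownership) if id is already present. */
    static bool insert(struct node *n)
    {
        for (int i = 0; i < 16; i++) {
            if (table[i] && table[i]->id == n->id)
                return false;          /* duplicate: caller still owns n */
            if (!table[i]) {
                table[i] = n;          /* inserted: the table owns n now */
                return true;
            }
        }
        return false;
    }

    int main(void)
    {
        struct node *a = malloc(sizeof(*a)); a->id = 7;
        struct node *b = malloc(sizeof(*b)); b->id = 7;

        insert(a);
        if (!insert(b))
            free(b);                   /* the fix: no leak on duplicate */
        printf("inserted id %u once\n", table[0]->id);
        free(a);                       /* a == table[0] */
        return 0;
    }
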
+diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+index 4a62ed593e84ed..e4352881e3faa6 100644
+--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
++++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+@@ -431,9 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
+ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
+ {
+ 	bool augmented, do_output = false;
+-	int zero = 0, size, aug_size, index,
+-	    value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
++	int zero = 0, index, value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ 	u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
++	s64 aug_size, size;
+ 	unsigned int nr, *beauty_map;
+ 	struct beauty_payload_enter *payload;
+ 	void *arg, *payload_offset;
+@@ -484,14 +484,11 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
+ 		} else if (size > 0 && size <= value_size) { /* struct */
+ 			if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, size, arg))
+ 				augmented = true;
+-		} else if (size < 0 && size >= -6) { /* buffer */
++		} else if ((int)size < 0 && size >= -6) { /* buffer */
+ 			index = -(size + 1);
+ 			barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
+ 			index &= 7;	    // Satisfy the bounds checking with the verifier in some kernels.
+-			aug_size = args->args[index];
+-
+-			if (aug_size > TRACE_AUG_MAX_BUF)
+-				aug_size = TRACE_AUG_MAX_BUF;
++			aug_size = args->args[index] > TRACE_AUG_MAX_BUF ? TRACE_AUG_MAX_BUF : args->args[index];
+ 
+ 			if (aug_size > 0) {
+ 				if (!bpf_probe_read_user(((struct augmented_arg *)payload_offset)->value, aug_size, arg))
+diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
+index 41a2b08670dc5b..28ceb76e465ba9 100644
+--- a/tools/perf/util/disasm.c
++++ b/tools/perf/util/disasm.c
+@@ -2213,56 +2213,6 @@ static int symbol__disassemble_objdump(const char *filename, struct symbol *sym,
+ 	return err;
+ }
+ 
+-static int annotation_options__init_disassemblers(struct annotation_options *options)
+-{
+-	char *disassembler;
+-
+-	if (options->disassemblers_str == NULL) {
+-		const char *default_disassemblers_str =
+-#ifdef HAVE_LIBLLVM_SUPPORT
+-				"llvm,"
+-#endif
+-#ifdef HAVE_LIBCAPSTONE_SUPPORT
+-				"capstone,"
+-#endif
+-				"objdump";
+-
+-		options->disassemblers_str = strdup(default_disassemblers_str);
+-		if (!options->disassemblers_str)
+-			goto out_enomem;
+-	}
+-
+-	disassembler = strdup(options->disassemblers_str);
+-	if (disassembler == NULL)
+-		goto out_enomem;
+-
+-	while (1) {
+-		char *comma = strchr(disassembler, ',');
+-
+-		if (comma != NULL)
+-			*comma = '\0';
+-
+-		options->disassemblers[options->nr_disassemblers++] = strim(disassembler);
+-
+-		if (comma == NULL)
+-			break;
+-
+-		disassembler = comma + 1;
+-
+-		if (options->nr_disassemblers >= MAX_DISASSEMBLERS) {
+-			pr_debug("annotate.disassemblers can have at most %d entries, ignoring \"%s\"\n",
+-				 MAX_DISASSEMBLERS, disassembler);
+-			break;
+-		}
+-	}
+-
+-	return 0;
+-
+-out_enomem:
+-	pr_err("Not enough memory for annotate.disassemblers\n");
+-	return -1;
+-}
+-
+ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
+ {
+ 	struct annotation_options *options = args->options;
+@@ -2271,7 +2221,6 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
+ 	char symfs_filename[PATH_MAX];
+ 	bool delete_extract = false;
+ 	struct kcore_extract kce;
+-	const char *disassembler;
+ 	bool decomp = false;
+ 	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
+ 
+@@ -2331,28 +2280,26 @@ int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
+ 		}
+ 	}
+ 
+-	err = annotation_options__init_disassemblers(options);
+-	if (err)
+-		goto out_remove_tmp;
+-
+ 	err = -1;
++	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers) && err != 0; i++) {
++		enum perf_disassembler dis = options->disassemblers[i];
+ 
+-	for (int i = 0; i < options->nr_disassemblers && err != 0; ++i) {
+-		disassembler = options->disassemblers[i];
+-
+-		if (!strcmp(disassembler, "llvm"))
++		switch (dis) {
++		case PERF_DISASM_LLVM:
+ 			err = symbol__disassemble_llvm(symfs_filename, sym, args);
+-		else if (!strcmp(disassembler, "capstone"))
++			break;
++		case PERF_DISASM_CAPSTONE:
+ 			err = symbol__disassemble_capstone(symfs_filename, sym, args);
+-		else if (!strcmp(disassembler, "objdump"))
++			break;
++		case PERF_DISASM_OBJDUMP:
+ 			err = symbol__disassemble_objdump(symfs_filename, sym, args);
+-		else
+-			pr_debug("Unknown disassembler %s, skipping...\n", disassembler);
+-	}
+-
+-	if (err == 0) {
+-		pr_debug("Disassembled with %s\nannotate.disassemblers=%s\n",
+-			 disassembler, options->disassemblers_str);
++			break;
++		case PERF_DISASM_UNKNOWN: /* End of disassemblers. */
++		default:
++			goto out_remove_tmp;
++		}
++		if (err == 0)
++			pr_debug("Disassembled with %s\n", perf_disassembler__strs[dis]);
+ 	}
+ out_remove_tmp:
+ 	if (decomp)
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index e2843ca2edd92e..a6321e7f063300 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -24,15 +24,19 @@ struct perf_env perf_env;
+ #include "bpf-utils.h"
+ #include <bpf/libbpf.h>
+ 
+-void perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node)
+ {
++	bool ret;
++
+ 	down_write(&env->bpf_progs.lock);
+-	__perf_env__insert_bpf_prog_info(env, info_node);
++	ret = __perf_env__insert_bpf_prog_info(env, info_node);
+ 	up_write(&env->bpf_progs.lock);
++
++	return ret;
+ }
+ 
+-void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
++bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+ {
+ 	__u32 prog_id = info_node->info_linear->info.id;
+ 	struct bpf_prog_info_node *node;
+@@ -50,13 +54,14 @@ void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info
+ 			p = &(*p)->rb_right;
+ 		} else {
+ 			pr_debug("duplicated bpf prog info %u\n", prog_id);
+-			return;
++			return false;
+ 		}
+ 	}
+ 
+ 	rb_link_node(&info_node->rb_node, parent, p);
+ 	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ 	env->bpf_progs.infos_cnt++;
++	return true;
+ }
+ 
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
+index ae604c4edbb7eb..da11add761d0c1 100644
+--- a/tools/perf/util/env.h
++++ b/tools/perf/util/env.h
+@@ -176,9 +176,9 @@ const char *perf_env__raw_arch(struct perf_env *env);
+ int perf_env__nr_cpus_avail(struct perf_env *env);
+ 
+ void perf_env__init(struct perf_env *env);
+-void __perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				      struct bpf_prog_info_node *info_node);
+-void perf_env__insert_bpf_prog_info(struct perf_env *env,
++bool perf_env__insert_bpf_prog_info(struct perf_env *env,
+ 				    struct bpf_prog_info_node *info_node);
+ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ 							__u32 prog_id);
+diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
+index f289044a1f7c63..c221dcce666609 100644
+--- a/tools/perf/util/expr.c
++++ b/tools/perf/util/expr.c
+@@ -285,7 +285,7 @@ struct expr_parse_ctx *expr__ctx_new(void)
+ {
+ 	struct expr_parse_ctx *ctx;
+ 
+-	ctx = malloc(sizeof(struct expr_parse_ctx));
++	ctx = calloc(1, sizeof(struct expr_parse_ctx));
+ 	if (!ctx)
+ 		return NULL;
+ 
+@@ -294,9 +294,6 @@ struct expr_parse_ctx *expr__ctx_new(void)
+ 		free(ctx);
+ 		return NULL;
+ 	}
+-	ctx->sctx.user_requested_cpu_list = NULL;
+-	ctx->sctx.runtime = 0;
+-	ctx->sctx.system_wide = false;
+ 
+ 	return ctx;
+ }
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 3451e542b69a8c..d06aa86352d3c1 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3158,7 +3158,10 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+ 		/* after reading from file, translate offset to address */
+ 		bpil_offs_to_addr(info_linear);
+ 		info_node->info_linear = info_linear;
+-		__perf_env__insert_bpf_prog_info(env, info_node);
++		if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
++			free(info_linear);
++			free(info_node);
++		}
+ 	}
+ 
+ 	up_write(&env->bpf_progs.lock);
+@@ -3205,7 +3208,8 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+ 		if (__do_read(ff, node->data, data_size))
+ 			goto out;
+ 
+-		__perf_env__insert_btf(env, node);
++		if (!__perf_env__insert_btf(env, node))
++			free(node);
+ 		node = NULL;
+ 	}
+ 
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 27d5345d2b307a..9be2f4479f5257 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1003,7 +1003,7 @@ static int machine__get_running_kernel_start(struct machine *machine,
+ 
+ 	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
+ 	if (err)
+-		err = kallsyms__get_function_start(filename, "_etext", &addr);
++		err = kallsyms__get_symbol_start(filename, "_etext", &addr);
+ 	if (!err)
+ 		*end = addr;
+ 
+diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
+index 432399cbe5dd39..09c9cc326c08d4 100644
+--- a/tools/perf/util/maps.c
++++ b/tools/perf/util/maps.c
+@@ -1136,8 +1136,13 @@ struct map *maps__find_next_entry(struct maps *maps, struct map *map)
+ 	struct map *result = NULL;
+ 
+ 	down_read(maps__lock(maps));
++	while (!maps__maps_by_address_sorted(maps)) {
++		up_read(maps__lock(maps));
++		maps__sort_by_address(maps);
++		down_read(maps__lock(maps));
++	}
+ 	i = maps__by_address_index(maps, map);
+-	if (i < maps__nr_maps(maps))
++	if (++i < maps__nr_maps(maps))
+ 		result = map__get(maps__maps_by_address(maps)[i]);
+ 
+ 	up_read(maps__lock(maps));
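
maps__find_next_entry() gains two fixes: the by-address array must actually be sorted before a binary search over it is meaningful (hence the sort-then-retake loop), and the found index must be advanced (++i) or the function hands back the map it was given. The next-entry idiom in isolation (plain arrays, not the maps types):

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_int(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;
        return (x > y) - (x < y);
    }

    int main(void)
    {
        int addrs[] = { 0x400000, 0x600000, 0x7f0000 };
        int n = 3;

        qsort(addrs, n, sizeof(int), cmp_int);   /* binary search needs this */

        int *cur = bsearch(&(int){0x600000}, addrs, n, sizeof(int), cmp_int);
        int i = (int)(cur - addrs);

        if (++i < n)                             /* the fix: step past current */
            printf("next map starts at 0x%x\n", addrs[i]);
        return 0;
    }
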
+diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
+index cb185c5659d6b3..68f5de2d79c72c 100644
+--- a/tools/perf/util/namespaces.c
++++ b/tools/perf/util/namespaces.c
+@@ -266,11 +266,16 @@ pid_t nsinfo__pid(const struct nsinfo  *nsi)
+ 	return RC_CHK_ACCESS(nsi)->pid;
+ }
+ 
+-pid_t nsinfo__in_pidns(const struct nsinfo  *nsi)
++bool nsinfo__in_pidns(const struct nsinfo *nsi)
+ {
+ 	return RC_CHK_ACCESS(nsi)->in_pidns;
+ }
+ 
++void nsinfo__set_in_pidns(struct nsinfo *nsi)
++{
++	RC_CHK_ACCESS(nsi)->in_pidns = true;
++}
++
+ void nsinfo__mountns_enter(struct nsinfo *nsi,
+ 				  struct nscookie *nc)
+ {
+diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
+index 8c0731c6cbb7ee..e95c79b80e27c8 100644
+--- a/tools/perf/util/namespaces.h
++++ b/tools/perf/util/namespaces.h
+@@ -58,7 +58,8 @@ void nsinfo__clear_need_setns(struct nsinfo *nsi);
+ pid_t nsinfo__tgid(const struct nsinfo  *nsi);
+ pid_t nsinfo__nstgid(const struct nsinfo  *nsi);
+ pid_t nsinfo__pid(const struct nsinfo  *nsi);
+-pid_t nsinfo__in_pidns(const struct nsinfo  *nsi);
++bool nsinfo__in_pidns(const struct nsinfo  *nsi);
++void nsinfo__set_in_pidns(struct nsinfo *nsi);
+ 
+ void nsinfo__mountns_enter(struct nsinfo *nsi, struct nscookie *nc);
+ void nsinfo__mountns_exit(struct nscookie *nc);
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index 53dcdf07f5a21e..a5d72f4a515c93 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -114,23 +114,44 @@ static void print_running_csv(struct perf_stat_config *config, u64 run, u64 ena)
+ 	fprintf(config->output, "%s%" PRIu64 "%s%.2f",
+ 		config->csv_sep, run, config->csv_sep, enabled_percent);
+ }
++struct outstate {
++	FILE *fh;
++	bool newline;
++	bool first;
++	const char *prefix;
++	int  nfields;
++	int  aggr_nr;
++	struct aggr_cpu_id id;
++	struct evsel *evsel;
++	struct cgroup *cgrp;
++};
+ 
+-static void print_running_json(struct perf_stat_config *config, u64 run, u64 ena)
++static const char *json_sep(struct outstate *os)
++{
++	const char *sep = os->first ? "" : ", ";
++
++	os->first = false;
++	return sep;
++}
++
++#define json_out(os, format, ...) fprintf((os)->fh, "%s" format, json_sep(os), ##__VA_ARGS__)
++
++static void print_running_json(struct outstate *os, u64 run, u64 ena)
+ {
+ 	double enabled_percent = 100;
+ 
+ 	if (run != ena)
+ 		enabled_percent = 100 * run / ena;
+-	fprintf(config->output, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f, ",
+-		run, enabled_percent);
++	json_out(os, "\"event-runtime\" : %" PRIu64 ", \"pcnt-running\" : %.2f",
++		 run, enabled_percent);
+ }
+ 
+-static void print_running(struct perf_stat_config *config,
++static void print_running(struct perf_stat_config *config, struct outstate *os,
+ 			  u64 run, u64 ena, bool before_metric)
+ {
+ 	if (config->json_output) {
+ 		if (before_metric)
+-			print_running_json(config, run, ena);
++			print_running_json(os, run, ena);
+ 	} else if (config->csv_output) {
+ 		if (before_metric)
+ 			print_running_csv(config, run, ena);
+@@ -153,20 +174,20 @@ static void print_noise_pct_csv(struct perf_stat_config *config,
+ 	fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
+ }
+ 
+-static void print_noise_pct_json(struct perf_stat_config *config,
++static void print_noise_pct_json(struct outstate *os,
+ 				 double pct)
+ {
+-	fprintf(config->output, "\"variance\" : %.2f, ", pct);
++	json_out(os, "\"variance\" : %.2f", pct);
+ }
+ 
+-static void print_noise_pct(struct perf_stat_config *config,
++static void print_noise_pct(struct perf_stat_config *config, struct outstate *os,
+ 			    double total, double avg, bool before_metric)
+ {
+ 	double pct = rel_stddev_stats(total, avg);
+ 
+ 	if (config->json_output) {
+ 		if (before_metric)
+-			print_noise_pct_json(config, pct);
++			print_noise_pct_json(os, pct);
+ 	} else if (config->csv_output) {
+ 		if (before_metric)
+ 			print_noise_pct_csv(config, pct);
+@@ -176,7 +197,7 @@ static void print_noise_pct(struct perf_stat_config *config,
+ 	}
+ }
+ 
+-static void print_noise(struct perf_stat_config *config,
++static void print_noise(struct perf_stat_config *config, struct outstate *os,
+ 			struct evsel *evsel, double avg, bool before_metric)
+ {
+ 	struct perf_stat_evsel *ps;
+@@ -185,7 +206,7 @@ static void print_noise(struct perf_stat_config *config,
+ 		return;
+ 
+ 	ps = evsel->stats;
+-	print_noise_pct(config, stddev_stats(&ps->res_stats), avg, before_metric);
++	print_noise_pct(config, os, stddev_stats(&ps->res_stats), avg, before_metric);
+ }
+ 
+ static void print_cgroup_std(struct perf_stat_config *config, const char *cgrp_name)
+@@ -198,18 +219,19 @@ static void print_cgroup_csv(struct perf_stat_config *config, const char *cgrp_n
+ 	fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
+ }
+ 
+-static void print_cgroup_json(struct perf_stat_config *config, const char *cgrp_name)
++static void print_cgroup_json(struct outstate *os, const char *cgrp_name)
+ {
+-	fprintf(config->output, "\"cgroup\" : \"%s\", ", cgrp_name);
++	json_out(os, "\"cgroup\" : \"%s\"", cgrp_name);
+ }
+ 
+-static void print_cgroup(struct perf_stat_config *config, struct cgroup *cgrp)
++static void print_cgroup(struct perf_stat_config *config, struct outstate *os,
++			 struct cgroup *cgrp)
+ {
+ 	if (nr_cgroups || config->cgroup_list) {
+ 		const char *cgrp_name = cgrp ? cgrp->name  : "";
+ 
+ 		if (config->json_output)
+-			print_cgroup_json(config, cgrp_name);
++			print_cgroup_json(os, cgrp_name);
+ 		else if (config->csv_output)
+ 			print_cgroup_csv(config, cgrp_name);
+ 		else
+@@ -324,47 +346,45 @@ static void print_aggr_id_csv(struct perf_stat_config *config,
+ 	}
+ }
+ 
+-static void print_aggr_id_json(struct perf_stat_config *config,
++static void print_aggr_id_json(struct perf_stat_config *config, struct outstate *os,
+ 			       struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
+ {
+-	FILE *output = config->output;
+-
+ 	switch (config->aggr_mode) {
+ 	case AGGR_CORE:
+-		fprintf(output, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"core\" : \"S%d-D%d-C%d\", \"aggregate-number\" : %d",
+ 			id.socket, id.die, id.core, aggr_nr);
+ 		break;
+ 	case AGGR_CACHE:
+-		fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d",
+ 			id.socket, id.die, id.cache_lvl, id.cache, aggr_nr);
+ 		break;
+ 	case AGGR_CLUSTER:
+-		fprintf(output, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d",
+ 			id.socket, id.die, id.cluster, aggr_nr);
+ 		break;
+ 	case AGGR_DIE:
+-		fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d",
+ 			id.socket, id.die, aggr_nr);
+ 		break;
+ 	case AGGR_SOCKET:
+-		fprintf(output, "\"socket\" : \"S%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"socket\" : \"S%d\", \"aggregate-number\" : %d",
+ 			id.socket, aggr_nr);
+ 		break;
+ 	case AGGR_NODE:
+-		fprintf(output, "\"node\" : \"N%d\", \"aggregate-number\" : %d, ",
++		json_out(os, "\"node\" : \"N%d\", \"aggregate-number\" : %d",
+ 			id.node, aggr_nr);
+ 		break;
+ 	case AGGR_NONE:
+ 		if (evsel->percore && !config->percore_show_thread) {
+-			fprintf(output, "\"core\" : \"S%d-D%d-C%d\"",
++			json_out(os, "\"core\" : \"S%d-D%d-C%d\"",
+ 				id.socket, id.die, id.core);
+ 		} else if (id.cpu.cpu > -1) {
+-			fprintf(output, "\"cpu\" : \"%d\", ",
++			json_out(os, "\"cpu\" : \"%d\"",
+ 				id.cpu.cpu);
+ 		}
+ 		break;
+ 	case AGGR_THREAD:
+-		fprintf(output, "\"thread\" : \"%s-%d\", ",
++		json_out(os, "\"thread\" : \"%s-%d\"",
+ 			perf_thread_map__comm(evsel->core.threads, id.thread_idx),
+ 			perf_thread_map__pid(evsel->core.threads, id.thread_idx));
+ 		break;
+@@ -376,29 +396,17 @@ static void print_aggr_id_json(struct perf_stat_config *config,
+ 	}
+ }
+ 
+-static void aggr_printout(struct perf_stat_config *config,
++static void aggr_printout(struct perf_stat_config *config, struct outstate *os,
+ 			  struct evsel *evsel, struct aggr_cpu_id id, int aggr_nr)
+ {
+ 	if (config->json_output)
+-		print_aggr_id_json(config, evsel, id, aggr_nr);
++		print_aggr_id_json(config, os, evsel, id, aggr_nr);
+ 	else if (config->csv_output)
+ 		print_aggr_id_csv(config, evsel, id, aggr_nr);
+ 	else
+ 		print_aggr_id_std(config, evsel, id, aggr_nr);
+ }
+ 
+-struct outstate {
+-	FILE *fh;
+-	bool newline;
+-	bool first;
+-	const char *prefix;
+-	int  nfields;
+-	int  aggr_nr;
+-	struct aggr_cpu_id id;
+-	struct evsel *evsel;
+-	struct cgroup *cgrp;
+-};
+-
+ static void new_line_std(struct perf_stat_config *config __maybe_unused,
+ 			 void *ctx)
+ {
+@@ -413,7 +421,7 @@ static inline void __new_line_std_csv(struct perf_stat_config *config,
+ 	fputc('\n', os->fh);
+ 	if (os->prefix)
+ 		fputs(os->prefix, os->fh);
+-	aggr_printout(config, os->evsel, os->id, os->aggr_nr);
++	aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
+ }
+ 
+ static inline void __new_line_std(struct outstate *os)
+@@ -499,9 +507,9 @@ static void print_metric_json(struct perf_stat_config *config __maybe_unused,
+ 	FILE *out = os->fh;
+ 
+ 	if (unit) {
+-		fprintf(out, "\"metric-value\" : \"%f\", \"metric-unit\" : \"%s\"", val, unit);
++		json_out(os, "\"metric-value\" : \"%f\", \"metric-unit\" : \"%s\"", val, unit);
+ 		if (thresh != METRIC_THRESHOLD_UNKNOWN) {
+-			fprintf(out, ", \"metric-threshold\" : \"%s\"",
++			json_out(os, "\"metric-threshold\" : \"%s\"",
+ 				metric_threshold_classify__str(thresh));
+ 		}
+ 	}
+@@ -514,9 +522,11 @@ static void new_line_json(struct perf_stat_config *config, void *ctx)
+ 	struct outstate *os = ctx;
+ 
+ 	fputs("\n{", os->fh);
++	os->first = true;
+ 	if (os->prefix)
+-		fprintf(os->fh, "%s", os->prefix);
+-	aggr_printout(config, os->evsel, os->id, os->aggr_nr);
++		json_out(os, "%s", os->prefix);
++
++	aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
+ }
+ 
+ static void print_metricgroup_header_json(struct perf_stat_config *config,
+@@ -526,7 +536,7 @@ static void print_metricgroup_header_json(struct perf_stat_config *config,
+ 	if (!metricgroup_name)
+ 		return;
+ 
+-	fprintf(config->output, "\"metricgroup\" : \"%s\"}", metricgroup_name);
++	json_out((struct outstate *) ctx, "\"metricgroup\" : \"%s\"}", metricgroup_name);
+ 	new_line_json(config, ctx);
+ }
+ 
+@@ -644,7 +654,6 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unuse
+ 				  const char *unit, double val)
+ {
+ 	struct outstate *os = ctx;
+-	FILE *out = os->fh;
+ 	char buf[64], *ends;
+ 	char tbuf[1024];
+ 	const char *vals;
+@@ -661,8 +670,7 @@ static void print_metric_only_json(struct perf_stat_config *config __maybe_unuse
+ 	*ends = 0;
+ 	if (!vals[0])
+ 		vals = "none";
+-	fprintf(out, "%s\"%s\" : \"%s\"", os->first ? "" : ", ", unit, vals);
+-	os->first = false;
++	json_out(os, "\"%s\" : \"%s\"", unit, vals);
+ }
+ 
+ static void new_line_metric(struct perf_stat_config *config __maybe_unused,
+@@ -743,28 +751,27 @@ static void print_counter_value_csv(struct perf_stat_config *config,
+ 	fprintf(output, "%s", evsel__name(evsel));
+ }
+ 
+-static void print_counter_value_json(struct perf_stat_config *config,
++static void print_counter_value_json(struct outstate *os,
+ 				     struct evsel *evsel, double avg, bool ok)
+ {
+-	FILE *output = config->output;
+ 	const char *bad_count = evsel->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED;
+ 
+ 	if (ok)
+-		fprintf(output, "\"counter-value\" : \"%f\", ", avg);
++		json_out(os, "\"counter-value\" : \"%f\"", avg);
+ 	else
+-		fprintf(output, "\"counter-value\" : \"%s\", ", bad_count);
++		json_out(os, "\"counter-value\" : \"%s\"", bad_count);
+ 
+ 	if (evsel->unit)
+-		fprintf(output, "\"unit\" : \"%s\", ", evsel->unit);
++		json_out(os, "\"unit\" : \"%s\"", evsel->unit);
+ 
+-	fprintf(output, "\"event\" : \"%s\", ", evsel__name(evsel));
++	json_out(os, "\"event\" : \"%s\"", evsel__name(evsel));
+ }
+ 
+-static void print_counter_value(struct perf_stat_config *config,
++static void print_counter_value(struct perf_stat_config *config, struct outstate *os,
+ 				struct evsel *evsel, double avg, bool ok)
+ {
+ 	if (config->json_output)
+-		print_counter_value_json(config, evsel, avg, ok);
++		print_counter_value_json(os, evsel, avg, ok);
+ 	else if (config->csv_output)
+ 		print_counter_value_csv(config, evsel, avg, ok);
+ 	else
+@@ -772,12 +779,13 @@ static void print_counter_value(struct perf_stat_config *config,
+ }
+ 
+ static void abs_printout(struct perf_stat_config *config,
++			 struct outstate *os,
+ 			 struct aggr_cpu_id id, int aggr_nr,
+ 			 struct evsel *evsel, double avg, bool ok)
+ {
+-	aggr_printout(config, evsel, id, aggr_nr);
+-	print_counter_value(config, evsel, avg, ok);
+-	print_cgroup(config, evsel->cgrp);
++	aggr_printout(config, os, evsel, id, aggr_nr);
++	print_counter_value(config, os, evsel, avg, ok);
++	print_cgroup(config, os, evsel->cgrp);
+ }
+ 
+ static bool is_mixed_hw_group(struct evsel *counter)
+@@ -868,17 +876,17 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
+ 	out.force_header = false;
+ 
+ 	if (!config->metric_only && !counter->default_metricgroup) {
+-		abs_printout(config, os->id, os->aggr_nr, counter, uval, ok);
++		abs_printout(config, os, os->id, os->aggr_nr, counter, uval, ok);
+ 
+-		print_noise(config, counter, noise, /*before_metric=*/true);
+-		print_running(config, run, ena, /*before_metric=*/true);
++		print_noise(config, os, counter, noise, /*before_metric=*/true);
++		print_running(config, os, run, ena, /*before_metric=*/true);
+ 	}
+ 
+ 	if (ok) {
+ 		if (!config->metric_only && counter->default_metricgroup) {
+ 			void *from = NULL;
+ 
+-			aggr_printout(config, os->evsel, os->id, os->aggr_nr);
++			aggr_printout(config, os, os->evsel, os->id, os->aggr_nr);
+ 			/* Print out all the metricgroup with the same metric event. */
+ 			do {
+ 				int num = 0;
+@@ -891,8 +899,8 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
+ 						__new_line_std_csv(config, os);
+ 				}
+ 
+-				print_noise(config, counter, noise, /*before_metric=*/true);
+-				print_running(config, run, ena, /*before_metric=*/true);
++				print_noise(config, os, counter, noise, /*before_metric=*/true);
++				print_running(config, os, run, ena, /*before_metric=*/true);
+ 				from = perf_stat__print_shadow_stats_metricgroup(config, counter, aggr_idx,
+ 										 &num, from, &out,
+ 										 &config->metric_events);
+@@ -905,8 +913,8 @@ static void printout(struct perf_stat_config *config, struct outstate *os,
+ 	}
+ 
+ 	if (!config->metric_only) {
+-		print_noise(config, counter, noise, /*before_metric=*/false);
+-		print_running(config, run, ena, /*before_metric=*/false);
++		print_noise(config, os, counter, noise, /*before_metric=*/false);
++		print_running(config, os, run, ena, /*before_metric=*/false);
+ 	}
+ }
+ 
+@@ -1083,12 +1091,17 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
+ 		return;
+ 
+ 	if (!metric_only) {
+-		if (config->json_output)
++		if (config->json_output) {
++			os->first = true;
+ 			fputc('{', output);
+-		if (os->prefix)
+-			fprintf(output, "%s", os->prefix);
+-		else if (config->summary && config->csv_output &&
+-			 !config->no_csv_summary && !config->interval)
++		}
++		if (os->prefix) {
++			if (config->json_output)
++				json_out(os, "%s", os->prefix);
++			else
++				fprintf(output, "%s", os->prefix);
++		} else if (config->summary && config->csv_output &&
++			   !config->no_csv_summary && !config->interval)
+ 			fprintf(output, "%s%s", "summary", config->csv_sep);
+ 	}
+ 
+@@ -1114,15 +1127,19 @@ static void print_metric_begin(struct perf_stat_config *config,
+ 
+ 	if (config->json_output)
+ 		fputc('{', config->output);
+-	if (os->prefix)
+-		fprintf(config->output, "%s", os->prefix);
+ 
++	if (os->prefix) {
++		if (config->json_output)
++			json_out(os, "%s", os->prefix);
++		else
++			fprintf(config->output, "%s", os->prefix);
++	}
+ 	evsel = evlist__first(evlist);
+ 	id = config->aggr_map->map[aggr_idx];
+ 	aggr = &evsel->stats->aggr[aggr_idx];
+-	aggr_printout(config, evsel, id, aggr->nr);
++	aggr_printout(config, os, evsel, id, aggr->nr);
+ 
+-	print_cgroup(config, os->cgrp ? : evsel->cgrp);
++	print_cgroup(config, os, os->cgrp ? : evsel->cgrp);
+ }
+ 
+ static void print_metric_end(struct perf_stat_config *config, struct outstate *os)
+@@ -1343,7 +1360,7 @@ static void prepare_interval(struct perf_stat_config *config,
+ 		return;
+ 
+ 	if (config->json_output)
+-		scnprintf(prefix, len, "\"interval\" : %lu.%09lu, ",
++		scnprintf(prefix, len, "\"interval\" : %lu.%09lu",
+ 			  (unsigned long) ts->tv_sec, ts->tv_nsec);
+ 	else if (config->csv_output)
+ 		scnprintf(prefix, len, "%lu.%09lu%s",
+@@ -1557,7 +1574,7 @@ static void print_footer(struct perf_stat_config *config)
+ 		fprintf(output, " %17.*f +- %.*f seconds time elapsed",
+ 			precision, avg, precision, sd);
+ 
+-		print_noise_pct(config, sd, avg, /*before_metric=*/false);
++		print_noise_pct(config, NULL, sd, avg, /*before_metric=*/false);
+ 	}
+ 	fprintf(output, "\n\n");
+ 
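The json_sep()/json_out() refactor above replaces hard-coded trailing ", " strings with per-object separator state: the first field printed after os->first is reset emits no separator, and every later field emits ", " before itself, so a trailing comma can never be produced no matter which optional fields get skipped. A minimal standalone sketch of the same idiom (the names below are illustrative, not perf's):

    #include <stdio.h>
    #include <stdbool.h>

    struct out { FILE *fh; bool first; };

    static const char *sep(struct out *o)
    {
        const char *s = o->first ? "" : ", ";

        o->first = false;
        return s;
    }

    #define jout(o, fmt, ...) fprintf((o)->fh, "%s" fmt, sep(o), ##__VA_ARGS__)

    int main(void)
    {
        struct out o = { .fh = stdout, .first = true };

        fputc('{', o.fh);
        jout(&o, "\"event\" : \"cycles\"");
        jout(&o, "\"counter-value\" : \"%f\"", 123456.0);
        /* prints: {"event" : "cycles", "counter-value" : "123456.000000"} */
        fputs("}\n", o.fh);
        return 0;
    }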
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 0037f11639195d..49b08adc6ee343 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -154,6 +154,13 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
+ 	else if ((a == 0) && (b > 0))
+ 		return SYMBOL_B;
+ 
++	if (syma->type != symb->type) {
++		if (syma->type == STT_NOTYPE)
++			return SYMBOL_B;
++		if (symb->type == STT_NOTYPE)
++			return SYMBOL_A;
++	}
++
+ 	/* Prefer a non weak symbol over a weak one */
+ 	a = syma->binding == STB_WEAK;
+ 	b = symb->binding == STB_WEAK;
+@@ -257,7 +264,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
+ 		 * like in:
+ 		 *   ffffffffc1937000 T hdmi_driver_init  [snd_hda_codec_hdmi]
+ 		 */
+-		if (prev->end == prev->start && prev->type != STT_NOTYPE) {
++		if (prev->end == prev->start) {
+ 			const char *prev_mod;
+ 			const char *curr_mod;
+ 
+diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+index ae6af354a81db5..08a399b0be286c 100644
+--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
++++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+@@ -33,7 +33,7 @@ static int mperf_get_count_percent(unsigned int self_id, double *percent,
+ 				   unsigned int cpu);
+ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ 				unsigned int cpu);
+-static struct timespec time_start, time_end;
++static struct timespec *time_start, *time_end;
+ 
+ static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
+ 	{
+@@ -174,7 +174,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
+ 		dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
+ 		       mperf_cstates[id].name, mperf_diff, tsc_diff);
+ 	} else if (max_freq_mode == MAX_FREQ_SYSFS) {
+-		timediff = max_frequency * timespec_diff_us(time_start, time_end);
++		timediff = max_frequency * timespec_diff_us(time_start[cpu], time_end[cpu]);
+ 		*percent = 100.0 * mperf_diff / timediff;
+ 		dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n",
+ 		       mperf_cstates[id].name, mperf_diff, timediff);
+@@ -207,7 +207,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ 	if (max_freq_mode == MAX_FREQ_TSC_REF) {
+ 		/* Calculate max_freq from TSC count */
+ 		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
+-		time_diff = timespec_diff_us(time_start, time_end);
++		time_diff = timespec_diff_us(time_start[cpu], time_end[cpu]);
+ 		max_frequency = tsc_diff / time_diff;
+ 	}
+ 
+@@ -226,9 +226,8 @@ static int mperf_start(void)
+ {
+ 	int cpu;
+ 
+-	clock_gettime(CLOCK_REALTIME, &time_start);
+-
+ 	for (cpu = 0; cpu < cpu_count; cpu++) {
++		clock_gettime(CLOCK_REALTIME, &time_start[cpu]);
+ 		mperf_get_tsc(&tsc_at_measure_start[cpu]);
+ 		mperf_init_stats(cpu);
+ 	}
+@@ -243,9 +242,9 @@ static int mperf_stop(void)
+ 	for (cpu = 0; cpu < cpu_count; cpu++) {
+ 		mperf_measure_stats(cpu);
+ 		mperf_get_tsc(&tsc_at_measure_end[cpu]);
++		clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
+ 	}
+ 
+-	clock_gettime(CLOCK_REALTIME, &time_end);
+ 	return 0;
+ }
+ 
+@@ -349,6 +348,8 @@ struct cpuidle_monitor *mperf_register(void)
+ 	aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+ 	tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+ 	tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
++	time_start = calloc(cpu_count, sizeof(struct timespec));
++	time_end = calloc(cpu_count, sizeof(struct timespec));
+ 	mperf_monitor.name_len = strlen(mperf_monitor.name);
+ 	return &mperf_monitor;
+ }
+@@ -361,6 +362,8 @@ void mperf_unregister(void)
+ 	free(aperf_current_count);
+ 	free(tsc_at_measure_start);
+ 	free(tsc_at_measure_end);
++	free(time_start);
++	free(time_end);
+ 	free(is_valid);
+ }
+ 
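The cpupower change above replaces the single global time_start/time_end pair with per-CPU arrays, so each CPU's MPERF delta is divided by that CPU's own wall-clock interval instead of one shared window. A hedged sketch of the same bookkeeping; diff_us() below is a stand-in for cpupower's timespec_diff_us() helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static long long diff_us(struct timespec a, struct timespec b)
    {
        return (b.tv_sec - a.tv_sec) * 1000000LL +
               (b.tv_nsec - a.tv_nsec) / 1000;
    }

    int main(void)
    {
        int cpu, cpu_count = 4;    /* illustrative CPU count */
        struct timespec *start = calloc(cpu_count, sizeof(*start));
        struct timespec *end = calloc(cpu_count, sizeof(*end));

        if (!start || !end)
            return 1;
        for (cpu = 0; cpu < cpu_count; cpu++)
            clock_gettime(CLOCK_REALTIME, &start[cpu]);
        /* ... per-CPU measurement work happens here ... */
        for (cpu = 0; cpu < cpu_count; cpu++)
            clock_gettime(CLOCK_REALTIME, &end[cpu]);
        for (cpu = 0; cpu < cpu_count; cpu++)
            printf("cpu%d: %lld us\n", cpu, diff_us(start[cpu], end[cpu]));
        free(start);
        free(end);
        return 0;
    }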
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 58a487c225a73a..8ec677c639ecea 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -95,6 +95,8 @@
+ #define INTEL_ECORE_TYPE	0x20
+ #define INTEL_PCORE_TYPE	0x40
+ 
++#define ROUND_UP_TO_PAGE_SIZE(n) (((n) + 0x1000UL-1UL) & ~(0x1000UL-1UL))
++
+ enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
+ enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC, COUNTER_K2M };
+ enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT, FORMAT_AVERAGE };
+@@ -1094,8 +1096,8 @@ int backwards_count;
+ char *progname;
+ 
+ #define CPU_SUBSET_MAXCPUS	1024	/* need to use before probe... */
+-cpu_set_t *cpu_present_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
+-size_t cpu_present_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
++cpu_set_t *cpu_present_set, *cpu_possible_set, *cpu_effective_set, *cpu_allowed_set, *cpu_affinity_set, *cpu_subset;
++size_t cpu_present_setsize, cpu_possible_setsize, cpu_effective_setsize, cpu_allowed_setsize, cpu_affinity_setsize, cpu_subset_size;
+ #define MAX_ADDED_THREAD_COUNTERS 24
+ #define MAX_ADDED_CORE_COUNTERS 8
+ #define MAX_ADDED_PACKAGE_COUNTERS 16
+@@ -8292,6 +8294,33 @@ int dir_filter(const struct dirent *dirp)
+ 		return 0;
+ }
+ 
++char *possible_file = "/sys/devices/system/cpu/possible";
++char possible_buf[1024];
++
++int initialize_cpu_possible_set(void)
++{
++	FILE *fp;
++
++	fp = fopen(possible_file, "r");
++	if (!fp) {
++		warn("open %s", possible_file);
++		return -1;
++	}
++	if (fread(possible_buf, sizeof(char), 1024, fp) == 0) {
++		warn("read %s", possible_file);
++		goto err;
++	}
++	if (parse_cpu_str(possible_buf, cpu_possible_set, cpu_possible_setsize)) {
++		warnx("%s: cpu str malformed %s\n", possible_file, possible_buf);
++		goto err;
++	}
++	return 0;
++
++err:
++	fclose(fp);
++	return -1;
++}
++
+ void topology_probe(bool startup)
+ {
+ 	int i;
+@@ -8323,6 +8352,16 @@ void topology_probe(bool startup)
+ 	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+ 	for_all_proc_cpus(mark_cpu_present);
+ 
++	/*
++	 * Allocate and initialize cpu_possible_set
++	 */
++	cpu_possible_set = CPU_ALLOC((topo.max_cpu_num + 1));
++	if (cpu_possible_set == NULL)
++		err(3, "CPU_ALLOC");
++	cpu_possible_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
++	CPU_ZERO_S(cpu_possible_setsize, cpu_possible_set);
++	initialize_cpu_possible_set();
++
+ 	/*
+ 	 * Allocate and initialize cpu_effective_set
+ 	 */
+@@ -8924,7 +8963,7 @@ struct pmt_mmio *pmt_mmio_open(unsigned int target_guid)
+ 		if (fd_pmt == -1)
+ 			goto loop_cleanup_and_break;
+ 
+-		mmap_size = (size + 0x1000UL) & (~0x1000UL);
++		mmap_size = ROUND_UP_TO_PAGE_SIZE(size);
+ 		mmio = mmap(0, mmap_size, PROT_READ, MAP_SHARED, fd_pmt, 0);
+ 		if (mmio != MAP_FAILED) {
+ 
+@@ -9163,6 +9202,18 @@ void turbostat_init()
+ 	}
+ }
+ 
++void affinitize_child(void)
++{
++	/* Prefer cpu_possible_set, if available */
++	if (sched_setaffinity(0, cpu_possible_setsize, cpu_possible_set)) {
++		warn("sched_setaffinity cpu_possible_set");
++
++		/* Otherwise, allow child to run on same cpu set as turbostat */
++		if (sched_setaffinity(0, cpu_allowed_setsize, cpu_allowed_set))
++			warn("sched_setaffinity cpu_allowed_set");
++	}
++}
++
+ int fork_it(char **argv)
+ {
+ 	pid_t child_pid;
+@@ -9178,6 +9229,7 @@ int fork_it(char **argv)
+ 	child_pid = fork();
+ 	if (!child_pid) {
+ 		/* child */
++		affinitize_child();
+ 		execvp(argv[0], argv);
+ 		err(errno, "exec %s", argv[0]);
+ 	} else {
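The ROUND_UP_TO_PAGE_SIZE() fix above is worth spelling out: the old expression (size + 0x1000UL) & (~0x1000UL) only cleared bit 12, so it neither reliably aligned unaligned sizes (a 1-byte size "rounded" to 1) nor left aligned sizes alone (0x1000 grew to 0x2000). The macro uses the standard round-up-to-power-of-two idiom. A small self-check, assuming a 4 KiB page:

    #include <assert.h>

    #define ROUND_UP_TO_PAGE_SIZE(n) (((n) + 0x1000UL - 1UL) & ~(0x1000UL - 1UL))

    int main(void)
    {
        assert(ROUND_UP_TO_PAGE_SIZE(0x0001UL) == 0x1000UL);
        assert(ROUND_UP_TO_PAGE_SIZE(0x1000UL) == 0x1000UL);  /* aligned: unchanged */
        assert(ROUND_UP_TO_PAGE_SIZE(0x1001UL) == 0x2000UL);
        /* Old form: (0x0001UL + 0x1000UL) & ~0x1000UL == 0x0001UL -- no rounding. */
        return 0;
    }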
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index dacad94e2be42a..c76ad0be54e2ed 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2419,6 +2419,11 @@ sub get_version {
+     return if ($have_version);
+     doprint "$make kernelrelease ... ";
+     $version = `$make -s kernelrelease | tail -1`;
++    if (!length($version)) {
++	run_command "$make allnoconfig" or return 0;
++	doprint "$make kernelrelease ... ";
++	$version = `$make -s kernelrelease | tail -1`;
++    }
+     chomp($version);
+     doprint "$version\n";
+     $have_version = 1;
+@@ -2960,8 +2965,6 @@ sub run_bisect_test {
+ 
+     my $failed = 0;
+     my $result;
+-    my $output;
+-    my $ret;
+ 
+     $in_bisect = 1;
+ 
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 7eeb3cbe18c707..8d206266d98c8f 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -203,9 +203,9 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
+ $(let OUTPUT,$(OUTPUT)/,\
+ 	$(eval include ../../../build/Makefile.feature))
+ else
+-OUTPUT := $(OUTPUT)/
++override OUTPUT := $(OUTPUT)/
+ $(eval include ../../../build/Makefile.feature)
+-OUTPUT := $(patsubst %/,%,$(OUTPUT))
++override OUTPUT := $(patsubst %/,%,$(OUTPUT))
+ endif
+ endif
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+index ca84726d5ac1b9..b72b966df77b90 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c
+@@ -385,7 +385,7 @@ static void test_distilled_base_missing_err(void)
+ 		"[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED");
+ 	btf5 = btf__new_empty();
+ 	if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+-		return;
++		goto cleanup;
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [1] int */
+ 	VALIDATE_RAW_BTF(
+ 		btf5,
+@@ -478,7 +478,7 @@ static void test_distilled_base_multi_err2(void)
+ 		"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
+ 	btf5 = btf__new_empty();
+ 	if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf"))
+-		return;
++		goto cleanup;
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [1] int */
+ 	btf__add_int(btf5, "int", 4, BTF_INT_SIGNED);   /* [2] int */
+ 	VALIDATE_RAW_BTF(
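The btf_distill fix above converts early returns into goto cleanup jumps so that BTF objects created earlier in the test are still freed on the error path. A generic sketch of that cleanup pattern (demo() and its allocations are illustrative):

    #include <stdlib.h>

    static int demo(void)
    {
        char *a = NULL, *b = NULL;
        int err = -1;

        a = malloc(16);
        if (!a)
            goto cleanup;
        b = malloc(16);
        if (!b)
            goto cleanup;    /* not "return -1": a must still be freed */

        /* ... use a and b ... */
        err = 0;
    cleanup:
        free(b);    /* free(NULL) is a no-op, so partial setup is fine */
        free(a);
        return err;
    }

    int main(void)
    {
        return demo();
    }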
+diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+index d50cbd8040d45f..e59af2aa660166 100644
+--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
++++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+@@ -171,6 +171,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
+ 		/* See also arch_adjust_kprobe_addr(). */
+ 		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
+ 			entry_offset = 4;
++		if (skel->kconfig->CONFIG_PPC64 &&
++		    skel->kconfig->CONFIG_KPROBES_ON_FTRACE &&
++		    !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE)
++			entry_offset = 4;
+ 		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
+ 		ASSERT_OK(err, "verify_perf_link_info");
+ 	} else {
+diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+index 6afa834756e9fd..fac33a14f2009c 100644
+--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
++++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+@@ -6,13 +6,20 @@
+ #include <stdbool.h>
+ 
+ extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
++extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak;
++extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak;
++extern bool CONFIG_PPC64 __kconfig __weak;
+ 
+-/* This function is here to have CONFIG_X86_KERNEL_IBT
+- * used and added to object BTF.
++/* This function is here to have CONFIG_X86_KERNEL_IBT,
++ * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE,
++ * CONFIG_PPC64 used and added to object BTF.
+  */
+ int unused(void)
+ {
+-	return CONFIG_X86_KERNEL_IBT ? 0 : 1;
++	return CONFIG_X86_KERNEL_IBT ||
++			CONFIG_PPC_FTRACE_OUT_OF_LINE ||
++			CONFIG_KPROBES_ON_FTRACE ||
++			CONFIG_PPC64 ? 0 : 1;
+ }
+ 
+ SEC("kprobe")
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index 7989ec60845455..cb55a908bb0d70 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -305,6 +305,7 @@ else
+ 	client_connect
+ 	verify_data
+ 	server_listen
++	wait_for_port ${port} ${netcat_opt}
+ fi
+ 
+ # serverside, use BPF for decap
+diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
+index e12ef953fba855..b174c013485cd6 100644
+--- a/tools/testing/selftests/bpf/veristat.c
++++ b/tools/testing/selftests/bpf/veristat.c
+@@ -21,6 +21,7 @@
+ #include <gelf.h>
+ #include <float.h>
+ #include <math.h>
++#include <limits.h>
+ 
+ #ifndef ARRAY_SIZE
+ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+index 6f9956eed797f3..ad6c08dfd6c8cc 100644
+--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
++++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
+@@ -79,7 +79,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id)
+ 		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ 		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ 		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+-		.flags = XSK_UMEM__DEFAULT_FLAGS,
++		.flags = XDP_UMEM_TX_METADATA_LEN,
+ 		.tx_metadata_len = sizeof(struct xsk_tx_metadata),
+ 	};
+ 	__u32 idx = 0;
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+index 384cfa3d38a6cd..92c2f0376c081d 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+@@ -142,7 +142,7 @@ function pre_ethtool {
+ }
+ 
+ function check_table {
+-    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
++    local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
+     local -n expected=$2
+     local last=$3
+ 
+@@ -212,7 +212,7 @@ function check_tables {
+ }
+ 
+ function print_table {
+-    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
++    local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1
+     read -a have < $path
+ 
+     tree $NSIM_DEV_DFS/
+@@ -641,7 +641,7 @@ for port in 0 1; do
+     NSIM_NETDEV=`get_netdev_name old_netdevs`
+     ip link set dev $NSIM_NETDEV up
+ 
+-    echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
++    echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
+ 
+     msg="1 - create VxLANs v6"
+     exp0=( 0 0 0 0 )
+@@ -663,7 +663,7 @@ for port in 0 1; do
+     new_geneve gnv0 20000
+ 
+     msg="2 - destroy GENEVE"
+-    echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
++    echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error
+     exp1=( `mke 20000 2` 0 0 0 )
+     del_dev gnv0
+ 
+@@ -764,7 +764,7 @@ for port in 0 1; do
+     msg="create VxLANs v4"
+     new_vxlan vxlan0 10000 $NSIM_NETDEV
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="NIC device goes down"
+@@ -775,7 +775,7 @@ for port in 0 1; do
+     fi
+     check_tables
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="NIC device goes up again"
+@@ -789,7 +789,7 @@ for port in 0 1; do
+     del_dev vxlan0
+     check_tables
+ 
+-    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+     check_tables
+ 
+     msg="destroy NIC"
+@@ -896,7 +896,7 @@ msg="vacate VxLAN in overflow table"
+ exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
+ del_dev vxlan2
+ 
+-echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
++echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset
+ check_tables
+ 
+ msg="tunnels destroyed 2"
+diff --git a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+index 35e8d47d607259..8a7ce647a60d1c 100644
+--- a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
++++ b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+@@ -15,11 +15,11 @@ find_alternate_gid() {
+ 	tac /etc/group | grep -v ":$original_gid:" | head -1 | cut -d: -f3
+ }
+ 
+-mount_tracefs_with_options() {
++remount_tracefs_with_options() {
+ 	local mount_point="$1"
+ 	local options="$2"
+ 
+-	mount -t tracefs -o "$options" nodev "$mount_point"
++	mount -t tracefs -o "remount,$options" nodev "$mount_point"
+ 
+ 	setup
+ }
+@@ -81,7 +81,7 @@ test_gid_mount_option() {
+ 
+ 	# Unmount existing tracefs instance and mount with new GID
+ 	unmount_tracefs "$mount_point"
+-	mount_tracefs_with_options "$mount_point" "$new_options"
++	remount_tracefs_with_options "$mount_point" "$new_options"
+ 
+ 	check_gid "$mount_point" "$other_group"
+ 
+@@ -92,7 +92,7 @@ test_gid_mount_option() {
+ 
+ 	# Unmount and remount with the original GID
+ 	unmount_tracefs "$mount_point"
+-	mount_tracefs_with_options "$mount_point" "$mount_options"
++	remount_tracefs_with_options "$mount_point" "$mount_options"
+ 	check_gid "$mount_point" "$original_group"
+ }
+ 
+diff --git a/tools/testing/selftests/kselftest/ktap_helpers.sh b/tools/testing/selftests/kselftest/ktap_helpers.sh
+index 79a125eb24c2e8..14e7f3ec3f84c3 100644
+--- a/tools/testing/selftests/kselftest/ktap_helpers.sh
++++ b/tools/testing/selftests/kselftest/ktap_helpers.sh
+@@ -40,7 +40,7 @@ ktap_skip_all() {
+ __ktap_test() {
+ 	result="$1"
+ 	description="$2"
+-	directive="$3" # optional
++	directive="${3:-}" # optional
+ 
+ 	local directive_str=
+ 	[ ! -z "$directive" ] && directive_str="# $directive"
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index a5a72415e37b06..666c9fde76da9d 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -760,33 +760,33 @@
+ 		/* Report with actual signedness to avoid weird output. */ \
+ 		switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
+ 		case 0: { \
+-			unsigned long long __exp_print = (uintptr_t)__exp; \
+-			unsigned long long __seen_print = (uintptr_t)__seen; \
+-			__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
++			uintmax_t __exp_print = (uintmax_t)__exp; \
++			uintmax_t __seen_print = (uintmax_t)__seen; \
++			__TH_LOG("Expected %s (%ju) %s %s (%ju)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 1: { \
+-			unsigned long long __exp_print = (uintptr_t)__exp; \
+-			long long __seen_print = (intptr_t)__seen; \
+-			__TH_LOG("Expected %s (%llu) %s %s (%lld)", \
++			uintmax_t __exp_print = (uintmax_t)__exp; \
++			intmax_t  __seen_print = (intmax_t)__seen; \
++			__TH_LOG("Expected %s (%ju) %s %s (%jd)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 2: { \
+-			long long __exp_print = (intptr_t)__exp; \
+-			unsigned long long __seen_print = (uintptr_t)__seen; \
+-			__TH_LOG("Expected %s (%lld) %s %s (%llu)", \
++			intmax_t  __exp_print = (intmax_t)__exp; \
++			uintmax_t __seen_print = (uintmax_t)__seen; \
++			__TH_LOG("Expected %s (%jd) %s %s (%ju)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
+ 			} \
+ 		case 3: { \
+-			long long __exp_print = (intptr_t)__exp; \
+-			long long __seen_print = (intptr_t)__seen; \
+-			__TH_LOG("Expected %s (%lld) %s %s (%lld)", \
++			intmax_t  __exp_print = (intmax_t)__exp; \
++			intmax_t  __seen_print = (intmax_t)__seen; \
++			__TH_LOG("Expected %s (%jd) %s %s (%jd)", \
+ 				 _expected_str, __exp_print, #_t, \
+ 				 _seen_str, __seen_print); \
+ 			break; \
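The harness change above switches the diagnostic casts from (u)intptr_t with %llu/%lld to (u)intmax_t with %ju/%jd; on ILP32 targets uintptr_t is only 32 bits wide, so the old casts could truncate 64-bit expected values before printing them. A short illustration of the portable form:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        intmax_t  s = -42;
        uintmax_t u = UINT64_C(0xffffffffffffffff);

        /* %jd/%ju are defined for intmax_t/uintmax_t on every libc,
         * whereas routing a 64-bit value through (u)intptr_t loses the
         * upper half when pointers are 32 bits wide. */
        printf("%jd %ju\n", s, u);
        return 0;
    }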
+diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
+index 348e2dbdb4e0b9..480f13e77fcc4b 100644
+--- a/tools/testing/selftests/landlock/Makefile
++++ b/tools/testing/selftests/landlock/Makefile
+@@ -13,11 +13,11 @@ TEST_GEN_PROGS := $(src_test:.c=)
+ TEST_GEN_PROGS_EXTENDED := true
+ 
+ # Short targets:
+-$(TEST_GEN_PROGS): LDLIBS += -lcap
++$(TEST_GEN_PROGS): LDLIBS += -lcap -lpthread
+ $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
+ 
+ include ../lib.mk
+ 
+ # Targets with $(OUTPUT)/ prefix:
+-$(TEST_GEN_PROGS): LDLIBS += -lcap
++$(TEST_GEN_PROGS): LDLIBS += -lcap -lpthread
+ $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index 6788762188feac..97d360eae4f69e 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -2003,8 +2003,7 @@ static void test_execute(struct __test_metadata *const _metadata, const int err,
+ 	ASSERT_EQ(1, WIFEXITED(status));
+ 	ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status))
+ 	{
+-		TH_LOG("Unexpected return code for \"%s\": %s", path,
+-		       strerror(errno));
++		TH_LOG("Unexpected return code for \"%s\"", path);
+ 	};
+ }
+ 
+diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
+index 3de23ea4663f79..6fad5007dea175 100644
+--- a/tools/testing/selftests/mm/Makefile
++++ b/tools/testing/selftests/mm/Makefile
+@@ -33,9 +33,16 @@ endif
+ # LDLIBS.
+ MAKEFLAGS += --no-builtin-rules
+ 
+-CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
++CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES)
+ LDLIBS = -lrt -lpthread -lm
+ 
++# Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is
++# automatically enabled at -O1 or above. This triggers various unused-result
++# warnings where functions such as read() or write() are called and their
++# return value is not checked. Disable _FORTIFY_SOURCE to silence those
++# warnings.
++CFLAGS += -U_FORTIFY_SOURCE
++
+ KDIR ?= /lib/modules/$(shell uname -r)/build
+ ifneq (,$(wildcard $(KDIR)/Module.symvers))
+ ifneq (,$(wildcard $(KDIR)/include/linux/page_frag_cache.h))
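As the new Makefile comment explains, glibc only marks calls such as read() with warn_unused_result when fortify is active, which is why these warnings show up on some distributions and not others. A minimal reproducer, assuming gcc with -Wall and a fortify-enabled toolchain (or an explicit -D_FORTIFY_SOURCE=2 -O2):

    #include <unistd.h>

    int main(void)
    {
        char buf[8];

        /* glibc declares read() with __attribute__((warn_unused_result))
         * only when __USE_FORTIFY_LEVEL > 0, so this line warns exactly
         * on the configurations described in the Makefile comment. */
        read(0, buf, sizeof(buf));
        return 0;
    }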
+diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
+index 18b9443454a9e8..bc6b6762baf3ef 100644
+--- a/tools/testing/selftests/net/lib/Makefile
++++ b/tools/testing/selftests/net/lib/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g
+ CFLAGS += -I../../../../../usr/include/ $(KHDR_INCLUDES)
+ # Additional include paths needed by kselftest.h
+ CFLAGS += -I../../
+diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
+index 8e3fc05a539797..c76525fe2b84d5 100644
+--- a/tools/testing/selftests/net/mptcp/Makefile
++++ b/tools/testing/selftests/net/mptcp/Makefile
+@@ -2,7 +2,7 @@
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+ 
+ TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ 	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
+diff --git a/tools/testing/selftests/net/openvswitch/Makefile b/tools/testing/selftests/net/openvswitch/Makefile
+index 2f1508abc826b7..3fd1da2ec07d54 100644
+--- a/tools/testing/selftests/net/openvswitch/Makefile
++++ b/tools/testing/selftests/net/openvswitch/Makefile
+@@ -2,7 +2,7 @@
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
++CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
+ 
+ TEST_PROGS := openvswitch.sh
+ 
+diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+index 580fcac0a09f31..b71ef8a493ed1a 100644
+--- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
++++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+@@ -20,7 +20,7 @@ static int test_gettimeofday(void)
+ 		gettimeofday(&tv_end, NULL);
+ 	}
+ 
+-	timersub(&tv_start, &tv_end, &tv_diff);
++	timersub(&tv_end, &tv_start, &tv_diff);
+ 
+ 	printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6);
+ 
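The gettimeofday benchmark fix above corrects a swapped-argument bug: timersub(a, b, res) computes res = a - b, so the later timestamp must be passed first or the reported elapsed time comes out negative. A small usage sketch:

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval start, end, diff;

        gettimeofday(&start, NULL);
        /* ... workload ... */
        gettimeofday(&end, NULL);

        /* res = end - start: the minuend (later timestamp) comes first. */
        timersub(&end, &start, &diff);
        printf("time = %.6f\n", diff.tv_sec + diff.tv_usec * 1e-6);
        return 0;
    }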
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index 5b9772cdf2651b..f6156790c3b4df 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -61,7 +61,6 @@ unsigned int rseq_size = -1U;
+ unsigned int rseq_flags;
+ 
+ static int rseq_ownership;
+-static int rseq_reg_success;	/* At least one rseq registration has succeded. */
+ 
+ /* Allocate a large area for the TLS. */
+ #define RSEQ_THREAD_AREA_ALLOC_SIZE	1024
+@@ -152,14 +151,27 @@ int rseq_register_current_thread(void)
+ 	}
+ 	rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+ 	if (rc) {
+-		if (RSEQ_READ_ONCE(rseq_reg_success)) {
++		/*
++		 * After at least one thread has registered successfully
++		 * (rseq_size > 0), the registration of other threads should
++		 * never fail.
++		 */
++		if (RSEQ_READ_ONCE(rseq_size) > 0) {
+ 			/* Incoherent success/failure within process. */
+ 			abort();
+ 		}
+ 		return -1;
+ 	}
+ 	assert(rseq_current_cpu_raw() >= 0);
+-	RSEQ_WRITE_ONCE(rseq_reg_success, 1);
++
++	/*
++	 * The first thread to register sets the rseq_size to mimic the libc
++	 * behavior.
++	 */
++	if (RSEQ_READ_ONCE(rseq_size) == 0) {
++		RSEQ_WRITE_ONCE(rseq_size, get_rseq_kernel_feature_size());
++	}
++
+ 	return 0;
+ }
+ 
+@@ -235,12 +247,18 @@ void rseq_init(void)
+ 		return;
+ 	}
+ 	rseq_ownership = 1;
+-	if (!rseq_available()) {
+-		rseq_size = 0;
+-		return;
+-	}
++
++	/* Calculate the offset of the rseq area from the thread pointer. */
+ 	rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
++
++	/* rseq flags are deprecated, always set to 0. */
+ 	rseq_flags = 0;
++
++	/*
++	 * Set the size to 0 until at least one thread registers to mimic the
++	 * libc behavior.
++	 */
++	rseq_size = 0;
+ }
+ 
+ static __attribute__((destructor))
+diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
+index 4e217b620e0c7a..062d10925a1011 100644
+--- a/tools/testing/selftests/rseq/rseq.h
++++ b/tools/testing/selftests/rseq/rseq.h
+@@ -60,7 +60,14 @@
+ extern ptrdiff_t rseq_offset;
+ 
+ /*
+- * Size of the registered rseq area. 0 if the registration was
++ * The rseq ABI is composed of extensible feature fields. The extensions
++ * are done by appending additional fields at the end of the structure.
++ * The rseq_size defines the size of the active feature set which can be
++ * used by the application for the current rseq registration. Features
++ * starting at offset >= rseq_size are inactive and should not be used.
++ *
++ * The rseq_size is the intersection between the available allocation
++ * size for the rseq area and the feature size supported by the kernel.
+  * unsuccessful.
+  */
+ extern unsigned int rseq_size;
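The rewritten rseq.h comment describes the contract the rseq.c changes implement: rseq_size stays 0 until the first successful registration, and afterwards it bounds which extension fields a thread may touch. A hedged sketch of gating a field on rseq_size; struct rseq_area below is a simplified stand-in for the UAPI struct rseq, and the sizes in main() are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    struct rseq_area {
        unsigned int cpu_id_start;
        unsigned int cpu_id;
        unsigned long long rseq_cs;
        unsigned int flags;
        unsigned int node_id;    /* extension field */
        unsigned int mm_cid;     /* extension field */
    };

    static unsigned int rseq_size;    /* 0 until a thread registers */

    static int mm_cid_usable(void)
    {
        return rseq_size >= offsetof(struct rseq_area, mm_cid) +
                            sizeof(unsigned int);
    }

    int main(void)
    {
        rseq_size = 20;    /* original feature set: fields up to flags */
        printf("mm_cid usable: %d\n", mm_cid_usable());    /* 0 */
        rseq_size = 28;    /* kernel and allocation cover the extensions */
        printf("mm_cid usable: %d\n", mm_cid_usable());    /* 1 */
        return 0;
    }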
+diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
+index c5264594064c85..83faa4e354e389 100644
+--- a/tools/testing/selftests/timers/clocksource-switch.c
++++ b/tools/testing/selftests/timers/clocksource-switch.c
+@@ -156,8 +156,8 @@ int main(int argc, char **argv)
+ 	/* Check everything is sane before we start switching asynchronously */
+ 	if (do_sanity_check) {
+ 		for (i = 0; i < count; i++) {
+-			printf("Validating clocksource %s\n",
+-				clocksource_list[i]);
++			ksft_print_msg("Validating clocksource %s\n",
++					clocksource_list[i]);
+ 			if (change_clocksource(clocksource_list[i])) {
+ 				status = -1;
+ 				goto out;
+@@ -169,7 +169,7 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	printf("Running Asynchronous Switching Tests...\n");
++	ksft_print_msg("Running Asynchronous Switching Tests...\n");
+ 	pid = fork();
+ 	if (!pid)
+ 		return run_tests(runtime);


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-14 12:09 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-14 12:09 UTC (permalink / raw
  To: gentoo-commits

commit:     4ea745c4b0b681457301cd5b7543775fe4c6beef
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 14 12:09:18 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 14 12:09:18 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4ea745c4

menuconfig: Allow sorting the entries alphabetically

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                          |   4 +
 2901_permit-menuconfig-sorting.patch | 219 +++++++++++++++++++++++++++++++++++
 2 files changed, 223 insertions(+)

diff --git a/0000_README b/0000_README
index a507da31..65a7cda3 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2901_permit-menuconfig-sorting.patch
+From:   https://lore.kernel.org/
+Desc:   menuconfig: Allow sorting the entries alphabetically
+
 Patch:  2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 From:   https://www.spinics.net/lists/stable/msg604665.html
 Desc:   bpf: mark get_entry_ip as __maybe_unused

diff --git a/2901_permit-menuconfig-sorting.patch b/2901_permit-menuconfig-sorting.patch
new file mode 100644
index 00000000..1ceade0c
--- /dev/null
+++ b/2901_permit-menuconfig-sorting.patch
@@ -0,0 +1,219 @@
+From git@z Thu Jan  1 00:00:00 1970
+Subject: [PATCH] menuconfig: Allow sorting the entries alphabetically
+From: Ivan Orlov <ivan.orlov0322@gmail.com>
+Date: Fri, 16 Aug 2024 15:18:31 +0100
+Message-Id: <20240816141831.104085-1-ivan.orlov0322@gmail.com>
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 7bit
+
+Implement the functionality which allows the user to sort the Kconfig
+entries alphabetically. It may help find the desired entry faster, so
+the user will spend less time looking through the list.
+
+The sorting is done on the dialog_list elements in the 'dialog_menu'
+function, i.e. on the option "representation" layer. The sorting can be
+enabled or disabled by pressing the '>' key. The labels are sorted in
+the following way:
+
+1. Put all entries into an array (from the linked list)
+2. Sort them alphabetically using qsort and a custom comparator
+3. Restore the linked list structure of the items
+
+I know that this modification includes an ugly heuristic for
+extracting the actual label text from "    [ ] Some-option"-like
+expressions (to be able to alphabetically compare the labels), and I
+would be happy to discuss alternative solutions.
+
+Signed-off-by: Ivan Orlov <ivan.orlov0322@gmail.com>
+---
+ scripts/kconfig/lxdialog/dialog.h  |  5 +-
+ scripts/kconfig/lxdialog/menubox.c |  7 ++-
+ scripts/kconfig/lxdialog/util.c    | 79 ++++++++++++++++++++++++++++++
+ scripts/kconfig/mconf.c            |  9 +++-
+ 4 files changed, 97 insertions(+), 3 deletions(-)
+
+diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h
+index f6c2ebe6d1f9..a036ed8cb43c 100644
+--- a/scripts/kconfig/lxdialog/dialog.h
++++ b/scripts/kconfig/lxdialog/dialog.h
+@@ -58,6 +58,8 @@
+ #define ACS_DARROW 'v'
+ #endif
+ 
++#define KEY_ACTION_SORT 11
++
+ /* error return codes */
+ #define ERRDISPLAYTOOSMALL (KEY_MAX + 1)
+ 
+@@ -127,6 +129,7 @@ void item_set_selected(int val);
+ int item_activate_selected(void);
+ void *item_data(void);
+ char item_tag(void);
++void sort_items(void);
+ 
+ /* item list manipulation for lxdialog use */
+ #define MAXITEMSTR 200
+@@ -196,7 +199,7 @@ int dialog_textbox(const char *title, const char *tbuf, int initial_height,
+ 		   int initial_width, int *_vscroll, int *_hscroll,
+ 		   int (*extra_key_cb)(int, size_t, size_t, void *), void *data);
+ int dialog_menu(const char *title, const char *prompt,
+-		const void *selected, int *s_scroll);
++		const void *selected, int *s_scroll, bool sort);
+ int dialog_checklist(const char *title, const char *prompt, int height,
+ 		     int width, int list_height);
+ int dialog_inputbox(const char *title, const char *prompt, int height,
+diff --git a/scripts/kconfig/lxdialog/menubox.c b/scripts/kconfig/lxdialog/menubox.c
+index 6e6244df0c56..4cba15f967c5 100644
+--- a/scripts/kconfig/lxdialog/menubox.c
++++ b/scripts/kconfig/lxdialog/menubox.c
+@@ -161,7 +161,7 @@ static void do_scroll(WINDOW *win, int *scroll, int n)
+  * Display a menu for choosing among a number of options
+  */
+ int dialog_menu(const char *title, const char *prompt,
+-		const void *selected, int *s_scroll)
++		const void *selected, int *s_scroll, bool sort)
+ {
+ 	int i, j, x, y, box_x, box_y;
+ 	int height, width, menu_height;
+@@ -181,6 +181,9 @@ int dialog_menu(const char *title, const char *prompt,
+ 
+ 	max_choice = MIN(menu_height, item_count());
+ 
++	if (sort)
++		sort_items();
++
+ 	/* center dialog box on screen */
+ 	x = (getmaxx(stdscr) - width) / 2;
+ 	y = (getmaxy(stdscr) - height) / 2;
+@@ -408,6 +411,8 @@ int dialog_menu(const char *title, const char *prompt,
+ 			delwin(menu);
+ 			delwin(dialog);
+ 			goto do_resize;
++		case '>':
++			return KEY_ACTION_SORT;
+ 		}
+ 	}
+ 	delwin(menu);
+diff --git a/scripts/kconfig/lxdialog/util.c b/scripts/kconfig/lxdialog/util.c
+index 964139c87fcb..cc87ddd69c10 100644
+--- a/scripts/kconfig/lxdialog/util.c
++++ b/scripts/kconfig/lxdialog/util.c
+@@ -563,6 +563,85 @@ void item_reset(void)
+ 	item_cur = &item_nil;
+ }
+ 
++/*
++ * Skip the leading part of the label to get the actual label text
++ * (without the '[ ]'-like prefix).
++ */
++static char *skip_spec_characters(char *s)
++{
++	bool unbalanced = false;
++
++	while (*s) {
++		if (isalnum(*s) && !unbalanced) {
++			break;
++		} else if (*s == '[' || *s == '<' || *s == '(') {
++			/*
++			 * '[', '<' or '(' means that we need to look for
++			 * closure
++			 */
++			unbalanced = true;
++		} else if (*s == '-') {
++			/*
++			 * Labels could start with "-*-", so '-' here either
++			 * opens or closes the "checkbox"
++			 */
++			unbalanced = !unbalanced;
++		} else if (*s == '>' || *s == ']' || *s == ')') {
++			unbalanced = false;
++		}
++		s++;
++	}
++	return s;
++}
++
++static int compare_labels(const void *a, const void *b)
++{
++	struct dialog_list *el1 = *((struct dialog_list **)a);
++	struct dialog_list *el2 = *((struct dialog_list **)b);
++
++	return strcasecmp(skip_spec_characters(el1->node.str),
++			  skip_spec_characters(el2->node.str));
++}
++
++void sort_items(void)
++{
++	struct dialog_list **arr;
++	struct dialog_list *cur;
++	size_t n, i;
++
++	n = item_count();
++	if (n == 0)
++		return;
++
++	/* Copy all items from linked list into array */
++	cur = item_head;
++	arr = malloc(sizeof(*arr) * n);
++
++	if (!arr) {
++		/* Don't have enough memory, so don't do anything */
++		return;
++	}
++
++	for (i = 0; i < n; i++) {
++		arr[i] = cur;
++		cur = cur->next;
++	}
++
++	qsort(arr, n, sizeof(struct dialog_list *), compare_labels);
++
++	/* Restore the linked list structure from the sorted array */
++	for (i = 0; i < n; i++) {
++		if (i < n - 1)
++			arr[i]->next = arr[i + 1];
++		else
++			arr[i]->next = NULL;
++	}
++
++	item_head = arr[0];
++
++	free(arr);
++}
++
+ void item_make(const char *fmt, ...)
+ {
+ 	va_list ap;
+diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
+index 3887eac75289..8a961a41cae4 100644
+--- a/scripts/kconfig/mconf.c
++++ b/scripts/kconfig/mconf.c
+@@ -749,6 +749,7 @@ static void conf_save(void)
+ 	}
+ }
+ 
++static bool should_sort;
+ static void conf(struct menu *menu, struct menu *active_menu)
+ {
+ 	struct menu *submenu;
+@@ -774,9 +775,15 @@ static void conf(struct menu *menu, struct menu *active_menu)
+ 		dialog_clear();
+ 		res = dialog_menu(prompt ? prompt : "Main Menu",
+ 				  menu_instructions,
+-				  active_menu, &s_scroll);
++				  active_menu, &s_scroll, should_sort);
+ 		if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL)
+ 			break;
++
++		if (res == KEY_ACTION_SORT) {
++			should_sort = !should_sort;
++			continue;
++		}
++
+ 		if (item_count() != 0) {
+ 			if (!item_activate_selected())
+ 				continue;
+-- 
+2.34.1
+
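The sort_items() implementation in the patch above follows the three steps from the commit message: copy the linked list into an array, qsort() it, then relink the nodes in sorted order. A minimal standalone sketch of that approach (the list contents are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <strings.h>

    struct node { const char *str; struct node *next; };

    static int cmp(const void *a, const void *b)
    {
        const struct node *n1 = *(const struct node * const *)a;
        const struct node *n2 = *(const struct node * const *)b;

        return strcasecmp(n1->str, n2->str);
    }

    static struct node *sort_list(struct node *head, size_t n)
    {
        struct node **arr;
        size_t i;

        if (n == 0)
            return head;
        arr = malloc(sizeof(*arr) * n);
        if (!arr)
            return head;    /* out of memory: leave the list as-is */
        for (i = 0; i < n; i++, head = head->next)
            arr[i] = head;
        qsort(arr, n, sizeof(*arr), cmp);
        for (i = 0; i + 1 < n; i++)
            arr[i]->next = arr[i + 1];
        arr[n - 1]->next = NULL;
        head = arr[0];
        free(arr);
        return head;
    }

    int main(void)
    {
        struct node c = { "ACPI", NULL }, b = { "zram", &c }, a = { "Kexec", &b };
        struct node *l = sort_list(&a, 3);

        for (; l; l = l->next)
            printf("%s\n", l->str);    /* ACPI, Kexec, zram */
        return 0;
    }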


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-17 11:15 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-17 11:15 UTC (permalink / raw
  To: gentoo-commits

commit:     7250c448b42bc5c0f0d6114e4748c531b350fec8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 11:15:15 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 11:15:15 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7250c448

Linux patch 6.13.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1002_linux-6.13.3.patch | 18162 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 18166 insertions(+)

diff --git a/0000_README b/0000_README
index 65a7cda3..beaf6885 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.13.2.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.13.2
 
+Patch:  1002_linux-6.13.3.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.3
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1002_linux-6.13.3.patch b/1002_linux-6.13.3.patch
new file mode 100644
index 00000000..fca632f4
--- /dev/null
+++ b/1002_linux-6.13.3.patch
@@ -0,0 +1,18162 @@
+diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
+index 2ff922a406ad83..1a31723e79fd24 100644
+--- a/Documentation/arch/arm64/elf_hwcaps.rst
++++ b/Documentation/arch/arm64/elf_hwcaps.rst
+@@ -178,22 +178,28 @@ HWCAP2_DCPODP
+     Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
+ 
+ HWCAP2_SVE2
+-    Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SVEver == 0b0001.
+ 
+ HWCAP2_SVEAES
+-    Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.AES == 0b0001.
+ 
+ HWCAP2_SVEPMULL
+-    Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.AES == 0b0010.
+ 
+ HWCAP2_SVEBITPERM
+-    Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+ 
+ HWCAP2_SVESHA3
+-    Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+ 
+ HWCAP2_SVESM4
+-    Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SM4 == 0b0001.
+ 
+ HWCAP2_FLAGM2
+     Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0010.
+@@ -202,16 +208,20 @@ HWCAP2_FRINT
+     Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
+ 
+ HWCAP2_SVEI8MM
+-    Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.I8MM == 0b0001.
+ 
+ HWCAP2_SVEF32MM
+-    Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.F32MM == 0b0001.
+ 
+ HWCAP2_SVEF64MM
+-    Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.F64MM == 0b0001.
+ 
+ HWCAP2_SVEBF16
+-    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BF16 == 0b0001.
+ 
+ HWCAP2_I8MM
+     Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+@@ -277,7 +287,8 @@ HWCAP2_EBF16
+     Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
+ 
+ HWCAP2_SVE_EBF16
+-    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.BF16 == 0b0010.
+ 
+ HWCAP2_CSSC
+     Functionality implied by ID_AA64ISAR2_EL1.CSSC == 0b0001.
+@@ -286,7 +297,8 @@ HWCAP2_RPRFM
+     Functionality implied by ID_AA64ISAR2_EL1.RPRFM == 0b0001.
+ 
+ HWCAP2_SVE2P1
+-    Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0010.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.SVEver == 0b0010.
+ 
+ HWCAP2_SME2
+     Functionality implied by ID_AA64SMFR0_EL1.SMEver == 0b0001.
+@@ -313,7 +325,8 @@ HWCAP2_HBC
+     Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.
+ 
+ HWCAP2_SVE_B16B16
+-    Functionality implied by ID_AA64ZFR0_EL1.B16B16 == 0b0001.
++    Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001 and
++    ID_AA64ZFR0_EL1.B16B16 == 0b0001.
+ 
+ HWCAP2_LRCPC3
+     Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0011.
+diff --git a/Documentation/driver-api/media/tx-rx.rst b/Documentation/driver-api/media/tx-rx.rst
+index dd09484df1d33d..b936065dd640b5 100644
+--- a/Documentation/driver-api/media/tx-rx.rst
++++ b/Documentation/driver-api/media/tx-rx.rst
+@@ -50,7 +50,7 @@ The :ref:`V4L2_CID_LINK_FREQ <v4l2-cid-link-freq>` control is used to tell the
+ receiver the frequency of the bus (i.e. it is not the same as the symbol rate).
+ 
+ ``.enable_streams()`` and ``.disable_streams()`` callbacks
+-^^^^^^^^^^^^^^^^^^^^^^^^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ 
+ The struct v4l2_subdev_pad_ops->enable_streams() and struct
+ v4l2_subdev_pad_ops->disable_streams() callbacks are used by the receiver driver
+diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
+index 8cf2f041af4704..b4ee25af1702b0 100644
+--- a/Documentation/gpu/drm-kms-helpers.rst
++++ b/Documentation/gpu/drm-kms-helpers.rst
+@@ -221,6 +221,9 @@ Panel Helper Reference
+ .. kernel-doc:: drivers/gpu/drm/drm_panel_orientation_quirks.c
+    :export:
+ 
++.. kernel-doc:: drivers/gpu/drm/drm_panel_backlight_quirks.c
++   :export:
++
+ Panel Self Refresh Helper Reference
+ ===================================
+ 
+diff --git a/Makefile b/Makefile
+index 9de0dc460a8368..423d087afad2d1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
+index 6e67d99832ac25..ba7fdaae9c6e6d 100644
+--- a/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/ti/omap/dra7-l4.dtsi
+@@ -12,6 +12,7 @@ &l4_cfg {						/* 0x4a000000 */
+ 	ranges = <0x00000000 0x4a000000 0x100000>,	/* segment 0 */
+ 		 <0x00100000 0x4a100000 0x100000>,	/* segment 1 */
+ 		 <0x00200000 0x4a200000 0x100000>;	/* segment 2 */
++	dma-ranges;
+ 
+ 	segment@0 {					/* 0x4a000000 */
+ 		compatible = "simple-pm-bus";
+@@ -557,6 +558,7 @@ segment@100000 {					/* 0x4a100000 */
+ 			 <0x0007e000 0x0017e000 0x001000>,	/* ap 124 */
+ 			 <0x00059000 0x00159000 0x001000>,	/* ap 125 */
+ 			 <0x0005a000 0x0015a000 0x001000>;	/* ap 126 */
++		dma-ranges;
+ 
+ 		target-module@2000 {			/* 0x4a102000, ap 27 3c.0 */
+ 			compatible = "ti,sysc";
+diff --git a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
+index 2ee3ddd640209b..536070e80b2c6d 100644
+--- a/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/ti/omap/omap3-gta04.dtsi
+@@ -446,6 +446,7 @@ &omap3_pmx_core2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <
+ 			&hsusb2_2_pins
++			&mcspi3hog_pins
+ 	>;
+ 
+ 	hsusb2_2_pins: hsusb2-2-pins {
+@@ -459,6 +460,15 @@ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3)	/* etk_d15.hsusb2_d
+ 		>;
+ 	};
+ 
++	mcspi3hog_pins: mcspi3hog-pins {
++		pinctrl-single,pins = <
++			OMAP3630_CORE2_IOPAD(0x25dc, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d0 */
++			OMAP3630_CORE2_IOPAD(0x25de, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d1 */
++			OMAP3630_CORE2_IOPAD(0x25e0, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d2 */
++			OMAP3630_CORE2_IOPAD(0x25e2, PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* etk_d3 */
++		>;
++	};
++
+ 	spi_gpio_pins: spi-gpio-pinmux-pins {
+ 		pinctrl-single,pins = <
+ 			OMAP3630_CORE2_IOPAD(0x25d8, PIN_OUTPUT | MUX_MODE4) /* clk */
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+index 4b974bb781b104..bbb29218661b9c 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+@@ -269,11 +269,6 @@ dsi_out: endpoint {
+ 	};
+ };
+ 
+-&dpi0 {
+-	/* TODO Re-enable after DP to Type-C port muxing can be described */
+-	status = "disabled";
+-};
+-
+ &gic {
+ 	mediatek,broken-save-restore-fw;
+ };
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+index 61a6f66914b86d..dbdee604edab43 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
+@@ -522,10 +522,6 @@ &scp {
+ 	status = "okay";
+ };
+ 
+-&dsi0 {
+-	status = "disabled";
+-};
+-
+ &dpi0 {
+ 	pinctrl-names = "default", "sleep";
+ 	pinctrl-0 = <&dpi_func_pins>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 9af6349dbfcf10..0aa34e5bbaaa87 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -1835,6 +1835,7 @@ dsi0: dsi@14014000 {
+ 			resets = <&mmsys MT8183_MMSYS_SW0_RST_B_DISP_DSI0>;
+ 			phys = <&mipi_tx0>;
+ 			phy-names = "dphy";
++			status = "disabled";
+ 		};
+ 
+ 		dpi0: dpi@14015000 {
+@@ -1846,6 +1847,7 @@ dpi0: dpi@14015000 {
+ 				 <&mmsys CLK_MM_DPI_MM>,
+ 				 <&apmixedsys CLK_APMIXED_TVDPLL>;
+ 			clock-names = "pixel", "engine", "pll";
++			status = "disabled";
+ 
+ 			port {
+ 				dpi_out: endpoint { };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index 570331baa09ee3..2601b43b2d8cad 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -3815,7 +3815,7 @@ sce-fabric@b600000 {
+ 			compatible = "nvidia,tegra234-sce-fabric";
+ 			reg = <0x0 0xb600000 0x0 0x40000>;
+ 			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+-			status = "okay";
++			status = "disabled";
+ 		};
+ 
+ 		rce-fabric@be00000 {
+@@ -3995,7 +3995,7 @@ bpmp-fabric@d600000 {
+ 		};
+ 
+ 		dce-fabric@de00000 {
+-			compatible = "nvidia,tegra234-sce-fabric";
++			compatible = "nvidia,tegra234-dce-fabric";
+ 			reg = <0x0 0xde00000 0x0 0x40000>;
+ 			interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>;
+ 			status = "okay";
+@@ -4018,6 +4018,8 @@ gic: interrupt-controller@f400000 {
+ 			#redistributor-regions = <1>;
+ 			#interrupt-cells = <3>;
+ 			interrupt-controller;
++
++			#address-cells = <0>;
+ 		};
+ 
+ 		smmu_iso: iommu@10000000 {
+diff --git a/arch/arm64/boot/dts/qcom/sdx75.dtsi b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+index 68d7dbe037b6ac..227f4705587a12 100644
+--- a/arch/arm64/boot/dts/qcom/sdx75.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdx75.dtsi
+@@ -893,7 +893,7 @@ tcsr: syscon@1fc0000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sdx75-mpss-pas";
+-			reg = <0 0x04080000 0 0x4040>;
++			reg = <0 0x04080000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 9b23534c456bde..7016843c2ca560 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -2027,7 +2027,7 @@ dispcc: clock-controller@5f00000 {
+ 
+ 		remoteproc_mpss: remoteproc@6080000 {
+ 			compatible = "qcom,sm6115-mpss-pas";
+-			reg = <0x0 0x06080000 0x0 0x100>;
++			reg = <0x0 0x06080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
+ 					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2670,9 +2670,9 @@ funnel_apss1_in: endpoint {
+ 			};
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@ab00000 {
++		remoteproc_adsp: remoteproc@a400000 {
+ 			compatible = "qcom,sm6115-adsp-pas";
+-			reg = <0x0 0x0ab00000 0x0 0x100>;
++			reg = <0x0 0x0a400000 0x0 0x4040>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
+ 					      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2744,7 +2744,7 @@ compute-cb@7 {
+ 
+ 		remoteproc_cdsp: remoteproc@b300000 {
+ 			compatible = "qcom,sm6115-cdsp-pas";
+-			reg = <0x0 0x0b300000 0x0 0x100000>;
++			reg = <0x0 0x0b300000 0x0 0x4040>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ 					      <&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 8d697280249fef..fd662591f7c6d5 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -936,7 +936,7 @@ uart1: serial@884000 {
+ 				power-domains = <&rpmhpd SM6350_CX>;
+ 				operating-points-v2 = <&qup_opp_table>;
+ 				interconnects = <&clk_virt MASTER_QUP_CORE_0 0 &clk_virt SLAVE_QUP_CORE_0 0>,
+-						<&aggre1_noc MASTER_QUP_0 0 &clk_virt SLAVE_EBI_CH0 0>;
++						<&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_QUP_0 0>;
+ 				interconnect-names = "qup-core", "qup-config";
+ 				status = "disabled";
+ 			};
+@@ -1283,7 +1283,7 @@ tcsr_mutex: hwlock@1f40000 {
+ 
+ 		adsp: remoteproc@3000000 {
+ 			compatible = "qcom,sm6350-adsp-pas";
+-			reg = <0 0x03000000 0 0x100>;
++			reg = <0x0 0x03000000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1503,7 +1503,7 @@ gpucc: clock-controller@3d90000 {
+ 
+ 		mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm6350-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+ 					      <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index 7c929168ed0805..0faa3a40ff824a 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -1516,9 +1516,9 @@ gpucc: clock-controller@5990000 {
+ 			#power-domain-cells = <1>;
+ 		};
+ 
+-		remoteproc_mss: remoteproc@6000000 {
++		remoteproc_mss: remoteproc@6080000 {
+ 			compatible = "qcom,sm6375-mpss-pas";
+-			reg = <0 0x06000000 0 0x4040>;
++			reg = <0x0 0x06080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 307 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1559,7 +1559,7 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 
+ 		remoteproc_adsp: remoteproc@a400000 {
+ 			compatible = "qcom,sm6375-adsp-pas";
+-			reg = <0 0x0a400000 0 0x100>;
++			reg = <0 0x0a400000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -1595,9 +1595,9 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
+-		remoteproc_cdsp: remoteproc@b000000 {
++		remoteproc_cdsp: remoteproc@b300000 {
+ 			compatible = "qcom,sm6375-cdsp-pas";
+-			reg = <0x0 0x0b000000 0x0 0x100000>;
++			reg = <0x0 0x0b300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index 15b7f15b3836dc..766da4a701a0ee 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1876,6 +1876,142 @@ tcsr: syscon@1fc0000 {
+ 			reg = <0x0 0x1fc0000 0x0 0x30000>;
+ 		};
+ 
++		adsp: remoteproc@3000000 {
++			compatible = "qcom,sm8350-adsp-pas";
++			reg = <0x0 0x03000000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			memory-region = <&pil_adsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				apr {
++					compatible = "qcom,apr-v2";
++					qcom,glink-channels = "apr_audio_svc";
++					qcom,domain = <APR_DOMAIN_ADSP>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					service@3 {
++						reg = <APR_SVC_ADSP_CORE>;
++						compatible = "qcom,q6core";
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++					};
++
++					q6afe: service@4 {
++						compatible = "qcom,q6afe";
++						reg = <APR_SVC_AFE>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6afedai: dais {
++							compatible = "qcom,q6afe-dais";
++							#address-cells = <1>;
++							#size-cells = <0>;
++							#sound-dai-cells = <1>;
++						};
++
++						q6afecc: clock-controller {
++							compatible = "qcom,q6afe-clocks";
++							#clock-cells = <2>;
++						};
++					};
++
++					q6asm: service@7 {
++						compatible = "qcom,q6asm";
++						reg = <APR_SVC_ASM>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6asmdai: dais {
++							compatible = "qcom,q6asm-dais";
++							#address-cells = <1>;
++							#size-cells = <0>;
++							#sound-dai-cells = <1>;
++							iommus = <&apps_smmu 0x1801 0x0>;
++
++							dai@0 {
++								reg = <0>;
++							};
++
++							dai@1 {
++								reg = <1>;
++							};
++
++							dai@2 {
++								reg = <2>;
++							};
++						};
++					};
++
++					q6adm: service@8 {
++						compatible = "qcom,q6adm";
++						reg = <APR_SVC_ADM>;
++						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
++
++						q6routing: routing {
++							compatible = "qcom,q6adm-routing";
++							#sound-dai-cells = <0>;
++						};
++					};
++				};
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1803 0x0>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1804 0x0>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1805 0x0>;
++					};
++				};
++			};
++		};
++
+ 		lpass_tlmm: pinctrl@33c0000 {
+ 			compatible = "qcom,sm8350-lpass-lpi-pinctrl";
+ 			reg = <0 0x033c0000 0 0x20000>,
+@@ -2078,7 +2214,7 @@ lpass_ag_noc: interconnect@3c40000 {
+ 
+ 		mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8350-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2360,6 +2496,115 @@ compute_noc: interconnect@a0c0000 {
+ 			qcom,bcm-voters = <&apps_bcm_voter>;
+ 		};
+ 
++		cdsp: remoteproc@a300000 {
++			compatible = "qcom,sm8350-cdsp-pas";
++			reg = <0x0 0x0a300000 0x0 0x10000>;
++
++			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_CX>,
++					<&rpmhpd RPMHPD_MXC>;
++			power-domain-names = "cx", "mxc";
++
++			interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
++
++			memory-region = <&pil_cdsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_cdsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_CDSP
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "cdsp";
++				qcom,remote-pid = <5>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "cdsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@1 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <1>;
++						iommus = <&apps_smmu 0x2161 0x0400>,
++							 <&apps_smmu 0x1181 0x0420>;
++					};
++
++					compute-cb@2 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <2>;
++						iommus = <&apps_smmu 0x2162 0x0400>,
++							 <&apps_smmu 0x1182 0x0420>;
++					};
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x2163 0x0400>,
++							 <&apps_smmu 0x1183 0x0420>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x2164 0x0400>,
++							 <&apps_smmu 0x1184 0x0420>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x2165 0x0400>,
++							 <&apps_smmu 0x1185 0x0420>;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x2166 0x0400>,
++							 <&apps_smmu 0x1186 0x0420>;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x2167 0x0400>,
++							 <&apps_smmu 0x1187 0x0420>;
++					};
++
++					compute-cb@8 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <8>;
++						iommus = <&apps_smmu 0x2168 0x0400>,
++							 <&apps_smmu 0x1188 0x0420>;
++					};
++
++					/* note: secure cb9 in downstream */
++				};
++			};
++		};
++
+ 		usb_1: usb@a6f8800 {
+ 			compatible = "qcom,sm8350-dwc3", "qcom,dwc3";
+ 			reg = <0 0x0a6f8800 0 0x400>;
+@@ -3285,142 +3530,6 @@ apps_smmu: iommu@15000000 {
+ 			dma-coherent;
+ 		};
+ 
+-		adsp: remoteproc@17300000 {
+-			compatible = "qcom,sm8350-adsp-pas";
+-			reg = <0 0x17300000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			memory-region = <&pil_adsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				apr {
+-					compatible = "qcom,apr-v2";
+-					qcom,glink-channels = "apr_audio_svc";
+-					qcom,domain = <APR_DOMAIN_ADSP>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					service@3 {
+-						reg = <APR_SVC_ADSP_CORE>;
+-						compatible = "qcom,q6core";
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-					};
+-
+-					q6afe: service@4 {
+-						compatible = "qcom,q6afe";
+-						reg = <APR_SVC_AFE>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6afedai: dais {
+-							compatible = "qcom,q6afe-dais";
+-							#address-cells = <1>;
+-							#size-cells = <0>;
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6afecc: clock-controller {
+-							compatible = "qcom,q6afe-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-
+-					q6asm: service@7 {
+-						compatible = "qcom,q6asm";
+-						reg = <APR_SVC_ASM>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6asmdai: dais {
+-							compatible = "qcom,q6asm-dais";
+-							#address-cells = <1>;
+-							#size-cells = <0>;
+-							#sound-dai-cells = <1>;
+-							iommus = <&apps_smmu 0x1801 0x0>;
+-
+-							dai@0 {
+-								reg = <0>;
+-							};
+-
+-							dai@1 {
+-								reg = <1>;
+-							};
+-
+-							dai@2 {
+-								reg = <2>;
+-							};
+-						};
+-					};
+-
+-					q6adm: service@8 {
+-						compatible = "qcom,q6adm";
+-						reg = <APR_SVC_ADM>;
+-						qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
+-
+-						q6routing: routing {
+-							compatible = "qcom,q6adm-routing";
+-							#sound-dai-cells = <0>;
+-						};
+-					};
+-				};
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1803 0x0>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1804 0x0>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1805 0x0>;
+-					};
+-				};
+-			};
+-		};
+-
+ 		intc: interrupt-controller@17a00000 {
+ 			compatible = "arm,gic-v3";
+ 			#interrupt-cells = <3>;
+@@ -3589,115 +3698,6 @@ cpufreq_hw: cpufreq@18591000 {
+ 			#freq-domain-cells = <1>;
+ 			#clock-cells = <1>;
+ 		};
+-
+-		cdsp: remoteproc@98900000 {
+-			compatible = "qcom,sm8350-cdsp-pas";
+-			reg = <0 0x98900000 0 0x1400000>;
+-
+-			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_CX>,
+-					<&rpmhpd RPMHPD_MXC>;
+-			power-domain-names = "cx", "mxc";
+-
+-			interconnects = <&compute_noc MASTER_CDSP_PROC 0 &mc_virt SLAVE_EBI1 0>;
+-
+-			memory-region = <&pil_cdsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_cdsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_CDSP
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "cdsp";
+-				qcom,remote-pid = <5>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "cdsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@1 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <1>;
+-						iommus = <&apps_smmu 0x2161 0x0400>,
+-							 <&apps_smmu 0x1181 0x0420>;
+-					};
+-
+-					compute-cb@2 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <2>;
+-						iommus = <&apps_smmu 0x2162 0x0400>,
+-							 <&apps_smmu 0x1182 0x0420>;
+-					};
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x2163 0x0400>,
+-							 <&apps_smmu 0x1183 0x0420>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x2164 0x0400>,
+-							 <&apps_smmu 0x1184 0x0420>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x2165 0x0400>,
+-							 <&apps_smmu 0x1185 0x0420>;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x2166 0x0400>,
+-							 <&apps_smmu 0x1186 0x0420>;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x2167 0x0400>,
+-							 <&apps_smmu 0x1187 0x0420>;
+-					};
+-
+-					compute-cb@8 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <8>;
+-						iommus = <&apps_smmu 0x2168 0x0400>,
+-							 <&apps_smmu 0x1188 0x0420>;
+-					};
+-
+-					/* note: secure cb9 in downstream */
+-				};
+-			};
+-		};
+ 	};
+ 
+ 	thermal_zones: thermal-zones {
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 7a0b901799bc32..e71e5fd5c9a54d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -2496,6 +2496,112 @@ compute-cb@3 {
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@3000000 {
++			compatible = "qcom,sm8450-adsp-pas";
++			reg = <0x0 0x03000000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			memory-region = <&adsp_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1801 0x0>;
++						};
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1803 0x0>;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1804 0x0>;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1805 0x0>;
++					};
++				};
++			};
++		};
++
+ 		wsa2macro: codec@31e0000 {
+ 			compatible = "qcom,sm8450-lpass-wsa-macro";
+ 			reg = <0 0x031e0000 0 0x1000>;
+@@ -2692,115 +2798,9 @@ vamacro: codec@33f0000 {
+ 			status = "disabled";
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8450-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			memory-region = <&adsp_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1801 0x0>;
+-						};
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1803 0x0>;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1804 0x0>;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1805 0x0>;
+-					};
+-				};
+-			};
+-		};
+-
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8450-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0 0x32300000 0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2907,7 +2907,7 @@ compute-cb@8 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8450-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+index e7774d32fb6d22..a2daf9712fc0cf 100644
+--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
+@@ -2314,7 +2314,7 @@ ipa: ipa@3f40000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8550-mpss-pas";
+-			reg = <0x0 0x04080000 0x0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2354,6 +2354,137 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,sm8550-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog", "fatal", "ready",
++					  "handover", "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx", "lmx";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
++
++			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1063 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1064 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1065 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1066 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x1007 0x80>,
++							 <&apps_smmu 0x1067 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -2872,9 +3003,8 @@ mdss: display-subsystem@ae00000 {
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+-			interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
+-					<&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
+-			interconnect-names = "mdp0-mem", "mdp1-mem";
++			interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>;
++			interconnect-names = "mdp0-mem";
+ 
+ 			iommus = <&apps_smmu 0x1c00 0x2>;
+ 
+@@ -4576,137 +4706,6 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8550-adsp-pas";
+-			reg = <0x0 0x30000000 0x0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog", "fatal", "ready",
+-					  "handover", "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx", "lmx";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC 0 &mc_virt SLAVE_EBI1 0>;
+-
+-			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1063 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1064 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1065 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1066 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x1007 0x80>,
+-							 <&apps_smmu 0x1067 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		nsp_noc: interconnect@320c0000 {
+ 			compatible = "qcom,sm8550-nsp-noc";
+ 			reg = <0 0x320c0000 0 0xe080>;
+@@ -4716,7 +4715,7 @@ nsp_noc: interconnect@320c0000 {
+ 
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8550-cdsp-pas";
+-			reg = <0x0 0x32300000 0x0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8650.dtsi b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+index 1e2d6ba0b8c127..1de6bab244b3b9 100644
+--- a/arch/arm64/boot/dts/qcom/sm8650.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8650.dtsi
+@@ -2853,7 +2853,7 @@ ipa: ipa@3f40000 {
+ 
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sm8650-mpss-pas";
+-			reg = <0 0x04080000 0 0x4040>;
++			reg = <0x0 0x04080000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+@@ -2904,6 +2904,154 @@ IPCC_MPROC_SIGNAL_GLINK_QMP
+ 			};
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,sm8650-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog",
++					  "fatal",
++					  "ready",
++					  "handover",
++					  "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
++					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx",
++					     "lmx";
++
++			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			remoteproc_adsp_glink: glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				qcom,remote-pid = <2>;
++
++				label = "lpass";
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++
++					label = "adsp";
++
++					qcom,non-secure-domain;
++
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1043 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1044 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1045 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1046 0x20>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++
++						iommus = <&apps_smmu 0x1007 0x40>,
++							 <&apps_smmu 0x1067 0x0>,
++							 <&apps_smmu 0x1087 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,sm8650-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -3455,11 +3603,8 @@ mdss: display-subsystem@ae00000 {
+ 			resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
+ 
+ 			interconnects = <&mmss_noc MASTER_MDP QCOM_ICC_TAG_ALWAYS
+-					 &gem_noc SLAVE_LLCC QCOM_ICC_TAG_ALWAYS>,
+-					<&mc_virt MASTER_LLCC QCOM_ICC_TAG_ALWAYS
+ 					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-			interconnect-names = "mdp0-mem",
+-					     "mdp1-mem";
++			interconnect-names = "mdp0-mem";
+ 
+ 			power-domains = <&dispcc MDSS_GDSC>;
+ 
+@@ -5322,154 +5467,6 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,sm8650-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog",
+-					  "fatal",
+-					  "ready",
+-					  "handover",
+-					  "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+-					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx",
+-					     "lmx";
+-
+-			memory-region = <&adspslpi_mem>, <&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			remoteproc_adsp_glink: glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				qcom,remote-pid = <2>;
+-
+-				label = "lpass";
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-
+-					label = "adsp";
+-
+-					qcom,non-secure-domain;
+-
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1043 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1044 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1045 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1046 0x20>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-
+-						iommus = <&apps_smmu 0x1007 0x40>,
+-							 <&apps_smmu 0x1067 0x0>,
+-							 <&apps_smmu 0x1087 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		nsp_noc: interconnect@320c0000 {
+ 			compatible = "qcom,sm8650-nsp-noc";
+ 			reg = <0 0x320c0000 0 0xf080>;
+@@ -5481,7 +5478,7 @@ nsp_noc: interconnect@320c0000 {
+ 
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,sm8650-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+index 66513fc8e67a3a..8435e55f63e8a1 100644
+--- a/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
++++ b/arch/arm64/boot/dts/qcom/x1e78100-lenovo-thinkpad-t14s.dts
+@@ -763,7 +763,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -795,7 +795,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+index 8515c254e15868..3eb59d32c87419 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+@@ -591,7 +591,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -623,7 +623,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+index d51a9bdcf67fcb..1d79eec6ecfa87 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+@@ -1187,7 +1187,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -1219,7 +1219,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -1251,7 +1251,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
+index 05624226faf9ee..210cb2336d1456 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-dell-xps13-9345.dts
+@@ -820,7 +820,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p9>;
+ 
+ 	status = "okay";
+@@ -852,7 +852,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+index ca5a808f2c7df6..77908462aef660 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+@@ -908,7 +908,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -940,7 +940,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -972,7 +972,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+index 8761874dc2f064..85d28fb8d8787c 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100-microsoft-romulus.dtsi
+@@ -823,7 +823,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e>;
++	vdda-phy-supply = <&vreg_l2j>;
+ 	vdda-pll-supply = <&vreg_l1j>;
+ 
+ 	status = "okay";
+@@ -855,7 +855,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e>;
++	vdda-phy-supply = <&vreg_l2j>;
+ 	vdda-pll-supply = <&vreg_l2d>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+index 5ef030c60abe29..af76aa034d0e17 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
++++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+@@ -896,7 +896,7 @@ &usb_1_ss0_hsphy {
+ };
+ 
+ &usb_1_ss0_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l1j_0p8>;
+ 
+ 	status = "okay";
+@@ -928,7 +928,7 @@ &usb_1_ss1_hsphy {
+ };
+ 
+ &usb_1_ss1_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+@@ -960,7 +960,7 @@ &usb_1_ss2_hsphy {
+ };
+ 
+ &usb_1_ss2_qmpphy {
+-	vdda-phy-supply = <&vreg_l3e_1p2>;
++	vdda-phy-supply = <&vreg_l2j_1p2>;
+ 	vdda-pll-supply = <&vreg_l2d_0p9>;
+ 
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+index 4dfea255c83f70..857dc50e01b3ab 100644
+--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
++++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+@@ -3518,6 +3518,143 @@ nsp_noc: interconnect@320c0000 {
+ 			#interconnect-cells = <2>;
+ 		};
+ 
++		remoteproc_adsp: remoteproc@6800000 {
++			compatible = "qcom,x1e80100-adsp-pas";
++			reg = <0x0 0x06800000 0x0 0x10000>;
++
++			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
++					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "wdog",
++					  "fatal",
++					  "ready",
++					  "handover",
++					  "stop-ack";
++
++			clocks = <&rpmhcc RPMH_CXO_CLK>;
++			clock-names = "xo";
++
++			power-domains = <&rpmhpd RPMHPD_LCX>,
++					<&rpmhpd RPMHPD_LMX>;
++			power-domain-names = "lcx",
++					     "lmx";
++
++			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
++					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
++
++			memory-region = <&adspslpi_mem>,
++					<&q6_adsp_dtb_mem>;
++
++			qcom,qmp = <&aoss_qmp>;
++
++			qcom,smem-states = <&smp2p_adsp_out 0>;
++			qcom,smem-state-names = "stop";
++
++			status = "disabled";
++
++			glink-edge {
++				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
++							     IPCC_MPROC_SIGNAL_GLINK_QMP
++							     IRQ_TYPE_EDGE_RISING>;
++				mboxes = <&ipcc IPCC_CLIENT_LPASS
++						IPCC_MPROC_SIGNAL_GLINK_QMP>;
++
++				label = "lpass";
++				qcom,remote-pid = <2>;
++
++				fastrpc {
++					compatible = "qcom,fastrpc";
++					qcom,glink-channels = "fastrpcglink-apps-dsp";
++					label = "adsp";
++					qcom,non-secure-domain;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					compute-cb@3 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <3>;
++						iommus = <&apps_smmu 0x1003 0x80>,
++							 <&apps_smmu 0x1063 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@4 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <4>;
++						iommus = <&apps_smmu 0x1004 0x80>,
++							 <&apps_smmu 0x1064 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@5 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <5>;
++						iommus = <&apps_smmu 0x1005 0x80>,
++							 <&apps_smmu 0x1065 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@6 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <6>;
++						iommus = <&apps_smmu 0x1006 0x80>,
++							 <&apps_smmu 0x1066 0x0>;
++						dma-coherent;
++					};
++
++					compute-cb@7 {
++						compatible = "qcom,fastrpc-compute-cb";
++						reg = <7>;
++						iommus = <&apps_smmu 0x1007 0x80>,
++							 <&apps_smmu 0x1067 0x0>;
++						dma-coherent;
++					};
++				};
++
++				gpr {
++					compatible = "qcom,gpr";
++					qcom,glink-channels = "adsp_apps";
++					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
++					qcom,intents = <512 20>;
++					#address-cells = <1>;
++					#size-cells = <0>;
++
++					q6apm: service@1 {
++						compatible = "qcom,q6apm";
++						reg = <GPR_APM_MODULE_IID>;
++						#sound-dai-cells = <0>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6apmbedai: bedais {
++							compatible = "qcom,q6apm-lpass-dais";
++							#sound-dai-cells = <1>;
++						};
++
++						q6apmdai: dais {
++							compatible = "qcom,q6apm-dais";
++							iommus = <&apps_smmu 0x1001 0x80>,
++								 <&apps_smmu 0x1061 0x0>;
++						};
++					};
++
++					q6prm: service@2 {
++						compatible = "qcom,q6prm";
++						reg = <GPR_PRM_MODULE_IID>;
++						qcom,protection-domain = "avs/audio",
++									 "msm/adsp/audio_pd";
++
++						q6prmcc: clock-controller {
++							compatible = "qcom,q6prm-lpass-clocks";
++							#clock-cells = <2>;
++						};
++					};
++				};
++			};
++		};
++
+ 		lpass_wsa2macro: codec@6aa0000 {
+ 			compatible = "qcom,x1e80100-lpass-wsa-macro", "qcom,sm8550-lpass-wsa-macro";
+ 			reg = <0 0x06aa0000 0 0x1000>;
+@@ -4118,7 +4255,7 @@ usb_2: usb@a2f8800 {
+ 					  <&gcc GCC_USB20_MASTER_CLK>;
+ 			assigned-clock-rates = <19200000>, <200000000>;
+ 
+-			interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>,
++			interrupts-extended = <&intc GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&pdc 50 IRQ_TYPE_EDGE_BOTH>,
+ 					      <&pdc 49 IRQ_TYPE_EDGE_BOTH>;
+ 			interrupt-names = "pwr_event",
+@@ -4144,7 +4281,7 @@ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
+ 			usb_2_dwc3: usb@a200000 {
+ 				compatible = "snps,dwc3";
+ 				reg = <0 0x0a200000 0 0xcd00>;
+-				interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
+ 				iommus = <&apps_smmu 0x14e0 0x0>;
+ 				phys = <&usb_2_hsphy>;
+ 				phy-names = "usb2-phy";
+@@ -6111,146 +6248,9 @@ system-cache-controller@25000000 {
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+-		remoteproc_adsp: remoteproc@30000000 {
+-			compatible = "qcom,x1e80100-adsp-pas";
+-			reg = <0 0x30000000 0 0x100>;
+-
+-			interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+-					      <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "wdog",
+-					  "fatal",
+-					  "ready",
+-					  "handover",
+-					  "stop-ack";
+-
+-			clocks = <&rpmhcc RPMH_CXO_CLK>;
+-			clock-names = "xo";
+-
+-			power-domains = <&rpmhpd RPMHPD_LCX>,
+-					<&rpmhpd RPMHPD_LMX>;
+-			power-domain-names = "lcx",
+-					     "lmx";
+-
+-			interconnects = <&lpass_lpicx_noc MASTER_LPASS_PROC QCOM_ICC_TAG_ALWAYS
+-					 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+-
+-			memory-region = <&adspslpi_mem>,
+-					<&q6_adsp_dtb_mem>;
+-
+-			qcom,qmp = <&aoss_qmp>;
+-
+-			qcom,smem-states = <&smp2p_adsp_out 0>;
+-			qcom,smem-state-names = "stop";
+-
+-			status = "disabled";
+-
+-			glink-edge {
+-				interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+-							     IPCC_MPROC_SIGNAL_GLINK_QMP
+-							     IRQ_TYPE_EDGE_RISING>;
+-				mboxes = <&ipcc IPCC_CLIENT_LPASS
+-						IPCC_MPROC_SIGNAL_GLINK_QMP>;
+-
+-				label = "lpass";
+-				qcom,remote-pid = <2>;
+-
+-				fastrpc {
+-					compatible = "qcom,fastrpc";
+-					qcom,glink-channels = "fastrpcglink-apps-dsp";
+-					label = "adsp";
+-					qcom,non-secure-domain;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					compute-cb@3 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <3>;
+-						iommus = <&apps_smmu 0x1003 0x80>,
+-							 <&apps_smmu 0x1063 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@4 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <4>;
+-						iommus = <&apps_smmu 0x1004 0x80>,
+-							 <&apps_smmu 0x1064 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@5 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <5>;
+-						iommus = <&apps_smmu 0x1005 0x80>,
+-							 <&apps_smmu 0x1065 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@6 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <6>;
+-						iommus = <&apps_smmu 0x1006 0x80>,
+-							 <&apps_smmu 0x1066 0x0>;
+-						dma-coherent;
+-					};
+-
+-					compute-cb@7 {
+-						compatible = "qcom,fastrpc-compute-cb";
+-						reg = <7>;
+-						iommus = <&apps_smmu 0x1007 0x80>,
+-							 <&apps_smmu 0x1067 0x0>;
+-						dma-coherent;
+-					};
+-				};
+-
+-				gpr {
+-					compatible = "qcom,gpr";
+-					qcom,glink-channels = "adsp_apps";
+-					qcom,domain = <GPR_DOMAIN_ID_ADSP>;
+-					qcom,intents = <512 20>;
+-					#address-cells = <1>;
+-					#size-cells = <0>;
+-
+-					q6apm: service@1 {
+-						compatible = "qcom,q6apm";
+-						reg = <GPR_APM_MODULE_IID>;
+-						#sound-dai-cells = <0>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6apmbedai: bedais {
+-							compatible = "qcom,q6apm-lpass-dais";
+-							#sound-dai-cells = <1>;
+-						};
+-
+-						q6apmdai: dais {
+-							compatible = "qcom,q6apm-dais";
+-							iommus = <&apps_smmu 0x1001 0x80>,
+-								 <&apps_smmu 0x1061 0x0>;
+-						};
+-					};
+-
+-					q6prm: service@2 {
+-						compatible = "qcom,q6prm";
+-						reg = <GPR_PRM_MODULE_IID>;
+-						qcom,protection-domain = "avs/audio",
+-									 "msm/adsp/audio_pd";
+-
+-						q6prmcc: clock-controller {
+-							compatible = "qcom,q6prm-lpass-clocks";
+-							#clock-cells = <2>;
+-						};
+-					};
+-				};
+-			};
+-		};
+-
+ 		remoteproc_cdsp: remoteproc@32300000 {
+ 			compatible = "qcom,x1e80100-cdsp-pas";
+-			reg = <0 0x32300000 0 0x1400000>;
++			reg = <0x0 0x32300000 0x0 0x10000>;
+ 
+ 			interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ 					      <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index d12e661dfd9917..995b30a7aae01a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -182,7 +182,7 @@ &gmac {
+ 	snps,reset-active-low;
+ 	snps,reset-delays-us = <0 10000 50000>;
+ 	tx_delay = <0x10>;
+-	rx_delay = <0x10>;
++	rx_delay = <0x23>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 3d8d534a7a77c2..ad63457a05c5b0 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -343,6 +343,11 @@ alternative_cb_end
+ 	// Narrow PARange to fit the PS field in TCR_ELx
+ 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+ 	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
++#ifdef CONFIG_ARM64_LPA2
++alternative_if_not ARM64_HAS_VA52
++	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
++alternative_else_nop_endif
++#endif
+ 	cmp	\tmp0, \tmp1
+ 	csel	\tmp0, \tmp1, \tmp0, hi
+ 	bfi	\tcr, \tmp0, \pos, #3
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index c78a988cca93a5..a9136cc551ccbb 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -222,12 +222,6 @@
+  */
+ #define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)
+ 
+-/*
+- * Highest possible physical address supported.
+- */
+-#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
+-#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
+-
+ #define TTBR_CNP_BIT		(UL(1) << 0)
+ 
+ /*
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 9f9cf13bbd95e7..a95f1f77bb39ad 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared;
+ #define lpa2_is_enabled()	false
+ #define PTE_MAYBE_SHARED	PTE_SHARED
+ #define PMD_MAYBE_SHARED	PMD_SECT_S
++#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
+ #else
+ static inline bool __pure lpa2_is_enabled(void)
+ {
+@@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void)
+ 
+ #define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
+ #define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
++#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+ #endif
+ 
++/*
++ * Highest possible physical address supported.
++ */
++#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
++
+ /*
+  * If we have userspace only BTI we don't want to mark kernel pages
+  * guarded even if the system does support BTI.
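With this change PHYS_MASK_SHIFT is no longer a compile-time constant on LPA2 builds: without LPA2 the effective range is clamped to 48 bits even when CONFIG_ARM64_PA_BITS is 52. A minimal userspace sketch of the resulting mask arithmetic (illustrative constants only, not kernel code):

#include <stdio.h>

#define UL(x)	((unsigned long)(x))

static unsigned long phys_mask(int pa_bits, int lpa2_enabled)
{
	/* Mirror the patched logic: only trust the full PA range with LPA2. */
	int shift = lpa2_enabled ? pa_bits : 48;

	return (UL(1) << shift) - 1;
}

int main(void)
{
	printf("no LPA2:   %#lx\n", phys_mask(52, 0));	/* 0xffffffffffff */
	printf("with LPA2: %#lx\n", phys_mask(52, 1));	/* 0xfffffffffffff */
	return 0;
}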
+diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
+index 8a8acc220371cb..84783efdc9d1f7 100644
+--- a/arch/arm64/include/asm/sparsemem.h
++++ b/arch/arm64/include/asm/sparsemem.h
+@@ -5,7 +5,10 @@
+ #ifndef __ASM_SPARSEMEM_H
+ #define __ASM_SPARSEMEM_H
+ 
+-#define MAX_PHYSMEM_BITS	CONFIG_ARM64_PA_BITS
++#include <asm/pgtable-prot.h>
++
++#define MAX_PHYSMEM_BITS		PHYS_MASK_SHIFT
++#define MAX_POSSIBLE_PHYSMEM_BITS	(52)
+ 
+ /*
+  * Section size must be at least 512MB for 64K base
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 6ce71f444ed84f..a5fe6b641a9fed 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1167,12 +1167,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+ 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+ 		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+ 
+-		/*
+-		 * We mask out SMPS since even if the hardware
+-		 * supports priorities the kernel does not at present
+-		 * and we block access to them.
+-		 */
+-		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
+ 		vec_init_vq_map(ARM64_VEC_SME);
+ 
+ 		cpacr_restore(cpacr);
+@@ -1423,13 +1417,6 @@ void update_cpu_features(int cpu,
+ 	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+ 		unsigned long cpacr = cpacr_save_enable_kernel_sme();
+ 
+-		/*
+-		 * We mask out SMPS since even if the hardware
+-		 * supports priorities the kernel does not at present
+-		 * and we block access to them.
+-		 */
+-		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
+-
+ 		/* Probe vector lengths */
+ 		if (!system_capabilities_finalized())
+ 			vec_update_vq_map(ARM64_VEC_SME);
+@@ -3022,6 +3009,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
+ 		.matches = match,						\
+ 	}
+ 
++#define HWCAP_CAP_MATCH_ID(match, reg, field, min_value, cap_type, cap)		\
++	{									\
++		__HWCAP_CAP(#cap, cap_type, cap)				\
++		HWCAP_CPUID_MATCH(reg, field, min_value) 			\
++		.matches = match,						\
++	}
++
+ #ifdef CONFIG_ARM64_PTR_AUTH
+ static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ 	{
+@@ -3050,6 +3044,13 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ };
+ #endif
+ 
++#ifdef CONFIG_ARM64_SVE
++static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
++{
++	return system_supports_sve() && has_user_cpuid_feature(cap, scope);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ 	HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+@@ -3092,19 +3093,19 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
+ #ifdef CONFIG_ARM64_SVE
+ 	HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+-	HWCAP_CAP(ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2p1, CAP_HWCAP, KERNEL_HWCAP_SVE2P1),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SVEver, SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, SM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F32MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
++	HWCAP_CAP_MATCH_ID(has_sve_feature, ID_AA64ZFR0_EL1, F64MM, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
+ #endif
+ #ifdef CONFIG_ARM64_GCS
+ 	HWCAP_CAP(ID_AA64PFR1_EL1, GCS, IMP, CAP_HWCAP, KERNEL_HWCAP_GCS),
+@@ -3478,7 +3479,7 @@ static void verify_hyp_capabilities(void)
+ 		return;
+ 
+ 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+-	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
++	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ 
+ 	/* Verify VMID bits */
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index d79e88fccdfce4..c45633b5ae233f 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -482,6 +482,16 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+ 	if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
+ 		info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
+ 
++	if (IS_ENABLED(CONFIG_ARM64_SME) &&
++	    id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
++		/*
++		 * We mask out SMPS since, even if the hardware supports
++		 * priorities, the kernel does not at present, and we block
++		 * access to them.
++		 */
++		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
++	}
++
+ 	cpuinfo_detect_icache_policy(info);
+ }
+ 
+diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
+index 22159251eb3a6a..c6b185b885f700 100644
+--- a/arch/arm64/kernel/pi/idreg-override.c
++++ b/arch/arm64/kernel/pi/idreg-override.c
+@@ -83,6 +83,15 @@ static bool __init mmfr2_varange_filter(u64 val)
+ 		id_aa64mmfr0_override.val |=
+ 			(ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+ 		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
++
++		/*
++		 * Override PARange to 48 bits - the override will just be
++		 * ignored if the actual PARange is smaller, but this is
++		 * unlikely to be the case for LPA2 capable silicon.
++		 */
++		id_aa64mmfr0_override.val |=
++			ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
++		id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
+ 	}
+ #endif
+ 	return true;
+diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
+index f374a3e5a5fe10..e57b043f324b51 100644
+--- a/arch/arm64/kernel/pi/map_kernel.c
++++ b/arch/arm64/kernel/pi/map_kernel.c
+@@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
+ {
+ 	u64 sctlr = read_sysreg(sctlr_el1);
+ 	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
++	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
++	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
++							   ID_AA64MMFR0_EL1_PARANGE_SHIFT);
++
++	tcr &= ~TCR_IPS_MASK;
++	tcr |= parange << TCR_IPS_SHIFT;
+ 
+ 	asm("	msr	sctlr_el1, %0		;"
+ 	    "	isb				;"
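The set_ttbr0_for_lpa2() hunk is a plain read-modify-write of a register field: extract PARange from ID_AA64MMFR0_EL1, clear TCR_EL1.IPS, insert the new value. A hedged standalone sketch of that extract/insert pattern (field positions hard-coded for illustration):

#include <stdio.h>
#include <stdint.h>

#define PARANGE_SHIFT	0	/* ID_AA64MMFR0_EL1.PARange is bits [3:0] */
#define IPS_SHIFT	32	/* TCR_EL1.IPS is bits [34:32] */
#define IPS_MASK	(UINT64_C(0x7) << IPS_SHIFT)

static uint64_t extract_field(uint64_t reg, int shift, int width)
{
	return (reg >> shift) & ((UINT64_C(1) << width) - 1);
}

int main(void)
{
	uint64_t mmfr0 = 0x5;		/* PARange = 0b0101 -> 48 bits */
	uint64_t tcr = IPS_MASK;	/* pretend IPS holds a stale value */
	uint64_t parange = extract_field(mmfr0, PARANGE_SHIFT, 4);

	tcr &= ~IPS_MASK;		/* clear, then re-insert */
	tcr |= parange << IPS_SHIFT;
	printf("tcr = %#llx\n", (unsigned long long)tcr);
	return 0;
}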
+diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
+index 1215df59041856..754914d9ec6835 100644
+--- a/arch/arm64/kvm/arch_timer.c
++++ b/arch/arm64/kvm/arch_timer.c
+@@ -466,10 +466,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
+ 
+ 	trace_kvm_timer_emulate(ctx, should_fire);
+ 
+-	if (should_fire != ctx->irq.level) {
++	if (should_fire != ctx->irq.level)
+ 		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
+-		return;
+-	}
+ 
+ 	/*
+ 	 * If the timer can fire now, we don't need to have a soft timer
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index a102c3aebdbc41..7b2735ad32e911 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1990,8 +1990,7 @@ static int kvm_init_vector_slots(void)
+ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ {
+ 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
+-	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+-	unsigned long tcr;
++	unsigned long tcr, ips;
+ 
+ 	/*
+ 	 * Calculate the raw per-cpu offset without a translation from the
+@@ -2005,6 +2004,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ 	params->mair_el2 = read_sysreg(mair_el1);
+ 
+ 	tcr = read_sysreg(tcr_el1);
++	ips = FIELD_GET(TCR_IPS_MASK, tcr);
+ 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+ 		tcr |= TCR_EPD1_MASK;
+ 	} else {
+@@ -2014,8 +2014,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
+ 	tcr &= ~TCR_T0SZ_MASK;
+ 	tcr |= TCR_T0SZ(hyp_va_bits);
+ 	tcr &= ~TCR_EL2_PS_MASK;
+-	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
+-	if (kvm_lpa2_is_enabled())
++	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
++	if (lpa2_is_enabled())
+ 		tcr |= TCR_EL2_DS;
+ 	params->tcr_el2 = tcr;
+ 
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 3215adf48a1b6c..98a2a0e64e2558 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -519,6 +519,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ 
+ static int __init hugetlbpage_init(void)
+ {
++	/*
++	 * HugeTLB pages are supported at a maximum of four page table
++	 * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base page
++	 * size, corresponding to the hugetlb_add_hstate() calls here.
++	 *
++	 * HUGE_MAX_HSTATE should at least match the maximum number of
++	 * supported HugeTLB page sizes on the platform; any newly
++	 * supported page size will require HUGE_MAX_HSTATE to grow as
++	 * well.
++	 */
++	BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
+ 	if (pud_sect_supported())
+ 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ 
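BUILD_BUG_ON() turns the HUGE_MAX_HSTATE invariant into a build failure rather than a runtime surprise. In plain C11 the equivalent check can be written with _Static_assert; a minimal sketch under that assumption:

#define HUGE_MAX_HSTATE 4	/* hypothetical config value */

/* Four hstates: PUD, CONT PMD, PMD, CONT PTE. */
_Static_assert(HUGE_MAX_HSTATE >= 4,
	       "HUGE_MAX_HSTATE too small for all supported hugepage sizes");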
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index ccdef53872a0bf..9c0b8d9558fc41 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -279,7 +279,12 @@ void __init arm64_memblock_init(void)
+ 
+ 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ 		extern u16 memstart_offset_seed;
+-		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
++
++		/*
++		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
++		 * map randomization can be enabled by shrinking the IPA space.
++		 */
++		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ 		int parange = cpuid_feature_extract_unsigned_field(
+ 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ 		s64 range = linear_region_size -
+diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
+index ac915f84165053..aafb3cd9e943e5 100644
+--- a/arch/loongarch/include/uapi/asm/ptrace.h
++++ b/arch/loongarch/include/uapi/asm/ptrace.h
+@@ -72,6 +72,16 @@ struct user_watch_state {
+ 	} dbg_regs[8];
+ };
+ 
++struct user_watch_state_v2 {
++	uint64_t dbg_info;
++	struct {
++		uint64_t    addr;
++		uint64_t    mask;
++		uint32_t    ctrl;
++		uint32_t    pad;
++	} dbg_regs[14];
++};
++
+ #define PTRACE_SYSEMU			0x1f
+ #define PTRACE_SYSEMU_SINGLESTEP	0x20
+ 
+diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
+index 19dc6eff45ccc8..5e2402cfcab0a1 100644
+--- a/arch/loongarch/kernel/ptrace.c
++++ b/arch/loongarch/kernel/ptrace.c
+@@ -720,7 +720,7 @@ static int hw_break_set(struct task_struct *target,
+ 	unsigned int note_type = regset->core_note_type;
+ 
+ 	/* Resource info */
+-	offset = offsetof(struct user_watch_state, dbg_regs);
++	offset = offsetof(struct user_watch_state_v2, dbg_regs);
+ 	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
+ 
+ 	/* (address, mask, ctrl) registers */
+@@ -920,7 +920,7 @@ static const struct user_regset loongarch64_regsets[] = {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ 	[REGSET_HW_BREAK] = {
+ 		.core_note_type = NT_LOONGARCH_HW_BREAK,
+-		.n = sizeof(struct user_watch_state) / sizeof(u32),
++		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.regset_get = hw_break_get,
+@@ -928,7 +928,7 @@ static const struct user_regset loongarch64_regsets[] = {
+ 	},
+ 	[REGSET_HW_WATCH] = {
+ 		.core_note_type = NT_LOONGARCH_HW_WATCH,
+-		.n = sizeof(struct user_watch_state) / sizeof(u32),
++		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.regset_get = hw_break_get,
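The .n fields count u32 words in the exported struct, so switching the sizeof() to user_watch_state_v2 resizes both regsets automatically. A standalone mirror of the two layouts confirms the arithmetic (userspace re-declaration, not the uapi header):

#include <stdio.h>
#include <stdint.h>

struct user_watch_state {
	uint64_t dbg_info;
	struct { uint64_t addr; uint64_t mask; uint32_t ctrl; uint32_t pad; } dbg_regs[8];
};

struct user_watch_state_v2 {
	uint64_t dbg_info;
	struct { uint64_t addr; uint64_t mask; uint32_t ctrl; uint32_t pad; } dbg_regs[14];
};

int main(void)
{
	/* 8 + 8*24 = 200 bytes -> 50 words; 8 + 14*24 = 344 bytes -> 86 words */
	printf("v1 .n = %zu\n", sizeof(struct user_watch_state) / sizeof(uint32_t));
	printf("v2 .n = %zu\n", sizeof(struct user_watch_state_v2) / sizeof(uint32_t));
	return 0;
}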
+diff --git a/arch/m68k/include/asm/vga.h b/arch/m68k/include/asm/vga.h
+index 4742e6bc3ab8ea..cdd414fa8710a9 100644
+--- a/arch/m68k/include/asm/vga.h
++++ b/arch/m68k/include/asm/vga.h
+@@ -9,7 +9,7 @@
+  */
+ #ifndef CONFIG_PCI
+ 
+-#include <asm/raw_io.h>
++#include <asm/io.h>
+ #include <asm/kmap.h>
+ 
+ /*
+@@ -29,9 +29,9 @@
+ #define inw_p(port)		0
+ #define outb_p(port, val)	do { } while (0)
+ #define outw(port, val)		do { } while (0)
+-#define readb			raw_inb
+-#define writeb			raw_outb
+-#define writew			raw_outw
++#define readb			__raw_readb
++#define writeb			__raw_writeb
++#define writew			__raw_writew
+ 
+ #endif /* CONFIG_PCI */
+ #endif /* _ASM_M68K_VGA_H */
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 467b10f4361aeb..5078ebf071ec07 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1084,7 +1084,6 @@ config CSRC_IOASIC
+ 
+ config CSRC_R4K
+ 	select CLOCKSOURCE_WATCHDOG if CPU_FREQ
+-	select HAVE_UNSTABLE_SCHED_CLOCK if SMP && 64BIT
+ 	bool
+ 
+ config CSRC_SB1250
+diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
+index 8c401e42301cbf..f39e85fd58fa99 100644
+--- a/arch/mips/kernel/ftrace.c
++++ b/arch/mips/kernel/ftrace.c
+@@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void)
+ #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
+ #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
+ 
+-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
++static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ 		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+ {
+ 	unsigned long sp, ip, tmp;
+diff --git a/arch/mips/loongson64/boardinfo.c b/arch/mips/loongson64/boardinfo.c
+index 280989c5a137b5..8bb275c93ac099 100644
+--- a/arch/mips/loongson64/boardinfo.c
++++ b/arch/mips/loongson64/boardinfo.c
+@@ -21,13 +21,11 @@ static ssize_t boardinfo_show(struct kobject *kobj,
+ 		       "BIOS Info\n"
+ 		       "Vendor\t\t\t: %s\n"
+ 		       "Version\t\t\t: %s\n"
+-		       "ROM Size\t\t: %d KB\n"
+ 		       "Release Date\t\t: %s\n",
+ 		       strsep(&tmp_board_manufacturer, "-"),
+ 		       eboard->name,
+ 		       strsep(&tmp_bios_vendor, "-"),
+ 		       einter->description,
+-		       einter->size,
+ 		       especial->special_name);
+ }
+ static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 265bc57819dfb5..c89e70df43d82b 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1660,7 +1660,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 		break;
+ 	}
+ 
+-	case 0x3:
++	case 0x7:
+ 		if (MIPSInst_FUNC(ir) != pfetch_op)
+ 			return SIGILL;
+ 
+diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
+index ec2567f8efd83b..66898fd182dc1f 100644
+--- a/arch/mips/pci/pci-legacy.c
++++ b/arch/mips/pci/pci-legacy.c
+@@ -29,6 +29,14 @@ static LIST_HEAD(controllers);
+ 
+ static int pci_initialized;
+ 
++unsigned long pci_address_to_pio(phys_addr_t address)
++{
++	if (address > IO_SPACE_LIMIT)
++		return (unsigned long)-1;
++
++	return (unsigned long) address;
++}
++
+ /*
+  * We need to avoid collisions with `mirrored' VGA ports
+  * and other strange ISA hardware, so we always want the
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index aa6a3cad275d91..fcc5973f75195a 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -60,8 +60,8 @@ config PARISC
+ 	select HAVE_ARCH_MMAP_RND_BITS
+ 	select HAVE_ARCH_AUDITSYSCALL
+ 	select HAVE_ARCH_HASH
+-	select HAVE_ARCH_JUMP_LABEL
+-	select HAVE_ARCH_JUMP_LABEL_RELATIVE
++	# select HAVE_ARCH_JUMP_LABEL
++	# select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ 	select HAVE_ARCH_KFENCE
+ 	select HAVE_ARCH_SECCOMP_FILTER
+ 	select HAVE_ARCH_TRACEHOOK
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 1893f66371fa43..b12ef382fec709 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
+ 
+ 	switch(rets[0]) {
+ 	case 0:
+-		result = EEH_STATE_MMIO_ACTIVE |
+-			 EEH_STATE_DMA_ACTIVE;
++		result = EEH_STATE_MMIO_ACTIVE	|
++			 EEH_STATE_DMA_ACTIVE	|
++			 EEH_STATE_MMIO_ENABLED	|
++			 EEH_STATE_DMA_ENABLED;
+ 		break;
+ 	case 1:
+ 		result = EEH_STATE_RESET_ACTIVE |
+diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
+index 4a6b0a8b6412f1..00a67464c44534 100644
+--- a/arch/s390/include/asm/asm-extable.h
++++ b/arch/s390/include/asm/asm-extable.h
+@@ -14,6 +14,7 @@
+ #define EX_TYPE_UA_LOAD_REG	5
+ #define EX_TYPE_UA_LOAD_REGPAIR	6
+ #define EX_TYPE_ZEROPAD		7
++#define EX_TYPE_FPC		8
+ 
+ #define EX_DATA_REG_ERR_SHIFT	0
+ #define EX_DATA_REG_ERR		GENMASK(3, 0)
+@@ -84,4 +85,7 @@
+ #define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr)		\
+ 	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+ 
++#define EX_TABLE_FPC(_fault, _target)					\
++	__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
++
+ #endif /* __ASM_EXTABLE_H */
+diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
+index c1e2e521d9af7c..a4c9b4db62ff57 100644
+--- a/arch/s390/include/asm/fpu-insn.h
++++ b/arch/s390/include/asm/fpu-insn.h
+@@ -100,19 +100,12 @@ static __always_inline void fpu_lfpc(unsigned int *fpc)
+  */
+ static inline void fpu_lfpc_safe(unsigned int *fpc)
+ {
+-	u32 tmp;
+-
+ 	instrument_read(fpc, sizeof(*fpc));
+-	asm volatile("\n"
+-		"0:	lfpc	%[fpc]\n"
+-		"1:	nopr	%%r7\n"
+-		".pushsection .fixup, \"ax\"\n"
+-		"2:	lghi	%[tmp],0\n"
+-		"	sfpc	%[tmp]\n"
+-		"	jg	1b\n"
+-		".popsection\n"
+-		EX_TABLE(1b, 2b)
+-		: [tmp] "=d" (tmp)
++	asm_inline volatile(
++		"	lfpc	%[fpc]\n"
++		"0:	nopr	%%r7\n"
++		EX_TABLE_FPC(0b, 0b)
++		:
+ 		: [fpc] "Q" (*fpc)
+ 		: "memory");
+ }
+diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
+index eaeaeb3ff0be3e..752a2310f0d6c1 100644
+--- a/arch/s390/include/asm/futex.h
++++ b/arch/s390/include/asm/futex.h
+@@ -44,7 +44,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ 		break;
+ 	case FUTEX_OP_ANDN:
+ 		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+-				  ret, oldval, newval, uaddr, oparg);
++				  ret, oldval, newval, uaddr, ~oparg);
+ 		break;
+ 	case FUTEX_OP_XOR:
+ 		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
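The futex fix is purely operand preparation: FUTEX_OP_ANDN means new = old & ~oparg, but the s390 template only emits an AND (nr), so oparg has to be inverted before it reaches the asm. A tiny standalone check of the identity with made-up values:

#include <assert.h>

int main(void)
{
	int oldval = 0xf0f0, oparg = 0x00ff;

	/* Broken: a plain AND keeps bits that ANDN must clear. */
	assert((oldval & oparg) == 0x00f0);
	/* Fixed: pre-inverting oparg turns the AND into an AND-NOT. */
	assert((oldval & ~oparg) == 0xf000);
	return 0;
}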
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 8761fd01a9f09f..4f8d5592c2981c 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -163,8 +163,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
+ 		"	la	%[addr],256(%[addr])\n"
+ 		"	brctg	%[tmp],0b\n"
+ 		"1:	stg	%[poison],0(%[addr])\n"
+-		"	larl	%[tmp],3f\n"
+-		"	ex	%[count],0(%[tmp])\n"
++		"	exrl	%[count],3f\n"
+ 		"	j	4f\n"
+ 		"2:	stg	%[poison],0(%[addr])\n"
+ 		"	j	4f\n"
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 377b9aaf8c9248..ff1ddba96352a1 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -52,7 +52,6 @@ SECTIONS
+ 		SOFTIRQENTRY_TEXT
+ 		FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+ 		*(.text.*_indirect_*)
+-		*(.fixup)
+ 		*(.gnu.warning)
+ 		. = ALIGN(PAGE_SIZE);
+ 		_etext = .;		/* End of text section */
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index a687695d8f68e7..513e608567ccc4 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -1362,8 +1362,14 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
+ 	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+ 	rcu_read_unlock();
+ 	if (page) {
+-		if (page_ref_inc_return(page) == 2)
+-			return page_to_virt(page);
++		if (page_ref_inc_return(page) == 2) {
++			if (page->index == addr)
++				return page_to_virt(page);
++			/*
++			 * We raced with someone reusing and putting this vsie
++			 * page before we grabbed it.
++			 */
++		}
+ 		page_ref_dec(page);
+ 	}
+ 
+@@ -1393,15 +1399,20 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
+ 			kvm->arch.vsie.next++;
+ 			kvm->arch.vsie.next %= nr_vcpus;
+ 		}
+-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
++		if (page->index != ULONG_MAX)
++			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
++					  page->index >> 9);
+ 	}
+-	page->index = addr;
+-	/* double use of the same address */
++	/* Mark it as invalid until it resides in the tree. */
++	page->index = ULONG_MAX;
++
++	/* Double use of the same address or allocation failure. */
+ 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
+ 		page_ref_dec(page);
+ 		mutex_unlock(&kvm->arch.vsie.mutex);
+ 		return NULL;
+ 	}
++	page->index = addr;
+ 	mutex_unlock(&kvm->arch.vsie.mutex);
+ 
+ 	vsie_page = page_to_virt(page);
+@@ -1496,7 +1507,9 @@ void kvm_s390_vsie_destroy(struct kvm *kvm)
+ 		vsie_page = page_to_virt(page);
+ 		release_gmap_shadow(vsie_page);
+ 		/* free the radix tree entry */
+-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
++		if (page->index != ULONG_MAX)
++			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
++					  page->index >> 9);
+ 		__free_page(page);
+ 	}
+ 	kvm->arch.vsie.page_count = 0;
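The vsie change follows an "invalid until published" discipline: stamp the object with a sentinel before it enters the lookup structure, and store the real key only after insertion succeeds, so a racing lookup that wins a reference can detect a recycled page. A hedged generic sketch of that pattern (tree_insert() is a stub standing in for radix_tree_insert()):

#include <limits.h>

struct entry {
	unsigned long index;	/* ULONG_MAX means "not in the tree" */
};

/* Stub stand-in for radix_tree_insert(); returns 0 on success. */
static int tree_insert(unsigned long key, struct entry *e)
{
	(void)key; (void)e;
	return 0;
}

static int publish(struct entry *e, unsigned long addr)
{
	e->index = ULONG_MAX;		/* invalid until it resides in the tree */
	if (tree_insert(addr, e))
		return -1;		/* double use or allocation failure */
	e->index = addr;		/* lookups may now trust the key */
	return 0;
}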
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 0a0738a473af05..812ec5be129169 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -77,6 +77,13 @@ static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt
+ 	return true;
+ }
+ 
++static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
++{
++	asm volatile("sfpc	%[val]\n" : : [val] "d" (0));
++	regs->psw.addr = extable_fixup(ex);
++	return true;
++}
++
+ bool fixup_exception(struct pt_regs *regs)
+ {
+ 	const struct exception_table_entry *ex;
+@@ -99,6 +106,8 @@ bool fixup_exception(struct pt_regs *regs)
+ 		return ex_handler_ua_load_reg(ex, true, regs);
+ 	case EX_TYPE_ZEROPAD:
+ 		return ex_handler_zeropad(ex, regs);
++	case EX_TYPE_FPC:
++		return ex_handler_fpc(ex, regs);
+ 	}
+ 	panic("invalid exception table entry");
+ }
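ex_handler_fpc() slots into the existing dispatch in fixup_exception(): each extable entry carries a type, and the handler for that type performs its recovery (here, resetting the FPC via sfpc) before execution resumes at the fixup address. A generic sketch of the dispatch shape, with an invented entry layout:

struct extable_entry {
	unsigned long fault_ip;
	unsigned long fixup_ip;
	int type;
};

enum { EX_TYPE_DEFAULT, EX_TYPE_FPC };

/* Returns the address execution should resume at, or 0 if unhandled. */
static unsigned long fixup(const struct extable_entry *ex)
{
	switch (ex->type) {
	case EX_TYPE_DEFAULT:
		return ex->fixup_ip;
	case EX_TYPE_FPC:
		/* reset the floating-point control word, then resume */
		/* (the real handler issues "sfpc 0" here) */
		return ex->fixup_ip;
	}
	return 0;
}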
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index d5ace00d10f042..857afbc4828f0c 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
+ static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
+ {
+ 	return !s390_pci_no_rid && zdev->rid_available &&
+-		zpci_is_device_configured(zdev) &&
+ 		!zdev->vfn;
+ }
+ 
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index f2051644de9432..606c74f274593e 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -25,6 +25,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
+ # avoid errors with '-march=i386', and future flags may depend on the target to
+ # be valid.
+ KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
++KBUILD_CFLAGS += -std=gnu11
+ KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+ KBUILD_CFLAGS += -Wundef
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
+index ae5482a2f0ca0e..ccb8ff37fa9d4b 100644
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -16,6 +16,7 @@
+ # define PAGES_NR		4
+ #endif
+ 
++# define KEXEC_CONTROL_PAGE_SIZE	4096
+ # define KEXEC_CONTROL_CODE_MAX_SIZE	2048
+ 
+ #ifndef __ASSEMBLY__
+@@ -43,7 +44,6 @@ struct kimage;
+ /* Maximum address we can use for the control code buffer */
+ # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+ 
+-# define KEXEC_CONTROL_PAGE_SIZE	4096
+ 
+ /* The native architecture */
+ # define KEXEC_ARCH KEXEC_ARCH_386
+@@ -58,9 +58,6 @@ struct kimage;
+ /* Maximum address we can use for the control pages */
+ # define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)
+ 
+-/* Allocate one page for the pdp and the second for the code */
+-# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)
+-
+ /* The native architecture */
+ # define KEXEC_ARCH KEXEC_ARCH_X86_64
+ #endif
+@@ -145,6 +142,19 @@ struct kimage_arch {
+ };
+ #else
+ struct kimage_arch {
++	/*
++	 * This is a kimage control page, as it must not overlap with either
++	 * source or destination address ranges.
++	 */
++	pgd_t *pgd;
++	/*
++	 * The virtual mapping of the control code page itself is used only
++	 * during the transition, while the current kernel's pages are all
++	 * in place. Thus the intermediate page table pages used to map it
++	 * are not control pages, but instead just normal pages obtained
++	 * are not control pages, but just normal pages obtained with
++	 * get_zeroed_page(), and they have to be tracked (below) so that
++	 * they can be freed.
+ 	p4d_t *p4d;
+ 	pud_t *pud;
+ 	pmd_t *pmd;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 5aa50dfe01042a..c1043f8c9b0312 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -27,6 +27,7 @@
+ #include <linux/hyperv.h>
+ #include <linux/kfifo.h>
+ #include <linux/sched/vhost_task.h>
++#include <linux/call_once.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/pvclock-abi.h>
+@@ -1445,6 +1446,7 @@ struct kvm_arch {
+ 	struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
+ 	struct vhost_task *nx_huge_page_recovery_thread;
+ 	u64 nx_huge_page_last;
++	struct once nx_once;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* The number of TDP MMU pages across all roots. */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 3a44a9dc3fb7ae..18485170d51b4a 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -226,6 +226,28 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ 	return 0;
+ }
+ 
++static int __init
++acpi_check_lapic(union acpi_subtable_headers *header, const unsigned long end)
++{
++	struct acpi_madt_local_apic *processor = NULL;
++
++	processor = (struct acpi_madt_local_apic *)header;
++
++	if (BAD_MADT_ENTRY(processor, end))
++		return -EINVAL;
++
++	/* Ignore invalid ID */
++	if (processor->id == 0xff)
++		return 0;
++
++	/* Ignore processors that can not be onlined */
++	if (!acpi_is_processor_usable(processor->lapic_flags))
++		return 0;
++
++	has_lapic_cpus = true;
++	return 0;
++}
++
+ static int __init
+ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ {
+@@ -257,7 +279,6 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ 			       processor->processor_id, /* ACPI ID */
+ 			       processor->lapic_flags & ACPI_MADT_ENABLED);
+ 
+-	has_lapic_cpus = true;
+ 	return 0;
+ }
+ 
+@@ -1029,6 +1050,8 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
+ static int __init acpi_parse_madt_lapic_entries(void)
+ {
+ 	int count, x2count = 0;
++	struct acpi_subtable_proc madt_proc[2];
++	int ret;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_APIC))
+ 		return -ENODEV;
+@@ -1037,10 +1060,27 @@ static int __init acpi_parse_madt_lapic_entries(void)
+ 				      acpi_parse_sapic, MAX_LOCAL_APIC);
+ 
+ 	if (!count) {
+-		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+-					acpi_parse_lapic, MAX_LOCAL_APIC);
+-		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+-					acpi_parse_x2apic, MAX_LOCAL_APIC);
++		/* Check if there are valid LAPIC entries */
++		acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_check_lapic, MAX_LOCAL_APIC);
++
++		/*
++		 * Enumerate the APIC IDs in the order that they appear in the
++		 * MADT, no matter LAPIC entry or x2APIC entry is used.
++		 */
++		memset(madt_proc, 0, sizeof(madt_proc));
++		madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
++		madt_proc[0].handler = acpi_parse_lapic;
++		madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
++		madt_proc[1].handler = acpi_parse_x2apic;
++		ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
++				sizeof(struct acpi_table_madt),
++				madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
++		if (ret < 0) {
++			pr_err("Error parsing LAPIC/X2APIC entries\n");
++			return ret;
++		}
++		count = madt_proc[0].count;
++		x2count = madt_proc[1].count;
+ 	}
+ 	if (!count && !x2count) {
+ 		pr_err("No LAPIC entries present\n");
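The rework matters for ordering: two separate acpi_table_parse_madt() passes enumerate every LAPIC entry before any x2APIC entry, while acpi_table_parse_entries_array() walks the MADT once and dispatches each subtable to its handler, preserving firmware order. A hedged sketch of that single-pass dispatch:

struct subtable { int id; };
struct proc { int id; int (*handler)(struct subtable *); int count; };

/* One pass over the table keeps entries in firmware order. */
static void parse_in_order(struct subtable *tbl, int n,
			   struct proc *procs, int nprocs)
{
	for (int i = 0; i < n; i++)
		for (int j = 0; j < nprocs; j++)
			if (tbl[i].id == procs[j].id) {
				procs[j].handler(&tbl[i]);
				procs[j].count++;
				break;
			}
}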
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 9fe9972d2071b9..37b8244899d895 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -582,6 +582,10 @@ static __init void fix_erratum_688(void)
+ 
+ static __init int init_amd_nbs(void)
+ {
++	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
++	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
++		return 0;
++
+ 	amd_cache_northbridges();
+ 	amd_cache_gart();
+ 
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 9c9ac606893e99..7223c38a8708fc 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -146,7 +146,8 @@ static void free_transition_pgtable(struct kimage *image)
+ 	image->arch.pte = NULL;
+ }
+ 
+-static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
++static int init_transition_pgtable(struct kimage *image, pgd_t *pgd,
++				   unsigned long control_page)
+ {
+ 	pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
+ 	unsigned long vaddr, paddr;
+@@ -157,7 +158,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+ 	pte_t *pte;
+ 
+ 	vaddr = (unsigned long)relocate_kernel;
+-	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
++	paddr = control_page;
+ 	pgd += pgd_index(vaddr);
+ 	if (!pgd_present(*pgd)) {
+ 		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
+@@ -216,7 +217,7 @@ static void *alloc_pgt_page(void *data)
+ 	return p;
+ }
+ 
+-static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
++static int init_pgtable(struct kimage *image, unsigned long control_page)
+ {
+ 	struct x86_mapping_info info = {
+ 		.alloc_pgt_page	= alloc_pgt_page,
+@@ -225,12 +226,12 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		.kernpg_flag	= _KERNPG_TABLE_NOENC,
+ 	};
+ 	unsigned long mstart, mend;
+-	pgd_t *level4p;
+ 	int result;
+ 	int i;
+ 
+-	level4p = (pgd_t *)__va(start_pgtable);
+-	clear_page(level4p);
++	image->arch.pgd = alloc_pgt_page(image);
++	if (!image->arch.pgd)
++		return -ENOMEM;
+ 
+ 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ 		info.page_flag   |= _PAGE_ENC;
+@@ -244,8 +245,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
+ 		mend   = pfn_mapped[i].end << PAGE_SHIFT;
+ 
+-		result = kernel_ident_mapping_init(&info,
+-						 level4p, mstart, mend);
++		result = kernel_ident_mapping_init(&info, image->arch.pgd,
++						   mstart, mend);
+ 		if (result)
+ 			return result;
+ 	}
+@@ -260,8 +261,8 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 		mstart = image->segment[i].mem;
+ 		mend   = mstart + image->segment[i].memsz;
+ 
+-		result = kernel_ident_mapping_init(&info,
+-						 level4p, mstart, mend);
++		result = kernel_ident_mapping_init(&info, image->arch.pgd,
++						   mstart, mend);
+ 
+ 		if (result)
+ 			return result;
+@@ -271,15 +272,19 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ 	 * Prepare EFI systab and ACPI tables for kexec kernel since they are
+ 	 * not covered by pfn_mapped.
+ 	 */
+-	result = map_efi_systab(&info, level4p);
++	result = map_efi_systab(&info, image->arch.pgd);
+ 	if (result)
+ 		return result;
+ 
+-	result = map_acpi_tables(&info, level4p);
++	result = map_acpi_tables(&info, image->arch.pgd);
+ 	if (result)
+ 		return result;
+ 
+-	return init_transition_pgtable(image, level4p);
++	/*
++	 * This must be last because the intermediate page table pages it
++	 * allocates will not be control pages and may overlap the image.
++	 */
++	return init_transition_pgtable(image, image->arch.pgd, control_page);
+ }
+ 
+ static void load_segments(void)
+@@ -296,14 +301,14 @@ static void load_segments(void)
+ 
+ int machine_kexec_prepare(struct kimage *image)
+ {
+-	unsigned long start_pgtable;
++	unsigned long control_page;
+ 	int result;
+ 
+ 	/* Calculate the offsets */
+-	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
++	control_page = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+ 
+ 	/* Setup the identity mapped 64bit page table */
+-	result = init_pgtable(image, start_pgtable);
++	result = init_pgtable(image, control_page);
+ 	if (result)
+ 		return result;
+ 
+@@ -357,13 +362,12 @@ void machine_kexec(struct kimage *image)
+ #endif
+ 	}
+ 
+-	control_page = page_address(image->control_code_page) + PAGE_SIZE;
++	control_page = page_address(image->control_code_page);
+ 	__memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+ 
+ 	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
+ 	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
+-	page_list[PA_TABLE_PAGE] =
+-	  (unsigned long)__pa(page_address(image->control_code_page));
++	page_list[PA_TABLE_PAGE] = (unsigned long)__pa(image->arch.pgd);
+ 
+ 	if (image->type == KEXEC_TYPE_DEFAULT)
+ 		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+@@ -573,8 +577,7 @@ static void kexec_mark_crashkres(bool protect)
+ 
+ 	/* Don't touch the control code page used in crash_kexec().*/
+ 	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
+-	/* Control code page is located in the 2nd page. */
+-	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
++	kexec_mark_range(crashk_res.start, control - 1, protect);
+ 	control += KEXEC_CONTROL_PAGE_SIZE;
+ 	kexec_mark_range(control, crashk_res.end, protect);
+ }
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index f63f8fd00a91f3..15507e739c255b 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -838,7 +838,7 @@ void __noreturn stop_this_cpu(void *dummy)
+ #ifdef CONFIG_SMP
+ 	if (smp_ops.stop_this_cpu) {
+ 		smp_ops.stop_this_cpu();
+-		unreachable();
++		BUG();
+ 	}
+ #endif
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 615922838c510b..dc1dd3f3e67fcd 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -883,7 +883,7 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 
+ 	if (smp_ops.stop_this_cpu) {
+ 		smp_ops.stop_this_cpu();
+-		unreachable();
++		BUG();
+ 	}
+ 
+ 	/* Assume hlt works */
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 39ae2f5f9866c3..d0913aceeae48f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -816,6 +816,17 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+ 	}
+ }
+ 
++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
++{
++	struct kvm_lapic *apic = vcpu->arch.apic;
++
++	if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
++		return;
++
++	kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
++}
++EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);
++
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
+ {
+ 	/* This may race with setting of irr in __apic_accept_irq() and
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 24add38beaf0ba..1a8553ebdb42f3 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -118,6 +118,7 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
+ int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
+ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
++void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
+ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+ 
+ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 2401606db2604b..74c45519309030 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -7090,6 +7090,19 @@ static void mmu_destroy_caches(void)
+ 	kmem_cache_destroy(mmu_page_header_cache);
+ }
+ 
++static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
++{
++	/*
++	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
++	 * may not be valid even though the VM is globally visible.  Do nothing,
++	 * as such a VM can't possibly have any NX huge pages yet.
++	 */
++	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
++
++	if (nx_thread)
++		vhost_task_wake(nx_thread);
++}
++
+ static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
+ {
+ 	if (nx_hugepage_mitigation_hard_disabled)
+@@ -7150,7 +7163,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+ 			kvm_mmu_zap_all_fast(kvm);
+ 			mutex_unlock(&kvm->slots_lock);
+ 
+-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
++			kvm_wake_nx_recovery_thread(kvm);
+ 		}
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7279,7 +7292,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
+ 		mutex_lock(&kvm_lock);
+ 
+ 		list_for_each_entry(kvm, &vm_list, vm_list)
+-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
++			kvm_wake_nx_recovery_thread(kvm);
+ 
+ 		mutex_unlock(&kvm_lock);
+ 	}
+@@ -7411,20 +7424,34 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
+ 	return true;
+ }
+ 
++static void kvm_mmu_start_lpage_recovery(struct once *once)
++{
++	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
++	struct kvm *kvm = container_of(ka, struct kvm, arch);
++	struct vhost_task *nx_thread;
++
++	kvm->arch.nx_huge_page_last = get_jiffies_64();
++	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
++				      kvm_nx_huge_page_recovery_worker_kill,
++				      kvm, "kvm-nx-lpage-recovery");
++
++	if (!nx_thread)
++		return;
++
++	vhost_task_start(nx_thread);
++
++	/* Make the task visible only once it is fully started. */
++	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
++}
++
+ int kvm_mmu_post_init_vm(struct kvm *kvm)
+ {
+ 	if (nx_hugepage_mitigation_hard_disabled)
+ 		return 0;
+ 
+-	kvm->arch.nx_huge_page_last = get_jiffies_64();
+-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
+-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
+-		kvm, "kvm-nx-lpage-recovery");
+-
++	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
+ 	if (!kvm->arch.nx_huge_page_recovery_thread)
+ 		return -ENOMEM;
+-
+-	vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 943bd074a5d372..fe6cc763fd5189 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -3820,7 +3820,7 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
+ 		goto next_range;
+ 	}
+ 
+-	unreachable();
++	BUG();
+ }
+ 
+ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index aa78b6f38dfefd..103baa8e4cf8de 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5050,6 +5050,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ 	}
+ 
++	if (vmx->nested.update_vmcs01_hwapic_isr) {
++		vmx->nested.update_vmcs01_hwapic_isr = false;
++		kvm_apic_update_hwapic_isr(vcpu);
++	}
++
+ 	if ((vm_exit_reason != -1) &&
+ 	    (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)))
+ 		vmx->nested.need_vmcs12_to_shadow_sync = true;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 22cb11ab87090d..01abcdcbbf70ab 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6867,6 +6867,27 @@ void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+ 	u16 status;
+ 	u8 old;
+ 
++	/*
++	 * If L2 is active, defer the SVI update until vmcs01 is loaded, as
++	 * SVI is relevant if and only if Virtual Interrupt Delivery is
++	 * enabled in vmcs12, and if VID is enabled then L2 EOIs affect L2's
++	 * vAPIC, not L1's vAPIC.  KVM must update vmcs01 on the next nested
++	 * VM-Exit, otherwise L1 will run with a stale SVI.
++	 */
++	if (is_guest_mode(vcpu)) {
++		/*
++		 * KVM is supposed to forward intercepted L2 EOIs to L1 if VID
++		 * is enabled in vmcs12; as above, the EOIs affect L2's vAPIC.
++		 * Note, userspace can stuff state while L2 is active; only
++		 * assert that VID is disabled when the vCPU is in KVM_RUN, to
++		 * avoid false positives while userspace is setting APIC state.
++		 */
++		WARN_ON_ONCE(vcpu->wants_to_run &&
++			     nested_cpu_has_vid(get_vmcs12(vcpu)));
++		to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
++		return;
++	}
++
+ 	if (max_isr == -1)
+ 		max_isr = 0;
+ 
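The SVI deferral is a standard two-part pattern: while the nested context is active, record that an update is owed and return; on nested VM-Exit, test-and-clear the flag and replay the update against the now-current state. A minimal generic sketch (names invented, not KVM code):

#include <stdbool.h>

struct vcpu {
	bool in_guest_mode;
	bool pending_isr_update;
	int isr;		/* stand-in for the SVI state */
};

static void apply_isr(struct vcpu *v, int isr) { v->isr = isr; }

static void isr_update(struct vcpu *v, int isr)
{
	if (v->in_guest_mode) {
		v->pending_isr_update = true;	/* defer until vmcs01 is loaded */
		return;
	}
	apply_isr(v, isr);
}

static void on_nested_exit(struct vcpu *v, int current_isr)
{
	if (v->pending_isr_update) {
		v->pending_isr_update = false;
		apply_isr(v, current_isr);	/* replay the deferred update */
	}
}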
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 43f573f6ca46a3..892302022094a3 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -176,6 +176,7 @@ struct nested_vmx {
+ 	bool reload_vmcs01_apic_access_page;
+ 	bool update_vmcs01_cpu_dirty_logging;
+ 	bool update_vmcs01_apicv_status;
++	bool update_vmcs01_hwapic_isr;
+ 
+ 	/*
+ 	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c79a8cc57ba42d..23bf088fc4ae1e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11463,6 +11463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 	struct kvm_run *kvm_run = vcpu->run;
+ 	int r;
+ 
++	r = kvm_mmu_post_init_vm(vcpu->kvm);
++	if (r)
++		return r;
++
+ 	vcpu_load(vcpu);
+ 	kvm_sigset_activate(vcpu);
+ 	kvm_run->flags = 0;
+@@ -12742,7 +12746,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 
+ int kvm_arch_post_init_vm(struct kvm *kvm)
+ {
+-	return kvm_mmu_post_init_vm(kvm);
++	once_init(&kvm->arch.nx_once);
++	return 0;
+ }
+ 
+ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index e6c469b323ccb7..ac52255fab01f4 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -678,7 +678,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ 			      ASM_CALL_ARG3,
+ 			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
+ 
+-		unreachable();
++		BUG();
+ 	}
+ #endif
+ 
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 0681ecfe34300e..f348a3179b2dbd 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -1010,4 +1010,34 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
+ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
+ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
++
++/*
++ * Putting PCIe root ports on Ryzen SoCs with USB4 controllers into D3hot
++ * may cause problems when the system attempts to wake up from s2idle.
++ *
++ * On the TUXEDO Sirius 16 Gen 1 with a specific old BIOS this manifests as
++ * a system hang.
++ */
++static const struct dmi_system_id quirk_tuxeo_rp_d3_dmi_table[] = {
++	{
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
++			DMI_EXACT_MATCH(DMI_BOARD_NAME, "APX958"),
++			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "V1.00A00_20240108"),
++		},
++	},
++	{}
++};
++
++static void quirk_tuxeo_rp_d3(struct pci_dev *pdev)
++{
++	struct pci_dev *root_pdev;
++
++	if (dmi_check_system(quirk_tuxeo_rp_d3_dmi_table)) {
++		root_pdev = pcie_find_root_port(pdev);
++		if (root_pdev)
++			root_pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
++	}
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1502, quirk_tuxeo_rp_d3);
+ #endif /* CONFIG_SUSPEND */
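quirk_tuxeo_rp_d3() is the usual DMI-gated fixup: the quirk fires only when every listed field matches exactly, which is how it stays scoped to one board and one BIOS release. A trivial sketch of that all-fields-must-match check (shape only, not the kernel DMI API):

#include <string.h>

struct dmi_match { const char *vendor, *board, *bios; };

static const struct dmi_match sirius16 = {
	.vendor = "TUXEDO", .board = "APX958", .bios = "V1.00A00_20240108",
};

/* All three fields must match exactly for the quirk to fire. */
static int dmi_matches(const struct dmi_match *m, const char *vendor,
		       const char *board, const char *bios)
{
	return !strcmp(m->vendor, vendor) &&
	       !strcmp(m->board, board) &&
	       !strcmp(m->bios, bios);
}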
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index 846bf49f2508da..553f330198f2f3 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -561,6 +561,11 @@ int __init efi_reuse_config(u64 tables, int nr_tables)
+ 
+ 		if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
+ 			((efi_config_table_64_t *)p)->table = data->smbios;
++
++		/* Do not bother to play with mem attr table across kexec */
++		if (!efi_guidcmp(guid, EFI_MEMORY_ATTRIBUTES_TABLE_GUID))
++			((efi_config_table_64_t *)p)->table = EFI_INVALID_TABLE_ADDR;
++
+ 		p += sz;
+ 	}
+ 	early_memunmap(tablep, nr_tables * sz);
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 9252652afe5964..4e481b0eefc96d 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -117,8 +117,8 @@ SYM_FUNC_START(xen_hypercall_hvm)
+ 	pop %ebx
+ 	pop %eax
+ #else
+-	lea xen_hypercall_amd(%rip), %rbx
+-	cmp %rax, %rbx
++	lea xen_hypercall_amd(%rip), %rcx
++	cmp %rax, %rcx
+ #ifdef CONFIG_FRAME_POINTER
+ 	pop %rax	/* Dummy pop. */
+ #endif
+@@ -132,6 +132,7 @@ SYM_FUNC_START(xen_hypercall_hvm)
+ 	pop %rcx
+ 	pop %rax
+ #endif
++	FRAME_END
+ 	/* Use correct hypercall function. */
+ 	jz xen_hypercall_amd
+ 	jmp xen_hypercall_intel
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 45a395862fbc88..f1cf7f2909f3a7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1138,6 +1138,7 @@ static void blkcg_fill_root_iostats(void)
+ 		blkg_iostat_set(&blkg->iostat.cur, &tmp);
+ 		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
+ 	}
++	class_dev_iter_exit(&iter);
+ }
+ 
+ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e828be777206bb..e09b455874bfd6 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -681,6 +681,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 	struct queue_sysfs_entry *entry = to_queue(attr);
+ 	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
+ 	struct request_queue *q = disk->queue;
++	unsigned int noio_flag;
+ 	ssize_t res;
+ 
+ 	if (!entry->store_limit && !entry->store)
+@@ -711,7 +712,9 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ 
+ 	mutex_lock(&q->sysfs_lock);
+ 	blk_mq_freeze_queue(q);
++	noio_flag = memalloc_noio_save();
+ 	res = entry->store(disk, page, length);
++	memalloc_noio_restore(noio_flag);
+ 	blk_mq_unfreeze_queue(q);
+ 	mutex_unlock(&q->sysfs_lock);
+ 	return res;
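Bracketing only the ->store() callback with memalloc_noio_save()/restore() makes every allocation inside it implicitly GFP_NOIO, so it cannot recurse into the queue that was just frozen. The pair nests like a stack; a hedged stand-in sketch of the scoping discipline:

/* Stand-ins for memalloc_noio_save()/restore(): a per-task flag word. */
static unsigned int task_flags;

static unsigned int noio_save(void)
{
	unsigned int old = task_flags;

	task_flags |= 1;	/* every allocation below is now "no I/O" */
	return old;
}

static void noio_restore(unsigned int old)
{
	task_flags = old;	/* restore the old flags so nesting unwinds */
}

static long store_locked(long (*store)(void))
{
	unsigned int flag = noio_save();
	long res = store();	/* callback may allocate; allocations are NOIO */

	noio_restore(flag);
	return res;
}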
+diff --git a/block/fops.c b/block/fops.c
+index 13a67940d0408d..43983be5a2b3b1 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -758,11 +758,12 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 		file_accessed(iocb->ki_filp);
+ 
+ 		ret = blkdev_direct_IO(iocb, to);
+-		if (ret >= 0) {
++		if (ret > 0) {
+ 			iocb->ki_pos += ret;
+ 			count -= ret;
+ 		}
+-		iov_iter_revert(to, count - iov_iter_count(to));
++		if (ret != -EIOCBQUEUED)
++			iov_iter_revert(to, count - iov_iter_count(to));
+ 		if (ret < 0 || !count)
+ 			goto reexpand;
+ 	}
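The fops.c fix hinges on -EIOCBQUEUED being a hand-off, not an error: once the AIO is queued the iterator belongs to the in-flight request, and rewinding it would corrupt what the completion path sees. A small stubbed sketch of the corrected control flow:

#include <errno.h>

/* Stubs standing in for blkdev_direct_IO() and iov_iter_revert(). */
static long queue_dio(void)	{ return -EIOCBQUEUED; }
static void revert_iter(long n)	{ (void)n; }

static long read_path(long count)
{
	long ret = queue_dio();

	if (ret > 0)			/* synchronous: account what was read */
		count -= ret;
	if (ret != -EIOCBQUEUED)	/* async hand-off: iterator is owned
					 * by the in-flight request now */
		revert_iter(count);
	return ret;
}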
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index ca2bf47ce2484a..0c4a82271c26df 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
+ 	if (ivpu_fw_is_cold_boot(vdev)) {
+ 		ret = ivpu_pm_dct_init(vdev);
+ 		if (ret)
+-			goto err_diagnose_failure;
++			goto err_disable_ipc;
+ 
+ 		ret = ivpu_hw_sched_init(vdev);
+ 		if (ret)
+-			goto err_diagnose_failure;
++			goto err_disable_ipc;
+ 	}
+ 
+ 	return 0;
+ 
++err_disable_ipc:
++	ivpu_ipc_disable(vdev);
++	ivpu_hw_irq_disable(vdev);
++	disable_irq(vdev->irq);
+ err_diagnose_failure:
+ 	ivpu_hw_diagnose_failure(vdev);
+ 	ivpu_mmu_evtq_dump(vdev);
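
[The ivpu_drv.c hunk above redirects the late failure paths to a new label that tears down IPC and IRQs before the existing diagnostics run. A compilable sketch of the general goto-ladder unwind idiom (step_a/step_b/step_c and the undo_* helpers are illustrative placeholders):

    #include <stdio.h>

    /* Hypothetical init steps; each later failure must undo the earlier steps. */
    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }  /* pretend the last step fails */
    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    static int boot(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    goto err;
            ret = step_b();
            if (ret)
                    goto err_undo_a;
            ret = step_c();
            if (ret)
                    goto err_undo_b;  /* jump to the label that undoes all prior steps */
            return 0;

    err_undo_b:
            undo_b();
    err_undo_a:
            undo_a();
    err:
            return ret;
    }

    int main(void)
    {
            printf("boot() = %d\n", boot());
            return 0;
    }

The fix follows the same pattern: a failure after more state has been set up must enter the ladder at a label that unwinds that extra state, then fall through the rest.]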
+diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
+index 949f4233946c63..5060c5dd40d1fc 100644
+--- a/drivers/accel/ivpu/ivpu_pm.c
++++ b/drivers/accel/ivpu/ivpu_pm.c
+@@ -78,8 +78,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ 	int ret;
+ 
+ retry:
+-	pci_restore_state(to_pci_dev(vdev->drm.dev));
+ 	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
++	pci_restore_state(to_pci_dev(vdev->drm.dev));
+ 
+ 	ret = ivpu_hw_power_up(vdev);
+ 	if (ret) {
+@@ -115,41 +115,57 @@ static int ivpu_resume(struct ivpu_device *vdev)
+ 	return ret;
+ }
+ 
+-static void ivpu_pm_recovery_work(struct work_struct *work)
++static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
+ {
+-	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+-	struct ivpu_device *vdev = pm->vdev;
+-	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+-	int ret;
+-
+-	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+-
+-	ret = pm_runtime_resume_and_get(vdev->drm.dev);
+-	if (ret)
+-		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+-
+-	ivpu_jsm_state_dump(vdev);
+-	ivpu_dev_coredump(vdev);
++	pm_runtime_disable(vdev->drm.dev);
+ 
+ 	atomic_inc(&vdev->pm->reset_counter);
+ 	atomic_set(&vdev->pm->reset_pending, 1);
+ 	down_write(&vdev->pm->reset_lock);
++}
++
++static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
++{
++	int ret;
+ 
+-	ivpu_suspend(vdev);
+ 	ivpu_pm_prepare_cold_boot(vdev);
+ 	ivpu_jobs_abort_all(vdev);
+ 	ivpu_ms_cleanup_all(vdev);
+ 
+ 	ret = ivpu_resume(vdev);
+-	if (ret)
++	if (ret) {
+ 		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
++		pm_runtime_set_suspended(vdev->drm.dev);
++	} else {
++		pm_runtime_set_active(vdev->drm.dev);
++	}
+ 
+ 	up_write(&vdev->pm->reset_lock);
+ 	atomic_set(&vdev->pm->reset_pending, 0);
+ 
+-	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ 	pm_runtime_mark_last_busy(vdev->drm.dev);
+-	pm_runtime_put_autosuspend(vdev->drm.dev);
++	pm_runtime_enable(vdev->drm.dev);
++}
++
++static void ivpu_pm_recovery_work(struct work_struct *work)
++{
++	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
++	struct ivpu_device *vdev = pm->vdev;
++	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
++
++	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
++
++	ivpu_pm_reset_begin(vdev);
++
++	if (!pm_runtime_status_suspended(vdev->drm.dev)) {
++		ivpu_jsm_state_dump(vdev);
++		ivpu_dev_coredump(vdev);
++		ivpu_suspend(vdev);
++	}
++
++	ivpu_pm_reset_complete(vdev);
++
++	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ }
+ 
+ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
+@@ -309,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
+ 	int ret;
+ 
+ 	ret = pm_runtime_resume_and_get(vdev->drm.dev);
+-	drm_WARN_ON(&vdev->drm, ret < 0);
++	if (ret < 0) {
++		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
++		pm_runtime_set_suspended(vdev->drm.dev);
++	}
+ 
+ 	return ret;
+ }
+@@ -325,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+ 	struct ivpu_device *vdev = pci_get_drvdata(pdev);
+ 
+ 	ivpu_dbg(vdev, PM, "Pre-reset..\n");
+-	atomic_inc(&vdev->pm->reset_counter);
+-	atomic_set(&vdev->pm->reset_pending, 1);
+ 
+-	pm_runtime_get_sync(vdev->drm.dev);
+-	down_write(&vdev->pm->reset_lock);
+-	ivpu_prepare_for_reset(vdev);
+-	ivpu_hw_reset(vdev);
+-	ivpu_pm_prepare_cold_boot(vdev);
+-	ivpu_jobs_abort_all(vdev);
+-	ivpu_ms_cleanup_all(vdev);
++	ivpu_pm_reset_begin(vdev);
++
++	if (!pm_runtime_status_suspended(vdev->drm.dev)) {
++		ivpu_prepare_for_reset(vdev);
++		ivpu_hw_reset(vdev);
++	}
+ 
+ 	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
+ }
+@@ -342,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
+ {
+ 	struct ivpu_device *vdev = pci_get_drvdata(pdev);
+-	int ret;
+ 
+ 	ivpu_dbg(vdev, PM, "Post-reset..\n");
+-	ret = ivpu_resume(vdev);
+-	if (ret)
+-		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+-	up_write(&vdev->pm->reset_lock);
+-	atomic_set(&vdev->pm->reset_pending, 0);
+-	ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ 
+-	pm_runtime_mark_last_busy(vdev->drm.dev);
+-	pm_runtime_put_autosuspend(vdev->drm.dev);
++	ivpu_pm_reset_complete(vdev);
++
++	ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ }
+ 
+ void ivpu_pm_init(struct ivpu_device *vdev)
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 07789f0b59bcda..b7277249465567 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
+ static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+ static atomic_t ghes_estatus_cache_alloced;
+ 
+-static int ghes_panic_timeout __read_mostly = 30;
+-
+ static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
+ {
+ 	phys_addr_t paddr;
+@@ -983,14 +981,16 @@ static void __ghes_panic(struct ghes *ghes,
+ 			 struct acpi_hest_generic_status *estatus,
+ 			 u64 buf_paddr, enum fixed_addresses fixmap_idx)
+ {
++	const char *msg = GHES_PFX "Fatal hardware error";
++
+ 	__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ 
+ 	ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
+ 
+-	/* reboot to log the error! */
+ 	if (!panic_timeout)
+-		panic_timeout = ghes_panic_timeout;
+-	panic("Fatal hardware error!");
++		pr_emerg("%s but panic disabled\n", msg);
++
++	panic(msg);
+ }
+ 
+ static int ghes_proc(struct ghes *ghes)
+diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
+index 747f83f7114d29..e549914a636c66 100644
+--- a/drivers/acpi/prmt.c
++++ b/drivers/acpi/prmt.c
+@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
+ 		if (!handler || !module)
+ 			goto invalid_guid;
+ 
+-		if (!handler->handler_addr ||
+-		    !handler->static_data_buffer_addr ||
+-		    !handler->acpi_param_buffer_addr) {
++		if (!handler->handler_addr) {
+ 			buffer->prm_status = PRM_HANDLER_ERROR;
+ 			return AE_OK;
+ 		}
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 80a52a4e66dd16..e9186339f6e6bb 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 		}
+ 		break;
+ 	}
+-	if (nval == 0)
+-		return -EINVAL;
+ 
+ 	if (obj->type == ACPI_TYPE_BUFFER) {
+ 		if (proptype != DEV_PROP_U8)
+@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 		ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
+ 		break;
+ 	case DEV_PROP_STRING:
+-		ret = acpi_copy_property_array_string(
+-			items, (char **)val,
+-			min_t(u32, nval, obj->package.count));
++		nval = min_t(u32, nval, obj->package.count);
++		if (nval == 0)
++			return -ENODATA;
++
++		ret = acpi_copy_property_array_string(items, (char **)val, nval);
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 67f277e1c3bf31..5a46c066abc365 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct page *page;
+-	unsigned int offset;
++	unsigned int offset, count;
+ 
+ 	if (!qc->cursg) {
+ 		qc->curbytes = qc->nbytes;
+@@ -617,25 +617,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ 	page = nth_page(page, (offset >> PAGE_SHIFT));
+ 	offset %= PAGE_SIZE;
+ 
+-	trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
++	/* don't overrun current sg */
++	count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
++
++	trace_ata_sff_pio_transfer_data(qc, offset, count);
+ 
+ 	/*
+ 	 * Split the transfer when it splits a page boundary.  Note that the
+ 	 * split still has to be dword aligned like all ATA data transfers.
+ 	 */
+ 	WARN_ON_ONCE(offset % 4);
+-	if (offset + qc->sect_size > PAGE_SIZE) {
++	if (offset + count > PAGE_SIZE) {
+ 		unsigned int split_len = PAGE_SIZE - offset;
+ 
+ 		ata_pio_xfer(qc, page, offset, split_len);
+-		ata_pio_xfer(qc, nth_page(page, 1), 0,
+-			     qc->sect_size - split_len);
++		ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ 	} else {
+-		ata_pio_xfer(qc, page, offset, qc->sect_size);
++		ata_pio_xfer(qc, page, offset, count);
+ 	}
+ 
+-	qc->curbytes += qc->sect_size;
+-	qc->cursg_ofs += qc->sect_size;
++	qc->curbytes += count;
++	qc->cursg_ofs += count;
+ 
+ 	if (qc->cursg_ofs == qc->cursg->length) {
+ 		qc->cursg = sg_next(qc->cursg);
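
[The libata hunk above clamps each PIO transfer to what remains in the current scatterlist entry instead of always moving a full sector, then keeps the existing page-boundary split. A standalone sketch of that arithmetic with illustrative numbers (values are hypothetical, chosen to trigger both the clamp and the split):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Hypothetical: a 512-byte sector near the end of an sg entry. */
            unsigned int sg_length = 1000, sg_ofs = 900, sect_size = 512;
            unsigned int offset = 4000;  /* position of the data within the page */

            /* The fix: never read past the current sg entry (100 here, not 512). */
            unsigned int count = min_u(sg_length - sg_ofs, sect_size);

            if (offset + count > PAGE_SIZE) {
                    unsigned int split = PAGE_SIZE - offset;

                    printf("transfer %u bytes, then %u from the next page\n",
                           split, count - split);
            } else {
                    printf("transfer %u bytes in one go\n", count);
            }
            return 0;
    }

With these numbers the unclamped code would have copied 512 bytes from a 100-byte tail, overrunning the scatterlist entry.]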
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f69df515d668b6..72e85673b70957 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -377,6 +377,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Broadcom BCM2035 */
+ 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+@@ -610,6 +612,8 @@ static const struct usb_device_id quirks_table[] = {
+ 	/* MediaTek MT7922 Bluetooth devices */
+ 	{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* MediaTek MT7922A Bluetooth devices */
+ 	{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
+@@ -674,6 +678,8 @@ static const struct usb_device_id quirks_table[] = {
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 	{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
++	{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH },
+ 
+ 	/* Additional Realtek 8723AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/char/misc.c b/drivers/char/misc.c
+index 541edc26ec89a1..2cf595d2e10b85 100644
+--- a/drivers/char/misc.c
++++ b/drivers/char/misc.c
+@@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx);
+ #define DYNAMIC_MINORS 128 /* like dynamic majors */
+ static DEFINE_IDA(misc_minors_ida);
+ 
+-static int misc_minor_alloc(void)
++static int misc_minor_alloc(int minor)
+ {
+-	int ret;
+-
+-	ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+-	if (ret >= 0) {
+-		ret = DYNAMIC_MINORS - ret - 1;
++	int ret = 0;
++
++	if (minor == MISC_DYNAMIC_MINOR) {
++		/* allocate free id */
++		ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
++		if (ret >= 0) {
++			ret = DYNAMIC_MINORS - ret - 1;
++		} else {
++			ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
++					      MINORMASK, GFP_KERNEL);
++		}
+ 	} else {
+-		ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+-				      MINORMASK, GFP_KERNEL);
++		/* specific minor, check if it is in dynamic or misc dynamic range  */
++		if (minor < DYNAMIC_MINORS) {
++			minor = DYNAMIC_MINORS - minor - 1;
++			ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
++		} else if (minor > MISC_DYNAMIC_MINOR) {
++			ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
++		} else {
++			/* case of non-dynamic minors, no need to allocate id */
++			ret = 0;
++		}
+ 	}
+ 	return ret;
+ }
+@@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc)
+ 	mutex_lock(&misc_mtx);
+ 
+ 	if (is_dynamic) {
+-		int i = misc_minor_alloc();
++		int i = misc_minor_alloc(misc->minor);
+ 
+ 		if (i < 0) {
+ 			err = -EBUSY;
+@@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc)
+ 		misc->minor = i;
+ 	} else {
+ 		struct miscdevice *c;
++		int i;
+ 
+ 		list_for_each_entry(c, &misc_list, list) {
+ 			if (c->minor == misc->minor) {
+@@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc)
+ 				goto out;
+ 			}
+ 		}
++
++		i = misc_minor_alloc(misc->minor);
++		if (i < 0) {
++			err = -EBUSY;
++			goto out;
++		}
+ 	}
+ 
+ 	dev = MKDEV(MISC_MAJOR, misc->minor);
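
[The misc.c hunk above makes misc_minor_alloc() take the requested minor and reserve fixed minors in the IDA as well, so they can no longer collide with dynamically allocated ones. A sketch of just the classification and mapping logic (the real code allocates from an IDA; this only prints the decision):

    #include <stdio.h>

    #define DYNAMIC_MINORS     128
    #define MISC_DYNAMIC_MINOR 255

    /* Sketch of the allocation decision only; a real IDA tracks what is taken. */
    static void classify(int minor)
    {
            if (minor == MISC_DYNAMIC_MINOR)
                    printf("%3d: dynamic, pick any free id\n", minor);
            else if (minor < DYNAMIC_MINORS)
                    printf("%3d: fixed, reserve id %d in the dynamic map\n",
                           minor, DYNAMIC_MINORS - minor - 1);
            else if (minor > MISC_DYNAMIC_MINOR)
                    printf("%3d: fixed, reserve id %d directly\n", minor, minor);
            else
                    printf("%3d: classic static minor, nothing to reserve\n", minor);
    }

    int main(void)
    {
            classify(MISC_DYNAMIC_MINOR);
            classify(10);   /* maps to id 117: the dynamic map grows downward */
            classify(300);
            classify(200);
            return 0;
    }

The DYNAMIC_MINORS - minor - 1 mapping mirrors how the dynamic allocator hands out minors from the top of its range, so a fixed request lands on the same IDA slot a dynamic allocation of that minor would have used.]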
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 69533d0bfb51e8..cf02ec646f46f0 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
+ 	return n == 0;
+ }
+ 
++static void tpm_bios_log_free(void *data)
++{
++	kvfree(data);
++}
++
+ /* read binary bios log */
+ int tpm_read_log_acpi(struct tpm_chip *chip)
+ {
+@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	}
+ 
+ 	/* malloc EventLog space */
+-	log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
++	log->bios_event_log = kvmalloc(len, GFP_KERNEL);
+ 	if (!log->bios_event_log)
+ 		return -ENOMEM;
+ 
+@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 		goto err;
+ 	}
+ 
++	ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
++	if (ret) {
++		log->bios_event_log = NULL;
++		goto err;
++	}
++
+ 	return format;
+ 
+ err:
+-	devm_kfree(&chip->dev, log->bios_event_log);
++	tpm_bios_log_free(log->bios_event_log);
+ 	log->bios_event_log = NULL;
+ 	return ret;
+ }
+diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
+index 7082b4309c6f15..0d9485e83938a1 100644
+--- a/drivers/clk/clk-loongson2.c
++++ b/drivers/clk/clk-loongson2.c
+@@ -294,7 +294,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	for (p = data; p->name; p++)
+-		clks_num++;
++		clks_num = max(clks_num, p->id + 1);
+ 
+ 	clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
+ 			   GFP_KERNEL);
+@@ -309,6 +309,9 @@ static int loongson2_clk_probe(struct platform_device *pdev)
+ 	clp->clk_data.num = clks_num;
+ 	clp->dev = dev;
+ 
++	/* Avoid returning NULL for unused id */
++	memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
++
+ 	for (i = 0; i < clks_num; i++) {
+ 		p = &data[i];
+ 		switch (p->type) {
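
[The clk-loongson2 hunk above sizes the hw array by the highest clock id rather than by the entry count, then pre-fills every slot with an error sentinel so lookups of unused ids fail cleanly instead of returning NULL. A compilable sketch of both steps (the table and the -ENOENT pointer encoding are illustrative; the kernel uses ERR_PTR and memset_p):

    #include <stdio.h>

    #define ENOENT 2

    struct clk_def {
            const char *name;
            int id;
    };

    int main(void)
    {
            /* Hypothetical table with a sparse, out-of-order id space. */
            static const struct clk_def data[] = {
                    { "core", 0 }, { "mem", 5 }, { "usb", 3 }, { NULL, 0 },
            };
            const struct clk_def *p;
            int clks_num = 0, i;
            void *hws[16];

            /* Size by highest id + 1, not by the entry count (which is 3). */
            for (p = data; p->name; p++)
                    if (p->id + 1 > clks_num)
                            clks_num = p->id + 1;

            /* Fill gaps with an error sentinel, like ERR_PTR(-ENOENT). */
            for (i = 0; i < clks_num; i++)
                    hws[i] = (void *)(long)-ENOENT;

            printf("clks_num = %d (entries: 3)\n", clks_num);  /* 6 */
            printf("hws[1] = %ld\n", (long)hws[1]);            /* -2, not NULL */
            return 0;
    }

Counting entries would have allocated 3 slots here, so indexing by id 5 would have written past the array.]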
+diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
+index 425c69cfb105a6..e103121cf58e77 100644
+--- a/drivers/clk/mediatek/clk-mt2701-aud.c
++++ b/drivers/clk/mediatek/clk-mt2701-aud.c
+@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
+ 	/* AUDIO0 */
+ 	GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
++	GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
++	GATE_DUMMY(CLK_AUD_I2S, "audio_i2c_dummy"),
++	GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
+ 	GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
+ 	GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
+ 	GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
+ 	GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
++	GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
++	GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
++	GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
+ 	/* AUDIO1 */
+ 	GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
+ 	GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
+@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ 	GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ 	GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
++	GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
+ 	GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
+ 	GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
+ 	GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
+ 	GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
++	GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
+ 	GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
+ 	/* AUDIO2 */
+ 	GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
+@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
+ 	GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
+ 	GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
+ 	/* AUDIO3 */
++	GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
++	GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ 	GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
+diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
+index 5da3eabffd3e76..f11c7a4fa37b65 100644
+--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate bdp_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
+ 	GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+ 	GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
+ 	GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
+diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
+index 875594bc9dcba8..c158e54c46526e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-img.c
++++ b/drivers/clk/mediatek/clk-mt2701-img.c
+@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "img_dummy"),
+ 	GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+ 	GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
+ 	GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
+diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
+index bc68fa718878f9..474d87d62e8331 100644
+--- a/drivers/clk/mediatek/clk-mt2701-mm.c
++++ b/drivers/clk/mediatek/clk-mt2701-mm.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
+ 	GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+ 	GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ 	GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
+diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
+index 94db86f8d0a462..5299d92f3aba0f 100644
+--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
+@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
++	GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
+ 	GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+ 	GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
+ };
+diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
+index edaa2433a472ad..eaf5d2c5e59337 100644
+--- a/drivers/clk/mmp/pwr-island.c
++++ b/drivers/clk/mmp/pwr-island.c
+@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ 	pm_domain->flags = flags;
+ 	pm_domain->lock = lock;
+ 
+-	pm_genpd_init(&pm_domain->genpd, NULL, true);
+ 	pm_domain->genpd.name = name;
+ 	pm_domain->genpd.power_on = mmp_pm_domain_power_on;
+ 	pm_domain->genpd.power_off = mmp_pm_domain_power_off;
++	pm_genpd_init(&pm_domain->genpd, NULL, true);
+ 
+ 	return &pm_domain->genpd;
+ }
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index ef89d686cbc4e0..c27ea46283fd90 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -1079,6 +1079,7 @@ config SM_GCC_7150
+ config SM_GCC_8150
+ 	tristate "SM8150 Global Clock Controller"
+ 	depends on ARM64 || COMPILE_TEST
++	select QCOM_GDSC
+ 	help
+ 	  Support for the global clock controller on SM8150 devices.
+ 	  Say Y if you want to use peripheral devices such as UART,
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index b8351f8c0b8401..35e97eab5d059f 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -432,6 +432,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ 	mask |= config->pre_div_mask;
+ 	mask |= config->post_div_mask;
+ 	mask |= config->vco_mask;
++	mask |= config->alpha_en_mask;
++	mask |= config->alpha_mode_mask;
+ 
+ 	regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
+ 
+diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
+index eefc322ce36798..e6c33010cfbf69 100644
+--- a/drivers/clk/qcom/clk-rpmh.c
++++ b/drivers/clk/qcom/clk-rpmh.c
+@@ -329,7 +329,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
+ {
+ 	struct clk_rpmh *c = to_clk_rpmh(hw);
+ 
+-	return c->aggr_state * c->unit;
++	return (unsigned long)c->aggr_state * c->unit;
+ }
+ 
+ static const struct clk_ops clk_rpmh_bcm_ops = {
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index 50facb36701af9..2bc6b5f99f5725 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ 	.cmd_rcgr = 0x1144,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = disp_cc_parent_map_6,
+ 	.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "disp_cc_mdss_dp_aux_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = disp_cc_parent_data_6,
++		.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
+index 6e6068b168e66e..07f1b78d737a15 100644
+--- a/drivers/clk/qcom/gcc-mdm9607.c
++++ b/drivers/clk/qcom/gcc-mdm9607.c
+@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ };
+ 
+ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+-	.cmd_rcgr = 0x6044,
++	.cmd_rcgr = 0x7044,
+ 	.mnd_width = 16,
+ 	.hid_width = 5,
+ 	.parent_map = gcc_xo_gpll0_map,
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index a811fad2aa2785..74346dc026068a 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ 	{ .hw = &gpll0_out_odd.clkr.hw },
+ };
+ 
++static const struct parent_map gcc_parent_map_3[] = {
++	{ P_BI_TCXO, 0 },
++};
++
++static const struct clk_parent_data gcc_parent_data_3[] = {
++	{ .fw_name = "bi_tcxo" },
++};
++
+ static const struct parent_map gcc_parent_map_4[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_GPLL0_OUT_MAIN, 1 },
+@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ 	.cmd_rcgr = 0x3a0b0,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = gcc_parent_map_3,
+ 	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gcc_ufs_phy_phy_aux_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = gcc_parent_data_3,
++		.num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ 	.cmd_rcgr = 0x1a034,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
++	.parent_map = gcc_parent_map_3,
+ 	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+-		.parent_data = &(const struct clk_parent_data){
+-			.fw_name = "bi_tcxo",
+-		},
+-		.num_parents = 1,
++		.parent_data = gcc_parent_data_3,
++		.num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ 		.ops = &clk_rcg2_ops,
+ 	},
+ };
+diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
+index 5abaeddd6afcc5..862a9bf73bcb5d 100644
+--- a/drivers/clk/qcom/gcc-sm8550.c
++++ b/drivers/clk/qcom/gcc-sm8550.c
+@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
+index fd9d6544bdd53a..9dd5c48f33bed5 100644
+--- a/drivers/clk/qcom/gcc-sm8650.c
++++ b/drivers/clk/qcom/gcc-sm8650.c
+@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_0_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
+ 	.pd = {
+ 		.name = "pcie_1_phy_gdsc",
+ 	},
+-	.pwrsts = PWRSTS_OFF_ON,
++	.pwrsts = PWRSTS_RET_ON,
+ 	.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+ };
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+index 7133377d41630f..1f81c7ac41af4b 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 					  0, 4,		/* M */
+@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 					  0, 4,		/* M */
+@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 					  24, 2,	/* mux */
+ 					  BIT(31),	/* gate */
+ 					  2,		/* post-div */
+-					  CLK_SET_RATE_NO_REPARENT);
++					  0);
+ 
+ static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+ static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 26e98fea991ae0..d64b07ec48e540 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -232,7 +232,7 @@ config CPUFREQ_VIRT
+ 	  If in doubt, say N.
+ 
+ config CPUFREQ_DT_PLATDEV
+-	tristate "Generic DT based cpufreq platdev driver"
++	bool "Generic DT based cpufreq platdev driver"
+ 	depends on OF
+ 	help
+ 	  This adds a generic DT based cpufreq platdev driver for frequency
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index 2a3e8bd317c9d2..9c198bd4f7e9b2 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -235,5 +235,3 @@ static int __init cpufreq_dt_platdev_init(void)
+ 			       sizeof(struct cpufreq_dt_platform_data)));
+ }
+ core_initcall(cpufreq_dt_platdev_init);
+-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
+index c6bdfc308e9908..9cef7152807626 100644
+--- a/drivers/cpufreq/s3c64xx-cpufreq.c
++++ b/drivers/cpufreq/s3c64xx-cpufreq.c
+@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
+ 	unsigned int vddarm_max;
+ };
+ 
++#ifdef CONFIG_REGULATOR
+ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ 	[0] = { 1000000, 1150000 },
+ 	[1] = { 1050000, 1150000 },
+@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ 	[3] = { 1200000, 1350000 },
+ 	[4] = { 1300000, 1350000 },
+ };
++#endif
+ 
+ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ 	{ 0, 0,  66000 },
+@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ 				      unsigned int index)
+ {
+-	struct s3c64xx_dvfs *dvfs;
+-	unsigned int old_freq, new_freq;
++	unsigned int new_freq = s3c64xx_freq_table[index].frequency;
+ 	int ret;
+ 
++#ifdef CONFIG_REGULATOR
++	struct s3c64xx_dvfs *dvfs;
++	unsigned int old_freq;
++
+ 	old_freq = clk_get_rate(policy->clk) / 1000;
+-	new_freq = s3c64xx_freq_table[index].frequency;
+ 	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
+ 
+-#ifdef CONFIG_REGULATOR
+ 	if (vddarm && new_freq > old_freq) {
+ 		ret = regulator_set_voltage(vddarm,
+ 					    dvfs->vddarm_min,
+diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
+index 7d811728f04782..97b56e92ea33f5 100644
+--- a/drivers/crypto/qce/aead.c
++++ b/drivers/crypto/qce/aead.c
+@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
+ 	alg->init			= qce_aead_init;
+ 	alg->exit			= qce_aead_exit;
+ 
+-	alg->base.cra_priority		= 300;
++	alg->base.cra_priority		= 275;
+ 	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
+ 					  CRYPTO_ALG_ALLOCATES_MEMORY |
+ 					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
+index e228a31fe28dc0..848e5e802b92b9 100644
+--- a/drivers/crypto/qce/core.c
++++ b/drivers/crypto/qce/core.c
+@@ -51,16 +51,19 @@ static void qce_unregister_algs(struct qce_device *qce)
+ static int qce_register_algs(struct qce_device *qce)
+ {
+ 	const struct qce_algo_ops *ops;
+-	int i, ret = -ENODEV;
++	int i, j, ret = -ENODEV;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+ 		ops = qce_ops[i];
+ 		ret = ops->register_algs(qce);
+-		if (ret)
+-			break;
++		if (ret) {
++			for (j = i - 1; j >= 0; j--)
++				ops->unregister_algs(qce);
++			return ret;
++		}
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int qce_handle_request(struct crypto_async_request *async_req)
+@@ -247,7 +250,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
+ 
+ 	ret = qce_check_version(qce);
+ 	if (ret)
+-		goto err_clks;
++		goto err_dma;
+ 
+ 	spin_lock_init(&qce->lock);
+ 	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
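
[The qce/core.c hunk above stops returning early with previously registered algorithm families still in place: on failure it now unwinds the earlier registrations before propagating the error. A standalone sketch of that reverse-order unwind (the table contents and helpers are placeholders, not the driver's real ops):

    #include <stdio.h>

    struct ops {
            const char *name;
            int (*reg)(void);
            void (*unreg)(void);
    };

    static int ok(void) { return 0; }
    static int fail(void) { return -1; }
    static void undo(void) { }

    int main(void)
    {
            static const struct ops table[] = {
                    { "skcipher", ok, undo },
                    { "sha", ok, undo },
                    { "aead", fail, undo },  /* pretend the last family fails */
            };
            int i, j, ret = 0;

            for (i = 0; i < 3; i++) {
                    ret = table[i].reg();
                    if (ret) {
                            /* unwind earlier registrations, newest first */
                            for (j = i - 1; j >= 0; j--) {
                                    printf("unregister %s\n", table[j].name);
                                    table[j].unreg();
                            }
                            break;
                    }
            }
            printf("ret = %d\n", ret);
            return 0;
    }

Unwinding in reverse keeps teardown symmetric with setup, the same reasoning behind the err_dma relabeling later in the same file.]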
+diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
+index fc72af8aa9a725..71b748183cfa86 100644
+--- a/drivers/crypto/qce/sha.c
++++ b/drivers/crypto/qce/sha.c
+@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
+ 
+ 	base = &alg->halg.base;
+ 	base->cra_blocksize = def->blocksize;
+-	base->cra_priority = 300;
++	base->cra_priority = 175;
+ 	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ 	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+ 	base->cra_alignmask = 0;
+diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
+index 5b493fdc1e747f..ffb334eb5b3461 100644
+--- a/drivers/crypto/qce/skcipher.c
++++ b/drivers/crypto/qce/skcipher.c
+@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+ 	alg->encrypt			= qce_skcipher_encrypt;
+ 	alg->decrypt			= qce_skcipher_decrypt;
+ 
+-	alg->base.cra_priority		= 300;
++	alg->base.cra_priority		= 275;
+ 	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
+ 					  CRYPTO_ALG_ALLOCATES_MEMORY |
+ 					  CRYPTO_ALG_KERN_DRIVER_ONLY;
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 71d8b26c4103b9..9f35f69e0f9e2b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -106,7 +106,7 @@ config ISCSI_IBFT
+ 	select ISCSI_BOOT_SYSFS
+ 	select ISCSI_IBFT_FIND if X86
+ 	depends on ACPI && SCSI && SCSI_LOWLEVEL
+-	default	n
++	default n
+ 	help
+ 	  This option enables support for detection and exposing of iSCSI
+ 	  Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index ed4e8ddbe76a50..1141cd06011ff4 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -11,7 +11,7 @@ cflags-y			:= $(KBUILD_CFLAGS)
+ 
+ cflags-$(CONFIG_X86_32)		:= -march=i386
+ cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+-cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
++cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -std=gnu11 \
+ 				   -fPIC -fno-strict-aliasing -mno-red-zone \
+ 				   -mno-mmx -mno-sse -fshort-wchar \
+ 				   -Wno-pointer-sign \
+diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
+index 26312a5131d2ab..959bc156f35f9a 100644
+--- a/drivers/firmware/qcom/qcom_scm.c
++++ b/drivers/firmware/qcom/qcom_scm.c
+@@ -217,7 +217,10 @@ static DEFINE_SPINLOCK(scm_query_lock);
+ 
+ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
+ {
+-	return __scm ? __scm->mempool : NULL;
++	if (!qcom_scm_is_available())
++		return NULL;
++
++	return __scm->mempool;
+ }
+ 
+ static enum qcom_scm_convention __get_convention(void)
+@@ -1867,7 +1870,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
+  */
+ bool qcom_scm_is_available(void)
+ {
+-	return !!READ_ONCE(__scm);
++	/* Paired with smp_store_release() in qcom_scm_probe */
++	return !!smp_load_acquire(&__scm);
+ }
+ EXPORT_SYMBOL_GPL(qcom_scm_is_available);
+ 
+@@ -2024,7 +2028,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Let all above stores be available after this */
++	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ 	smp_store_release(&__scm, scm);
+ 
+ 	irq = platform_get_irq_optional(pdev, 0);
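
[The qcom_scm.c hunk above turns the plain READ_ONCE of the global __scm pointer into an smp_load_acquire() paired with the existing smp_store_release() in probe, so a reader that sees the pointer also sees everything initialized before it was published. A C11 userspace sketch of the same publish/consume pairing using stdatomic (names here are illustrative, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    struct scm { int ready; };

    /* Stand-in for the global __scm pointer published by probe(). */
    static _Atomic(struct scm *) g_scm;

    static void probe(struct scm *s)
    {
            s->ready = 1;
            /* release: all stores above become visible before the pointer does */
            atomic_store_explicit(&g_scm, s, memory_order_release);
    }

    static int available(void)
    {
            /* acquire: pairs with the release store in probe() */
            return atomic_load_explicit(&g_scm, memory_order_acquire) != NULL;
    }

    int main(void)
    {
            static struct scm s;

            printf("before probe: %d\n", available());
            probe(&s);
            printf("after probe:  %d\n", available());
            return 0;
    }

A relaxed load could, on weakly ordered hardware, observe the pointer before the fields it guards; the acquire/release pair rules that out, which is also why the related mempool accessor now goes through the availability check.]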
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 93ee3aa092f81c..1a66ca075aded1 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS
+ 
+ config GPIO_GRGPIO
+ 	tristate "Aeroflex Gaisler GRGPIO support"
++	depends on OF || COMPILE_TEST
+ 	select GPIO_GENERIC
+ 	select IRQ_DOMAIN
+ 	help
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index be4c9981ebc404..d63c1030e6ac0e 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
+ 	DECLARE_BITMAP(trigger, MAX_LINE);
+ 	int ret;
+ 
+-	if (chip->driver_data & PCA_PCAL) {
+-		/* Read the current interrupt status from the device */
+-		ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
+-		if (ret)
+-			return false;
+-
+-		/* Check latched inputs and clear interrupt status */
+-		ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
+-		if (ret)
+-			return false;
+-
+-		/* Apply filter for rising/falling edge selection */
+-		bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
+-
+-		bitmap_and(pending, new_stat, trigger, gc->ngpio);
+-
+-		return !bitmap_empty(pending, gc->ngpio);
+-	}
+-
+ 	ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
+ 	if (ret)
+ 		return false;
+diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
+index 686ae3d11ba362..940165235db647 100644
+--- a/drivers/gpio/gpio-sim.c
++++ b/drivers/gpio/gpio-sim.c
+@@ -1033,20 +1033,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+ 	struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ 	struct gpio_sim_bank *bank;
+ 	struct gpio_sim_line *line;
++	struct config_item *item;
+ 
+ 	/*
+-	 * The device only needs to depend on leaf line entries. This is
++	 * The device only needs to depend on leaf entries. This is
+ 	 * sufficient to lock up all the configfs entries that the
+ 	 * instantiated, alive device depends on.
+ 	 */
+ 	list_for_each_entry(bank, &dev->bank_list, siblings) {
+ 		list_for_each_entry(line, &bank->line_list, siblings) {
++			item = line->hog ? &line->hog->item
++					 : &line->group.cg_item;
++
+ 			if (lock)
+-				WARN_ON(configfs_depend_item_unlocked(
+-						subsys, &line->group.cg_item));
++				WARN_ON(configfs_depend_item_unlocked(subsys,
++								      item));
+ 			else
+-				configfs_undepend_item_unlocked(
+-						&line->group.cg_item);
++				configfs_undepend_item_unlocked(item);
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 772fc7625639de..b55be8889e2ca6 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -530,6 +530,10 @@ config DRM_HYPERV
+ config DRM_EXPORT_FOR_TESTS
+ 	bool
+ 
++# Separate option as not all DRM drivers use it
++config DRM_PANEL_BACKLIGHT_QUIRKS
++	tristate
++
+ config DRM_LIB_RANDOM
+ 	bool
+ 	default n
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 463afad1b5ca62..06b73068d24830 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -95,6 +95,7 @@ drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
+ obj-$(CONFIG_DRM)	+= drm.o
+ 
+ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
++obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
+ 
+ #
+ # Memory-management helpers
+diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
+index 41fa3377d9cf56..1a11cab741aca4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
++++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
+@@ -26,6 +26,7 @@ config DRM_AMDGPU
+ 	select DRM_BUDDY
+ 	select DRM_SUBALLOC_HELPER
+ 	select DRM_EXEC
++	select DRM_PANEL_BACKLIGHT_QUIRKS
+ 	# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ 	# ACPI_VIDEO's dependencies must also be selected.
+ 	select INPUT if ACPI
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 38686203bea630..e63efe5c5b75a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -119,9 +119,10 @@
+  * - 3.57.0 - Compute tunneling on GFX10+
+  * - 3.58.0 - Add GFX12 DCC support
+  * - 3.59.0 - Cleared VRAM
++ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+  */
+ #define KMS_DRIVER_MAJOR	3
+-#define KMS_DRIVER_MINOR	59
++#define KMS_DRIVER_MINOR	60
+ #define KMS_DRIVER_PATCHLEVEL	0
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index c4da62d111052e..2890f54339ad0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ 	mutex_lock(&adev->mman.gtt_window_lock);
+ 	while (src_mm.remaining) {
+ 		uint64_t from, to, cur_size, tiling_flags;
+-		uint32_t num_type, data_format, max_com;
++		uint32_t num_type, data_format, max_com, write_compress_disable;
+ 		struct dma_fence *next;
+ 
+ 		/* Never copy more than 256MiB at once to avoid a timeout */
+@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ 			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+ 			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
+ 			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
++			write_compress_disable =
++				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
+ 			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
+ 				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
+-				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
++				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
++				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
++							     write_compress_disable));
+ 		}
+ 
+ 		r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index 2852a6064c9ac5..cd349a3c798818 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
+ #define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK		0x07
+ #define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT		8
+ #define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK		0x3f
++#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT	14
++#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK	0x1
+ 
+ #define AMDGPU_COPY_FLAGS_SET(field, value) \
+ 	(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
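
[The amdgpu_ttm.h hunk above carves a new one-bit WRITE_COMPRESS_DISABLE field out of the copy_flags word at shift 14, alongside the existing SET/GET macros shown in context. A self-contained sketch of how those shift/mask macros pack and unpack a field (simplified FLAGS_ names instead of the AMDGPU_COPY_FLAGS_ prefix):

    #include <stdio.h>

    #define FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
    #define FLAGS_WRITE_COMPRESS_DISABLE_MASK  0x1u

    #define FLAGS_SET(field, value) \
            ((((unsigned int)(value)) & FLAGS_##field##_MASK) \
             << FLAGS_##field##_SHIFT)
    #define FLAGS_GET(flags, field) \
            (((flags) >> FLAGS_##field##_SHIFT) & FLAGS_##field##_MASK)

    int main(void)
    {
            unsigned int copy_flags = 0;

            copy_flags |= FLAGS_SET(WRITE_COMPRESS_DISABLE, 1);
            printf("flags = 0x%x, disable = %u\n",
                   copy_flags, FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE));
            return 0;
    }

Token pasting on the field name keeps each field's shift and mask in one place, so adding the new bit is a two-define change plus the SET/GET call sites seen in amdgpu_ttm.c and sdma_v7_0.c below.]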
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+index da327ab48a572c..5b50e07ba4c7d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+@@ -3999,17 +3999,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
+ 
+ 		if (def != data)
+ 			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
+-
+-		data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
+-		data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+-		WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
+-
+-		/* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
+-		if (adev->sdma.num_instances > 1) {
+-			data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
+-			data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
+-			WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+index a38553f38fdc87..ed68d7971a93a7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+@@ -953,10 +953,12 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
+ 		/* set utc l1 enable flag always to 1 */
+ 		temp = RREG32_SDMA(i, regSDMA_CNTL);
+ 		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+-		/* enable context empty interrupt during initialization */
+-		temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
+-		WREG32_SDMA(i, regSDMA_CNTL, temp);
+ 
++		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
++			/* enable context empty interrupt during initialization */
++			temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
++			WREG32_SDMA(i, regSDMA_CNTL, temp);
++		}
+ 		if (!amdgpu_sriov_vf(adev)) {
+ 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ 				/* unhalt engine */
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+index d2ce6b6a7ff64e..d41e38a0bd4164 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+@@ -1684,11 +1684,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
+ 				       uint32_t byte_count,
+ 				       uint32_t copy_flags)
+ {
+-	uint32_t num_type, data_format, max_com;
++	uint32_t num_type, data_format, max_com, write_cm;
+ 
+ 	max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
+ 	data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
+ 	num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
++	write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
+ 
+ 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
+ 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+@@ -1705,7 +1706,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
+ 	if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
+ 		ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
+ 			((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
+-			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
++			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
+ 			SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
+ 	else
+ 		ib->ptr[ib->length_dw++] = 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 9b51dd75fefc7d..35caa71f317dc4 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -638,6 +638,14 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
+ 	struct kfd_node *knode;
+ 	unsigned int i;
+ 
++	/*
++	 * flush_work ensures that there are no outstanding
++	 * work-queue items that will access interrupt_ring. New work items
++	 * can't be created because we stopped interrupt handling above.
++	 */
++	flush_workqueue(kfd->ih_wq);
++	destroy_workqueue(kfd->ih_wq);
++
+ 	for (i = 0; i < num_nodes; i++) {
+ 		knode = kfd->nodes[i];
+ 		device_queue_manager_uninit(knode->dqm);
+@@ -1059,21 +1067,6 @@ static int kfd_resume(struct kfd_node *node)
+ 	return err;
+ }
+ 
+-static inline void kfd_queue_work(struct workqueue_struct *wq,
+-				  struct work_struct *work)
+-{
+-	int cpu, new_cpu;
+-
+-	cpu = new_cpu = smp_processor_id();
+-	do {
+-		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+-		if (cpu_to_node(new_cpu) == numa_node_id())
+-			break;
+-	} while (cpu != new_cpu);
+-
+-	queue_work_on(new_cpu, wq, work);
+-}
+-
+ /* This is called directly from KGD at ISR. */
+ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ {
+@@ -1099,7 +1092,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+ 			    	patched_ihre, &is_patched)
+ 		    && enqueue_ih_ring_entry(node,
+ 			    	is_patched ? patched_ihre : ih_ring_entry)) {
+-			kfd_queue_work(node->ih_wq, &node->interrupt_work);
++			queue_work(node->kfd->ih_wq, &node->interrupt_work);
+ 			spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ 			return;
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 16b5daaa272f13..6f309948805354 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -2325,9 +2325,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ 	 */
+ 	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
+ 	if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
++		while (halt_if_hws_hang)
++			schedule();
+ 		if (reset_queues_on_hws_hang(dqm)) {
+-			while (halt_if_hws_hang)
+-				schedule();
+ 			dqm->is_hws_hang = true;
+ 			kfd_hws_hang(dqm);
+ 			retval = -ETIME;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index 9b6b6e88259348..15b4b70cf19976 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -62,11 +62,14 @@ int kfd_interrupt_init(struct kfd_node *node)
+ 		return r;
+ 	}
+ 
+-	node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+-	if (unlikely(!node->ih_wq)) {
+-		kfifo_free(&node->ih_fifo);
+-		dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
+-		return -ENOMEM;
++	if (!node->kfd->ih_wq) {
++		node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
++						   node->kfd->num_nodes);
++		if (unlikely(!node->kfd->ih_wq)) {
++			kfifo_free(&node->ih_fifo);
++			dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
++			return -ENOMEM;
++		}
+ 	}
+ 	spin_lock_init(&node->interrupt_lock);
+ 
+@@ -96,16 +99,6 @@ void kfd_interrupt_exit(struct kfd_node *node)
+ 	spin_lock_irqsave(&node->interrupt_lock, flags);
+ 	node->interrupts_active = false;
+ 	spin_unlock_irqrestore(&node->interrupt_lock, flags);
+-
+-	/*
+-	 * flush_work ensures that there are no outstanding
+-	 * work-queue items that will access interrupt_ring. New work items
+-	 * can't be created because we stopped interrupt handling above.
+-	 */
+-	flush_workqueue(node->ih_wq);
+-
+-	destroy_workqueue(node->ih_wq);
+-
+ 	kfifo_free(&node->ih_fifo);
+ }
+ 
+@@ -162,7 +155,7 @@ static void interrupt_wq(struct work_struct *work)
+ 			/* If we spent more than a second processing signals,
+ 			 * reschedule the worker to avoid soft-lockup warnings
+ 			 */
+-			queue_work(dev->ih_wq, &dev->interrupt_work);
++			queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 9e5ca0b93b2a25..74881a5ca59adc 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -273,7 +273,6 @@ struct kfd_node {
+ 
+ 	/* Interrupts */
+ 	struct kfifo ih_fifo;
+-	struct workqueue_struct *ih_wq;
+ 	struct work_struct interrupt_work;
+ 	spinlock_t interrupt_lock;
+ 
+@@ -366,6 +365,8 @@ struct kfd_dev {
+ 	struct kfd_node *nodes[MAX_KFD_NODES];
+ 	unsigned int num_nodes;
+ 
++	struct workqueue_struct *ih_wq;
++
+ 	/* Kernel doorbells for KFD device */
+ 	struct amdgpu_bo *doorbells;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 59b92d66e95890..bd595b1db15f27 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+ 
+ 	if (pdd->already_dequeued)
+ 		return;
+-
++	/* The MES context flush needs to filter out the case which the
++	 * KFD process is created without setting up the MES context and
++	 * queue for creating a compute queue.
++	 */
+ 	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+-	if (dev->kfd->shared_resources.enable_mes &&
++	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
+ 	    down_read_trylock(&dev->adev->reset_domain->sem)) {
+ 		amdgpu_mes_flush_shader_debugger(dev->adev,
+ 						 pdd->proc_ctx_gpu_addr);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 53694baca96637..f7c0d7625ff12d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -93,6 +93,7 @@
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_eld.h>
++#include <drm/drm_utils.h>
+ #include <drm/drm_vblank.h>
+ #include <drm/drm_audio_component.h>
+ #include <drm/drm_gem_atomic_helper.h>
+@@ -1036,8 +1037,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ 			continue;
+ 
+ 		*enabled = true;
++		mutex_lock(&connector->eld_mutex);
+ 		ret = drm_eld_size(connector->eld);
+ 		memcpy(buf, connector->eld, min(max_bytes, ret));
++		mutex_unlock(&connector->eld_mutex);
+ 
+ 		break;
+ 	}
+@@ -3457,6 +3460,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+ 	struct drm_connector *conn_base;
+ 	struct amdgpu_device *adev;
+ 	struct drm_luminance_range_info *luminance_range;
++	int min_input_signal_override;
+ 
+ 	if (aconnector->bl_idx == -1 ||
+ 	    aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+@@ -3493,6 +3497,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+ 		caps->aux_min_input_signal = 0;
+ 		caps->aux_max_input_signal = 512;
+ 	}
++
++	min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
++	if (min_input_signal_override >= 0)
++		caps->min_input_signal = min_input_signal_override;
+ }
+ 
+ void amdgpu_dm_update_connector_after_detect(
+@@ -5522,8 +5530,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ 			    const u64 tiling_flags,
+ 			    struct dc_plane_info *plane_info,
+ 			    struct dc_plane_address *address,
+-			    bool tmz_surface,
+-			    bool force_disable_dcc)
++			    bool tmz_surface)
+ {
+ 	const struct drm_framebuffer *fb = plane_state->fb;
+ 	const struct amdgpu_framebuffer *afb =
+@@ -5622,7 +5629,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ 					   &plane_info->tiling_info,
+ 					   &plane_info->plane_size,
+ 					   &plane_info->dcc, address,
+-					   tmz_surface, force_disable_dcc);
++					   tmz_surface);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -5643,7 +5650,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	struct dc_scaling_info scaling_info;
+ 	struct dc_plane_info plane_info;
+ 	int ret;
+-	bool force_disable_dcc = false;
+ 
+ 	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ 	if (ret)
+@@ -5654,13 +5660,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	dc_plane_state->clip_rect = scaling_info.clip_rect;
+ 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+ 
+-	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ 					  afb->tiling_flags,
+ 					  &plane_info,
+ 					  &dc_plane_state->address,
+-					  afb->tmz_surface,
+-					  force_disable_dcc);
++					  afb->tmz_surface);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -9068,7 +9072,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			afb->tiling_flags,
+ 			&bundle->plane_infos[planes_count],
+ 			&bundle->flip_addrs[planes_count].address,
+-			afb->tmz_surface, false);
++			afb->tmz_surface);
+ 
+ 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ 				 new_plane_state->plane->index,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 1080075ccb17c2..e096fb56212298 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1695,16 +1695,16 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 	return ret;
+ }
+ 
+-static unsigned int kbps_from_pbn(unsigned int pbn)
++static uint32_t kbps_from_pbn(unsigned int pbn)
+ {
+-	unsigned int kbps = pbn;
++	uint64_t kbps = (uint64_t)pbn;
+ 
+ 	kbps *= (1000000 / PEAK_FACTOR_X1000);
+ 	kbps *= 8;
+ 	kbps *= 54;
+ 	kbps /= 64;
+ 
+-	return kbps;
++	return (uint32_t)kbps;
+ }
+ 
+ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
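
[The kbps_from_pbn() change above widens the accumulator to 64 bits because the chained multiplications can exceed 2^32 for large PBN values. A compilable before/after sketch; the PBN value is hypothetical and the 1006 peak factor is assumed from the driver's PEAK_FACTOR_X1000 define, which is not shown in this hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define PEAK_FACTOR_X1000 1006  /* assumed driver constant */

    int main(void)
    {
            unsigned int pbn = 12000;  /* illustrative high-bandwidth PBN */

            /* Old: every step in 32 bits; the intermediate wraps past 2^32. */
            unsigned int k32 = pbn;
            k32 *= 1000000 / PEAK_FACTOR_X1000;
            k32 *= 8;
            k32 *= 54;
            k32 /= 64;

            /* New: widen to 64 bits first, truncate only at the end. */
            uint64_t k64 = pbn;
            k64 = k64 * (1000000 / PEAK_FACTOR_X1000) * 8 * 54 / 64;

            printf("32-bit: %u kbps\n", k32);
            printf("64-bit: %u kbps\n", (uint32_t)k64);
            return 0;
    }

At pbn = 12000 the 32-bit intermediate (about 5.15e9) wraps, so the old code underreports the link rate by a wide margin while the widened version stays correct.]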
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 495e3cd70426db..83c7c8853edeca 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -309,8 +309,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
+ 								     const struct plane_size *plane_size,
+ 								     union dc_tiling_info *tiling_info,
+ 								     struct dc_plane_dcc_param *dcc,
+-								     struct dc_plane_address *address,
+-								     const bool force_disable_dcc)
++								     struct dc_plane_address *address)
+ {
+ 	const uint64_t modifier = afb->base.modifier;
+ 	int ret = 0;
+@@ -318,7 +317,7 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
+ 	amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
+ 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ 
+-	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
++	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+ 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
+ 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
+@@ -360,8 +359,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
+ 								      const struct plane_size *plane_size,
+ 								      union dc_tiling_info *tiling_info,
+ 								      struct dc_plane_dcc_param *dcc,
+-								      struct dc_plane_address *address,
+-								      const bool force_disable_dcc)
++								      struct dc_plane_address *address)
+ {
+ 	const uint64_t modifier = afb->base.modifier;
+ 	int ret = 0;
+@@ -371,7 +369,7 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
+ 
+ 	tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ 
+-	if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
++	if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
+ 		int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
+ 
+ 		dcc->enable = 1;
+@@ -839,8 +837,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 			     struct plane_size *plane_size,
+ 			     struct dc_plane_dcc_param *dcc,
+ 			     struct dc_plane_address *address,
+-			     bool tmz_surface,
+-			     bool force_disable_dcc)
++			     bool tmz_surface)
+ {
+ 	const struct drm_framebuffer *fb = &afb->base;
+ 	int ret;
+@@ -900,16 +897,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 		ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
+ 										 rotation, plane_size,
+ 										 tiling_info, dcc,
+-										 address,
+-										 force_disable_dcc);
++										 address);
+ 		if (ret)
+ 			return ret;
+ 	} else if (adev->family >= AMDGPU_FAMILY_AI) {
+ 		ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
+ 										rotation, plane_size,
+ 										tiling_info, dcc,
+-										address,
+-										force_disable_dcc);
++										address);
+ 		if (ret)
+ 			return ret;
+ 	} else {
+@@ -1000,14 +995,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ 		struct dc_plane_state *plane_state =
+ 			dm_plane_state_new->dc_state;
+-		bool force_disable_dcc = !plane_state->dcc.enable;
+ 
+ 		amdgpu_dm_plane_fill_plane_buffer_attributes(
+ 			adev, afb, plane_state->format, plane_state->rotation,
+ 			afb->tiling_flags,
+ 			&plane_state->tiling_info, &plane_state->plane_size,
+ 			&plane_state->dcc, &plane_state->address,
+-			afb->tmz_surface, force_disable_dcc);
++			afb->tmz_surface);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+index 6498359bff6f68..2eef13b1c05a4b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+@@ -51,8 +51,7 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ 				 struct plane_size *plane_size,
+ 				 struct dc_plane_dcc_param *dcc,
+ 				 struct dc_plane_address *address,
+-				 bool tmz_surface,
+-				 bool force_disable_dcc);
++				 bool tmz_surface);
+ 
+ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ 			 struct drm_plane *plane,
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 2723558049d656..9f37b39027051f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2063,7 +2063,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ 
+ 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+ 
+-	if (context->stream_count > get_seamless_boot_stream_count(context) ||
++	if (get_seamless_boot_stream_count(context) == 0 ||
+ 		context->stream_count == 0) {
+ 		/* Must wait for no flips to be pending before doing optimize bw */
+ 		hwss_wait_for_no_pipes_pending(dc, context);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+index 5bb8b78bf250a0..bf636b28e3e16e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+@@ -63,8 +63,7 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+ 
+ bool should_use_dmub_lock(struct dc_link *link)
+ {
+-	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+-	    link->psr_settings.psr_version == DC_PSR_VERSION_1)
++	if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ 		return true;
+ 
+ 	if (link->replay_settings.replay_feature_enabled)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+index c4378e620cbf91..986a69c5bd4bca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+@@ -29,7 +29,11 @@ dml2_rcflags := $(CC_FLAGS_NO_FPU)
+ 
+ ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
++frame_warn_flag := -Wframe-larger-than=4096
++else
+ frame_warn_flag := -Wframe-larger-than=3072
++endif
+ else
+ frame_warn_flag := -Wframe-larger-than=2048
+ endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+index 8dabb1ac0b684d..6822b07951204b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+@@ -6301,9 +6301,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 			mode_lib->ms.meta_row_bandwidth_this_state,
+ 			mode_lib->ms.dpte_row_bandwidth_this_state,
+ 			mode_lib->ms.NoOfDPPThisState,
+-			mode_lib->ms.UrgentBurstFactorLuma,
+-			mode_lib->ms.UrgentBurstFactorChroma,
+-			mode_lib->ms.UrgentBurstFactorCursor);
++			mode_lib->ms.UrgentBurstFactorLuma[j],
++			mode_lib->ms.UrgentBurstFactorChroma[j],
++			mode_lib->ms.UrgentBurstFactorCursor[j]);
+ 
+ 		s->VMDataOnlyReturnBWPerState = dml_get_return_bw_mbps_vm_only(
+ 																	&mode_lib->ms.soc,
+@@ -6434,7 +6434,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 							/* Output */
+ 							&mode_lib->ms.UrgentBurstFactorCursorPre[k],
+ 							&mode_lib->ms.UrgentBurstFactorLumaPre[k],
+-							&mode_lib->ms.UrgentBurstFactorChroma[k],
++							&mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ 							&mode_lib->ms.NotUrgentLatencyHidingPre[k]);
+ 
+ 					mode_lib->ms.cursor_bw_pre[k] = mode_lib->ms.cache_display_cfg.plane.NumberOfCursors[k] * mode_lib->ms.cache_display_cfg.plane.CursorWidth[k] *
+@@ -6458,9 +6458,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 				mode_lib->ms.cursor_bw_pre,
+ 				mode_lib->ms.prefetch_vmrow_bw,
+ 				mode_lib->ms.NoOfDPPThisState,
+-				mode_lib->ms.UrgentBurstFactorLuma,
+-				mode_lib->ms.UrgentBurstFactorChroma,
+-				mode_lib->ms.UrgentBurstFactorCursor,
++				mode_lib->ms.UrgentBurstFactorLuma[j],
++				mode_lib->ms.UrgentBurstFactorChroma[j],
++				mode_lib->ms.UrgentBurstFactorCursor[j],
+ 				mode_lib->ms.UrgentBurstFactorLumaPre,
+ 				mode_lib->ms.UrgentBurstFactorChromaPre,
+ 				mode_lib->ms.UrgentBurstFactorCursorPre,
+@@ -6517,9 +6517,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 						mode_lib->ms.cursor_bw,
+ 						mode_lib->ms.cursor_bw_pre,
+ 						mode_lib->ms.NoOfDPPThisState,
+-						mode_lib->ms.UrgentBurstFactorLuma,
+-						mode_lib->ms.UrgentBurstFactorChroma,
+-						mode_lib->ms.UrgentBurstFactorCursor,
++						mode_lib->ms.UrgentBurstFactorLuma[j],
++						mode_lib->ms.UrgentBurstFactorChroma[j],
++						mode_lib->ms.UrgentBurstFactorCursor[j],
+ 						mode_lib->ms.UrgentBurstFactorLumaPre,
+ 						mode_lib->ms.UrgentBurstFactorChromaPre,
+ 						mode_lib->ms.UrgentBurstFactorCursorPre);
+@@ -6586,9 +6586,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+ 													mode_lib->ms.cursor_bw_pre,
+ 													mode_lib->ms.prefetch_vmrow_bw,
+ 													mode_lib->ms.NoOfDPP[j], // VBA_ERROR DPPPerSurface is not assigned at this point, should use NoOfDpp here
+-													mode_lib->ms.UrgentBurstFactorLuma,
+-													mode_lib->ms.UrgentBurstFactorChroma,
+-													mode_lib->ms.UrgentBurstFactorCursor,
++													mode_lib->ms.UrgentBurstFactorLuma[j],
++													mode_lib->ms.UrgentBurstFactorChroma[j],
++													mode_lib->ms.UrgentBurstFactorCursor[j],
+ 													mode_lib->ms.UrgentBurstFactorLumaPre,
+ 													mode_lib->ms.UrgentBurstFactorChromaPre,
+ 													mode_lib->ms.UrgentBurstFactorCursorPre,
+@@ -7809,9 +7809,9 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
+ 				mode_lib->ms.DETBufferSizeYThisState[k],
+ 				mode_lib->ms.DETBufferSizeCThisState[k],
+ 				/* Output */
+-				&mode_lib->ms.UrgentBurstFactorCursor[k],
+-				&mode_lib->ms.UrgentBurstFactorLuma[k],
+-				&mode_lib->ms.UrgentBurstFactorChroma[k],
++				&mode_lib->ms.UrgentBurstFactorCursor[j][k],
++				&mode_lib->ms.UrgentBurstFactorLuma[j][k],
++				&mode_lib->ms.UrgentBurstFactorChroma[j][k],
+ 				&mode_lib->ms.NotUrgentLatencyHiding[k]);
+ 		}
+ 
+@@ -9190,6 +9190,8 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 			&locals->FractionOfUrgentBandwidth,
+ 			&s->dummy_boolean[0]); // dml_bool_t *PrefetchBandwidthSupport
+ 
++
++
+ 		if (s->VRatioPrefetchMoreThanMax != false || s->DestinationLineTimesForPrefetchLessThan2 != false) {
+ 			dml_print("DML::%s: VRatioPrefetchMoreThanMax                   = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ 			dml_print("DML::%s: DestinationLineTimesForPrefetchLessThan2    = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+@@ -9204,6 +9206,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
+ 			}
+ 		}
+ 
++
+ 		if (locals->PrefetchModeSupported == true && mode_lib->ms.support.ImmediateFlipSupport == true) {
+ 			locals->BandwidthAvailableForImmediateFlip = CalculateBandwidthAvailableForImmediateFlip(
+ 																	mode_lib->ms.num_active_planes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+index f951936bb579e6..504c427b3b3191 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
++++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+@@ -884,11 +884,11 @@ struct mode_support_st {
+ 	dml_uint_t meta_row_height[__DML_NUM_PLANES__];
+ 	dml_uint_t meta_row_height_chroma[__DML_NUM_PLANES__];
+ 	dml_float_t UrgLatency;
+-	dml_float_t UrgentBurstFactorCursor[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorCursor[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorCursorPre[__DML_NUM_PLANES__];
+-	dml_float_t UrgentBurstFactorLuma[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorLuma[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorLumaPre[__DML_NUM_PLANES__];
+-	dml_float_t UrgentBurstFactorChroma[__DML_NUM_PLANES__];
++	dml_float_t UrgentBurstFactorChroma[2][__DML_NUM_PLANES__];
+ 	dml_float_t UrgentBurstFactorChromaPre[__DML_NUM_PLANES__];
+ 	dml_float_t MaximumSwathWidthInLineBufferLuma;
+ 	dml_float_t MaximumSwathWidthInLineBufferChroma;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+index 9190c1328d5b2d..340791d40ecbfd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
++++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+@@ -531,14 +531,21 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
+ static bool call_dml_mode_support_and_programming(struct dc_state *context)
+ {
+ 	unsigned int result = 0;
+-	unsigned int min_state;
++	unsigned int min_state = 0;
+ 	int min_state_for_g6_temp_read = 0;
++
++
++	if (!context)
++		return false;
++
+ 	struct dml2_context *dml2 = context->bw_ctx.dml2;
+ 	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
+ 
+-	min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
++	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++		min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ 
+-	ASSERT(min_state_for_g6_temp_read >= 0);
++		ASSERT(min_state_for_g6_temp_read >= 0);
++	}
+ 
+ 	if (!dml2->config.use_native_pstate_optimization) {
+ 		result = optimize_pstate_with_svp_and_drr(dml2, context);
+@@ -549,14 +556,20 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
+ 	/* Upon trying to sett certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
+ 	 * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
+ 	 */
+-	if (min_state_for_g6_temp_read >= 0)
+-		min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
+-	else
+-		min_state = s->mode_support_params.out_lowest_state_idx;
+-
+-	if (result)
+-		result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
++	if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++		if (min_state_for_g6_temp_read >= 0)
++			min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
++		else
++			min_state = s->mode_support_params.out_lowest_state_idx;
++	}
+ 
++	if (result) {
++		if (!context->streams[0]->sink->link->dc->caps.is_apu) {
++			result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
++		} else {
++			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
++		}
++	}
+ 	return result;
+ }
+ 
+@@ -685,6 +698,8 @@ static bool dml2_validate_only(struct dc_state *context)
+ 	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
+ 
+ 	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
++	if (!dml2->config.skip_hw_state_mapping)
++		dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);
+ 
+ 	result = pack_and_call_dml_mode_support_ex(dml2,
+ 		&dml2->v20.scratch.cur_display_config,
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+index 961d8936150ab7..75fb77bca83ba2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+@@ -483,10 +483,11 @@ void dpp1_set_cursor_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	REG_UPDATE(CURSOR0_CONTROL,
+-			CUR0_ENABLE, cur_en);
++	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
++		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ 
+-	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++	}
+ }
+ 
+ void dpp1_cnv_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+index 3b6ca7974e188d..1236e0f9a2560c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
++++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+@@ -154,9 +154,11 @@ void dpp401_set_cursor_position(
+ 	struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+ 	uint32_t cur_en = pos->enable ? 1 : 0;
+ 
+-	REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
++	if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
++		REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ 
+-	dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++		dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
++	}
+ }
+ 
+ void dpp401_set_optional_cursor_attributes(
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+index fe741100c0f880..d347bb06577ac6 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	return wm_pending;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+index 7fb5523f972244..b98505b240a797 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 	return wm_pending;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+index 5264dc26cce1fa..32a6be543105c1 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
+ 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+index 5eb3da8d5206e9..dce7269959ce74 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
+ 			DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
+ 	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
+ 
+-	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
++	if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
++		hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ 
+ 	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+index b405fa22f87a9e..c74ee2d50a699a 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+@@ -1044,11 +1044,13 @@ void hubp2_cursor_set_position(
+ 	if (src_y_offset + cursor_height <= 0)
+ 		cur_en = 0;  /* not visible beyond top edge*/
+ 
+-	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
++		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-	REG_UPDATE(CURSOR_CONTROL,
++		REG_UPDATE(CURSOR_CONTROL,
+ 			CURSOR_ENABLE, cur_en);
++	}
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 			CURSOR_X_POSITION, pos->x,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+index c55b1b8be8ffd6..5cf7e6771cb49e 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+@@ -484,6 +484,8 @@ void hubp3_init(struct hubp *hubp)
+ 	//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ 	REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+ 
++	REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
++
+ 	hubp_reset(hubp);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+index 45023fa9b708dc..c4f41350d1b3ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
+ {
+ 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ 	REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
++
++	REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+ }
+ static struct hubp_funcs dcn32_hubp_funcs = {
+ 	.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+index 2d52100510f05f..7013c124efcff8 100644
+--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+@@ -718,11 +718,13 @@ void hubp401_cursor_set_position(
+ 			dc_fixpt_from_int(dst_x_offset),
+ 			param->h_scale_ratio));
+ 
+-	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+-		hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
++	if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
++		if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
++			hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ 
+-	REG_UPDATE(CURSOR_CONTROL,
+-		CURSOR_ENABLE, cur_en);
++		REG_UPDATE(CURSOR_CONTROL,
++			CURSOR_ENABLE, cur_en);
++	}
+ 
+ 	REG_SET_2(CURSOR_POSITION, 0,
+ 		CURSOR_X_POSITION, x_pos,
+diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+index 463f7abe35a7dd..70295ecfbe5e12 100644
+--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
+ 		}
+ 
+ 		hws->funcs.init_pipes(dc, dc->current_state);
+-		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
++		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
++			!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
+ 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ 					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+index a9816affd312db..0cc8a27be5935c 100644
+--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
+ 
+ 	/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+ 	.max_downscale_factor = {
+-			.argb8888 = 167,
+-			.nv12 = 167,
+-			.fp16 = 167 
++			.argb8888 = 358,
++			.nv12 = 358,
++			.fp16 = 358
+ 	},
+ 	64,
+ 	64
+@@ -693,7 +693,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_dcc = DCC_ENABLE,
+ 	.vsr_support = true,
+ 	.performance_trace = false,
+-	.max_downscale_src_width = 7680,/*upto 8K*/
++	.max_downscale_src_width = 4096, /* up to true 4K */
+ 	.scl_reset_length10 = true,
+ 	.sanity_checks = false,
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+index f6b0293543275b..83163d7c7f0014 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+@@ -1732,7 +1732,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
+ 
+ 	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ 	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+-	gpu_metrics->average_mm_activity = 0;
+ 
+ 	/* Valid power data is available only from primary die */
+ 	if (aldebaran_is_primary(smu)) {
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+index ebccb74306a765..f30b3d5eeca5c5 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
+ 	formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
+ 					       kwb_conn->wb_layer->layer_type,
+ 					       &n_formats);
++	if (!formats) {
++		kfree(kwb_conn);
++		return -ENOMEM;
++	}
+ 
+ 	err = drm_writeback_connector_init(&kms->base, wb_conn,
+ 					   &komeda_wb_connector_funcs,
+diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
+index 0e282b7b167c6b..b9eb67e3fa90e4 100644
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -195,7 +195,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
+ 	if (enabled)
+ 		vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
+ 
+-	for (i = 0; i < 200; ++i) {
++	for (i = 0; i < 1000; ++i) {
+ 		if (i)
+ 			mdelay(1);
+ 		vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index a2675b121fe44b..c036bbc92ba96e 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2002,8 +2002,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
+ 		memset(buf, 0, len);
+ 	} else {
+ 		dev_dbg(dev, "audio copy eld\n");
++		mutex_lock(&ctx->connector->eld_mutex);
+ 		memcpy(buf, ctx->connector->eld,
+ 		       min(sizeof(ctx->connector->eld), len));
++		mutex_unlock(&ctx->connector->eld_mutex);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index cf891e7677c0e2..faee8e2e82a053 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -296,7 +296,7 @@
+ #define MAX_LANE_COUNT 4
+ #define MAX_LINK_RATE HBR
+ #define AUTO_TRAIN_RETRY 3
+-#define MAX_HDCP_DOWN_STREAM_COUNT 10
++#define MAX_HDCP_DOWN_STREAM_COUNT 127
+ #define MAX_CR_LEVEL 0x03
+ #define MAX_EQ_LEVEL 0x03
+ #define AUX_WAIT_TIMEOUT_MS 15
+@@ -2023,7 +2023,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+ {
+ 	struct device *dev = it6505->dev;
+ 	u8 av[5][4], bv[5][4];
+-	int i, err;
++	int i, err, retry;
+ 
+ 	i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
+ 	if (i <= 0) {
+@@ -2032,22 +2032,28 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
+ 	}
+ 
+ 	it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
++	/* 1B-05: the V' read must be retried 3 times */
++	for (retry = 0; retry < 3; retry++) {
++		err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
++				      sizeof(bv));
+ 
+-	err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+-			      sizeof(bv));
++		if (err < 0) {
++			dev_err(dev, "Read V' value Fail %d", retry);
++			continue;
++		}
+ 
+-	if (err < 0) {
+-		dev_err(dev, "Read V' value Fail");
+-		return false;
+-	}
++		for (i = 0; i < 5; i++) {
++			if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
++			    bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
++				break;
+ 
+-	for (i = 0; i < 5; i++)
+-		if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+-		    bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+-			return false;
++			DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
++			return true;
++		}
++	}
+ 
+-	DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
+-	return true;
++	DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry);
++	return false;
+ }
+ 
+ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+@@ -2055,12 +2061,13 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+ 	struct it6505 *it6505 = container_of(work, struct it6505,
+ 					     hdcp_wait_ksv_list);
+ 	struct device *dev = it6505->dev;
+-	unsigned int timeout = 5000;
+-	u8 bstatus = 0;
++	u8 bstatus;
+ 	bool ksv_list_check;
++	/* 1B-04: wait up to 5s for the KSV list */
++	unsigned long timeout = jiffies +
++				msecs_to_jiffies(5000) + 1;
+ 
+-	timeout /= 20;
+-	while (timeout > 0) {
++	for (;;) {
+ 		if (!it6505_get_sink_hpd_status(it6505))
+ 			return;
+ 
+@@ -2069,27 +2076,23 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
+ 		if (bstatus & DP_BSTATUS_READY)
+ 			break;
+ 
+-		msleep(20);
+-		timeout--;
+-	}
++		if (time_after(jiffies, timeout)) {
++			DRM_DEV_DEBUG_DRIVER(dev, "KSV list wait timeout");
++			goto timeout;
++		}
+ 
+-	if (timeout == 0) {
+-		DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
+-		goto timeout;
++		msleep(20);
+ 	}
+ 
+ 	ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
+ 	DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
+ 			     ksv_list_check ? "pass" : "fail");
+-	if (ksv_list_check) {
+-		it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+-				HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
++
++	if (ksv_list_check)
+ 		return;
+-	}
++
+ timeout:
+-	it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+-			HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
+-			HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
++	it6505_start_hdcp(it6505);
+ }
+ 
+ static void it6505_hdcp_work(struct work_struct *work)
+@@ -2312,14 +2315,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
+ 	DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
+ 
+ 	if (dp_irq_vector & DP_CP_IRQ) {
+-		it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+-				HDCP_TRIGGER_CPIRQ);
+-
+ 		bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
+ 		if (bstatus < 0)
+ 			return bstatus;
+ 
+ 		DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
++
++		/* Check BSTATUS when receiving a CP_IRQ */
++		if (bstatus & DP_BSTATUS_R0_PRIME_READY &&
++		    it6505->hdcp_status == HDCP_AUTH_GOING)
++			it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
++					HDCP_TRIGGER_CPIRQ);
++		else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) &&
++			 it6505->hdcp_status == HDCP_AUTH_DONE)
++			it6505_start_hdcp(it6505);
+ 	}
+ 
+ 	ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
+@@ -2456,7 +2465,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
+ {
+ 	struct device *dev = it6505->dev;
+ 
+-	DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
++	DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt");
++	/* 1B-01: HDCP encryption should start when R0 is ready */
++	it6505_set_bits(it6505, REG_HDCP_TRIGGER,
++			HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
++
+ 	schedule_work(&it6505->hdcp_wait_ksv_list);
+ }
+ 
+diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
+index 35ae3f0e8f51f7..940083e5d2ddbf 100644
+--- a/drivers/gpu/drm/bridge/ite-it66121.c
++++ b/drivers/gpu/drm/bridge/ite-it66121.c
+@@ -1450,8 +1450,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
+ 		dev_dbg(dev, "No connector present, passing empty EDID data");
+ 		memset(buf, 0, len);
+ 	} else {
++		mutex_lock(&ctx->connector->eld_mutex);
+ 		memcpy(buf, ctx->connector->eld,
+ 		       min(sizeof(ctx->connector->eld), len));
++		mutex_unlock(&ctx->connector->eld_mutex);
+ 	}
+ 	mutex_unlock(&ctx->lock);
+ 
+diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
+index 007ceb281d00da..56a4965e518cc2 100644
+--- a/drivers/gpu/drm/display/drm_dp_cec.c
++++ b/drivers/gpu/drm/display/drm_dp_cec.c
+@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
+ 	if (!aux->transfer)
+ 		return;
+ 
+-#ifndef CONFIG_MEDIA_CEC_RC
+-	/*
+-	 * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+-	 * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+-	 *
+-	 * Do this here as well to ensure the tests against cec_caps are
+-	 * correct.
+-	 */
+-	cec_caps &= ~CEC_CAP_RC;
+-#endif
+ 	cancel_delayed_work_sync(&aux->cec.unregister_work);
+ 
+ 	mutex_lock(&aux->cec.lock);
+@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
+ 		num_las = CEC_MAX_LOG_ADDRS;
+ 
+ 	if (aux->cec.adap) {
+-		if (aux->cec.adap->capabilities == cec_caps &&
++		/* Check if the adapter properties have changed */
++		if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
++		    (cec_caps & CEC_CAP_MONITOR_ALL) &&
+ 		    aux->cec.adap->available_log_addrs == num_las) {
+ 			/* Unchanged, so just set the phys addr */
+ 			cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
+diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
+index 251f9431371793..aca442c25209a9 100644
+--- a/drivers/gpu/drm/drm_client_modeset.c
++++ b/drivers/gpu/drm/drm_client_modeset.c
+@@ -743,6 +743,15 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
+ 	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ 		goto retry;
+ 
++	for (i = 0; i < count; i++) {
++		struct drm_connector *connector = connectors[i];
++
++		if (connector->has_tile)
++			drm_client_get_tile_offsets(dev, connectors, connector_count,
++						    modes, offsets, i,
++						    connector->tile_h_loc, connector->tile_v_loc);
++	}
++
+ 	/*
+ 	 * If the BIOS didn't enable everything it could, fall back to have the
+ 	 * same user experiencing of lighting up as much as possible like the
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index ca7f43c8d6f1b3..0e6021235a9304 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -277,6 +277,7 @@ static int __drm_connector_init(struct drm_device *dev,
+ 	INIT_LIST_HEAD(&connector->probed_modes);
+ 	INIT_LIST_HEAD(&connector->modes);
+ 	mutex_init(&connector->mutex);
++	mutex_init(&connector->eld_mutex);
+ 	mutex_init(&connector->edid_override_mutex);
+ 	mutex_init(&connector->hdmi.infoframes.lock);
+ 	connector->edid_blob_ptr = NULL;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 855beafb76ffbe..13bc4c290b17d5 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
+ 
+ static void clear_eld(struct drm_connector *connector)
+ {
++	mutex_lock(&connector->eld_mutex);
+ 	memset(connector->eld, 0, sizeof(connector->eld));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	connector->latency_present[0] = false;
+ 	connector->latency_present[1] = false;
+@@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
+ 	if (!drm_edid)
+ 		return;
+ 
++	mutex_lock(&connector->eld_mutex);
++
+ 	mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
+ 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
+ 		    connector->base.id, connector->name,
+@@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
+ 	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
+ 		    connector->base.id, connector->name,
+ 		    drm_eld_size(eld), total_sad_count);
++
++	mutex_unlock(&connector->eld_mutex);
+ }
+ 
+ static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index c9008113111ba2..fb3614a7ba44bb 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1354,14 +1354,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
+ }
+ EXPORT_SYMBOL(drm_fb_helper_set_par);
+ 
+-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
++static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
+ {
+ 	struct drm_mode_set *mode_set;
+ 
+ 	mutex_lock(&fb_helper->client.modeset_mutex);
+ 	drm_client_for_each_modeset(mode_set, &fb_helper->client) {
+-		mode_set->x = x;
+-		mode_set->y = y;
++		mode_set->x += dx;
++		mode_set->y += dy;
+ 	}
+ 	mutex_unlock(&fb_helper->client.modeset_mutex);
+ }
+@@ -1370,16 +1370,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
+ 			      struct fb_info *info)
+ {
+ 	struct drm_fb_helper *fb_helper = info->par;
+-	int ret;
++	int ret, dx, dy;
+ 
+-	pan_set(fb_helper, var->xoffset, var->yoffset);
++	dx = var->xoffset - info->var.xoffset;
++	dy = var->yoffset - info->var.yoffset;
++	pan_set(fb_helper, dx, dy);
+ 
+ 	ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ 	if (!ret) {
+ 		info->var.xoffset = var->xoffset;
+ 		info->var.yoffset = var->yoffset;
+ 	} else
+-		pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
++		pan_set(fb_helper, -dx, -dy);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
+new file mode 100644
+index 00000000000000..c477d98ade2b41
+--- /dev/null
++++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
+@@ -0,0 +1,94 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/array_size.h>
++#include <linux/dmi.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <drm/drm_edid.h>
++#include <drm/drm_utils.h>
++
++struct drm_panel_min_backlight_quirk {
++	struct {
++		enum dmi_field field;
++		const char * const value;
++	} dmi_match;
++	struct drm_edid_ident ident;
++	u8 min_brightness;
++};
++
++static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
++	/* 13 inch matte panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
++		.ident.name = "NE135FBM-N41",
++		.min_brightness = 0,
++	},
++	/* 13 inch glossy panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
++		.ident.name = "NE135FBM-N41",
++		.min_brightness = 0,
++	},
++	/* 13 inch 2.8k panel */
++	{
++		.dmi_match.field = DMI_BOARD_VENDOR,
++		.dmi_match.value = "Framework",
++		.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
++		.ident.name = "NE135A1M-NY1",
++		.min_brightness = 0,
++	},
++};
++
++static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
++						  const struct drm_edid *edid)
++{
++	if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
++		return false;
++
++	if (!drm_edid_match(edid, &quirk->ident))
++		return false;
++
++	return true;
++}
++
++/**
++ * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
++ * @edid: EDID of the panel to check
++ *
++ * This function checks for platform-specific (e.g. DMI-based) quirks
++ * providing info on the minimum backlight brightness for systems where this
++ * cannot be probed correctly from the hardware or firmware.
++ *
++ * Returns:
++ * A negative error value or
++ * an override value in the range [0, 255] representing 0-100% to be scaled to
++ * the driver's target range.
++ */
++int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
++{
++	const struct drm_panel_min_backlight_quirk *quirk;
++	size_t i;
++
++	if (!IS_ENABLED(CONFIG_DMI))
++		return -ENODATA;
++
++	if (!edid)
++		return -EINVAL;
++
++	for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
++		quirk = &drm_panel_min_backlight_quirks[i];
++
++		if (drm_panel_min_backlight_quirk_matches(quirk, edid))
++			return quirk->min_brightness;
++	}
++
++	return -ENODATA;
++}
++EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
++
++MODULE_DESCRIPTION("Quirks for panel backlight overrides");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index 466a9e514aa1c8..7a57d4a23e410d 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -1648,7 +1648,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ 	struct hdmi_context *hdata = dev_get_drvdata(dev);
+ 	struct drm_connector *connector = &hdata->connector;
+ 
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index ff5ba7b3035f3b..4ae7459e11455b 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2049,11 +2049,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
+ 	/* Compressed BPP should be less than the Input DSC bpp */
+ 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ 
+-	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
+-		if (valid_dsc_bpp[i] < dsc_min_bpp)
++	for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
++		if (valid_dsc_bpp[i] < dsc_min_bpp ||
++		    valid_dsc_bpp[i] > dsc_max_bpp)
+ 			continue;
+-		if (valid_dsc_bpp[i] > dsc_max_bpp)
+-			break;
+ 
+ 		ret = dsc_compute_link_config(intel_dp,
+ 					      pipe_config,
+@@ -2778,7 +2777,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ 
+ 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
+ 
+-	/* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
+ 	as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
+ 	as_sdp->length = 0x9;
+ 	as_sdp->duration_incr_ms = 0;
+@@ -2789,7 +2787,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
+ 		as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
+ 		as_sdp->target_rr_divider = true;
+ 	} else {
+-		as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
++		as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
+ 		as_sdp->vtotal = adjusted_mode->vtotal;
+ 		as_sdp->target_rr = 0;
+ 	}
+diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
+index 8fee26d791f4fe..3bf42871a4f08e 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
++++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
+@@ -2180,6 +2180,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
+ 
+ 		drm_dbg_kms(display->drm,
+ 			    "HDCP2.2 Downstream topology change\n");
++
++		ret = hdcp2_authenticate_repeater_topology(connector);
++		if (!ret) {
++			intel_hdcp_update_value(connector,
++						DRM_MODE_CONTENT_PROTECTION_ENABLED,
++						true);
++			goto out;
++		}
++
++		drm_dbg_kms(display->drm,
++			    "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
++			    connector->base.base.id, connector->base.name,
++			    ret);
+ 	} else {
+ 		drm_dbg_kms(display->drm,
+ 			    "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
+diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+index 038ca2ec5d7a60..decf0ebeec0b17 100644
+--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
++++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
+@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
+ 	DRM_FORMAT_Y216,
+ 	DRM_FORMAT_XYUV8888,
+ 	DRM_FORMAT_XVYU2101010,
+-	DRM_FORMAT_XVYU12_16161616,
+-	DRM_FORMAT_XVYU16161616,
+ };
+ 
+ static const u32 icl_sdr_uv_plane_formats[] = {
+@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
+ 	DRM_FORMAT_Y216,
+ 	DRM_FORMAT_XYUV8888,
+ 	DRM_FORMAT_XVYU2101010,
+-	DRM_FORMAT_XVYU12_16161616,
+-	DRM_FORMAT_XVYU16161616,
+ };
+ 
+ static const u32 icl_hdr_plane_formats[] = {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+index fe69f2c8527d79..ae3343c81a6455 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
+ 	struct address_space *mapping = obj->base.filp->f_mapping;
+ 	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
+ 	struct sg_table *st;
+-	struct sgt_iter sgt_iter;
+-	struct page *page;
+ 	int ret;
+ 
+ 	/*
+@@ -239,9 +237,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
+ 		 * for PAGE_SIZE chunks instead may be helpful.
+ 		 */
+ 		if (max_segment > PAGE_SIZE) {
+-			for_each_sgt_page(page, sgt_iter, st)
+-				put_page(page);
+-			sg_free_table(st);
++			shmem_sg_free_table(st, mapping, false, false);
+ 			kfree(st);
+ 
+ 			max_segment = PAGE_SIZE;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index c0bd730383f21e..4b12a6c7c247bd 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -5511,12 +5511,20 @@ static inline void guc_log_context(struct drm_printer *p,
+ {
+ 	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
+ 	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+-	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+-		   ce->ring->head,
+-		   ce->lrc_reg_state[CTX_RING_HEAD]);
+-	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+-		   ce->ring->tail,
+-		   ce->lrc_reg_state[CTX_RING_TAIL]);
++	if (intel_context_pin_if_active(ce)) {
++		drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
++			   ce->ring->head,
++			   ce->lrc_reg_state[CTX_RING_HEAD]);
++		drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
++			   ce->ring->tail,
++			   ce->lrc_reg_state[CTX_RING_TAIL]);
++		intel_context_unpin(ce);
++	} else {
++		drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
++			   ce->ring->head);
++		drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
++			   ce->ring->tail);
++	}
+ 	drm_printf(p, "\t\tContext Pin Count: %u\n",
+ 		   atomic_read(&ce->pin_count));
+ 	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 9f6ffd344693ec..ad3462476a143e 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -732,6 +732,13 @@ static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
+ 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ 	int i;
+ 
++	/* if we cannot merge 2 LMs (no 3d mux), it is better to fail earlier,
++	 * before even checking the width after the split
++	 */
++	if (!dpu_kms->catalog->caps->has_3d_merge &&
++	    adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
++		return -E2BIG;
++
+ 	for (i = 0; i < cstate->num_mixers; i++) {
+ 		struct drm_rect *r = &cstate->lm_bounds[i];
+ 		r->x1 = crtc_split_width * i;
+@@ -1251,6 +1258,12 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
+ {
+ 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ 
++	/* if there is no 3d_mux block, we cannot merge LMs and thus cannot
++	 * split a large layer across 2 LMs; filter out such modes
++	 */
++	if (!dpu_kms->catalog->caps->has_3d_merge &&
++	    mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
++		return MODE_BAD_HVALUE;
+ 	/*
+ 	 * max crtc width is equal to the max mixer width * 2 and max height is 4K
+ 	 */
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 5cbb11986460d1..c179ed0e9e2bd1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -414,8 +414,10 @@ static int msm_dp_audio_get_eld(struct device *dev,
+ 		return -ENODEV;
+ 	}
+ 
++	mutex_lock(&msm_dp_display->connector->eld_mutex);
+ 	memcpy(buf, msm_dp_display->connector->eld,
+ 		min(sizeof(msm_dp_display->connector->eld), len));
++	mutex_unlock(&msm_dp_display->connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+index d586aea3089841..9c83bab0a53091 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+@@ -121,6 +121,8 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+ 		return mqe->data;
+ 	}
+ 
++	size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
++
+ 	msg = kvmalloc(repc, GFP_KERNEL);
+ 	if (!msg)
+ 		return ERR_PTR(-ENOMEM);
+@@ -129,19 +131,15 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
+ 	len = min_t(u32, repc, len);
+ 	memcpy(msg, mqe->data, len);
+ 
+-	rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
+-	if (rptr == gsp->msgq.cnt)
+-		rptr = 0;
+-
+ 	repc -= len;
+ 
+ 	if (repc) {
+ 		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ 		memcpy(msg + len, mqe, repc);
+-
+-		rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
+ 	}
+ 
++	rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
++
+ 	mb();
+ 	(*gsp->msgq.rptr) = rptr;
+ 	return msg;
+@@ -163,7 +161,7 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+ 	u64 *end;
+ 	u64 csum = 0;
+ 	int free, time = 1000000;
+-	u32 wptr, size;
++	u32 wptr, size, step;
+ 	u32 off = 0;
+ 
+ 	argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
+@@ -197,7 +195,9 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
+ 		}
+ 
+ 		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+-		size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
++		step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
++		size = min_t(u32, argc, step * GSP_PAGE_SIZE);
++
+ 		memcpy(cqe, (u8 *)cmd + off, size);
+ 
+ 		wptr += DIV_ROUND_UP(size, 0x1000);
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 5b69cc8011b42b..8d64ba18572ec4 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -775,8 +775,10 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
+ 		if (!dig->pin || dig->pin->id != port)
+ 			continue;
+ 		*enabled = true;
++		mutex_lock(&connector->eld_mutex);
+ 		ret = drm_eld_size(connector->eld);
+ 		memcpy(buf, connector->eld, min(max_bytes, ret));
++		mutex_unlock(&connector->eld_mutex);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index ff9d95e2c4d4d5..a7891a139c8813 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -947,9 +947,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
+ {
+ 	struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
+ 						event_work);
+-	struct drm_connector *connector = &dp->connector;
+-	enum drm_connector_status old_status;
+-
+ 	int ret;
+ 
+ 	mutex_lock(&dp->lock);
+@@ -1009,11 +1006,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
+ 
+ out:
+ 	mutex_unlock(&dp->lock);
+-
+-	old_status = connector->status;
+-	connector->status = connector->funcs->detect(connector, false);
+-	if (old_status != connector->status)
+-		drm_kms_helper_hotplug_event(dp->drm_dev);
++	drm_connector_helper_hpd_irq_event(&dp->connector);
+ }
+ 
+ static int cdn_dp_pd_event(struct notifier_block *nb,
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index 21b46a6465f08a..f8bbae6393ef8c 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -1225,7 +1225,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size
+ 	struct drm_connector *connector = hdmi->drm_connector;
+ 
+ 	DRM_DEBUG_DRIVER("\n");
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index 294773342e710d..4ba869e0e794c7 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -46,7 +46,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
+ 	struct drm_display_mode *mode, *preferred;
+ 
+ 	mutex_lock(&drm->mode_config.mutex);
+-	preferred = list_first_entry(&connector->modes, struct drm_display_mode, head);
++	preferred = list_first_entry_or_null(&connector->modes, struct drm_display_mode, head);
+ 	list_for_each_entry(mode, &connector->modes, head)
+ 		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ 			preferred = mode;
+@@ -105,9 +105,8 @@ static int set_connector_edid(struct kunit *test, struct drm_connector *connecto
+ 	mutex_lock(&drm->mode_config.mutex);
+ 	ret = connector->funcs->fill_modes(connector, 4096, 4096);
+ 	mutex_unlock(&drm->mode_config.mutex);
+-	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static const struct drm_connector_hdmi_funcs dummy_connector_hdmi_funcs = {
+@@ -223,7 +222,7 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	return priv;
+ }
+@@ -728,7 +727,7 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -802,7 +801,7 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -873,7 +872,7 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_dvi_1080p,
+ 				 ARRAY_SIZE(test_edid_dvi_1080p));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_FALSE(test, info->is_hdmi);
+@@ -920,7 +919,7 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -967,7 +966,7 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -1014,7 +1013,7 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+@@ -1121,7 +1120,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1190,7 +1189,7 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1254,7 +1253,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1314,7 +1313,7 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1381,7 +1380,7 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_200mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1447,7 +1446,7 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+@@ -1507,7 +1506,7 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
+ 	ret = set_connector_edid(test, conn,
+ 				 test_edid_hdmi_1080p_rgb_max_340mhz,
+ 				 ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
+-	KUNIT_ASSERT_EQ(test, ret, 0);
++	KUNIT_ASSERT_GT(test, ret, 0);
+ 
+ 	info = &conn->display_info;
+ 	KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index e3818c48c9b8c8..e86123009c9053 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -2193,9 +2193,9 @@ static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
+ 	struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ 	struct drm_connector *connector = &vc4_hdmi->connector;
+ 
+-	mutex_lock(&vc4_hdmi->mutex);
++	mutex_lock(&connector->eld_mutex);
+ 	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+-	mutex_unlock(&vc4_hdmi->mutex);
++	mutex_unlock(&connector->eld_mutex);
+ 
+ 	return 0;
+ }
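The vc4 hunk above replaces the driver-wide vc4_hdmi->mutex with the connector's dedicated eld_mutex around the ELD copy, so audio can read the buffer without contending on unrelated HDMI state. A rough userspace sketch of that pattern — a bounded copy of shared data under the lock that owns it — using pthreads and hypothetical names (this is an illustration, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define ELD_SIZE 128

/* Hypothetical stand-in for the connector's eld[] + eld_mutex pair. */
struct connector {
	pthread_mutex_t eld_mutex;	/* protects eld[] */
	unsigned char eld[ELD_SIZE];
};

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Copy at most len bytes of the ELD while holding the lock that guards it. */
static size_t get_eld(struct connector *c, unsigned char *buf, size_t len)
{
	size_t n;

	pthread_mutex_lock(&c->eld_mutex);
	n = min_sz(sizeof(c->eld), len);
	memcpy(buf, c->eld, n);
	pthread_mutex_unlock(&c->eld_mutex);
	return n;
}

int main(void)
{
	struct connector c = { .eld_mutex = PTHREAD_MUTEX_INITIALIZER };
	unsigned char buf[ELD_SIZE];

	memcpy(c.eld, "ELD", 4);
	printf("copied %zu bytes\n", get_eld(&c, buf, sizeof(buf)));
	return 0;
}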
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
+index 64c236169db88a..5dc8eeaf7123c4 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
+@@ -194,6 +194,13 @@ struct virtio_gpu_framebuffer {
+ #define to_virtio_gpu_framebuffer(x) \
+ 	container_of(x, struct virtio_gpu_framebuffer, base)
+ 
++struct virtio_gpu_plane_state {
++	struct drm_plane_state base;
++	struct virtio_gpu_fence *fence;
++};
++#define to_virtio_gpu_plane_state(x) \
++	container_of(x, struct virtio_gpu_plane_state, base)
++
+ struct virtio_gpu_queue {
+ 	struct virtqueue *vq;
+ 	spinlock_t qlock;
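The virtgpu_drv.h hunk above introduces a driver-private plane state that embeds the generic struct drm_plane_state, with to_virtio_gpu_plane_state() recovering the wrapper via container_of(). A minimal, self-contained C sketch of that embedding idiom (all names hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int id; };

/* Driver-private state wrapping the generic one, as virtio-gpu now does. */
struct priv_state {
	struct base_state base;
	void *fence;		/* per-state payload, e.g. a fence pointer */
};

int main(void)
{
	struct priv_state ps = { .base = { .id = 7 }, .fence = NULL };
	struct base_state *b = &ps.base;	/* what core code hands around */
	struct priv_state *back = container_of(b, struct priv_state, base);

	printf("recovered wrapper for id=%d: %p\n", b->id, (void *)back);
	return back == &ps ? 0 : 1;
}

Keeping the fence in the per-state wrapper (rather than the framebuffer, as before) ties its lifetime to one atomic commit, which is what the hunks below exploit.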
+diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
+index a72a2dbda031c2..7acd38b962c621 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
+@@ -66,11 +66,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
+ 	return format;
+ }
+ 
++static struct
++drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
++{
++	struct virtio_gpu_plane_state *new;
++
++	if (WARN_ON(!plane->state))
++		return NULL;
++
++	new = kzalloc(sizeof(*new), GFP_KERNEL);
++	if (!new)
++		return NULL;
++
++	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
++
++	return &new->base;
++}
++
+ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
+ 	.update_plane		= drm_atomic_helper_update_plane,
+ 	.disable_plane		= drm_atomic_helper_disable_plane,
+ 	.reset			= drm_atomic_helper_plane_reset,
+-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
++	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
+ 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+ };
+ 
+@@ -138,11 +155,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
+ 	struct drm_device *dev = plane->dev;
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo;
+ 
+ 	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
++	vgplane_st = to_virtio_gpu_plane_state(plane->state);
+ 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+-	if (vgfb->fence) {
++	if (vgplane_st->fence) {
+ 		struct virtio_gpu_object_array *objs;
+ 
+ 		objs = virtio_gpu_array_alloc(1);
+@@ -151,13 +170,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
+ 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+ 		virtio_gpu_array_lock_resv(objs);
+ 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+-					      width, height, objs, vgfb->fence);
++					      width, height, objs,
++					      vgplane_st->fence);
+ 		virtio_gpu_notify(vgdev);
+-
+-		dma_fence_wait_timeout(&vgfb->fence->f, true,
++		dma_fence_wait_timeout(&vgplane_st->fence->f, true,
+ 				       msecs_to_jiffies(50));
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
+ 	} else {
+ 		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
+ 					      width, height, NULL, NULL);
+@@ -247,20 +264,23 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
+ 	struct drm_device *dev = plane->dev;
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo;
+ 
+ 	if (!new_state->fb)
+ 		return 0;
+ 
+ 	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
++	vgplane_st = to_virtio_gpu_plane_state(new_state);
+ 	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ 	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
+ 		return 0;
+ 
+-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
+-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
++	if (bo->dumb) {
++		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
++						     vgdev->fence_drv.context,
+ 						     0);
+-		if (!vgfb->fence)
++		if (!vgplane_st->fence)
+ 			return -ENOMEM;
+ 	}
+ 
+@@ -270,15 +290,15 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
+ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
+ 					struct drm_plane_state *state)
+ {
+-	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 
+ 	if (!state->fb)
+ 		return;
+ 
+-	vgfb = to_virtio_gpu_framebuffer(state->fb);
+-	if (vgfb->fence) {
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
++	vgplane_st = to_virtio_gpu_plane_state(state);
++	if (vgplane_st->fence) {
++		dma_fence_put(&vgplane_st->fence->f);
++		vgplane_st->fence = NULL;
+ 	}
+ }
+ 
+@@ -291,6 +311,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 	struct virtio_gpu_device *vgdev = dev->dev_private;
+ 	struct virtio_gpu_output *output = NULL;
+ 	struct virtio_gpu_framebuffer *vgfb;
++	struct virtio_gpu_plane_state *vgplane_st;
+ 	struct virtio_gpu_object *bo = NULL;
+ 	uint32_t handle;
+ 
+@@ -303,6 +324,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 
+ 	if (plane->state->fb) {
+ 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
++		vgplane_st = to_virtio_gpu_plane_state(plane->state);
+ 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ 		handle = bo->hw_res_handle;
+ 	} else {
+@@ -322,11 +344,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ 			(vgdev, 0,
+ 			 plane->state->crtc_w,
+ 			 plane->state->crtc_h,
+-			 0, 0, objs, vgfb->fence);
++			 0, 0, objs, vgplane_st->fence);
+ 		virtio_gpu_notify(vgdev);
+-		dma_fence_wait(&vgfb->fence->f, true);
+-		dma_fence_put(&vgfb->fence->f);
+-		vgfb->fence = NULL;
++		dma_fence_wait(&vgplane_st->fence->f, true);
+ 	}
+ 
+ 	if (plane->state->fb != old_state->fb) {
+diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+index a9b0091cb7ee11..6d31573ed1765f 100644
+--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+@@ -59,6 +59,10 @@
+ /* Common to all OA units */
+ #define  OA_OACONTROL_REPORT_BC_MASK		REG_GENMASK(9, 9)
+ #define  OA_OACONTROL_COUNTER_SIZE_MASK		REG_GENMASK(8, 8)
++#define  OAG_OACONTROL_USED_BITS \
++	(OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
++	 OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
++	 OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
+ 
+ #define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
+ #define  OAG_OA_DEBUG_DISABLE_MMIO_TRG			REG_BIT(14)
+@@ -85,6 +89,8 @@
+ #define OAM_CONTEXT_CONTROL_OFFSET		(0x1bc)
+ #define OAM_CONTROL_OFFSET			(0x194)
+ #define  OAM_CONTROL_COUNTER_SEL_MASK		REG_GENMASK(3, 1)
++#define  OAM_OACONTROL_USED_BITS \
++	(OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
+ #define OAM_DEBUG_OFFSET			(0x198)
+ #define OAM_STATUS_OFFSET			(0x19c)
+ #define OAM_MMIO_TRG_OFFSET			(0x1d0)
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index 21a50d539426b0..2577291b396ae6 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -109,11 +109,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ 	drm_puts(&p, "\n**** GuC CT ****\n");
+ 	xe_guc_ct_snapshot_print(ss->guc.ct, &p);
+ 
+-	/*
+-	 * Don't add a new section header here because the mesa debug decoder
+-	 * tool expects the context information to be in the 'GuC CT' section.
+-	 */
+-	/* drm_puts(&p, "\n**** Contexts ****\n"); */
++	drm_puts(&p, "\n**** Contexts ****\n");
+ 	xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+ 
+ 	drm_puts(&p, "\n**** Job ****\n");
+@@ -342,42 +338,34 @@ int xe_devcoredump_init(struct xe_device *xe)
+ /**
+  * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+  *
+- * The output is split to multiple lines because some print targets, e.g. dmesg
+- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
+- * piece-meal fashion is not possible, each separate call to drm_puts() has a
+- * line-feed automatically added! Therefore, the entire output line must be
+- * constructed in a local buffer first, then printed in one atomic output call.
++ * The output is split into multiple calls to drm_puts() because some print
++ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
++ * add newlines, as is the case with dmesg: each drm_puts() call creates a
++ * separate line.
+  *
+  * There is also a scheduler yield call to prevent the 'task has been stuck for
+  * 120s' kernel hang check feature from firing when printing to a slow target
+  * such as dmesg over a serial port.
+  *
+- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
+- *
+  * @p: the printer object to output to
+  * @prefix: optional prefix to add to output string
++ * @suffix: optional suffix to add at the end. 0 disables it and is
++ *          not added to the output, which is useful when using multiple calls
++ *          to dump data to @p
+  * @blob: the Binary Large OBject to dump out
+  * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+  * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+  */
+-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ 			   const void *blob, size_t offset, size_t size)
+ {
+ 	const u32 *blob32 = (const u32 *)blob;
+ 	char buff[ASCII85_BUFSZ], *line_buff;
+ 	size_t line_pos = 0;
+ 
+-	/*
+-	 * Splitting blobs across multiple lines is not compatible with the mesa
+-	 * debug decoder tool. Note that even dropping the explicit '\n' below
+-	 * doesn't help because the GuC log is so big some underlying implementation
+-	 * still splits the lines at 512K characters. So just bail completely for
+-	 * the moment.
+-	 */
+-	return;
+-
+ #define DMESG_MAX_LINE_LEN	800
+-#define MIN_SPACE		(ASCII85_BUFSZ + 2)		/* 85 + "\n\0" */
++	/* Always leave space for the suffix char and the \0 */
++#define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* 85 + "<suffix>\0" */
+ 
+ 	if (size & 3)
+ 		drm_printf(p, "Size not word aligned: %zu", size);
+@@ -409,7 +397,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ 		line_pos += strlen(line_buff + line_pos);
+ 
+ 		if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+-			line_buff[line_pos++] = '\n';
+ 			line_buff[line_pos++] = 0;
+ 
+ 			drm_puts(p, line_buff);
+@@ -421,10 +408,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ 		}
+ 	}
+ 
++	if (suffix)
++		line_buff[line_pos++] = suffix;
++
+ 	if (line_pos) {
+-		line_buff[line_pos++] = '\n';
+ 		line_buff[line_pos++] = 0;
+-
+ 		drm_puts(p, line_buff);
+ 	}
+ 
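Taken together, the xe_devcoredump.c hunks keep the output line-buffered — flushing via drm_puts() before the buffer fills — but replace the hard-coded trailing '\n' with a caller-supplied suffix, so several calls can contribute to one logical dump. A rough userspace analogue of that accumulate/flush/suffix flow (puts() stands in for drm_puts() and likewise appends a newline per call; the sizes are made up):

#include <stdio.h>
#include <string.h>

#define MAX_LINE 16	/* deliberately small, to force flushes */

/* Emit src in chunks, flushing whenever the line buffer is nearly full;
 * append the optional suffix (0 disables it) only after the final chunk. */
static void print_chunked(const char *src, char suffix)
{
	char line[MAX_LINE + 2];	/* room for suffix and '\0' */
	size_t pos = 0;

	for (; *src; src++) {
		line[pos++] = *src;
		if (pos >= MAX_LINE) {
			line[pos] = '\0';
			puts(line);	/* each flush is one output call */
			pos = 0;
		}
	}
	if (suffix)
		line[pos++] = suffix;
	if (pos) {
		line[pos] = '\0';
		puts(line);
	}
}

int main(void)
{
	print_chunked("0123456789abcdefghijklmnop", '!');
	return 0;
}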
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
+index a4eebc285fc837..b231c8ad799f69 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.h
++++ b/drivers/gpu/drm/xe/xe_devcoredump.h
+@@ -26,7 +26,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
+ }
+ #endif
+ 
+-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
++void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ 			   const void *blob, size_t offset, size_t size);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
+index 94d468d01253dd..77d818beb26d35 100644
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
+ 	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
+ 
+-	if (IS_SRIOV_PF(gt_to_xe(gt)))
++	if (IS_SRIOV_PF(gt_to_xe(gt))) {
++		xe_gt_sriov_pf_init(gt);
+ 		xe_gt_sriov_pf_init_hw(gt);
++	}
+ 
+ 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+index e71fc3d2bda224..6f906c8e8108ba 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+ 	return 0;
+ }
+ 
++/**
++ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
++ * @gt: the &xe_gt to initialize
++ *
++ * Late one-time initialization of the PF data.
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int xe_gt_sriov_pf_init(struct xe_gt *gt)
++{
++	return xe_gt_sriov_pf_migration_init(gt);
++}
++
+ static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
+ {
+ 	return GRAPHICS_VERx100(xe) == 1200;
+@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+ 		pf_enable_ggtt_guest_update(gt);
+ 
+ 	xe_gt_sriov_pf_service_update(gt);
+-	xe_gt_sriov_pf_migration_init(gt);
+ }
+ 
+ static u32 pf_get_vf_regs_stride(struct xe_device *xe)
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+index 96fab779a906f0..f474509411c0cd 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+@@ -10,6 +10,7 @@ struct xe_gt;
+ 
+ #ifdef CONFIG_PCI_IOV
+ int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
++int xe_gt_sriov_pf_init(struct xe_gt *gt);
+ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
+ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
+ void xe_gt_sriov_pf_restart(struct xe_gt *gt);
+@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
+ 	return 0;
+ }
+ 
++static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
++{
++	return 0;
++}
++
+ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
+ {
+ }
+diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
+index 8aeb1789805c5e..6eabf7a9d3b072 100644
+--- a/drivers/gpu/drm/xe/xe_guc_ct.c
++++ b/drivers/gpu/drm/xe/xe_guc_ct.c
+@@ -1700,7 +1700,8 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ 			   snapshot->g2h_outstanding);
+ 
+ 		if (snapshot->ctb)
+-			xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
++			xe_print_blob_ascii85(p, "CTB data", '\n',
++					      snapshot->ctb, 0, snapshot->ctb_size);
+ 	} else {
+ 		drm_puts(p, "CT disabled\n");
+ 	}
+diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
+index df4cfb698cdbc7..2baa4d95571fbf 100644
+--- a/drivers/gpu/drm/xe/xe_guc_log.c
++++ b/drivers/gpu/drm/xe/xe_guc_log.c
+@@ -211,8 +211,10 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
+ 	remain = snapshot->size;
+ 	for (i = 0; i < snapshot->num_chunks; i++) {
+ 		size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
++		const char *prefix = i ? NULL : "Log data";
++		char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
+ 
+-		xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
++		xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
+ 		remain -= size;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 678fa40e4cea7b..d8af82dcdce4b7 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -445,6 +445,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
+ 	return val;
+ }
+ 
++static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
++{
++	return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
++		OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
++}
++
+ static void xe_oa_enable(struct xe_oa_stream *stream)
+ {
+ 	const struct xe_oa_format *format = stream->oa_buffer.format;
+@@ -465,14 +471,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
+ 	    stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ 		val |= OAG_OACONTROL_OA_PES_DISAG_EN;
+ 
+-	xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
++	xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
+ }
+ 
+ static void xe_oa_disable(struct xe_oa_stream *stream)
+ {
+ 	struct xe_mmio *mmio = &stream->gt->mmio;
+ 
+-	xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
++	xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
+ 	if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
+ 			   OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
+ 		drm_err(&stream->oa->xe->drm,
+@@ -2569,6 +2575,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
+ 			u->type = DRM_XE_OA_UNIT_TYPE_OAM;
+ 		}
+ 
++		xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
++
+ 		/* Ensure MMIO trigger remains disabled till there is a stream */
+ 		xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
+ 				oag_configure_mmio_trigger(NULL, false));
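The xe_oa.c hunks switch OACONTROL programming from xe_mmio_write32() to xe_mmio_rmw32() with a per-unit "used bits" mask, so enable/disable only touch the OA-owned fields and leave any other bits of the register intact. The read-modify-write idiom itself, sketched over a plain variable (hypothetical masks; the real helper operates on MMIO):

#include <stdint.h>
#include <stdio.h>

/* Clear the masked field, then set the new value within it;
 * bits outside `mask` are preserved. */
static uint32_t rmw32(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg  = 0xF000000Au;	/* unrelated high bits already set */
	uint32_t used = 0x000000FFu;	/* the field this unit owns */

	reg = rmw32(reg, used, 0x5A);	/* "enable" */
	printf("enabled:  0x%08X\n", (unsigned)reg);
	reg = rmw32(reg, used, 0);	/* "disable" */
	printf("disabled: 0x%08X\n", (unsigned)reg);
	return 0;
}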
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 506c6f377e7d6c..46e3e42f9eb5fb 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -432,6 +432,26 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
+ 	return ret;
+ }
+ 
++static int asus_kbd_disable_oobe(struct hid_device *hdev)
++{
++	const u8 init[][6] = {
++		{ FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 },
++		{ FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 },
++		{ FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 },
++		{ FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF }
++	};
++	int ret;
++
++	for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
++		ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i]));
++		if (ret < 0)
++			return ret;
++	}
++
++	hid_info(hdev, "Disabled OOBE for keyboard\n");
++	return 0;
++}
++
+ static void asus_schedule_work(struct asus_kbd_leds *led)
+ {
+ 	unsigned long flags;
+@@ -534,6 +554,12 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
+ 		ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
+ 		if (ret < 0)
+ 			return ret;
++
++		if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) {
++			ret = asus_kbd_disable_oobe(hdev);
++			if (ret < 0)
++				return ret;
++		}
+ 	} else {
+ 		/* Initialize keyboard */
+ 		ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
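asus_kbd_disable_oobe() above walks a fixed table of feature reports and aborts on the first failure. The same shape in isolation — an array of fixed-size commands sent in order, bailing on error — with a stand-in sender and made-up report bytes (not the real protocol):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for asus_kbd_set_report(): pretend to send the buffer. */
static int send_report(const unsigned char *buf, size_t len)
{
	printf("sending %zu-byte report starting 0x%02X\n", len,
	       (unsigned)buf[0]);
	return 0;	/* 0 = success; a real sender returns -errno */
}

static int disable_oobe(void)
{
	static const unsigned char init[][6] = {
		{ 0x5A, 0x05, 0x20, 0x31, 0x00, 0x08 },
		{ 0x5A, 0xBA, 0xC5, 0xC4 },
		{ 0x5A, 0xD0, 0x8F, 0x01 },
		{ 0x5A, 0xD0, 0x85, 0xFF },
	};

	for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
		int ret = send_report(init[i], sizeof(init[i]));
		if (ret < 0)
			return ret;	/* stop the sequence on first error */
	}
	return 0;
}

int main(void)
{
	return disable_oobe();
}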
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 42c0bd9d2f31e3..82900857bfd87c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2314,6 +2314,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
+ 			HID_ANY_ID) },
+ 
++	/* Hantick */
++	{ .driver_data = MT_CLS_NSMU,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			   I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) },
++
+ 	/* Generic MT device */
+ 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+ 
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 7bd86eef6ec761..4c94c03cb57396 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -730,23 +730,30 @@ static int sensor_hub_probe(struct hid_device *hdev,
+ 	return ret;
+ }
+ 
++static int sensor_hub_finalize_pending_fn(struct device *dev, void *data)
++{
++	struct hid_sensor_hub_device *hsdev = dev->platform_data;
++
++	if (hsdev->pending.status)
++		complete(&hsdev->pending.ready);
++
++	return 0;
++}
++
+ static void sensor_hub_remove(struct hid_device *hdev)
+ {
+ 	struct sensor_hub_data *data = hid_get_drvdata(hdev);
+ 	unsigned long flags;
+-	int i;
+ 
+ 	hid_dbg(hdev, " hardware removed\n");
+ 	hid_hw_close(hdev);
+ 	hid_hw_stop(hdev);
++
+ 	spin_lock_irqsave(&data->lock, flags);
+-	for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
+-		struct hid_sensor_hub_device *hsdev =
+-			data->hid_sensor_hub_client_devs[i].platform_data;
+-		if (hsdev->pending.status)
+-			complete(&hsdev->pending.ready);
+-	}
++	device_for_each_child(&hdev->dev, NULL,
++			      sensor_hub_finalize_pending_fn);
+ 	spin_unlock_irqrestore(&data->lock, flags);
++
+ 	mfd_remove_devices(&hdev->dev);
+ 	mutex_destroy(&data->mutex);
+ }
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 5501a560fb07ff..b60bfafc6a8fb0 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4946,6 +4946,10 @@ static const struct wacom_features wacom_features_0x94 =
+ 	HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ 	.driver_data = (kernel_ulong_t)&wacom_features_##prod
+ 
++#define PCI_DEVICE_WACOM(prod)						\
++	HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
++	.driver_data = (kernel_ulong_t)&wacom_features_##prod
++
+ #define USB_DEVICE_LENOVO(prod)					\
+ 	HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod),			\
+ 	.driver_data = (kernel_ulong_t)&wacom_features_##prod
+@@ -5115,6 +5119,7 @@ const struct hid_device_id wacom_ids[] = {
+ 
+ 	{ USB_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ I2C_DEVICE_WACOM(HID_ANY_ID) },
++	{ PCI_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ BT_DEVICE_WACOM(HID_ANY_ID) },
+ 	{ }
+ };
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 14ae0cfc325efb..d2499f302b5083 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -355,6 +355,25 @@ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+ 	{}
+ };
+ 
++static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
++	/*
++	 * When a 400KHz freq is used on this model of ELAN touchpad in Linux,
++	 * excessive smoothing (similar to when the touchpad's firmware detects
++	 * a noisy signal) is sometimes applied. As some devices' (e.g., Lenovo
++	 * V15 G4) ACPI tables specify a 400KHz frequency for this device and
++	 * some I2C busses (e.g., Designware I2C) default to a 400KHz freq,
++	 * force the speed to 100KHz as a workaround.
++	 *
++	 * For future investigation: This problem may be related to the default
++	 * HCNT/LCNT values given by some busses' drivers, because they are not
++	 * specified in the aforementioned devices' ACPI tables, and because
++	 * the device works without issues on Windows at what is expected to be
++	 * a 400KHz frequency. The root cause of the issue is not known.
++	 */
++	{ "ELAN06FA", 0 },
++	{}
++};
++
+ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ 					   void *data, void **return_value)
+ {
+@@ -373,6 +392,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ 	if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
+ 		lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;
+ 
++	if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0)
++		lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ;
++
+ 	return AE_OK;
+ }
+ 
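The new i2c_acpi_force_100khz_device_ids table works exactly like the existing 400 kHz one: an ID match simply overrides whatever speed the ACPI tables reported. The mechanism, reduced to a plain string-table lookup (hypothetical table and constants):

#include <stdio.h>
#include <string.h>

#define MAX_STANDARD_MODE_FREQ 100000u	/* 100 kHz */

static const char *const force_100khz_ids[] = { "ELAN06FA", NULL };

/* Return the forced bus speed for a device, or 0 to keep the ACPI value. */
static unsigned int forced_speed(const char *hid)
{
	for (const char *const *id = force_100khz_ids; *id; id++)
		if (!strcmp(hid, *id))
			return MAX_STANDARD_MODE_FREQ;
	return 0;
}

int main(void)
{
	printf("ELAN06FA -> %u Hz\n", forced_speed("ELAN06FA"));
	printf("OTHER123 -> %u Hz (no override)\n", forced_speed("OTHER123"));
	return 0;
}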
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 42310c9a00c2d1..53ab814b676ffd 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1919,7 +1919,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
+ 		goto err_bus_cleanup;
+ 
+ 	if (master->ops->set_speed) {
+-		master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
++		ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED);
+ 		if (ret)
+ 			goto err_bus_cleanup;
+ 	}
+diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
+index d12270409c8ad2..a2949daf946732 100644
+--- a/drivers/iio/chemical/bme680_core.c
++++ b/drivers/iio/chemical/bme680_core.c
+@@ -874,11 +874,11 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_RAW:
+ 		switch (chan->type) {
+ 		case IIO_TEMP:
+-			ret = bme680_read_temp(data, (s16 *)&chan_val);
++			ret = bme680_read_temp(data, &temp_chan_val);
+ 			if (ret)
+ 				return ret;
+ 
+-			*val = chan_val;
++			*val = temp_chan_val;
+ 			return IIO_VAL_INT;
+ 		case IIO_PRESSURE:
+ 			ret = bme680_read_press(data, &chan_val);
+diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
+index 0f495df2e5ce77..03e0864f50846b 100644
+--- a/drivers/iio/dac/ad3552r-common.c
++++ b/drivers/iio/dac/ad3552r-common.c
+@@ -22,11 +22,10 @@ EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, "IIO_AD3552R");
+ 
+ const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
+ 	[AD3542R_CH_OUTPUT_RANGE_0__2P5V]	= { 0, 2500 },
+-	[AD3542R_CH_OUTPUT_RANGE_0__3V]		= { 0, 3000 },
+ 	[AD3542R_CH_OUTPUT_RANGE_0__5V]		= { 0, 5000 },
+ 	[AD3542R_CH_OUTPUT_RANGE_0__10V]	= { 0, 10000 },
+-	[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V]	= { -2500, 7500 },
+-	[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V]	= { -5000, 5000 }
++	[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V]	= { -5000, 5000 },
++	[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V]	= { -2500, 7500 }
+ };
+ EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, "IIO_AD3552R");
+ 
+diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
+index 216c634f3eaf72..8974df62567081 100644
+--- a/drivers/iio/dac/ad3552r-hs.c
++++ b/drivers/iio/dac/ad3552r-hs.c
+@@ -329,6 +329,12 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
+ 		dev_info(st->dev, "Chip ID error. Expected 0x%x, Read 0x%x\n",
+ 			 AD3552R_ID, id);
+ 
++	/* Clear reset error flag, see ad3552r manual, rev B table 38. */
++	ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_ERR_STATUS,
++				      AD3552R_MASK_RESET_STATUS, 1);
++	if (ret)
++		return ret;
++
+ 	ret = st->data->bus_reg_write(st->back,
+ 				      AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+ 				      0, 1);
+diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
+index fd5a3dfd1d1cfe..4b5581039ae95d 100644
+--- a/drivers/iio/dac/ad3552r.h
++++ b/drivers/iio/dac/ad3552r.h
+@@ -131,7 +131,7 @@
+ #define AD3552R_CH1_ACTIVE				BIT(1)
+ 
+ #define AD3552R_MAX_RANGES	5
+-#define AD3542R_MAX_RANGES	6
++#define AD3542R_MAX_RANGES	5
+ #define AD3552R_QUAD_SPI	2
+ 
+ extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
+@@ -189,16 +189,14 @@ enum ad3552r_ch_vref_select {
+ enum ad3542r_ch_output_range {
+ 	/* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+ 	AD3542R_CH_OUTPUT_RANGE_0__2P5V,
+-	/* Range from 0 V to 3 V. Requires Rfb1x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_0__3V,
+ 	/* Range from 0 V to 5 V. Requires Rfb1x connection  */
+ 	AD3542R_CH_OUTPUT_RANGE_0__5V,
+ 	/* Range from 0 V to 10 V. Requires Rfb2x connection  */
+ 	AD3542R_CH_OUTPUT_RANGE_0__10V,
+-	/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection  */
+-	AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
+ 	/* Range from -5 V to 5 V. Requires Rfb2x connection  */
+ 	AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
++	/* Range from -2.5 V to 7.5 V. Requires Rfb2x connection  */
++	AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
+ };
+ 
+ enum ad3552r_ch_output_range {
+diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
+index be0068081ebbbb..11fbdcdd26d656 100644
+--- a/drivers/iio/light/as73211.c
++++ b/drivers/iio/light/as73211.c
+@@ -177,6 +177,12 @@ struct as73211_data {
+ 	BIT(AS73211_SCAN_INDEX_TEMP) | \
+ 	AS73211_SCAN_MASK_COLOR)
+ 
++static const unsigned long as73211_scan_masks[] = {
++	AS73211_SCAN_MASK_COLOR,
++	AS73211_SCAN_MASK_ALL,
++	0
++};
++
+ static const struct iio_chan_spec as73211_channels[] = {
+ 	{
+ 		.type = IIO_TEMP,
+@@ -672,9 +678,12 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+ 
+ 		/* AS73211 starts reading at address 2 */
+ 		ret = i2c_master_recv(data->client,
+-				(char *)&scan.chan[1], 3 * sizeof(scan.chan[1]));
++				(char *)&scan.chan[0], 3 * sizeof(scan.chan[0]));
+ 		if (ret < 0)
+ 			goto done;
++
++		/* Avoid pushing uninitialized data */
++		scan.chan[3] = 0;
+ 	}
+ 
+ 	if (data_result) {
+@@ -682,9 +691,15 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+ 		 * Saturate all channels (in case of overflows). Temperature channel
+ 		 * is not affected by overflows.
+ 		 */
+-		scan.chan[1] = cpu_to_le16(U16_MAX);
+-		scan.chan[2] = cpu_to_le16(U16_MAX);
+-		scan.chan[3] = cpu_to_le16(U16_MAX);
++		if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) {
++			scan.chan[1] = cpu_to_le16(U16_MAX);
++			scan.chan[2] = cpu_to_le16(U16_MAX);
++			scan.chan[3] = cpu_to_le16(U16_MAX);
++		} else {
++			scan.chan[0] = cpu_to_le16(U16_MAX);
++			scan.chan[1] = cpu_to_le16(U16_MAX);
++			scan.chan[2] = cpu_to_le16(U16_MAX);
++		}
+ 	}
+ 
+ 	iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+@@ -758,6 +773,7 @@ static int as73211_probe(struct i2c_client *client)
+ 	indio_dev->channels = data->spec_dev->channels;
+ 	indio_dev->num_channels = data->spec_dev->num_channels;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
++	indio_dev->available_scan_masks = as73211_scan_masks;
+ 
+ 	ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
+ 	if (ret < 0)
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 45d9dc9c6c8fda..bb02b6adbf2c21 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -2021,6 +2021,11 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ 	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
++	bool is_odp = is_odp_mr(mr);
++	int ret = 0;
++
++	if (is_odp)
++		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 
+ 	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ 		ent = mr->mmkey.cache_ent;
+@@ -2032,7 +2037,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 			ent->tmp_cleanup_scheduled = true;
+ 		}
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+-		return 0;
++		goto out;
+ 	}
+ 
+ 	if (ent) {
+@@ -2041,7 +2046,15 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 		mr->mmkey.cache_ent = NULL;
+ 		spin_unlock_irq(&ent->mkeys_queue.lock);
+ 	}
+-	return destroy_mkey(dev, mr);
++	ret = destroy_mkey(dev, mr);
++out:
++	if (is_odp) {
++		if (!ret)
++			to_ib_umem_odp(mr->umem)->private = NULL;
++		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
++	}
++
++	return ret;
+ }
+ 
+ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 64b441542cd5dd..1d3bf56157702d 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -282,6 +282,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ 	if (!umem_odp->npages)
+ 		goto out;
+ 	mr = umem_odp->private;
++	if (!mr)
++		goto out;
+ 
+ 	start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ 	end = min_t(u64, ib_umem_end(umem_odp), range->end);
+diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
+index eb4173f9c82044..7ba8d166d68c18 100644
+--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
++++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
+@@ -187,6 +187,12 @@ static int bbnsm_pwrkey_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void bbnsm_pwrkey_remove(struct platform_device *pdev)
++{
++	dev_pm_clear_wake_irq(&pdev->dev);
++	device_init_wakeup(&pdev->dev, false);
++}
++
+ static int __maybe_unused bbnsm_pwrkey_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+@@ -223,6 +229,8 @@ static struct platform_driver bbnsm_pwrkey_driver = {
+ 		.of_match_table = bbnsm_pwrkey_ids,
+ 	},
+ 	.probe = bbnsm_pwrkey_probe,
++	.remove = bbnsm_pwrkey_remove,
++
+ };
+ module_platform_driver(bbnsm_pwrkey_driver);
+ 
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 2735f86c23cc89..aba57abe697882 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -665,23 +665,50 @@ static void synaptics_pt_stop(struct serio *serio)
+ 	priv->pt_port = NULL;
+ }
+ 
++static int synaptics_pt_open(struct serio *serio)
++{
++	struct psmouse *parent = psmouse_from_serio(serio->parent);
++	struct synaptics_data *priv = parent->private;
++
++	guard(serio_pause_rx)(parent->ps2dev.serio);
++	priv->pt_port_open = true;
++
++	return 0;
++}
++
++static void synaptics_pt_close(struct serio *serio)
++{
++	struct psmouse *parent = psmouse_from_serio(serio->parent);
++	struct synaptics_data *priv = parent->private;
++
++	guard(serio_pause_rx)(parent->ps2dev.serio);
++	priv->pt_port_open = false;
++}
++
+ static int synaptics_is_pt_packet(u8 *buf)
+ {
+ 	return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
+ }
+ 
+-static void synaptics_pass_pt_packet(struct serio *ptport, u8 *packet)
++static void synaptics_pass_pt_packet(struct synaptics_data *priv, u8 *packet)
+ {
+-	struct psmouse *child = psmouse_from_serio(ptport);
++	struct serio *ptport;
+ 
+-	if (child && child->state == PSMOUSE_ACTIVATED) {
+-		serio_interrupt(ptport, packet[1], 0);
+-		serio_interrupt(ptport, packet[4], 0);
+-		serio_interrupt(ptport, packet[5], 0);
+-		if (child->pktsize == 4)
+-			serio_interrupt(ptport, packet[2], 0);
+-	} else {
+-		serio_interrupt(ptport, packet[1], 0);
++	ptport = priv->pt_port;
++	if (!ptport)
++		return;
++
++	serio_interrupt(ptport, packet[1], 0);
++
++	if (priv->pt_port_open) {
++		struct psmouse *child = psmouse_from_serio(ptport);
++
++		if (child->state == PSMOUSE_ACTIVATED) {
++			serio_interrupt(ptport, packet[4], 0);
++			serio_interrupt(ptport, packet[5], 0);
++			if (child->pktsize == 4)
++				serio_interrupt(ptport, packet[2], 0);
++		}
+ 	}
+ }
+ 
+@@ -720,6 +747,8 @@ static void synaptics_pt_create(struct psmouse *psmouse)
+ 	serio->write = synaptics_pt_write;
+ 	serio->start = synaptics_pt_start;
+ 	serio->stop = synaptics_pt_stop;
++	serio->open = synaptics_pt_open;
++	serio->close = synaptics_pt_close;
+ 	serio->parent = psmouse->ps2dev.serio;
+ 
+ 	psmouse->pt_activate = synaptics_pt_activate;
+@@ -1216,11 +1245,10 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
+ 
+ 		if (SYN_CAP_PASS_THROUGH(priv->info.capabilities) &&
+ 		    synaptics_is_pt_packet(psmouse->packet)) {
+-			if (priv->pt_port)
+-				synaptics_pass_pt_packet(priv->pt_port,
+-							 psmouse->packet);
+-		} else
++			synaptics_pass_pt_packet(priv, psmouse->packet);
++		} else {
+ 			synaptics_process_packet(psmouse);
++		}
+ 
+ 		return PSMOUSE_FULL_PACKET;
+ 	}
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index 899aee598632b9..3853165b6b3a06 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -188,6 +188,7 @@ struct synaptics_data {
+ 	bool disable_gesture;			/* disable gestures */
+ 
+ 	struct serio *pt_port;			/* Pass-through serio port */
++	bool pt_port_open;
+ 
+ 	/*
+ 	 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 6d15405f0ea3e9..42a89d499cda83 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -4670,7 +4670,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
+ 	/* Initialise in-memory data structures */
+ 	ret = arm_smmu_init_structures(smmu);
+ 	if (ret)
+-		return ret;
++		goto err_free_iopf;
+ 
+ 	/* Record our private device structure */
+ 	platform_set_drvdata(pdev, smmu);
+@@ -4681,22 +4681,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
+ 	/* Reset the device */
+ 	ret = arm_smmu_device_reset(smmu);
+ 	if (ret)
+-		return ret;
++		goto err_disable;
+ 
+ 	/* And we're up. Go go go! */
+ 	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+ 				     "smmu3.%pa", &ioaddr);
+ 	if (ret)
+-		return ret;
++		goto err_disable;
+ 
+ 	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register iommu\n");
+-		iommu_device_sysfs_remove(&smmu->iommu);
+-		return ret;
++		goto err_free_sysfs;
+ 	}
+ 
+ 	return 0;
++
++err_free_sysfs:
++	iommu_device_sysfs_remove(&smmu->iommu);
++err_disable:
++	arm_smmu_device_disable(smmu);
++err_free_iopf:
++	iopf_queue_free(smmu->evtq.iopf);
++	return ret;
+ }
+ 
+ static void arm_smmu_device_remove(struct platform_device *pdev)
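The probe rework above converts early returns into a single goto unwind ladder, so each failure point releases exactly the resources set up before it (iopf queue, device enable, sysfs node). The canonical shape of that ladder, runnable with made-up stage names:

#include <stdio.h>

static int stage(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int probe(int fail_at)
{
	int ret;

	ret = stage("structures", fail_at == 1);
	if (ret)
		goto err_free_iopf;
	ret = stage("reset", fail_at == 2);
	if (ret)
		goto err_disable;
	ret = stage("sysfs", fail_at == 3);
	if (ret)
		goto err_disable;
	ret = stage("register", fail_at == 4);
	if (ret)
		goto err_free_sysfs;
	return 0;

err_free_sysfs:
	printf("undo sysfs\n");
err_disable:
	printf("undo device enable\n");
err_free_iopf:
	printf("undo iopf queue\n");
	return ret;
}

int main(void)
{
	probe(4);	/* exercise the deepest unwind path */
	return 0;
}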
+diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+index 6e41ddaa24d636..d525ab43a4aebf 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
++++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+@@ -79,7 +79,6 @@
+ #define TEGRA241_VCMDQ_PAGE1(q)		(TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
+ #define  VCMDQ_ADDR			GENMASK(47, 5)
+ #define  VCMDQ_LOG2SIZE			GENMASK(4, 0)
+-#define  VCMDQ_LOG2SIZE_MAX		19
+ 
+ #define TEGRA241_VCMDQ_BASE		0x00000
+ #define TEGRA241_VCMDQ_CONS_INDX_BASE	0x00008
+@@ -505,12 +504,15 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
+ 	struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq;
+ 	struct arm_smmu_queue *q = &cmdq->q;
+ 	char name[16];
++	u32 regval;
+ 	int ret;
+ 
+ 	snprintf(name, 16, "vcmdq%u", vcmdq->idx);
+ 
+-	/* Queue size, capped to ensure natural alignment */
+-	q->llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, VCMDQ_LOG2SIZE_MAX);
++	/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
++	regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
++	q->llq.max_n_shift =
++		min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
+ 
+ 	/* Use the common helper to init the VCMDQ, and then... */
+ 	ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index 6372f3e25c4bc2..601fb878d0ef25 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -567,6 +567,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+ 	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ 	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ 	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
++	{ .compatible = "qcom,sdm670-smmu-v2", .data = &qcom_smmu_v2_data },
+ 	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
+ 	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
+ 	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data},
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 79e0da9eb626cc..8f75c11a3ec48e 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4090,13 +4090,14 @@ void domain_remove_dev_pasid(struct iommu_domain *domain,
+ 			break;
+ 		}
+ 	}
+-	WARN_ON_ONCE(!dev_pasid);
+ 	spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ 
+ 	cache_tag_unassign_domain(dmar_domain, dev, pasid);
+ 	domain_detach_iommu(dmar_domain, iommu);
+-	intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
+-	kfree(dev_pasid);
++	if (!WARN_ON_ONCE(!dev_pasid)) {
++		intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
++		kfree(dev_pasid);
++	}
+ }
+ 
+ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
+index 1fe804e28a8663..d9a937450e5526 100644
+--- a/drivers/iommu/iommufd/fault.c
++++ b/drivers/iommu/iommufd/fault.c
+@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ {
+ 	struct iommufd_fault *fault = hwpt->fault;
+ 	struct iopf_group *group, *next;
++	struct list_head free_list;
+ 	unsigned long index;
+ 
+ 	if (!fault)
+ 		return;
++	INIT_LIST_HEAD(&free_list);
+ 
+ 	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
+ 		if (group->attach_handle != &handle->handle)
+ 			continue;
++		list_move(&group->node, &free_list);
++	}
++	spin_unlock(&fault->lock);
++
++	list_for_each_entry_safe(group, next, &free_list, node) {
+ 		list_del(&group->node);
+ 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ 		iopf_free_group(group);
+@@ -213,6 +221,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
+ {
+ 	struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
+ 	struct iopf_group *group, *next;
++	unsigned long index;
+ 
+ 	/*
+ 	 * The iommufd object's reference count is zero at this point.
+@@ -225,6 +234,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
+ 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ 		iopf_free_group(group);
+ 	}
++	xa_for_each(&fault->response, index, group) {
++		xa_erase(&fault->response, index);
++		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
++		iopf_free_group(group);
++	}
++	xa_destroy(&fault->response);
++	mutex_destroy(&fault->mutex);
+ }
+ 
+ static void iommufd_compose_fault_message(struct iommu_fault *fault,
+@@ -247,7 +263,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ {
+ 	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
+ 	struct iommufd_fault *fault = filep->private_data;
+-	struct iommu_hwpt_pgfault data;
++	struct iommu_hwpt_pgfault data = {};
+ 	struct iommufd_device *idev;
+ 	struct iopf_group *group;
+ 	struct iopf_fault *iopf;
+@@ -258,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ 		return -ESPIPE;
+ 
+ 	mutex_lock(&fault->mutex);
+-	while (!list_empty(&fault->deliver) && count > done) {
+-		group = list_first_entry(&fault->deliver,
+-					 struct iopf_group, node);
+-
+-		if (group->fault_count * fault_size > count - done)
++	while ((group = iommufd_fault_deliver_fetch(fault))) {
++		if (done >= count ||
++		    group->fault_count * fault_size > count - done) {
++			iommufd_fault_deliver_restore(fault, group);
+ 			break;
++		}
+ 
+ 		rc = xa_alloc(&fault->response, &group->cookie, group,
+ 			      xa_limit_32b, GFP_KERNEL);
+-		if (rc)
++		if (rc) {
++			iommufd_fault_deliver_restore(fault, group);
+ 			break;
++		}
+ 
+ 		idev = to_iommufd_handle(group->attach_handle)->idev;
+ 		list_for_each_entry(iopf, &group->faults, list) {
+@@ -277,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ 						      group->cookie);
+ 			if (copy_to_user(buf + done, &data, fault_size)) {
+ 				xa_erase(&fault->response, group->cookie);
++				iommufd_fault_deliver_restore(fault, group);
+ 				rc = -EFAULT;
+ 				break;
+ 			}
+ 			done += fault_size;
+ 		}
+-
+-		list_del(&group->node);
+ 	}
+ 	mutex_unlock(&fault->mutex);
+ 
+@@ -341,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
+ 	__poll_t pollflags = EPOLLOUT;
+ 
+ 	poll_wait(filep, &fault->wait_queue, wait);
+-	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	if (!list_empty(&fault->deliver))
+ 		pollflags |= EPOLLIN | EPOLLRDNORM;
+-	mutex_unlock(&fault->mutex);
++	spin_unlock(&fault->lock);
+ 
+ 	return pollflags;
+ }
+@@ -386,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+ 	INIT_LIST_HEAD(&fault->deliver);
+ 	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
+ 	mutex_init(&fault->mutex);
++	spin_lock_init(&fault->lock);
+ 	init_waitqueue_head(&fault->wait_queue);
+ 
+ 	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
+@@ -434,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
+ 	hwpt = group->attach_handle->domain->fault_data;
+ 	fault = hwpt->fault;
+ 
+-	mutex_lock(&fault->mutex);
++	spin_lock(&fault->lock);
+ 	list_add_tail(&group->node, &fault->deliver);
+-	mutex_unlock(&fault->mutex);
++	spin_unlock(&fault->lock);
+ 
+ 	wake_up_interruptible(&fault->wait_queue);
+ 
+diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
+index b6d706cf2c66fb..0b1bafc7fd9940 100644
+--- a/drivers/iommu/iommufd/iommufd_private.h
++++ b/drivers/iommu/iommufd/iommufd_private.h
+@@ -443,14 +443,39 @@ struct iommufd_fault {
+ 	struct iommufd_ctx *ictx;
+ 	struct file *filep;
+ 
+-	/* The lists of outstanding faults protected by below mutex. */
+-	struct mutex mutex;
++	spinlock_t lock; /* protects the deliver list */
+ 	struct list_head deliver;
++	struct mutex mutex; /* serializes response flows */
+ 	struct xarray response;
+ 
+ 	struct wait_queue_head wait_queue;
+ };
+ 
++/* Fetch the first node out of the fault->deliver list */
++static inline struct iopf_group *
++iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
++{
++	struct list_head *list = &fault->deliver;
++	struct iopf_group *group = NULL;
++
++	spin_lock(&fault->lock);
++	if (!list_empty(list)) {
++		group = list_first_entry(list, struct iopf_group, node);
++		list_del(&group->node);
++	}
++	spin_unlock(&fault->lock);
++	return group;
++}
++
++/* Restore a node back to the head of the fault->deliver list */
++static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
++						 struct iopf_group *group)
++{
++	spin_lock(&fault->lock);
++	list_add(&group->node, &fault->deliver);
++	spin_unlock(&fault->lock);
++}
++
+ struct iommufd_attach_handle {
+ 	struct iommu_attach_handle handle;
+ 	struct iommufd_device *idev;
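iommufd_fault_deliver_fetch() and iommufd_fault_deliver_restore() above let the read path pop one group at a time under the new spinlock and push it back whenever it cannot be consumed (short user buffer, xa_alloc() failure, copy_to_user() failure). The pop-or-restore pattern in a compact pthread version (hypothetical types, singly linked for brevity):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

struct fault {
	pthread_mutex_t lock;	/* protects deliver */
	struct node *deliver;	/* head = oldest pending entry */
};

/* Detach and return the first pending node, or NULL if none. */
static struct node *fetch(struct fault *f)
{
	struct node *n;

	pthread_mutex_lock(&f->lock);
	n = f->deliver;
	if (n)
		f->deliver = n->next;
	pthread_mutex_unlock(&f->lock);
	return n;
}

/* Put a node back at the head, e.g. when the consumer ran out of space. */
static void restore(struct fault *f, struct node *n)
{
	pthread_mutex_lock(&f->lock);
	n->next = f->deliver;
	f->deliver = n;
	pthread_mutex_unlock(&f->lock);
}

int main(void)
{
	struct fault f = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node a = { .id = 1 }, b = { .id = 2 };

	restore(&f, &b);
	restore(&f, &a);		/* list: a, b */
	struct node *n = fetch(&f);
	printf("fetched %d\n", n->id);
	restore(&f, n);			/* could not consume: put it back */
	printf("refetched %d\n", fetch(&f)->id);
	return 0;
}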
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index 9bee02db164391..990674713b863b 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -169,6 +169,7 @@ config IXP4XX_IRQ
+ 
+ config LAN966X_OIC
+ 	tristate "Microchip LAN966x OIC Support"
++	depends on MCHP_LAN966X_PCI || COMPILE_TEST
+ 	select GENERIC_IRQ_CHIP
+ 	select IRQ_DOMAIN
+ 	help
+diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
+index da5250f0155cfa..2b1684c60e3cac 100644
+--- a/drivers/irqchip/irq-apple-aic.c
++++ b/drivers/irqchip/irq-apple-aic.c
+@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
+ 						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
+ 	}
+ 
+-	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
++	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
++			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
+ 		int irq;
+ 		if (cpumask_test_cpu(smp_processor_id(),
+ 				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
+diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
+index b337f6c05f184f..4eebed39880a5b 100644
+--- a/drivers/irqchip/irq-mvebu-icu.c
++++ b/drivers/irqchip/irq-mvebu-icu.c
+@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ 			       unsigned long *hwirq, unsigned int *type)
+ {
+ 	unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
+-	struct mvebu_icu_msi_data *msi_data = d->host_data;
++	struct msi_domain_info *info = d->host_data;
++	struct mvebu_icu_msi_data *msi_data = info->chip_data;
+ 	struct mvebu_icu *icu = msi_data->icu;
+ 
+ 	/* Check the count of the parameters in dt */
+diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
+index 7a136fd8172061..06196d851ade71 100644
+--- a/drivers/leds/leds-lp8860.c
++++ b/drivers/leds/leds-lp8860.c
+@@ -265,7 +265,7 @@ static int lp8860_init(struct lp8860_led *led)
+ 		goto out;
+ 	}
+ 
+-	reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs) / sizeof(lp8860_eeprom_disp_regs[0]);
++	reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
+ 	for (i = 0; i < reg_count; i++) {
+ 		ret = regmap_write(led->eeprom_regmap,
+ 				lp8860_eeprom_disp_regs[i].reg,
+diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
+index 8d5e2d7dc03b2f..c1981f091bd1bb 100644
+--- a/drivers/mailbox/tegra-hsp.c
++++ b/drivers/mailbox/tegra-hsp.c
+@@ -388,7 +388,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
+ 	value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
+ 	value &= ~HSP_SM_SHRD_MBOX_FULL;
+ 	msg = (void *)(unsigned long)value;
+-	mbox_chan_received_data(channel->chan, msg);
+ 
+ 	/*
+ 	 * Need to clear all bits here since some producers, such as TCU, depend
+@@ -398,6 +397,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
+ 	 * explicitly, so we have to make sure we cover all possible cases.
+ 	 */
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
++
++	mbox_chan_received_data(channel->chan, msg);
+ }
+ 
+ static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
+@@ -433,7 +434,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
+ 	value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
+ 
+ 	msg = (void *)(unsigned long)value;
+-	mbox_chan_received_data(channel->chan, msg);
+ 
+ 	/*
+ 	 * Clear data registers and tag.
+@@ -443,6 +443,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
+ 	tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
++
++	mbox_chan_received_data(channel->chan, msg);
+ }
+ 
+ static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
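Both tegra_hsp_sm_recv32() and tegra_hsp_sm_recv128() above now clear the shared-mailbox registers before calling mbox_chan_received_data(), because the consumer's callback can immediately cause a new message to land; clearing afterwards could wipe it. The corrected ordering, simulated with a plain variable for the register and a hypothetical callback:

#include <stdio.h>

static unsigned int shrd_mbox;	/* simulated HSP_SM_SHRD_MBOX register */

static void received_data(unsigned int msg)
{
	printf("consumer got 0x%X\n", msg);
	shrd_mbox = 0x900D;	/* the callback may post a new message */
}

static void recv(void)
{
	unsigned int msg = shrd_mbox;

	shrd_mbox = 0;		/* clear hardware state first... */
	received_data(msg);	/* ...then notify the consumer */
	printf("mailbox now 0x%X (new message preserved)\n", shrd_mbox);
}

int main(void)
{
	shrd_mbox = 0xCAFE;
	recv();
	return 0;
}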
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index aa5249da59b2f5..0c143beaafda60 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -905,7 +905,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *nc, *np = pdev->dev.of_node;
+-	struct zynqmp_ipi_pdata __percpu *pdata;
++	struct zynqmp_ipi_pdata *pdata;
+ 	struct of_phandle_args out_irq;
+ 	struct zynqmp_ipi_mbox *mbox;
+ 	int num_mboxes, ret = -EINVAL;
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 1e9db8e4acdf65..0b1870a09e1fdc 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -61,6 +61,19 @@ config MD_BITMAP_FILE
+ 	  various kernel APIs and can only work with files on a file system not
+ 	  actually sitting on the MD device.
+ 
++config MD_LINEAR
++	tristate "Linear (append) mode"
++	depends on BLK_DEV_MD
++	help
++	  If you say Y here, then your multiple devices driver will be able to
++	  use the so-called linear mode, i.e. it will combine the hard disk
++	  partitions by simply appending one to the other.
++
++	  To compile this as a module, choose M here: the module
++	  will be called linear.
++
++	  If unsure, say Y.
++
+ config MD_RAID0
+ 	tristate "RAID-0 (striping) mode"
+ 	depends on BLK_DEV_MD
+diff --git a/drivers/md/Makefile b/drivers/md/Makefile
+index 476a214e4bdc26..87bdfc9fe14c55 100644
+--- a/drivers/md/Makefile
++++ b/drivers/md/Makefile
+@@ -29,12 +29,14 @@ dm-zoned-y	+= dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+ 
+ md-mod-y	+= md.o md-bitmap.o
+ raid456-y	+= raid5.o raid5-cache.o raid5-ppl.o
++linear-y       += md-linear.o
+ 
+ # Note: link order is important.  All raid personalities
+ # and must come before md.o, as they each initialise
+ # themselves, and md.o may use the personalities when it
+ # auto-initialised.
+ 
++obj-$(CONFIG_MD_LINEAR)		+= linear.o
+ obj-$(CONFIG_MD_RAID0)		+= raid0.o
+ obj-$(CONFIG_MD_RAID1)		+= raid1.o
+ obj-$(CONFIG_MD_RAID10)		+= raid10.o
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1ae2c71bb383b7..78c975d7cd5f42 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -59,6 +59,7 @@ struct convert_context {
+ 	struct bio *bio_out;
+ 	struct bvec_iter iter_out;
+ 	atomic_t cc_pending;
++	unsigned int tag_offset;
+ 	u64 cc_sector;
+ 	union {
+ 		struct skcipher_request *req;
+@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
+ 	if (bio_out)
+ 		ctx->iter_out = bio_out->bi_iter;
+ 	ctx->cc_sector = sector + cc->iv_offset;
++	ctx->tag_offset = 0;
+ 	init_completion(&ctx->restart);
+ }
+ 
+@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
+ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 			 struct convert_context *ctx, bool atomic, bool reset_pending)
+ {
+-	unsigned int tag_offset = 0;
+ 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
+ 	int r;
+ 
+@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		atomic_inc(&ctx->cc_pending);
+ 
+ 		if (crypt_integrity_aead(cc))
+-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
++			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
+ 		else
+-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
++			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
+ 
+ 		switch (r) {
+ 		/*
+@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 					 * exit and continue processing in a workqueue
+ 					 */
+ 					ctx->r.req = NULL;
++					ctx->tag_offset++;
+ 					ctx->cc_sector += sector_step;
+-					tag_offset++;
+ 					return BLK_STS_DEV_RESOURCE;
+ 				}
+ 			} else {
+@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		 */
+ 		case -EINPROGRESS:
+ 			ctx->r.req = NULL;
++			ctx->tag_offset++;
+ 			ctx->cc_sector += sector_step;
+-			tag_offset++;
+ 			continue;
+ 		/*
+ 		 * The request was already processed (synchronously).
+@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
+ 		case 0:
+ 			atomic_dec(&ctx->cc_pending);
+ 			ctx->cc_sector += sector_step;
+-			tag_offset++;
++			ctx->tag_offset++;
+ 			if (!atomic)
+ 				cond_resched();
+ 			continue;
+@@ -2092,7 +2093,6 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
+ 	struct crypt_config *cc = io->cc;
+ 	struct convert_context *ctx = &io->ctx;
+ 	int crypt_finished;
+-	sector_t sector = io->sector;
+ 	blk_status_t r;
+ 
+ 	wait_for_completion(&ctx->restart);
+@@ -2109,10 +2109,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
+ 	}
+ 
+ 	/* Encryption was already finished, submit io now */
+-	if (crypt_finished) {
++	if (crypt_finished)
+ 		kcryptd_crypt_write_io_submit(io, 0);
+-		io->sector = sector;
+-	}
+ 
+ 	crypt_dec_pending(io);
+ }
+@@ -2123,14 +2121,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 	struct convert_context *ctx = &io->ctx;
+ 	struct bio *clone;
+ 	int crypt_finished;
+-	sector_t sector = io->sector;
+ 	blk_status_t r;
+ 
+ 	/*
+ 	 * Prevent io from disappearing until this function completes.
+ 	 */
+ 	crypt_inc_pending(io);
+-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
++	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
+ 
+ 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ 	if (unlikely(!clone)) {
+@@ -2147,8 +2144,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 		io->ctx.iter_in = clone->bi_iter;
+ 	}
+ 
+-	sector += bio_sectors(clone);
+-
+ 	crypt_inc_pending(io);
+ 	r = crypt_convert(cc, ctx,
+ 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+@@ -2172,10 +2167,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ 	}
+ 
+ 	/* Encryption was already finished, submit io now */
+-	if (crypt_finished) {
++	if (crypt_finished)
+ 		kcryptd_crypt_write_io_submit(io, 0);
+-		io->sector = sector;
+-	}
+ 
+ dec:
+ 	crypt_dec_pending(io);
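
The dm-crypt hunks above move the loop-local tag_offset into struct convert_context so the integrity-tag position survives when crypt_convert() is re-entered after a BLK_STS_DEV_RESOURCE requeue or an -EINPROGRESS completion; previously the local reset to 0 on every call and stale tags were reused. A minimal userspace sketch of the pattern (hypothetical names, not the dm-crypt API):

#include <stdio.h>

/* Per-request context: progress state lives here, not in locals,
 * so a requeued conversion resumes at the right tag offset. */
struct convert_ctx {
	unsigned int tag_offset;
	unsigned long sector;
};

/* Process one block; returns 0 when done, 1 when it must be requeued. */
static int convert_step(struct convert_ctx *ctx, int requeue)
{
	printf("sector %lu uses tag %u\n", ctx->sector, ctx->tag_offset);
	ctx->tag_offset++;	/* advance in the context, not in a local */
	ctx->sector++;
	return requeue;
}

int main(void)
{
	struct convert_ctx ctx = { .tag_offset = 0, .sector = 100 };

	if (convert_step(&ctx, 1))	/* first pass gets requeued */
		convert_step(&ctx, 0);	/* resumes with tag 1, not tag 0 */
	return 0;
}
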
+diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
+index b2a00f213c2cd7..4b80165afd2331 100644
+--- a/drivers/md/md-autodetect.c
++++ b/drivers/md/md-autodetect.c
+@@ -49,6 +49,7 @@ static int md_setup_ents __initdata;
+  *             instead of just one.  -- KTK
+  * 18May2000: Added support for persistent-superblock arrays:
+  *             md=n,0,factor,fault,device-list   uses RAID0 for device n
++ *             md=n,-1,factor,fault,device-list  uses LINEAR for device n
+  *             md=n,device-list      reads a RAID superblock from the devices
+  *             elements in device-list are read by name_to_kdev_t so can be
+  *             a hex number or something like /dev/hda1 /dev/sdb
+@@ -87,7 +88,7 @@ static int __init md_setup(char *str)
+ 		md_setup_ents++;
+ 	switch (get_option(&str, &level)) {	/* RAID level */
+ 	case 2: /* could be 0 or -1.. */
+-		if (level == 0) {
++		if (level == 0 || level == LEVEL_LINEAR) {
+ 			if (get_option(&str, &factor) != 2 ||	/* Chunk Size */
+ 					get_option(&str, &fault) != 2) {
+ 				printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
+@@ -95,7 +96,10 @@ static int __init md_setup(char *str)
+ 			}
+ 			md_setup_args[ent].level = level;
+ 			md_setup_args[ent].chunk = 1 << (factor+12);
+-			pername = "raid0";
++			if (level == LEVEL_LINEAR)
++				pername = "linear";
++			else
++				pername = "raid0";
+ 			break;
+ 		}
+ 		fallthrough;
+diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
+new file mode 100644
+index 00000000000000..369aed044b409f
+--- /dev/null
++++ b/drivers/md/md-linear.c
+@@ -0,0 +1,352 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * linear.c : Multiple Devices driver for Linux Copyright (C) 1994-96 Marc
++ * ZYNGIER <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
++ */
++
++#include <linux/blkdev.h>
++#include <linux/raid/md_u.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <trace/events/block.h>
++#include "md.h"
++
++struct dev_info {
++	struct md_rdev	*rdev;
++	sector_t	end_sector;
++};
++
++struct linear_conf {
++	struct rcu_head         rcu;
++	sector_t                array_sectors;
++	/* a copy of mddev->raid_disks */
++	int                     raid_disks;
++	struct dev_info         disks[] __counted_by(raid_disks);
++};
++
++/*
++ * find which device holds a particular offset
++ */
++static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
++{
++	int lo, mid, hi;
++	struct linear_conf *conf;
++
++	lo = 0;
++	hi = mddev->raid_disks - 1;
++	conf = mddev->private;
++
++	/*
++	 * Binary Search
++	 */
++
++	while (hi > lo) {
++
++		mid = (hi + lo) / 2;
++		if (sector < conf->disks[mid].end_sector)
++			hi = mid;
++		else
++			lo = mid + 1;
++	}
++
++	return conf->disks + lo;
++}
++
++static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
++{
++	struct linear_conf *conf;
++	sector_t array_sectors;
++
++	conf = mddev->private;
++	WARN_ONCE(sectors || raid_disks,
++		  "%s does not support generic reshape\n", __func__);
++	array_sectors = conf->array_sectors;
++
++	return array_sectors;
++}
++
++static int linear_set_limits(struct mddev *mddev)
++{
++	struct queue_limits lim;
++	int err;
++
++	md_init_stacking_limits(&lim);
++	lim.max_hw_sectors = mddev->chunk_sectors;
++	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
++	lim.io_min = mddev->chunk_sectors << 9;
++	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
++	if (err)
++		return err;
++
++	return queue_limits_set(mddev->gendisk->queue, &lim);
++}
++
++static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
++{
++	struct linear_conf *conf;
++	struct md_rdev *rdev;
++	int ret = -EINVAL;
++	int cnt;
++	int i;
++
++	conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
++	if (!conf)
++		return ERR_PTR(-ENOMEM);
++
++	/*
++	 * conf->raid_disks is a copy of mddev->raid_disks. The reason for
++	 * keeping a copy in struct linear_conf is that mddev->raid_disks
++	 * may not be consistent with the number of pointers in
++	 * conf->disks[] when it is updated in linear_add() and used to
++	 * iterate the old conf->disks[] array in linear_congested().
++	 * Here conf->raid_disks is always consistent with the number of
++	 * pointers in the conf->disks[] array, and mddev->private is
++	 * updated with rcu_assign_pointer() in linear_add(), so such a
++	 * race can be avoided.
++	 */
++	conf->raid_disks = raid_disks;
++
++	cnt = 0;
++	conf->array_sectors = 0;
++
++	rdev_for_each(rdev, mddev) {
++		int j = rdev->raid_disk;
++		struct dev_info *disk = conf->disks + j;
++		sector_t sectors;
++
++		if (j < 0 || j >= raid_disks || disk->rdev) {
++			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
++				mdname(mddev));
++			goto out;
++		}
++
++		disk->rdev = rdev;
++		if (mddev->chunk_sectors) {
++			sectors = rdev->sectors;
++			sector_div(sectors, mddev->chunk_sectors);
++			rdev->sectors = sectors * mddev->chunk_sectors;
++		}
++
++		conf->array_sectors += rdev->sectors;
++		cnt++;
++	}
++	if (cnt != raid_disks) {
++		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
++			mdname(mddev));
++		goto out;
++	}
++
++	/*
++	 * Here we calculate the device offsets.
++	 */
++	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
++
++	for (i = 1; i < raid_disks; i++)
++		conf->disks[i].end_sector =
++			conf->disks[i-1].end_sector +
++			conf->disks[i].rdev->sectors;
++
++	if (!mddev_is_dm(mddev)) {
++		ret = linear_set_limits(mddev);
++		if (ret)
++			goto out;
++	}
++
++	return conf;
++
++out:
++	kfree(conf);
++	return ERR_PTR(ret);
++}
++
++static int linear_run(struct mddev *mddev)
++{
++	struct linear_conf *conf;
++	int ret;
++
++	if (md_check_no_bitmap(mddev))
++		return -EINVAL;
++
++	conf = linear_conf(mddev, mddev->raid_disks);
++	if (IS_ERR(conf))
++		return PTR_ERR(conf);
++
++	mddev->private = conf;
++	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
++
++	ret = md_integrity_register(mddev);
++	if (ret) {
++		kfree(conf);
++		mddev->private = NULL;
++	}
++	return ret;
++}
++
++static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
++{
++	/* Adding a drive to a linear array allows the array to grow.
++	 * It is permitted if the new drive has a matching superblock
++	 * already on it, with raid_disk equal to raid_disks.
++	 * It is achieved by creating a new linear_private_data structure
++	 * and swapping it in, in place of the current one.
++	 * The current one is never freed until the array is stopped.
++	 * This avoids races.
++	 */
++	struct linear_conf *newconf, *oldconf;
++
++	if (rdev->saved_raid_disk != mddev->raid_disks)
++		return -EINVAL;
++
++	rdev->raid_disk = rdev->saved_raid_disk;
++	rdev->saved_raid_disk = -1;
++
++	newconf = linear_conf(mddev, mddev->raid_disks + 1);
++	if (IS_ERR(newconf))
++		return PTR_ERR(newconf);
++
++	/* newconf->raid_disks already keeps a copy of the increased
++	 * value of mddev->raid_disks; WARN_ONCE() is just used to make
++	 * sure of this. It is possible that oldconf is still referenced
++	 * in linear_congested(), therefore kfree_rcu() is used so that
++	 * oldconf is only freed once no one uses it anymore.
++	 */
++	oldconf = rcu_dereference_protected(mddev->private,
++			lockdep_is_held(&mddev->reconfig_mutex));
++	mddev->raid_disks++;
++	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
++		"copied raid_disks doesn't match mddev->raid_disks");
++	rcu_assign_pointer(mddev->private, newconf);
++	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
++	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
++	kfree_rcu(oldconf, rcu);
++	return 0;
++}
++
++static void linear_free(struct mddev *mddev, void *priv)
++{
++	struct linear_conf *conf = priv;
++
++	kfree(conf);
++}
++
++static bool linear_make_request(struct mddev *mddev, struct bio *bio)
++{
++	struct dev_info *tmp_dev;
++	sector_t start_sector, end_sector, data_offset;
++	sector_t bio_sector = bio->bi_iter.bi_sector;
++
++	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
++	    && md_flush_request(mddev, bio))
++		return true;
++
++	tmp_dev = which_dev(mddev, bio_sector);
++	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
++	end_sector = tmp_dev->end_sector;
++	data_offset = tmp_dev->rdev->data_offset;
++
++	if (unlikely(bio_sector >= end_sector ||
++		     bio_sector < start_sector))
++		goto out_of_bounds;
++
++	if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
++		md_error(mddev, tmp_dev->rdev);
++		bio_io_error(bio);
++		return true;
++	}
++
++	if (unlikely(bio_end_sector(bio) > end_sector)) {
++		/* This bio crosses a device boundary, so we have to split it */
++		struct bio *split = bio_split(bio, end_sector - bio_sector,
++					      GFP_NOIO, &mddev->bio_set);
++
++		if (IS_ERR(split)) {
++			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
++			bio_endio(bio);
++			return true;
++		}
++
++		bio_chain(split, bio);
++		submit_bio_noacct(bio);
++		bio = split;
++	}
++
++	md_account_bio(mddev, &bio);
++	bio_set_dev(bio, tmp_dev->rdev->bdev);
++	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
++		start_sector + data_offset;
++
++	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
++		     !bdev_max_discard_sectors(bio->bi_bdev))) {
++		/* Just ignore it */
++		bio_endio(bio);
++	} else {
++		if (mddev->gendisk)
++			trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
++					      bio_sector);
++		mddev_check_write_zeroes(mddev, bio);
++		submit_bio_noacct(bio);
++	}
++	return true;
++
++out_of_bounds:
++	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
++	       mdname(mddev),
++	       (unsigned long long)bio->bi_iter.bi_sector,
++	       tmp_dev->rdev->bdev,
++	       (unsigned long long)tmp_dev->rdev->sectors,
++	       (unsigned long long)start_sector);
++	bio_io_error(bio);
++	return true;
++}
++
++static void linear_status(struct seq_file *seq, struct mddev *mddev)
++{
++	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
++}
++
++static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
++{
++	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
++		char *md_name = mdname(mddev);
++
++		pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
++			md_name, rdev->bdev);
++	}
++}
++
++static void linear_quiesce(struct mddev *mddev, int state)
++{
++}
++
++static struct md_personality linear_personality = {
++	.name		= "linear",
++	.level		= LEVEL_LINEAR,
++	.owner		= THIS_MODULE,
++	.make_request	= linear_make_request,
++	.run		= linear_run,
++	.free		= linear_free,
++	.status		= linear_status,
++	.hot_add_disk	= linear_add,
++	.size		= linear_size,
++	.quiesce	= linear_quiesce,
++	.error_handler	= linear_error,
++};
++
++static int __init linear_init(void)
++{
++	return register_md_personality(&linear_personality);
++}
++
++static void linear_exit(void)
++{
++	unregister_md_personality(&linear_personality);
++}
++
++module_init(linear_init);
++module_exit(linear_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
++MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
++MODULE_ALIAS("md-linear");
++MODULE_ALIAS("md-level--1");
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index f0d007f967f1d3..465ca2af1e6efb 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8124,7 +8124,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	mddev->pers->error_handler(mddev, rdev);
+ 
+-	if (mddev->pers->level == 0)
++	if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
+ 		return;
+ 
+ 	if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index e1ae0f9fad4326..cb21df46bab169 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3566,15 +3566,15 @@ static int ccs_probe(struct i2c_client *client)
+ out_cleanup:
+ 	ccs_cleanup(sensor);
+ 
++out_free_ccs_limits:
++	kfree(sensor->ccs_limits);
++
+ out_release_mdata:
+ 	kvfree(sensor->mdata.backing);
+ 
+ out_release_sdata:
+ 	kvfree(sensor->sdata.backing);
+ 
+-out_free_ccs_limits:
+-	kfree(sensor->ccs_limits);
+-
+ out_power_off:
+ 	ccs_power_off(&client->dev);
+ 	mutex_destroy(&sensor->mutex);
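
The ccs-core hunk reorders the error labels so the unwind path releases resources in strict reverse order of acquisition. The underlying pattern, as a self-contained C sketch with hypothetical resources a/b/c:

#include <stdlib.h>

/* Goto-unwind: labels appear in reverse allocation order, so jumping
 * to any label frees that resource and falls through to free
 * everything allocated before it. */
static int probe_example(void)
{
	void *a, *b, *c;
	int ret = 0;

	a = malloc(16);
	if (!a)
		return -1;
	b = malloc(16);
	if (!b) {
		ret = -1;
		goto free_a;
	}
	c = malloc(16);
	if (!c) {
		ret = -1;
		goto free_b;
	}

	/* ... use a, b and c; success falls through the full ladder ... */

	free(c);
free_b:
	free(b);
free_a:
	free(a);
	return ret;
}

int main(void) { return probe_example(); }
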
+diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
+index 08400edf77ced1..2591dba51e17e2 100644
+--- a/drivers/media/i2c/ccs/ccs-data.c
++++ b/drivers/media/i2c/ccs/ccs-data.c
+@@ -10,6 +10,7 @@
+ #include <linux/limits.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ 
+ #include "ccs-data-defs.h"
+ 
+@@ -97,7 +98,7 @@ ccs_data_parse_length_specifier(const struct __ccs_data_length_specifier *__len,
+ 		plen = ((size_t)
+ 			(__len3->length[0] &
+ 			 ((1 << CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT) - 1))
+-			<< 16) + (__len3->length[0] << 8) + __len3->length[1];
++			<< 16) + (__len3->length[1] << 8) + __len3->length[2];
+ 		break;
+ 	}
+ 	default:
+@@ -948,15 +949,15 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
+ 
+ 	rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, verbose);
+ 	if (rval)
+-		return rval;
++		goto out_cleanup;
+ 
+ 	rval = bin_backing_alloc(&bin);
+ 	if (rval)
+-		return rval;
++		goto out_cleanup;
+ 
+ 	rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, false);
+ 	if (rval)
+-		goto out_free;
++		goto out_cleanup;
+ 
+ 	if (verbose && ccsdata->version)
+ 		print_ccs_data_version(dev, ccsdata->version);
+@@ -965,15 +966,16 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data,
+ 		rval = -EPROTO;
+ 		dev_dbg(dev, "parsing mismatch; base %p; now %p; end %p\n",
+ 			bin.base, bin.now, bin.end);
+-		goto out_free;
++		goto out_cleanup;
+ 	}
+ 
+ 	ccsdata->backing = bin.base;
+ 
+ 	return 0;
+ 
+-out_free:
++out_cleanup:
+ 	kvfree(bin.base);
++	memset(ccsdata, 0, sizeof(*ccsdata));
+ 
+ 	return rval;
+ }
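
The length-specifier fix above assembles the 24-bit length from bytes 0, 1 and 2; the old code reused byte 0 for the middle byte and byte 1 for the low byte. A tiny sketch of the corrected big-endian assembly (assuming the same byte layout as the CCS data format):

#include <stdint.h>
#include <stdio.h>

/* 24-bit big-endian length: masked high bits from byte 0, then
 * byte 1 and byte 2. The bug reused bytes 0 and 1. */
static uint32_t parse_len3(const uint8_t len[3], uint8_t hi_mask)
{
	return ((uint32_t)(len[0] & hi_mask) << 16) | (len[1] << 8) | len[2];
}

int main(void)
{
	const uint8_t raw[3] = { 0x01, 0x02, 0x03 };

	printf("0x%06x\n", parse_len3(raw, 0x3f));	/* prints 0x010203 */
	return 0;
}
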
+diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
+index 79bddfee2e2ec7..9f01950a0ca336 100644
+--- a/drivers/media/i2c/ds90ub913.c
++++ b/drivers/media/i2c/ds90ub913.c
+@@ -793,7 +793,6 @@ static void ub913_subdev_uninit(struct ub913_data *priv)
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	ub913_v4l2_nf_unregister(priv);
+ 	v4l2_subdev_cleanup(&priv->sd);
+-	fwnode_handle_put(priv->sd.fwnode);
+ 	media_entity_cleanup(&priv->sd.entity);
+ }
+ 
+diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
+index 725589b3e1c58a..b27656f8d2b121 100644
+--- a/drivers/media/i2c/ds90ub953.c
++++ b/drivers/media/i2c/ds90ub953.c
+@@ -1288,7 +1288,6 @@ static void ub953_subdev_uninit(struct ub953_data *priv)
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	ub953_v4l2_notifier_unregister(priv);
+ 	v4l2_subdev_cleanup(&priv->sd);
+-	fwnode_handle_put(priv->sd.fwnode);
+ 	media_entity_cleanup(&priv->sd.entity);
+ }
+ 
+diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
+index 1b1ff7f7505b00..d6b790ee08d6ad 100644
+--- a/drivers/media/i2c/ds90ub960.c
++++ b/drivers/media/i2c/ds90ub960.c
+@@ -351,6 +351,8 @@
+ 
+ #define UB960_SR_I2C_RX_ID(n)			(0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
+ 
++#define UB9702_SR_REFCLK_FREQ			0x3d
++
+ /* Indirect register blocks */
+ #define UB960_IND_TARGET_PAT_GEN		0x00
+ #define UB960_IND_TARGET_RX_ANA(n)		(0x01 + (n))
+@@ -1574,16 +1576,24 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
+ 
+ 		ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
+ 
+-		ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+-		if (ret)
+-			return ret;
++		if (priv->hw_data->is_ub9702) {
++			dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
++				nport, (v * 1000000ULL) >> 8);
++		} else {
++			ret = ub960_rxport_get_strobe_pos(priv, nport,
++							  &strobe_pos);
++			if (ret)
++				return ret;
+ 
+-		ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+-		if (ret)
+-			return ret;
++			ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
++			if (ret)
++				return ret;
+ 
+-		dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
+-			nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
++			dev_dbg(dev,
++				"\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
++				nport, strobe_pos, eq_level,
++				(v * 1000000ULL) >> 8);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -2522,7 +2532,7 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
+ 				for (i = 0; i < 8; i++)
+ 					ub960_rxport_write(priv, nport,
+ 							   UB960_RR_VC_ID_MAP(i),
+-							   nport);
++							   (nport << 4) | nport);
+ 			}
+ 
+ 			break;
+@@ -2939,6 +2949,54 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
+ 	.set_fmt = ub960_set_fmt,
+ };
+ 
++static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
++					 unsigned int nport)
++{
++	struct device *dev = &priv->client->dev;
++	u8 eq_level;
++	s8 strobe_pos;
++	u8 v = 0;
++
++	/* Strobe */
++
++	ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
++
++	dev_info(dev, "\t%s strobe\n",
++		 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
++							  "Manual");
++
++	if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
++		ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
++
++		dev_info(dev, "\tStrobe range [%d, %d]\n",
++			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
++			 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
++	}
++
++	ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
++
++	dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
++
++	/* EQ */
++
++	ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
++
++	dev_info(dev, "\t%s EQ\n",
++		 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
++						    "Adaptive");
++
++	if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
++		ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
++
++		dev_info(dev, "\tEQ range [%u, %u]\n",
++			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
++			 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
++	}
++
++	if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
++		dev_info(dev, "\tEQ level %u\n", eq_level);
++}
++
+ static int ub960_log_status(struct v4l2_subdev *sd)
+ {
+ 	struct ub960_data *priv = sd_to_ub960(sd);
+@@ -2986,8 +3044,6 @@ static int ub960_log_status(struct v4l2_subdev *sd)
+ 
+ 	for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ 		struct ub960_rxport *rxport = priv->rxports[nport];
+-		u8 eq_level;
+-		s8 strobe_pos;
+ 		unsigned int i;
+ 
+ 		dev_info(dev, "RX %u\n", nport);
+@@ -3023,44 +3079,8 @@ static int ub960_log_status(struct v4l2_subdev *sd)
+ 		ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ 		dev_info(dev, "\tcsi_err_counter %u\n", v);
+ 
+-		/* Strobe */
+-
+-		ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+-
+-		dev_info(dev, "\t%s strobe\n",
+-			 (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
+-								  "Manual");
+-
+-		if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
+-			ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+-
+-			dev_info(dev, "\tStrobe range [%d, %d]\n",
+-				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
+-				 ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
+-		}
+-
+-		ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+-
+-		dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
+-
+-		/* EQ */
+-
+-		ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+-
+-		dev_info(dev, "\t%s EQ\n",
+-			 (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
+-							    "Adaptive");
+-
+-		if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
+-			ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+-
+-			dev_info(dev, "\tEQ range [%u, %u]\n",
+-				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
+-				 (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
+-		}
+-
+-		if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
+-			dev_info(dev, "\tEQ level %u\n", eq_level);
++		if (!priv->hw_data->is_ub9702)
++			ub960_log_status_ub960_sp_eq(priv, nport);
+ 
+ 		/* GPIOs */
+ 		for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
+@@ -3834,7 +3854,10 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
+ 	if (ret)
+ 		goto err_pd_gpio;
+ 
+-	ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
++	if (priv->hw_data->is_ub9702)
++		ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
++	else
++		ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ 	if (ret)
+ 		goto err_pd_gpio;
+ 
+diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
+index 83149fa729c424..f3bec16b527c44 100644
+--- a/drivers/media/i2c/imx296.c
++++ b/drivers/media/i2c/imx296.c
+@@ -954,6 +954,8 @@ static int imx296_identify_model(struct imx296 *sensor)
+ 		return ret;
+ 	}
+ 
++	usleep_range(2000, 5000);
++
+ 	ret = imx296_read(sensor, IMX296_SENSOR_INFO);
+ 	if (ret < 0) {
+ 		dev_err(sensor->dev, "failed to read sensor information (%d)\n",
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index da5cb5f45a4ff5..0dae0438aa8085 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -1982,6 +1982,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
+ 			light_freq = 50;
+ 		} else {
+ 			/* 60Hz */
++			light_freq = 60;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+index 77f9c73198681b..8df1d83a74b5af 100644
+--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
++++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
+@@ -1133,6 +1133,7 @@ static int isys_probe(struct auxiliary_device *auxdev,
+ free_fw_msg_bufs:
+ 	free_fw_msg_bufs(isys);
+ out_remove_pkg_dir_shared_buffer:
++	cpu_latency_qos_remove_request(&isys->pm_qos);
+ 	if (!isp->secure_mode)
+ 		ipu6_cpd_free_pkg_dir(adev);
+ remove_shared_buffer:
+diff --git a/drivers/media/platform/marvell/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
+index 3fd4fc1b9c48f6..d3da7ebb4a2be6 100644
+--- a/drivers/media/platform/marvell/mmp-driver.c
++++ b/drivers/media/platform/marvell/mmp-driver.c
+@@ -231,13 +231,23 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 
+ 	mcam_init_clk(mcam);
+ 
++	/*
++	 * Register with V4L.
++	 */
++
++	ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * Create a match of the sensor against its OF node.
+ 	 */
+ 	ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
+ 					    NULL);
+-	if (!ep)
+-		return -ENODEV;
++	if (!ep) {
++		ret = -ENODEV;
++		goto out_v4l2_device_unregister;
++	}
+ 
+ 	v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
+ 
+@@ -246,7 +256,7 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	fwnode_handle_put(ep);
+ 	if (IS_ERR(asd)) {
+ 		ret = PTR_ERR(asd);
+-		goto out;
++		goto out_v4l2_device_unregister;
+ 	}
+ 
+ 	/*
+@@ -254,7 +264,7 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	 */
+ 	ret = mccic_register(mcam);
+ 	if (ret)
+-		goto out;
++		goto out_v4l2_device_unregister;
+ 
+ 	/*
+ 	 * Add OF clock provider.
+@@ -283,6 +293,8 @@ static int mmpcam_probe(struct platform_device *pdev)
+ 	return 0;
+ out:
+ 	mccic_shutdown(mcam);
++out_v4l2_device_unregister:
++	v4l2_device_unregister(&mcam->v4l2_dev);
+ 
+ 	return ret;
+ }
+@@ -293,6 +305,7 @@ static void mmpcam_remove(struct platform_device *pdev)
+ 	struct mcam_camera *mcam = &cam->mcam;
+ 
+ 	mccic_shutdown(mcam);
++	v4l2_device_unregister(&mcam->v4l2_dev);
+ 	pm_runtime_force_suspend(mcam->dev);
+ }
+ 
+diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
+index 4f5d75645b2bb9..024cd8ee17098d 100644
+--- a/drivers/media/platform/nuvoton/npcm-video.c
++++ b/drivers/media/platform/nuvoton/npcm-video.c
+@@ -1665,9 +1665,9 @@ static int npcm_video_ece_init(struct npcm_video *video)
+ 		dev_info(dev, "Support HEXTILE pixel format\n");
+ 
+ 		ece_pdev = of_find_device_by_node(ece_node);
+-		if (IS_ERR(ece_pdev)) {
++		if (!ece_pdev) {
+ 			dev_err(dev, "Failed to find ECE device\n");
+-			return PTR_ERR(ece_pdev);
++			return -ENODEV;
+ 		}
+ 		of_node_put(ece_node);
+ 
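
The npcm-video fix works because of_find_device_by_node() returns NULL on failure rather than an ERR_PTR, so the old IS_ERR() check could never fire. The two conventions are disjoint by design, as this simplified userspace re-implementation of the linux/err.h helpers shows:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified ERR_PTR/IS_ERR: errors are encoded in the top page of
 * the address space, so NULL is *not* an error pointer. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *null_ret = NULL;		/* NULL-returning APIs need a !ptr check */
	void *err_ret = ERR_PTR(-19);	/* -ENODEV encoded as a pointer */

	printf("IS_ERR(NULL) = %d\n", IS_ERR(null_ret));	/* 0 */
	printf("IS_ERR(err)  = %d\n", IS_ERR(err_ret));		/* 1 */
	return 0;
}
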
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 2d27c5167246f5..807487a1f5365f 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -506,18 +506,14 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
+ void venus_close_common(struct venus_inst *inst)
+ {
+ 	/*
+-	 * First, remove the inst from the ->instances list, so that
+-	 * to_instance() will return NULL.
+-	 */
+-	hfi_session_destroy(inst);
+-	/*
+-	 * Second, make sure we don't have IRQ/IRQ-thread currently running
++	 * Make sure we don't have IRQ/IRQ-thread currently running
+ 	 * or pending execution, which would race with the inst destruction.
+ 	 */
+ 	synchronize_irq(inst->core->irq);
+ 
+ 	v4l2_m2m_ctx_release(inst->m2m_ctx);
+ 	v4l2_m2m_release(inst->m2m_dev);
++	hfi_session_destroy(inst);
+ 	v4l2_fh_del(&inst->fh);
+ 	v4l2_fh_exit(&inst->fh);
+ 	v4l2_ctrl_handler_free(&inst->ctrl_handler);
+diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+index 7edd49bfe7e5b8..7f9fa79402df0d 100644
+--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
++++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
+@@ -887,7 +887,7 @@ struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
+ 	q->dev = dev;
+ 
+ 	/* DCMIPP requires 16 bytes aligned buffers */
+-	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
++	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to set DMA mask\n");
+ 		goto err_mutex_destroy;
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 4fe26e82e3d1c1..4837d8df9c0386 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1579,6 +1579,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
+ 	uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+ }
+ 
++static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
++				struct uvc_fh *new_handle)
++{
++	lockdep_assert_held(&handle->chain->ctrl_mutex);
++
++	if (new_handle) {
++		if (ctrl->handle)
++			dev_warn_ratelimited(&handle->stream->dev->udev->dev,
++					     "UVC non compliance: Setting an async control with a pending operation.");
++
++		if (new_handle == ctrl->handle)
++			return;
++
++		if (ctrl->handle) {
++			WARN_ON(!ctrl->handle->pending_async_ctrls);
++			if (ctrl->handle->pending_async_ctrls)
++				ctrl->handle->pending_async_ctrls--;
++		}
++
++		ctrl->handle = new_handle;
++		handle->pending_async_ctrls++;
++		return;
++	}
++
++	/* Cannot clear the handle for a control not owned by us. */
++	if (WARN_ON(ctrl->handle != handle))
++		return;
++
++	ctrl->handle = NULL;
++	if (WARN_ON(!handle->pending_async_ctrls))
++		return;
++	handle->pending_async_ctrls--;
++}
++
+ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ 			   struct uvc_control *ctrl, const u8 *data)
+ {
+@@ -1589,7 +1623,8 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ 	mutex_lock(&chain->ctrl_mutex);
+ 
+ 	handle = ctrl->handle;
+-	ctrl->handle = NULL;
++	if (handle)
++		uvc_ctrl_set_handle(handle, ctrl, NULL);
+ 
+ 	list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+ 		s32 value = __uvc_ctrl_get_value(mapping, data);
+@@ -1640,10 +1675,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
+ 	struct uvc_device *dev = chain->dev;
+ 	struct uvc_ctrl_work *w = &dev->async_ctrl;
+ 
+-	if (list_empty(&ctrl->info.mappings)) {
+-		ctrl->handle = NULL;
++	if (list_empty(&ctrl->info.mappings))
+ 		return false;
+-	}
+ 
+ 	w->data = data;
+ 	w->urb = urb;
+@@ -1673,13 +1706,13 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
+ {
+ 	struct uvc_control_mapping *mapping;
+ 	struct uvc_control *ctrl;
+-	u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ 	unsigned int i;
+ 	unsigned int j;
+ 
+ 	for (i = 0; i < xctrls_count; ++i) {
+-		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
++		u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ 
++		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ 		if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ 			/* Notification will be sent from an Interrupt event. */
+ 			continue;
+@@ -1811,7 +1844,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
+ }
+ 
+ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+-	struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
++				  struct uvc_fh *handle,
++				  struct uvc_entity *entity,
++				  int rollback,
++				  struct uvc_control **err_ctrl)
+ {
+ 	struct uvc_control *ctrl;
+ 	unsigned int i;
+@@ -1859,6 +1895,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+ 				*err_ctrl = ctrl;
+ 			return ret;
+ 		}
++
++		if (!rollback && handle &&
++		    ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
++			uvc_ctrl_set_handle(handle, ctrl, handle);
+ 	}
+ 
+ 	return 0;
+@@ -1895,8 +1935,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ 
+ 	/* Find the control. */
+ 	list_for_each_entry(entity, &chain->entities, chain) {
+-		ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
+-					     &err_ctrl);
++		ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
++					     rollback, &err_ctrl);
+ 		if (ret < 0) {
+ 			if (ctrls)
+ 				ctrls->error_idx =
+@@ -2046,9 +2086,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ 	mapping->set(mapping, value,
+ 		uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ 
+-	if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+-		ctrl->handle = handle;
+-
+ 	ctrl->dirty = 1;
+ 	ctrl->modified = 1;
+ 	return 0;
+@@ -2377,7 +2414,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
+ 			ctrl->dirty = 1;
+ 		}
+ 
+-		ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
++		ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -2770,6 +2807,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
+ 	return 0;
+ }
+ 
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
++{
++	struct uvc_entity *entity;
++
++	guard(mutex)(&handle->chain->ctrl_mutex);
++
++	if (!handle->pending_async_ctrls)
++		return;
++
++	list_for_each_entry(entity, &handle->chain->dev->entities, list) {
++		for (unsigned int i = 0; i < entity->ncontrols; ++i) {
++			if (entity->controls[i].handle != handle)
++				continue;
++			uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
++		}
++	}
++
++	WARN_ON(handle->pending_async_ctrls);
++}
++
+ /*
+  * Cleanup device controls.
+  */
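
uvc_ctrl_set_handle() above keeps ctrl->handle and the per-handle pending_async_ctrls counter in lockstep, so uvc_ctrl_cleanup_fh() can detach every control still pointing at a closing file handle. A miniature of that owner-tracking scheme, with hypothetical types (not the uvc structures):

#include <assert.h>
#include <stdio.h>

struct fh { int pending; };		/* stand-in for uvc_fh */
struct ctrl { struct fh *owner; };	/* stand-in for uvc_control */

/* Transfer ownership, keeping each handle's pending count balanced. */
static void set_owner(struct ctrl *c, struct fh *h)
{
	if (c->owner == h)
		return;
	if (c->owner)
		c->owner->pending--;	/* release the previous owner's ref */
	c->owner = h;
	if (h)
		h->pending++;
}

/* On release, detach every control still owned by this handle. */
static void cleanup_fh(struct fh *h, struct ctrl *ctrls, int n)
{
	for (int i = 0; i < n; i++)
		if (ctrls[i].owner == h)
			set_owner(&ctrls[i], NULL);
	assert(h->pending == 0);	/* every reference accounted for */
}

int main(void)
{
	struct fh a = {0}, b = {0};
	struct ctrl c[2] = {0};

	set_owner(&c[0], &a);
	set_owner(&c[1], &a);
	set_owner(&c[1], &b);		/* b takes over; a's count drops to 1 */
	cleanup_fh(&a, c, 2);
	printf("a.pending=%d b.pending=%d\n", a.pending, b.pending);	/* 0 1 */
	return 0;
}
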
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index b3c8411dc05c9e..31b4b54657feee 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -775,27 +775,14 @@ static const u8 uvc_media_transport_input_guid[16] =
+ 	UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+ 
+-static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+-					       u16 id, unsigned int num_pads,
+-					       unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
++		unsigned int num_pads, unsigned int extra_size)
+ {
+ 	struct uvc_entity *entity;
+ 	unsigned int num_inputs;
+ 	unsigned int size;
+ 	unsigned int i;
+ 
+-	/* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+-	if (id == 0) {
+-		dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+-	/* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+-	if (uvc_entity_by_id(dev, id)) {
+-		dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
+-		return ERR_PTR(-EINVAL);
+-	}
+-
+ 	extra_size = roundup(extra_size, sizeof(*entity->pads));
+ 	if (num_pads)
+ 		num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -805,7 +792,7 @@ static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ 	     + num_inputs;
+ 	entity = kzalloc(size, GFP_KERNEL);
+ 	if (entity == NULL)
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 
+ 	entity->id = id;
+ 	entity->type = type;
+@@ -917,10 +904,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 			break;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+-					    buffer[3], p + 1, 2 * n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
++					p + 1, 2*n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1029,10 +1016,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+-					    buffer[3], 1, n + p);
+-		if (IS_ERR(term))
+-			return PTR_ERR(term);
++		term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
++					1, n + p);
++		if (term == NULL)
++			return -ENOMEM;
+ 
+ 		if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ 			term->camera.bControlSize = n;
+@@ -1088,10 +1075,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return 0;
+ 		}
+ 
+-		term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+-					    buffer[3], 1, 0);
+-		if (IS_ERR(term))
+-			return PTR_ERR(term);
++		term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
++					1, 0);
++		if (term == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+@@ -1110,10 +1097,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+-					    p + 1, 0);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+@@ -1133,9 +1119,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->baSourceID, &buffer[4], 1);
+ 		unit->processing.wMaxMultiplier =
+@@ -1162,10 +1148,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+-					    p + 1, n);
+-		if (IS_ERR(unit))
+-			return PTR_ERR(unit);
++		unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
++		if (unit == NULL)
++			return -ENOMEM;
+ 
+ 		memcpy(unit->guid, &buffer[4], 16);
+ 		unit->extension.bNumControls = buffer[20];
+@@ -1295,20 +1280,19 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ 	struct gpio_desc *gpio_privacy;
+ 	int irq;
+ 
+-	gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
++	gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
+ 					       GPIOD_IN);
+ 	if (IS_ERR_OR_NULL(gpio_privacy))
+ 		return PTR_ERR_OR_ZERO(gpio_privacy);
+ 
+ 	irq = gpiod_to_irq(gpio_privacy);
+ 	if (irq < 0)
+-		return dev_err_probe(&dev->udev->dev, irq,
++		return dev_err_probe(&dev->intf->dev, irq,
+ 				     "No IRQ for privacy GPIO\n");
+ 
+-	unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
+-				    UVC_EXT_GPIO_UNIT_ID, 0, 1);
+-	if (IS_ERR(unit))
+-		return PTR_ERR(unit);
++	unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
++	if (!unit)
++		return -ENOMEM;
+ 
+ 	unit->gpio.gpio_privacy = gpio_privacy;
+ 	unit->gpio.irq = irq;
+@@ -1329,15 +1313,27 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ static int uvc_gpio_init_irq(struct uvc_device *dev)
+ {
+ 	struct uvc_entity *unit = dev->gpio_unit;
++	int ret;
+ 
+ 	if (!unit || unit->gpio.irq < 0)
+ 		return 0;
+ 
+-	return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
+-					 uvc_gpio_irq,
+-					 IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+-					 IRQF_TRIGGER_RISING,
+-					 "uvc_privacy_gpio", dev);
++	ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq,
++				   IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
++				   IRQF_TRIGGER_RISING,
++				   "uvc_privacy_gpio", dev);
++
++	unit->gpio.initialized = !ret;
++
++	return ret;
++}
++
++static void uvc_gpio_deinit(struct uvc_device *dev)
++{
++	if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized)
++		return;
++
++	free_irq(dev->gpio_unit->gpio.irq, dev);
+ }
+ 
+ /* ------------------------------------------------------------------------
+@@ -1934,6 +1930,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
+ {
+ 	struct uvc_streaming *stream;
+ 
++	uvc_gpio_deinit(dev);
++
+ 	list_for_each_entry(stream, &dev->streams, list) {
+ 		/* Nothing to do here, continue. */
+ 		if (!video_is_registered(&stream->vdev))
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 97c5407f66032a..b425306a3b8ca1 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -652,6 +652,8 @@ static int uvc_v4l2_release(struct file *file)
+ 
+ 	uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
+ 
++	uvc_ctrl_cleanup_fh(handle);
++
+ 	/* Only free resources if this is a privileged handle. */
+ 	if (uvc_has_privileges(handle))
+ 		uvc_queue_release(&stream->queue);
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index e00f38dd07d935..d2fe01bcd209e5 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -79,6 +79,27 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
+ 	if (likely(ret == size))
+ 		return 0;
+ 
++	/*
++	 * Some devices return shorter USB control packets than expected if the
++	 * returned value can fit in less bytes. Zero all the bytes that the
++	 * device has not written.
++	 *
++	 * This quirk is applied to all controls, regardless of their data type.
++	 * Most controls are little-endian integers, in which case the missing
++	 * bytes become 0 MSBs. For other data types, a different heuristic
++	 * could be implemented if a device is found needing it.
++	 *
++	 * We exclude UVC_GET_INFO from the quirk. UVC_GET_LEN does not need
++	 * to be excluded because its size is always 1.
++	 */
++	if (ret > 0 && query != UVC_GET_INFO) {
++		memset(data + ret, 0, size - ret);
++		dev_warn_once(&dev->udev->dev,
++			      "UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
++			      uvc_query_name(query), cs, unit, ret, size);
++		return 0;
++	}
++
+ 	if (ret != -EPIPE) {
+ 		dev_err(&dev->udev->dev,
+ 			"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
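
The uvc_query_ctrl() quirk above zero-pads short control reads so callers always see size initialized bytes; for the usual little-endian integer controls the missing bytes are the most significant ones, so zero padding preserves the value. A sketch of the pattern with a hypothetical transport function:

#include <stdio.h>
#include <string.h>

/* Hypothetical device read returning how many bytes the device
 * actually produced (short on non-compliant devices). */
static int device_read(unsigned char *buf, int size)
{
	(void)size;
	buf[0] = 0x2a;	/* only the low byte of a 2-byte LE value */
	return 1;
}

static int query_ctrl(unsigned char *data, int size)
{
	int ret = device_read(data, size);

	if (ret == size)
		return 0;
	if (ret > 0) {
		/* Short read: zero the unwritten tail so a little-endian
		 * integer keeps its value (missing bytes become 0 MSBs). */
		memset(data + ret, 0, size - ret);
		return 0;
	}
	return -1;
}

int main(void)
{
	unsigned char v[2] = { 0xff, 0xff };

	if (!query_ctrl(v, sizeof(v)))
		printf("value = %u\n", v[0] | (v[1] << 8));	/* 42, not 65322 */
	return 0;
}
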
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 07f9921d83f2d5..5690cfd61e23a1 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -234,6 +234,7 @@ struct uvc_entity {
+ 			u8  *bmControls;
+ 			struct gpio_desc *gpio_privacy;
+ 			int irq;
++			bool initialized;
+ 		} gpio;
+ 	};
+ 
+@@ -337,7 +338,11 @@ struct uvc_video_chain {
+ 	struct uvc_entity *processing;		/* Processing unit */
+ 	struct uvc_entity *selector;		/* Selector unit */
+ 
+-	struct mutex ctrl_mutex;		/* Protects ctrl.info */
++	struct mutex ctrl_mutex;		/*
++						 * Protects ctrl.info,
++						 * ctrl.handle and
++						 * uvc_fh.pending_async_ctrls
++						 */
+ 
+ 	struct v4l2_prio_state prio;		/* V4L2 priority state */
+ 	u32 caps;				/* V4L2 chain-wide caps */
+@@ -612,6 +617,7 @@ struct uvc_fh {
+ 	struct uvc_video_chain *chain;
+ 	struct uvc_streaming *stream;
+ 	enum uvc_handle_state state;
++	unsigned int pending_async_ctrls;
+ };
+ 
+ struct uvc_driver {
+@@ -797,6 +803,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ 		      struct uvc_xu_control_query *xqry);
+ 
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
++
+ /* Utility functions */
+ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
+ 					    u8 epaddr);
+diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
+index 4bb91359e3a9a7..937d358697e19a 100644
+--- a/drivers/media/v4l2-core/v4l2-mc.c
++++ b/drivers/media/v4l2-core/v4l2-mc.c
+@@ -329,7 +329,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ 	if (!(sink->flags & MEDIA_PAD_FL_SINK))
+ 		return -EINVAL;
+ 
+-	fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
++	fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) {
+ 		struct fwnode_handle *remote_ep;
+ 		int src_idx, sink_idx, ret;
+ 		struct media_pad *src;
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index 251465a656d092..bce85a58944ac4 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -1445,7 +1445,7 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
+ 		}
+ 	}
+ 
+-	ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_AUTO, axp20x->cells,
++	ret = mfd_add_devices(axp20x->dev, PLATFORM_DEVID_NONE, axp20x->cells,
+ 			      axp20x->nr_cells, NULL, 0, NULL);
+ 
+ 	if (ret) {
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index f14901660147f5..4b7d0cb9340f1a 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -834,8 +834,9 @@ static const struct pci_device_id lpc_ich_ids[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ 	{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ 	{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+-	{ PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
+ 	{ PCI_VDEVICE(INTEL, 0x2b9c), LPC_COUGARMOUNTAIN},
++	{ PCI_VDEVICE(INTEL, 0x3197), LPC_GLK},
++	{ PCI_VDEVICE(INTEL, 0x31e8), LPC_GLK},
+ 	{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ 	{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ 	{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 48d08eeb2d20b5..d1188cd12ec6ec 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -992,7 +992,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 			mmap_read_lock(current->mm);
+ 			vma = find_vma(current->mm, ctx->args[i].ptr);
+ 			if (vma)
+-				pages[i].addr += ctx->args[i].ptr -
++				pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
+ 						 vma->vm_start;
+ 			mmap_read_unlock(current->mm);
+ 
+@@ -1019,8 +1019,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
+ 					(pkt_size - rlen);
+ 			pages[i].addr = pages[i].addr &	PAGE_MASK;
+ 
+-			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
+-			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
++			pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
++			pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ 			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
+ 			args = args + mlen;
+ 			rlen -= mlen;
+@@ -2344,7 +2344,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+ 
+ 		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+ 		if (err)
+-			goto fdev_error;
++			goto populate_error;
+ 		break;
+ 	default:
+ 		err = -EINVAL;
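
The fastrpc fix aligns the user pointer down to a page boundary before computing its distance from vma->vm_start, so the page-list address advances by whole pages only; the sub-page offset is carried separately. The arithmetic, with made-up addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vm_start = 0x7f0000001000UL;	/* page-aligned VMA start */
	unsigned long ptr      = 0x7f0000003a80UL;	/* user buffer inside the VMA */

	/* Buggy: leaks the sub-page offset (0xa80) into the page address. */
	unsigned long bad  = ptr - vm_start;
	/* Fixed: only whole pages separate the buffer's page from vm_start. */
	unsigned long good = (ptr & PAGE_MASK) - vm_start;

	printf("bad=0x%lx good=0x%lx\n", bad, good);	/* 0x2a80 vs 0x2000 */
	return 0;
}
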
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 9566837c9848e6..4b19b8a16b0968 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -458,6 +458,8 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
+ 	if (mmc_card_sd_combo(card))
+ 		max_dtr = min(max_dtr, mmc_sd_get_max_clock(card));
+ 
++	max_dtr = min_not_zero(max_dtr, card->quirk_max_rate);
++
+ 	return max_dtr;
+ }
+ 
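
min_not_zero() clamps the clock to a per-card quirk limit while treating 0 as "no limit set". A simplified GCC-style version of the kernel macro from linux/minmax.h, for illustration:

#include <stdio.h>

#define min_not_zero(x, y) ({		\
	__typeof__(x) _x = (x);		\
	__typeof__(y) _y = (y);		\
	_x == 0 ? _y : (_y == 0 ? _x : (_x < _y ? _x : _y)); })

int main(void)
{
	unsigned int max_dtr = 50000000, no_quirk = 0, quirk = 25000000;

	printf("%u\n", min_not_zero(max_dtr, no_quirk));	/* 50000000: unchanged */
	printf("%u\n", min_not_zero(max_dtr, quirk));		/* 25000000: clamped */
	return 0;
}
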
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index d55d045ef2363b..e23177ea9d9166 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -304,6 +304,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
+ 			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ 			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ 			| ESDHC_FLAG_SKIP_ERR004536 | ESDHC_FLAG_SKIP_CD_WAKE,
++	.quirks = SDHCI_QUIRK_NO_LED,
+ };
+ 
+ static struct esdhc_soc_data usdhc_imx7ulp_data = {
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 319f0ebbe652d6..1fcaaf683d68c9 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -134,9 +134,18 @@
+ /* Timeout value to avoid infinite waiting for pwr_irq */
+ #define MSM_PWR_IRQ_TIMEOUT_MS 5000
+ 
++/* Max load for eMMC Vdd supply */
++#define MMC_VMMC_MAX_LOAD_UA	570000
++
+ /* Max load for eMMC Vdd-io supply */
+ #define MMC_VQMMC_MAX_LOAD_UA	325000
+ 
++/* Max load for SD Vdd supply */
++#define SD_VMMC_MAX_LOAD_UA	800000
++
++/* Max load for SD Vdd-io supply */
++#define SD_VQMMC_MAX_LOAD_UA	22000
++
+ #define msm_host_readl(msm_host, host, offset) \
+ 	msm_host->var_ops->msm_readl_relaxed(host, offset)
+ 
+@@ -1403,11 +1412,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
+ 	return ret;
+ }
+ 
+-static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
++static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
++{
++	int load;
++
++	if (!hpm)
++		load = 0;
++	else if (!mmc->card)
++		load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
++	else if (mmc_card_mmc(mmc->card))
++		load = MMC_VMMC_MAX_LOAD_UA;
++	else if (mmc_card_sd(mmc->card))
++		load = SD_VMMC_MAX_LOAD_UA;
++	else
++		return;
++
++	regulator_set_load(mmc->supply.vmmc, load);
++}
++
++static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
++{
++	int load;
++
++	if (!hpm)
++		load = 0;
++	else if (!mmc->card)
++		load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
++	else if (mmc_card_sd(mmc->card))
++		load = SD_VQMMC_MAX_LOAD_UA;
++	else
++		return;
++
++	regulator_set_load(mmc->supply.vqmmc, load);
++}
++
++static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
++			      struct mmc_host *mmc, bool hpm)
+ {
+ 	if (IS_ERR(mmc->supply.vmmc))
+ 		return 0;
+ 
++	msm_config_vmmc_regulator(mmc, hpm);
++
+ 	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
+ }
+ 
+@@ -1420,6 +1466,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
+ 	if (msm_host->vqmmc_enabled == level)
+ 		return 0;
+ 
++	msm_config_vqmmc_regulator(mmc, level);
++
+ 	if (level) {
+ 		/* Set the IO voltage regulator to default voltage level */
+ 		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
+@@ -1642,7 +1690,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
+ 	}
+ 
+ 	if (pwr_state) {
+-		ret = sdhci_msm_set_vmmc(mmc);
++		ret = sdhci_msm_set_vmmc(msm_host, mmc,
++					 pwr_state & REQ_BUS_ON);
+ 		if (!ret)
+ 			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
+ 					pwr_state & REQ_BUS_ON);
+diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
+index f66385faf631cd..0dc2ea4fc857b7 100644
+--- a/drivers/mtd/nand/onenand/onenand_base.c
++++ b/drivers/mtd/nand/onenand/onenand_base.c
+@@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
+ 	ret = ONENAND_IS_4KB_PAGE(this) ?
+ 		onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ 		onenand_read_ops_nolock(mtd, from, &ops);
++	*retlen = ops.retlen;
+ 
+ 	/* Exit OTP access mode */
+ 	this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 30be4ed68fad29..ef6a22f372f95c 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1537,7 +1537,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
+ 	if (token) {
+ 		int err = kstrtoint(token, 10, &p->ubi_num);
+ 
+-		if (err) {
++		if (err || p->ubi_num < UBI_DEV_NUM_AUTO) {
+ 			pr_err("UBI error: bad value for ubi_num parameter: %s\n",
+ 			       token);
+ 			return -EINVAL;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index fe0e3e2a811718..71e50fc65c1478 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
+ 	aq_ptp_ring_free(self);
+ 	aq_ptp_free(self);
+ 
+-	if (likely(self->aq_fw_ops->deinit) && link_down) {
++	/* May be invoked during hot unplug. */
++	if (pci_device_is_present(self->pdev) &&
++	    likely(self->aq_fw_ops->deinit) && link_down) {
+ 		mutex_lock(&self->fwreq_mutex);
+ 		self->aq_fw_ops->deinit(self->aq_hw);
+ 		mutex_unlock(&self->fwreq_mutex);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index 0715ea5bf13ed9..3b082114f2e538 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	struct device *kdev = &priv->pdev->dev;
++	u32 phy_wolopts = 0;
+ 
+-	if (dev->phydev)
++	if (dev->phydev) {
+ 		phy_ethtool_get_wol(dev->phydev, wol);
++		phy_wolopts = wol->wolopts;
++	}
+ 
+ 	/* MAC is not wake-up capable, return what the PHY does */
+ 	if (!device_can_wakeup(kdev))
+@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 
+ 	/* Overlay MAC capabilities with that of the PHY queried before */
+ 	wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+-	wol->wolopts = priv->wolopts;
+-	memset(wol->sopass, 0, sizeof(wol->sopass));
++	wol->wolopts |= priv->wolopts;
+ 
++	/* Return the PHY configured magic password */
++	if (phy_wolopts & WAKE_MAGICSECURE)
++		return;
++
++	/* Otherwise the MAC one */
++	memset(wol->sopass, 0, sizeof(wol->sopass));
+ 	if (wol->wolopts & WAKE_MAGICSECURE)
+ 		memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
+ }
+@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ 	/* Try Wake-on-LAN from the PHY first */
+ 	if (dev->phydev) {
+ 		ret = phy_ethtool_set_wol(dev->phydev, wol);
+-		if (ret != -EOPNOTSUPP)
++		if (ret != -EOPNOTSUPP && wol->wolopts)
+ 			return ret;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 9cc8db10a8d604..5ba22fe0995f91 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -55,6 +55,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/hwmon-sysfs.h>
+ #include <linux/crc32poly.h>
++#include <linux/dmi.h>
+ 
+ #include <net/checksum.h>
+ #include <net/gso.h>
+@@ -18192,6 +18193,50 @@ static int tg3_resume(struct device *device)
+ 
+ static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+ 
++/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
++ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
++ * be, powered down.
++ */
++static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
++		},
++	},
++	{}
++};
++
+ static void tg3_shutdown(struct pci_dev *pdev)
+ {
+ 	struct net_device *dev = pci_get_drvdata(pdev);
+@@ -18208,6 +18253,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
+ 
+ 	if (system_state == SYSTEM_POWER_OFF)
+ 		tg3_power_down(tp);
++	else if (system_state == SYSTEM_RESTART &&
++		 dmi_first_match(tg3_restart_aer_quirk_table) &&
++		 pdev->current_state != PCI_D3cold &&
++		 pdev->current_state != PCI_UNKNOWN) {
++		/* Disable PCIe AER on the tg3 to avoid a fatal
++		 * error during this system restart.
++		 */
++		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
++					   PCI_EXP_DEVCTL_CERE |
++					   PCI_EXP_DEVCTL_NFERE |
++					   PCI_EXP_DEVCTL_FERE |
++					   PCI_EXP_DEVCTL_URRE);
++	}
+ 
+ 	rtnl_unlock();
+ 
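
The tg3 change gates the AER workaround on a DMI allowlist so only the affected Dell PowerEdge platforms are touched. Table-driven platform matching in miniature (a userspace stand-in for dmi_first_match(), not the kernel API):

#include <stdio.h>
#include <string.h>

struct dmi_id { const char *vendor; const char *product; };

/* Quirk table terminated by an empty entry, as DMI tables are. */
static const struct dmi_id quirk_table[] = {
	{ "Dell Inc.", "PowerEdge R440" },
	{ "Dell Inc.", "PowerEdge R740" },
	{ NULL, NULL }
};

static int needs_quirk(const char *vendor, const char *product)
{
	for (const struct dmi_id *id = quirk_table; id->vendor; id++)
		if (!strcmp(id->vendor, vendor) && !strcmp(id->product, product))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", needs_quirk("Dell Inc.", "PowerEdge R740"));	/* 1 */
	printf("%d\n", needs_quirk("Acme", "Box"));			/* 0 */
	return 0;
}
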
+diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
+index 415445cefdb2aa..b1efd287b3309c 100644
+--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
++++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
+@@ -977,6 +977,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
+ 
+ 	/* preallocate memory for ice_sched_node */
+ 	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
++	if (!node)
++		return -ENOMEM;
++
+ 	*priv = node;
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 5d2d7736fd5f12..9c9ea4c1b93b7f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -527,15 +527,14 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
+  * @xdp: xdp_buff used as input to the XDP program
+  * @xdp_prog: XDP program to run
+  * @xdp_ring: ring to be used for XDP_TX action
+- * @rx_buf: Rx buffer to store the XDP action
+  * @eop_desc: Last descriptor in packet to read metadata from
+  *
+  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+  */
+-static void
++static u32
+ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+-	    struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
++	    union ice_32b_rx_flex_desc *eop_desc)
+ {
+ 	unsigned int ret = ICE_XDP_PASS;
+ 	u32 act;
+@@ -574,7 +573,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 		ret = ICE_XDP_CONSUMED;
+ 	}
+ exit:
+-	ice_set_rx_bufs_act(xdp, rx_ring, ret);
++	return ret;
+ }
+ 
+ /**
+@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ 		xdp_buff_set_frags_flag(xdp);
+ 	}
+ 
+-	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+-		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
++	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
+ 		return -ENOMEM;
+-	}
+ 
+ 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ 				   rx_buf->page_offset, size);
+@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ 	struct ice_rx_buf *rx_buf;
+ 
+ 	rx_buf = &rx_ring->rx_buf[ntc];
+-	rx_buf->pgcnt = page_count(rx_buf->page);
+ 	prefetchw(rx_buf->page);
+ 
+ 	if (!size)
+@@ -940,6 +936,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ 	return rx_buf;
+ }
+ 
++/**
++ * ice_get_pgcnts - grab page_count() for gathered fragments
++ * @rx_ring: Rx descriptor ring to store the page counts on
++ *
++ * This function is intended to be called right before running the XDP
++ * program so that the page recycling mechanism can make a correct
++ * decision about the underlying pages; the snapshot must be taken
++ * first because the XDP program can change the page refcounts.
++ */
++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
++{
++	u32 nr_frags = rx_ring->nr_frags + 1;
++	u32 idx = rx_ring->first_desc;
++	struct ice_rx_buf *rx_buf;
++	u32 cnt = rx_ring->count;
++
++	for (int i = 0; i < nr_frags; i++) {
++		rx_buf = &rx_ring->rx_buf[idx];
++		rx_buf->pgcnt = page_count(rx_buf->page);
++
++		if (++idx == cnt)
++			idx = 0;
++	}
++}
++
+ /**
+  * ice_build_skb - Build skb around an existing buffer
+  * @rx_ring: Rx descriptor ring to transact packets on
+@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
+ 				rx_buf->page_offset + headlen, size,
+ 				xdp->frame_sz);
+ 	} else {
+-		/* buffer is unused, change the act that should be taken later
+-		 * on; data was copied onto skb's linear part so there's no
++		/* buffer is unused, restore biased page count in Rx buffer;
++		 * data was copied onto skb's linear part so there's no
+ 		 * need for adjusting page offset and we can reuse this buffer
+ 		 * as-is
+ 		 */
+-		rx_buf->act = ICE_SKB_CONSUMED;
++		rx_buf->pagecnt_bias++;
+ 	}
+ 
+ 	if (unlikely(xdp_buff_has_frags(xdp))) {
+@@ -1103,6 +1124,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ 	rx_buf->page = NULL;
+ }
+ 
++/**
++ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
++ * @rx_ring: Rx ring with all the auxiliary data
++ * @xdp: XDP buffer carrying linear + frags part
++ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
++ * @ntc: the current next_to_clean value to be stored on rx_ring
++ * @verdict: return code from XDP program execution
++ *
++ * Walk through the gathered fragments and satisfy the internal page
++ * recycle mechanism; the action taken here depends on the verdict
++ * returned by the XDP program.
++ */
++static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
++			    u32 *xdp_xmit, u32 ntc, u32 verdict)
++{
++	u32 nr_frags = rx_ring->nr_frags + 1;
++	u32 idx = rx_ring->first_desc;
++	u32 cnt = rx_ring->count;
++	u32 post_xdp_frags = 1;
++	struct ice_rx_buf *buf;
++	int i;
++
++	if (unlikely(xdp_buff_has_frags(xdp)))
++		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
++
++	for (i = 0; i < post_xdp_frags; i++) {
++		buf = &rx_ring->rx_buf[idx];
++
++		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
++			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
++			*xdp_xmit |= verdict;
++		} else if (verdict & ICE_XDP_CONSUMED) {
++			buf->pagecnt_bias++;
++		} else if (verdict == ICE_XDP_PASS) {
++			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
++		}
++
++		ice_put_rx_buf(rx_ring, buf);
++
++		if (++idx == cnt)
++			idx = 0;
++	}
++	/* handle buffers that represent frags released by the XDP prog;
++	 * for these we keep pagecnt_bias as-is: the refcount of the
++	 * struct page was already decremented within the XDP prog, so
++	 * there is no need to bump the biased refcnt
++	 */
++	for (; i < nr_frags; i++) {
++		buf = &rx_ring->rx_buf[idx];
++		ice_put_rx_buf(rx_ring, buf);
++		if (++idx == cnt)
++			idx = 0;
++	}
++
++	xdp->data = NULL;
++	rx_ring->first_desc = ntc;
++	rx_ring->nr_frags = 0;
++}
++
+ /**
+  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+  * @rx_ring: Rx descriptor ring to transact packets on
+@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ 	unsigned int offset = rx_ring->rx_offset;
+ 	struct xdp_buff *xdp = &rx_ring->xdp;
+-	u32 cached_ntc = rx_ring->first_desc;
+ 	struct ice_tx_ring *xdp_ring = NULL;
+ 	struct bpf_prog *xdp_prog = NULL;
+ 	u32 ntc = rx_ring->next_to_clean;
++	u32 cached_ntu, xdp_verdict;
+ 	u32 cnt = rx_ring->count;
+ 	u32 xdp_xmit = 0;
+-	u32 cached_ntu;
+ 	bool failure;
+-	u32 first;
+ 
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	if (xdp_prog) {
+@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ 			xdp_buff_clear_frags_flag(xdp);
+ 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
++			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+ 			break;
+ 		}
+ 		if (++ntc == cnt)
+@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		if (ice_is_non_eop(rx_ring, rx_desc))
+ 			continue;
+ 
+-		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
+-		if (rx_buf->act == ICE_XDP_PASS)
++		ice_get_pgcnts(rx_ring);
++		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
++		if (xdp_verdict == ICE_XDP_PASS)
+ 			goto construct_skb;
+ 		total_rx_bytes += xdp_get_buff_len(xdp);
+ 		total_rx_pkts++;
+ 
+-		xdp->data = NULL;
+-		rx_ring->first_desc = ntc;
+-		rx_ring->nr_frags = 0;
++		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++
+ 		continue;
+ construct_skb:
+ 		if (likely(ice_ring_uses_build_skb(rx_ring)))
+@@ -1217,18 +1296,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		/* exit if we failed to retrieve a buffer */
+ 		if (!skb) {
+ 			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+-			rx_buf->act = ICE_XDP_CONSUMED;
+-			if (unlikely(xdp_buff_has_frags(xdp)))
+-				ice_set_rx_bufs_act(xdp, rx_ring,
+-						    ICE_XDP_CONSUMED);
+-			xdp->data = NULL;
+-			rx_ring->first_desc = ntc;
+-			rx_ring->nr_frags = 0;
+-			break;
++			xdp_verdict = ICE_XDP_CONSUMED;
+ 		}
+-		xdp->data = NULL;
+-		rx_ring->first_desc = ntc;
+-		rx_ring->nr_frags = 0;
++		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++
++		if (!skb)
++			break;
+ 
+ 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ 		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+@@ -1257,23 +1330,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ 		total_rx_pkts++;
+ 	}
+ 
+-	first = rx_ring->first_desc;
+-	while (cached_ntc != first) {
+-		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+-
+-		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-			xdp_xmit |= buf->act;
+-		} else if (buf->act & ICE_XDP_CONSUMED) {
+-			buf->pagecnt_bias++;
+-		} else if (buf->act == ICE_XDP_PASS) {
+-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+-		}
+-
+-		ice_put_rx_buf(rx_ring, buf);
+-		if (++cached_ntc >= cnt)
+-			cached_ntc = 0;
+-	}
+ 	rx_ring->next_to_clean = ntc;
+ 	/* return up to cleaned_count buffers to hardware */
+ 	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
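Taken together, the ice_txrx.c hunks replace the per-buffer "act" bookkeeping with one verdict returned by ice_run_xdp() and applied to all fragments in ice_put_rx_mbuf(). A toy model of that recycling decision is sketched below; the flag values, the bias counter, and the offset flip are simplified stand-ins for the driver's pagecnt_bias and ice_rx_buf_adjust_pg_offset(), not its actual API:

	#include <stdio.h>

	#define XDP_PASS     0x0
	#define XDP_CONSUMED 0x1
	#define XDP_TX       0x2
	#define XDP_REDIR    0x4

	struct rx_buf {
		int page_offset;  /* where the next frame may land on this page */
		int pagecnt_bias; /* references we hold on the page */
	};

	/* apply one verdict to one buffer, mirroring the loop in ice_put_rx_mbuf() */
	static void put_rx_buf(struct rx_buf *buf, int frame_sz, unsigned int verdict)
	{
		if (verdict & (XDP_TX | XDP_REDIR)) {
			/* page is in flight: flip to the page's other half for reuse */
			buf->page_offset ^= frame_sz;
		} else if (verdict & XDP_CONSUMED) {
			/* frame dropped: give our reference back so the page recycles */
			buf->pagecnt_bias++;
		} else { /* XDP_PASS: buffer feeds an skb, split the page as usual */
			buf->page_offset ^= frame_sz;
		}
	}

	int main(void)
	{
		struct rx_buf buf = { .page_offset = 0, .pagecnt_bias = 1 };

		put_rx_buf(&buf, 2048, XDP_TX);       /* offset flips to 2048 */
		put_rx_buf(&buf, 2048, XDP_CONSUMED); /* bias goes to 2 */
		printf("offset=%d bias=%d\n", buf.page_offset, buf.pagecnt_bias);
		return 0;
	}

In the driver, ice_get_pgcnts() snapshots page_count() for the same fragment window just before the program runs, so the recycling comparison stays meaningful even when the XDP program changes page refcounts.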
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index cb347c852ba9e8..806bce701df349 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -201,7 +201,6 @@ struct ice_rx_buf {
+ 	struct page *page;
+ 	unsigned int page_offset;
+ 	unsigned int pgcnt;
+-	unsigned int act;
+ 	unsigned int pagecnt_bias;
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+index 79f960c6680d17..6cf32b40412753 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+@@ -5,49 +5,6 @@
+ #define _ICE_TXRX_LIB_H_
+ #include "ice.h"
+ 
+-/**
+- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+- * @xdp: XDP buffer representing frame (linear and frags part)
+- * @rx_ring: Rx ring struct
+- * act: action to store onto Rx buffers related to XDP buffer parts
+- *
+- * Set action that should be taken before putting Rx buffer from first frag
+- * to the last.
+- */
+-static inline void
+-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+-		    const unsigned int act)
+-{
+-	u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+-	u32 nr_frags = rx_ring->nr_frags + 1;
+-	u32 idx = rx_ring->first_desc;
+-	u32 cnt = rx_ring->count;
+-	struct ice_rx_buf *buf;
+-
+-	for (int i = 0; i < nr_frags; i++) {
+-		buf = &rx_ring->rx_buf[idx];
+-		buf->act = act;
+-
+-		if (++idx == cnt)
+-			idx = 0;
+-	}
+-
+-	/* adjust pagecnt_bias on frags freed by XDP prog */
+-	if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+-		u32 delta = rx_ring->nr_frags - sinfo_frags;
+-
+-		while (delta) {
+-			if (idx == 0)
+-				idx = cnt - 1;
+-			else
+-				idx--;
+-			buf = &rx_ring->rx_buf[idx];
+-			buf->pagecnt_bias--;
+-			delta--;
+-		}
+-	}
+-}
+-
+ /**
+  * ice_test_staterr - tests bits in Rx descriptor status and error fields
+  * @status_err_n: Rx descriptor status_error0 or status_error1 bits
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+index 4f4d5818911885..a88c006ea65b71 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+@@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev,
+ 				    iface_rx_stats,
+ 				    iface_tx_stats);
+ 
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		tx_busy_errors += iq->stats.tx_busy;
+-
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
+-		rx_alloc_errors += oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		tx_busy_errors += oct->stats_iq[q].tx_busy;
++
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
++		rx_alloc_errors += oct->stats_oq[q].alloc_failures;
+ 	}
+ 	i = 0;
+ 	data[i++] = rx_packets;
+@@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev,
+ 	data[i++] = iface_rx_stats->err_pkts;
+ 
+ 	/* Per Tx Queue stats */
+-	for (q = 0; q < oct->num_iqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-
+-		data[i++] = iq->stats.instr_posted;
+-		data[i++] = iq->stats.instr_completed;
+-		data[i++] = iq->stats.bytes_sent;
+-		data[i++] = iq->stats.tx_busy;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_iq[q].instr_posted;
++		data[i++] = oct->stats_iq[q].instr_completed;
++		data[i++] = oct->stats_iq[q].bytes_sent;
++		data[i++] = oct->stats_iq[q].tx_busy;
+ 	}
+ 
+ 	/* Per Rx Queue stats */
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		data[i++] = oq->stats.packets;
+-		data[i++] = oq->stats.bytes;
+-		data[i++] = oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_oq[q].packets;
++		data[i++] = oct->stats_oq[q].bytes;
++		data[i++] = oct->stats_oq[q].alloc_failures;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 730aa5632cceee..a89f80bac39b8d 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq)
+ 	if (unlikely(IQ_INSTR_SPACE(iq) >
+ 		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ 		netif_start_subqueue(iq->netdev, iq->q_no);
+-		iq->stats.restart_cnt++;
++		iq->stats->restart_cnt++;
+ 		return 0;
+ 	}
+ 
+@@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ 	wmb();
+ 	/* Ring Doorbell to notify the NIC of new packets */
+ 	writel(iq->fill_cnt, iq->doorbell_reg);
+-	iq->stats.instr_posted += iq->fill_cnt;
++	iq->stats->instr_posted += iq->fill_cnt;
+ 	iq->fill_cnt = 0;
+ 	return NETDEV_TX_OK;
+ 
+@@ -991,22 +991,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ static void octep_get_stats64(struct net_device *netdev,
+ 			      struct rtnl_link_stats64 *stats)
+ {
+-	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ 	struct octep_device *oct = netdev_priv(netdev);
++	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ 	int q;
+ 
+ 	tx_packets = 0;
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+ 	rx_bytes = 0;
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_iq *iq = oct->iq[q];
+-		struct octep_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
++	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
+ 	}
+ 	stats->tx_packets = tx_packets;
+ 	stats->tx_bytes = tx_bytes;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+index fee59e0e0138fe..936b786f428168 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+@@ -257,11 +257,17 @@ struct octep_device {
+ 	/* Pointers to Octeon Tx queues */
+ 	struct octep_iq *iq[OCTEP_MAX_IQ];
+ 
++	/* Per iq stats */
++	struct octep_iq_stats stats_iq[OCTEP_MAX_IQ];
++
+ 	/* Rx queues (OQ: Output Queue) */
+ 	u16 num_oqs;
+ 	/* Pointers to Octeon Rx queues */
+ 	struct octep_oq *oq[OCTEP_MAX_OQ];
+ 
++	/* Per oq stats */
++	struct octep_oq_stats stats_oq[OCTEP_MAX_OQ];
++
+ 	/* Hardware port number of the PCIe interface */
+ 	u16 pcie_port;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index 8af75cb37c3ee8..82b6b19e76b47a 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+ 		page = dev_alloc_page();
+ 		if (unlikely(!page)) {
+ 			dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 
+@@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+ 				"OQ-%d buffer refill: DMA mapping error!\n",
+ 				oq->q_no);
+ 			put_page(page);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 		oq->buff_info[refill_idx].page = page;
+@@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
+ 	oq->netdev = oct->netdev;
+ 	oq->dev = &oct->pdev->dev;
+ 	oq->q_no = q_no;
++	oq->stats = &oct->stats_oq[q_no];
+ 	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ 	oq->ring_size_mask = oq->max_count - 1;
+ 	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+@@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ 		if (!skb) {
+ 			octep_oq_drop_rx(oq, buff_info,
+ 					 &read_idx, &desc_used);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			continue;
+ 		}
+ 		skb_reserve(skb, data_offset);
+@@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ 
+ 	oq->host_read_idx = read_idx;
+ 	oq->refill_count += desc_used;
+-	oq->stats.packets += pkt;
+-	oq->stats.bytes += rx_bytes;
++	oq->stats->packets += pkt;
++	oq->stats->bytes += rx_bytes;
+ 
+ 	return pkt;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+index 3b08e2d560dc39..b4696c93d0e6a9 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+@@ -186,8 +186,8 @@ struct octep_oq {
+ 	 */
+ 	u8 __iomem *pkts_sent_reg;
+ 
+-	/* Statistics for this OQ. */
+-	struct octep_oq_stats stats;
++	/* Pointer to statistics for this OQ. */
++	struct octep_oq_stats *stats;
+ 
+ 	/* Packets pending to be processed */
+ 	u32 pkts_pending;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+index 06851b78aa28c8..08ee90013fef3b 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+@@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+ 	}
+ 
+ 	iq->pkts_processed += compl_pkts;
+-	iq->stats.instr_completed += compl_pkts;
+-	iq->stats.bytes_sent += compl_bytes;
+-	iq->stats.sgentry_sent += compl_sg;
++	iq->stats->instr_completed += compl_pkts;
++	iq->stats->bytes_sent += compl_bytes;
++	iq->stats->sgentry_sent += compl_sg;
+ 	iq->flush_index = fi;
+ 
+ 	netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+@@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no)
+ 	iq->netdev = oct->netdev;
+ 	iq->dev = &oct->pdev->dev;
+ 	iq->q_no = q_no;
++	iq->stats = &oct->stats_iq[q_no];
+ 	iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ 	iq->ring_size_mask = iq->max_count - 1;
+ 	iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+index 875a2c34091ffe..58fb39dda977c0 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+@@ -170,8 +170,8 @@ struct octep_iq {
+ 	 */
+ 	u16 flush_index;
+ 
+-	/* Statistics for this input queue. */
+-	struct octep_iq_stats stats;
++	/* Pointer to statistics for this input queue. */
++	struct octep_iq_stats *stats;
+ 
+ 	/* Pointer to the Virtual Base addr of the input ring. */
+ 	struct octep_tx_desc_hw *desc_ring;
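The common thread in the octeon_ep hunks above is that per-queue counters move out of the queue structs, which are freed and reallocated on reconfiguration, into arrays owned by the long-lived device struct; each queue then holds only a pointer into its slot, so statistics survive queue teardown. A compact sketch of that ownership change, with invented struct names:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_QUEUES 4

	struct iq_stats { uint64_t instr_posted, instr_completed, bytes_sent; };

	struct iq {
		int q_no;
		struct iq_stats *stats; /* points into the device-owned array */
	};

	struct device_ctx {
		struct iq_stats stats_iq[MAX_QUEUES]; /* lives as long as the device */
		struct iq *iq[MAX_QUEUES];            /* freed/rebuilt on reconfig */
	};

	static void setup_iq(struct device_ctx *dev, struct iq *q, int q_no)
	{
		q->q_no = q_no;
		q->stats = &dev->stats_iq[q_no]; /* same slot across queue rebuilds */
	}

	int main(void)
	{
		struct device_ctx dev = { 0 };
		struct iq q0;

		setup_iq(&dev, &q0, 0);
		q0.stats->instr_posted += 10;

		/* the queue struct can now be torn down; the counter persists */
		printf("posted=%llu\n",
		       (unsigned long long)dev.stats_iq[0].instr_posted);
		return 0;
	}

This is also why the ethtool and get_stats64 loops now iterate over OCTEP_MAX_QUEUES rather than the live num_iqs/num_oqs: the arrays are always fully allocated, whether or not a queue is currently up.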
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+index 7b21439a315f2b..d60441928ba96c 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+@@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ 	iface_tx_stats = &oct->iface_tx_stats;
+ 	iface_rx_stats = &oct->iface_rx_stats;
+ 
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		tx_busy_errors += iq->stats.tx_busy;
+-		rx_alloc_errors += oq->stats.alloc_failures;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		tx_busy_errors += oct->stats_iq[q].tx_busy;
++		rx_alloc_errors += oct->stats_oq[q].alloc_failures;
+ 	}
+ 	i = 0;
+ 	data[i++] = rx_alloc_errors;
+@@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ 	data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ 
+ 	/* Per Tx Queue stats */
+-	for (q = 0; q < oct->num_iqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-
+-		data[i++] = iq->stats.instr_posted;
+-		data[i++] = iq->stats.instr_completed;
+-		data[i++] = iq->stats.bytes_sent;
+-		data[i++] = iq->stats.tx_busy;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		data[i++] = oct->stats_iq[q].instr_posted;
++		data[i++] = oct->stats_iq[q].instr_completed;
++		data[i++] = oct->stats_iq[q].bytes_sent;
++		data[i++] = oct->stats_iq[q].tx_busy;
+ 	}
+ 
+ 	/* Per Rx Queue stats */
+ 	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		data[i++] = oq->stats.packets;
+-		data[i++] = oq->stats.bytes;
+-		data[i++] = oq->stats.alloc_failures;
++		data[i++] = oct->stats_oq[q].packets;
++		data[i++] = oct->stats_oq[q].bytes;
++		data[i++] = oct->stats_oq[q].alloc_failures;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 4c699514fd57a0..18c922dd5fc64d 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -574,7 +574,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+ 		  * caused queues to get re-enabled after
+ 		  * being stopped
+ 		  */
+-		iq->stats.restart_cnt++;
++		iq->stats->restart_cnt++;
+ 		fallthrough;
+ 	case 1: /* Queue left enabled, since IQ is not yet full*/
+ 		return 0;
+@@ -731,7 +731,7 @@ static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ 	/* Flush the hw descriptors before writing to doorbell */
+ 	smp_wmb();
+ 	writel(iq->fill_cnt, iq->doorbell_reg);
+-	iq->stats.instr_posted += iq->fill_cnt;
++	iq->stats->instr_posted += iq->fill_cnt;
+ 	iq->fill_cnt = 0;
+ 	return NETDEV_TX_OK;
+ }
+@@ -786,14 +786,11 @@ static void octep_vf_get_stats64(struct net_device *netdev,
+ 	tx_bytes = 0;
+ 	rx_packets = 0;
+ 	rx_bytes = 0;
+-	for (q = 0; q < oct->num_oqs; q++) {
+-		struct octep_vf_iq *iq = oct->iq[q];
+-		struct octep_vf_oq *oq = oct->oq[q];
+-
+-		tx_packets += iq->stats.instr_completed;
+-		tx_bytes += iq->stats.bytes_sent;
+-		rx_packets += oq->stats.packets;
+-		rx_bytes += oq->stats.bytes;
++	for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
++		tx_packets += oct->stats_iq[q].instr_completed;
++		tx_bytes += oct->stats_iq[q].bytes_sent;
++		rx_packets += oct->stats_oq[q].packets;
++		rx_bytes += oct->stats_oq[q].bytes;
+ 	}
+ 	stats->tx_packets = tx_packets;
+ 	stats->tx_bytes = tx_bytes;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+index 5769f62545cd44..1a352f41f823cd 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+@@ -246,11 +246,17 @@ struct octep_vf_device {
+ 	/* Pointers to Octeon Tx queues */
+ 	struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+ 
++	/* Per iq stats */
++	struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ];
++
+ 	/* Rx queues (OQ: Output Queue) */
+ 	u16 num_oqs;
+ 	/* Pointers to Octeon Rx queues */
+ 	struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+ 
++	/* Per oq stats */
++	struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ];
++
+ 	/* Hardware port number of the PCIe interface */
+ 	u16 pcie_port;
+ 
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+index 82821bc28634b6..d70c8be3cfc40b 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
+ 		page = dev_alloc_page();
+ 		if (unlikely(!page)) {
+ 			dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 
+@@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o
+ 				"OQ-%d buffer refill: DMA mapping error!\n",
+ 				oq->q_no);
+ 			put_page(page);
+-			oq->stats.alloc_failures++;
++			oq->stats->alloc_failures++;
+ 			break;
+ 		}
+ 		oq->buff_info[refill_idx].page = page;
+@@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+ 	oq->netdev = oct->netdev;
+ 	oq->dev = &oct->pdev->dev;
+ 	oq->q_no = q_no;
++	oq->stats = &oct->stats_oq[q_no];
+ 	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ 	oq->ring_size_mask = oq->max_count - 1;
+ 	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+@@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ 
+ 	oq->host_read_idx = read_idx;
+ 	oq->refill_count += desc_used;
+-	oq->stats.packets += pkt;
+-	oq->stats.bytes += rx_bytes;
++	oq->stats->packets += pkt;
++	oq->stats->bytes += rx_bytes;
+ 
+ 	return pkt;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+index fe46838b5200ff..9e296b7d7e3494 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
+@@ -187,7 +187,7 @@ struct octep_vf_oq {
+ 	u8 __iomem *pkts_sent_reg;
+ 
+ 	/* Statistics for this OQ. */
+-	struct octep_vf_oq_stats stats;
++	struct octep_vf_oq_stats *stats;
+ 
+ 	/* Packets pending to be processed */
+ 	u32 pkts_pending;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+index 47a5c054fdb636..8180e5ce3d7efe 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
+@@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+ 	}
+ 
+ 	iq->pkts_processed += compl_pkts;
+-	iq->stats.instr_completed += compl_pkts;
+-	iq->stats.bytes_sent += compl_bytes;
+-	iq->stats.sgentry_sent += compl_sg;
++	iq->stats->instr_completed += compl_pkts;
++	iq->stats->bytes_sent += compl_bytes;
++	iq->stats->sgentry_sent += compl_sg;
+ 	iq->flush_index = fi;
+ 
+ 	netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+@@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+ 	iq->netdev = oct->netdev;
+ 	iq->dev = &oct->pdev->dev;
+ 	iq->q_no = q_no;
++	iq->stats = &oct->stats_iq[q_no];
+ 	iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ 	iq->ring_size_mask = iq->max_count - 1;
+ 	iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+index f338b975103c30..1cede90e3a5fae 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
+@@ -129,7 +129,7 @@ struct octep_vf_iq {
+ 	u16 flush_index;
+ 
+ 	/* Statistics for this input queue. */
+-	struct octep_vf_iq_stats stats;
++	struct octep_vf_iq_stats *stats;
+ 
+ 	/* Pointer to the Virtual Base addr of the input ring. */
+ 	struct octep_vf_tx_desc_hw *desc_ring;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 4822d01123b45d..d61a1a9297c904 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work)
+ 	}
+ }
+ 
+-static void mlx5_timestamp_overflow(struct work_struct *work)
++static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
+ {
+-	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct mlx5_core_dev *mdev;
+ 	struct mlx5_timer *timer;
+ 	struct mlx5_clock *clock;
+ 	unsigned long flags;
+ 
+-	timer = container_of(dwork, struct mlx5_timer, overflow_work);
+-	clock = container_of(timer, struct mlx5_clock, timer);
++	clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
+ 	mdev = container_of(clock, struct mlx5_core_dev, clock);
++	timer = &clock->timer;
+ 
+ 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ 		goto out;
+@@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
+ 	write_sequnlock_irqrestore(&clock->lock, flags);
+ 
+ out:
+-	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
++	return timer->overflow_period;
+ }
+ 
+ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
+@@ -517,6 +516,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ 	timer->cycles.mult = mult;
+ 	mlx5_update_clock_info_page(mdev);
+ 	write_sequnlock_irqrestore(&clock->lock, flags);
++	ptp_schedule_worker(clock->ptp, timer->overflow_period);
+ 
+ 	return 0;
+ }
+@@ -852,6 +852,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ 	.settime64	= mlx5_ptp_settime,
+ 	.enable		= NULL,
+ 	.verify		= NULL,
++	.do_aux_work	= mlx5_timestamp_overflow,
+ };
+ 
+ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+@@ -1052,12 +1053,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+ 	do_div(ns, NSEC_PER_SEC / HZ);
+ 	timer->overflow_period = ns;
+ 
+-	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+-	if (timer->overflow_period)
+-		schedule_delayed_work(&timer->overflow_work, 0);
+-	else
++	if (!timer->overflow_period) {
++		timer->overflow_period = HZ;
+ 		mlx5_core_warn(mdev,
+-			       "invalid overflow period, overflow_work is not scheduled\n");
++			       "invalid overflow period, overflow_work is scheduled once per second\n");
++	}
+ 
+ 	if (clock_info)
+ 		clock_info->overflow_period = timer->overflow_period;
+@@ -1172,6 +1172,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
+ 
+ 	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
+ 	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
++
++	if (clock->ptp)
++		ptp_schedule_worker(clock->ptp, 0);
+ }
+ 
+ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+@@ -1188,7 +1191,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+ 	}
+ 
+ 	cancel_work_sync(&clock->pps_info.out_work);
+-	cancel_delayed_work_sync(&clock->timer.overflow_work);
+ 
+ 	if (mdev->clock_info) {
+ 		free_page((unsigned long)mdev->clock_info);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+index baacf662c0ab89..ae2849cf4dd49c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+@@ -152,6 +152,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ 	if (!bwc_matcher)
+ 		return NULL;
+ 
++	atomic_set(&bwc_matcher->num_of_rules, 0);
++
+ 	/* Check if the required match params can be all matched
+ 	 * in single STE, otherwise complex matcher is needed.
+ 	 */
+@@ -199,10 +201,12 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+ 
+ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+ {
+-	if (bwc_matcher->num_of_rules)
++	u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
++
++	if (num_of_rules)
+ 		mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ 			    "BWC matcher destroy: matcher still has %d rules\n",
+-			    bwc_matcher->num_of_rules);
++			    num_of_rules);
+ 
+ 	mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ 
+@@ -309,7 +313,7 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+ {
+ 	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ 
+-	bwc_matcher->num_of_rules++;
++	atomic_inc(&bwc_matcher->num_of_rules);
+ 	bwc_rule->bwc_queue_idx = idx;
+ 	list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+ }
+@@ -318,7 +322,7 @@ static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+ {
+ 	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ 
+-	bwc_matcher->num_of_rules--;
++	atomic_dec(&bwc_matcher->num_of_rules);
+ 	list_del_init(&bwc_rule->list_node);
+ }
+ 
+@@ -704,7 +708,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+ 	 * Need to check again if we really need rehash.
+ 	 * If the reason for rehash was size, but not any more - skip rehash.
+ 	 */
+-	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
++	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
++						atomic_read(&bwc_matcher->num_of_rules)))
+ 		return 0;
+ 
+ 	/* Now we're done all the checking - do the rehash:
+@@ -797,7 +802,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ 	}
+ 
+ 	/* check if number of rules require rehash */
+-	num_of_rules = bwc_matcher->num_of_rules;
++	num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+ 
+ 	if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ 		mutex_unlock(queue_lock);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+index 0b745968e21e18..655fa7a22d84f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+@@ -19,7 +19,7 @@ struct mlx5hws_bwc_matcher {
+ 	u8 num_of_at;
+ 	u16 priority;
+ 	u8 size_log;
+-	u32 num_of_rules; /* atomically accessed */
++	atomic_t num_of_rules;
+ 	struct list_head *rules;
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+index 1bb3a6f8c3cda8..e94f96c0c781f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+@@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+ 						    next->match_ste.rtc_0_id,
+ 						    next->match_ste.rtc_1_id);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
++			return ret;
+ 		}
+ 	} else {
+ 		ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
++			return ret;
+ 		}
+ 	}
+ 
+@@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+ 	if (prev_ft_id == tbl->ft_id) {
+ 		ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ 		if (ret) {
+-			mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+-			goto matcher_reconnect;
++			mlx5hws_err(tbl->ctx,
++				    "Fatal error, failed to update connected miss table\n");
++			return ret;
+ 		}
+ 	}
+ 
+ 	ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ 	if (ret) {
+ 		mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+-		goto matcher_reconnect;
++		return ret;
+ 	}
+ 
+ 	return 0;
+-
+-matcher_reconnect:
+-	if (list_empty(&tbl->matchers_list) || !prev)
+-		list_add(&matcher->list_node, &tbl->matchers_list);
+-	else
+-		/* insert after prev matcher */
+-		list_add(&matcher->list_node, &prev->list_node);
+-
+-	return ret;
+ }
+ 
+ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index e1de45fb18aeea..2be2889d0646ba 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2242,8 +2242,6 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+ 	struct device *dev = common->dev;
+ 	int i;
+ 
+-	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+-
+ 	common->tx_ch_rate_msk = 0;
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+@@ -2265,8 +2263,6 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ 	for (i = 0; i < common->tx_ch_num; i++) {
+ 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ 
+-		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+-				  am65_cpsw_nuss_tx_poll);
+ 		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ 		tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ 
+@@ -2279,9 +2275,21 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+ 				tx_chn->id, tx_chn->irq, ret);
+ 			goto err;
+ 		}
++
++		netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
++				  am65_cpsw_nuss_tx_poll);
+ 	}
+ 
++	return 0;
++
+ err:
++	for (--i; i >= 0; i--) {
++		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
++
++		netif_napi_del(&tx_chn->napi_tx);
++		devm_free_irq(dev, tx_chn->irq, tx_chn);
++	}
++
+ 	return ret;
+ }
+ 
+@@ -2362,12 +2370,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+ 		goto err;
+ 	}
+ 
++	return 0;
++
+ err:
+-	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+-	if (i) {
+-		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
+-		return i;
+-	}
++	am65_cpsw_nuss_free_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -2395,7 +2401,6 @@ static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
+ 
+ 	rx_chn = &common->rx_chns;
+ 	flows = rx_chn->flows;
+-	devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+ 
+ 	for (i = 0; i < common->rx_ch_num_flows; i++) {
+ 		if (!(flows[i].irq < 0))
+@@ -2494,7 +2499,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 						i, &rx_flow_cfg);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
+-			goto err;
++			goto err_flow;
+ 		}
+ 		if (!i)
+ 			fdqring_id =
+@@ -2506,14 +2511,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			dev_err(dev, "Failed to get rx dma irq %d\n",
+ 				flow->irq);
+ 			ret = flow->irq;
+-			goto err;
++			goto err_flow;
+ 		}
+ 
+ 		snprintf(flow->name,
+ 			 sizeof(flow->name), "%s-rx%d",
+ 			 dev_name(dev), i);
+-		netif_napi_add(common->dma_ndev, &flow->napi_rx,
+-			       am65_cpsw_nuss_rx_poll);
+ 		hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+ 			     HRTIMER_MODE_REL_PINNED);
+ 		flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+@@ -2526,20 +2529,28 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+ 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ 				i, flow->irq, ret);
+ 			flow->irq = -EINVAL;
+-			goto err;
++			goto err_flow;
+ 		}
++
++		netif_napi_add(common->dma_ndev, &flow->napi_rx,
++			       am65_cpsw_nuss_rx_poll);
+ 	}
+ 
+ 	/* setup classifier to route priorities to flows */
+ 	cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+ 
+-err:
+-	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+-	if (i) {
+-		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
+-		return i;
++	return 0;
++
++err_flow:
++	for (--i; i >= 0; i--) {
++		flow = &rx_chn->flows[i];
++		netif_napi_del(&flow->napi_rx);
++		devm_free_irq(dev, flow->irq, flow);
+ 	}
+ 
++err:
++	am65_cpsw_nuss_free_rx_chns(common);
++
+ 	return ret;
+ }
+ 
+@@ -3349,7 +3360,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ 		return ret;
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
+ 	if (ret)
+-		return ret;
++		goto err_remove_tx;
+ 
+ 	/* The DMA Channels are not guaranteed to be in a clean state.
+ 	 * Reset and disable them to ensure that they are back to the
+@@ -3370,7 +3381,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ 
+ 	ret = am65_cpsw_nuss_register_devlink(common);
+ 	if (ret)
+-		return ret;
++		goto err_remove_rx;
+ 
+ 	for (i = 0; i < common->port_num; i++) {
+ 		port = &common->ports[i];
+@@ -3401,6 +3412,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+ err_cleanup_ndev:
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_unregister_devlink(common);
++err_remove_rx:
++	am65_cpsw_nuss_remove_rx_chns(common);
++err_remove_tx:
++	am65_cpsw_nuss_remove_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -3420,6 +3435,8 @@ int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ 		return ret;
+ 
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
++	if (ret)
++		am65_cpsw_nuss_remove_tx_chns(common);
+ 
+ 	return ret;
+ }
+@@ -3678,6 +3695,8 @@ static void am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	 */
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_unregister_devlink(common);
++	am65_cpsw_nuss_remove_rx_chns(common);
++	am65_cpsw_nuss_remove_tx_chns(common);
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ 	am65_cpsw_disable_serdes_phy(common);
+@@ -3739,8 +3758,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 	ret = am65_cpsw_nuss_init_rx_chns(common);
+-	if (ret)
++	if (ret) {
++		am65_cpsw_nuss_remove_tx_chns(common);
+ 		return ret;
++	}
+ 
+ 	/* If RX IRQ was disabled before suspend, keep it disabled */
+ 	for (i = 0; i < common->rx_ch_num_flows; i++) {
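The am65-cpsw hunks drop the devm release actions in favour of explicit error paths, including the reverse unwind loop "for (--i; i >= 0; i--)" that releases only the NAPI instances and IRQs set up before the failure. The idiom in isolation, with a fake acquire step that fails partway through (all names invented):

	#include <stdio.h>

	#define NCHAN 4

	static int acquire(int i)
	{
		if (i == 2)
			return -1; /* simulate request_irq() failing on channel 2 */
		printf("acquired %d\n", i);
		return 0;
	}

	static void release(int i)
	{
		printf("released %d\n", i);
	}

	static int setup_all(void)
	{
		int i;

		for (i = 0; i < NCHAN; i++) {
			if (acquire(i) < 0)
				goto err;
		}
		return 0;

	err:
		/* unwind only what succeeded, in reverse order */
		for (--i; i >= 0; i--)
			release(i);
		return -1;
	}

	int main(void)
	{
		return setup_all() ? 1 : 0;
	}

Note the unwind starts at --i because index i itself failed and owns nothing; the same reasoning is why the driver now registers NAPI only after its IRQ request succeeds.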
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index ade544bc007d25..872e582b7e8387 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -1297,6 +1297,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev)
+ 	if (ret)
+ 		return ret;
+ 
++	usleep_range(2000, 2050);
++
+ 	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ 					 VEND1_DEVICE_CONTROL, ret,
+ 					 !(ret & DEVICE_CONTROL_RESET), 20000,
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 148c7bc66c0af1..acf96f26248873 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -580,7 +580,7 @@ static inline bool tun_not_capable(struct tun_struct *tun)
+ 	struct net *net = dev_net(tun->dev);
+ 
+ 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+-		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
++		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
+ }
+ 
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 46afb95ffabe3b..a19789b571905a 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -61,7 +61,18 @@
+ #define IPHETH_USBINTF_PROTO    1
+ 
+ #define IPHETH_IP_ALIGN		2	/* padding at front of URB */
+-#define IPHETH_NCM_HEADER_SIZE  (12 + 96) /* NCMH + NCM0 */
++/* On iOS devices, NCM headers in RX have a fixed size regardless of DPE count:
++ * - NTH16 (NCMH): 12 bytes, as per CDC NCM 1.0 spec
++ * - NDP16 (NCM0): 96 bytes, of which
++ *    - NDP16 fixed header: 8 bytes
++ *    - maximum of 22 DPEs (21 datagrams + trailer), 4 bytes each
++ */
++#define IPHETH_NDP16_MAX_DPE	22
++#define IPHETH_NDP16_HEADER_SIZE (sizeof(struct usb_cdc_ncm_ndp16) + \
++				  IPHETH_NDP16_MAX_DPE * \
++				  sizeof(struct usb_cdc_ncm_dpe16))
++#define IPHETH_NCM_HEADER_SIZE	(sizeof(struct usb_cdc_ncm_nth16) + \
++				 IPHETH_NDP16_HEADER_SIZE)
+ #define IPHETH_TX_BUF_SIZE      ETH_FRAME_LEN
+ #define IPHETH_RX_BUF_SIZE_LEGACY (IPHETH_IP_ALIGN + ETH_FRAME_LEN)
+ #define IPHETH_RX_BUF_SIZE_NCM	65536
+@@ -207,15 +218,23 @@ static int ipheth_rcvbulk_callback_legacy(struct urb *urb)
+ 	return ipheth_consume_skb(buf, len, dev);
+ }
+ 
++/* In "NCM mode", the iOS device encapsulates RX (phone->computer) traffic
++ * in NCM Transfer Blocks (similarly to CDC NCM). However, unlike reverse
++ * tethering (handled by the `cdc_ncm` driver), regular tethering is not
++ * compliant with the CDC NCM spec, as the device is missing the necessary
++ * descriptors, and TX (computer->phone) traffic is not encapsulated
++ * at all. Thus `ipheth` implements a very limited subset of the spec with
++ * the sole purpose of parsing RX URBs.
++ */
+ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
+ {
+ 	struct usb_cdc_ncm_nth16 *ncmh;
+ 	struct usb_cdc_ncm_ndp16 *ncm0;
+ 	struct usb_cdc_ncm_dpe16 *dpe;
+ 	struct ipheth_device *dev;
++	u16 dg_idx, dg_len;
+ 	int retval = -EINVAL;
+ 	char *buf;
+-	int len;
+ 
+ 	dev = urb->context;
+ 
+@@ -226,40 +245,42 @@ static int ipheth_rcvbulk_callback_ncm(struct urb *urb)
+ 
+ 	ncmh = urb->transfer_buffer;
+ 	if (ncmh->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN) ||
+-	    le16_to_cpu(ncmh->wNdpIndex) >= urb->actual_length) {
+-		dev->net->stats.rx_errors++;
+-		return retval;
+-	}
++	    /* On iOS, NDP16 directly follows NTH16 */
++	    ncmh->wNdpIndex != cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)))
++		goto rx_error;
+ 
+-	ncm0 = urb->transfer_buffer + le16_to_cpu(ncmh->wNdpIndex);
+-	if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN) ||
+-	    le16_to_cpu(ncmh->wHeaderLength) + le16_to_cpu(ncm0->wLength) >=
+-	    urb->actual_length) {
+-		dev->net->stats.rx_errors++;
+-		return retval;
+-	}
++	ncm0 = urb->transfer_buffer + sizeof(struct usb_cdc_ncm_nth16);
++	if (ncm0->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN))
++		goto rx_error;
+ 
+ 	dpe = ncm0->dpe16;
+-	while (le16_to_cpu(dpe->wDatagramIndex) != 0 &&
+-	       le16_to_cpu(dpe->wDatagramLength) != 0) {
+-		if (le16_to_cpu(dpe->wDatagramIndex) >= urb->actual_length ||
+-		    le16_to_cpu(dpe->wDatagramIndex) +
+-		    le16_to_cpu(dpe->wDatagramLength) > urb->actual_length) {
++	for (int dpe_i = 0; dpe_i < IPHETH_NDP16_MAX_DPE; ++dpe_i, ++dpe) {
++		dg_idx = le16_to_cpu(dpe->wDatagramIndex);
++		dg_len = le16_to_cpu(dpe->wDatagramLength);
++
++		/* Null DPE must be present after last datagram pointer entry
++		 * (3.3.1 USB CDC NCM spec v1.0)
++		 */
++		if (dg_idx == 0 && dg_len == 0)
++			return 0;
++
++		if (dg_idx < IPHETH_NCM_HEADER_SIZE ||
++		    dg_idx >= urb->actual_length ||
++		    dg_len > urb->actual_length - dg_idx) {
+ 			dev->net->stats.rx_length_errors++;
+ 			return retval;
+ 		}
+ 
+-		buf = urb->transfer_buffer + le16_to_cpu(dpe->wDatagramIndex);
+-		len = le16_to_cpu(dpe->wDatagramLength);
++		buf = urb->transfer_buffer + dg_idx;
+ 
+-		retval = ipheth_consume_skb(buf, len, dev);
++		retval = ipheth_consume_skb(buf, dg_len, dev);
+ 		if (retval != 0)
+ 			return retval;
+-
+-		dpe++;
+ 	}
+ 
+-	return 0;
++rx_error:
++	dev->net->stats.rx_errors++;
++	return retval;
+ }
+ 
+ static void ipheth_rcvbulk_callback(struct urb *urb)
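The new ipheth macros spell out the old magic (12 + 96): NTH16 is 12 bytes, and the fixed-size NDP16 is an 8-byte header plus 22 four-byte DPEs (21 datagrams plus the null trailer), i.e. 8 + 88 = 96, for 108 bytes of headers in total. A compile-time check of that arithmetic against packed layouts of the CDC NCM structures (field names per the spec, struct names shortened here):

	#include <stdint.h>
	#include <stdio.h>

	struct __attribute__((packed)) nth16 {	/* usb_cdc_ncm_nth16 */
		uint32_t dwSignature;
		uint16_t wHeaderLength, wSequence, wBlockLength, wNdpIndex;
	};

	struct __attribute__((packed)) dpe16 {	/* usb_cdc_ncm_dpe16 */
		uint16_t wDatagramIndex, wDatagramLength;
	};

	struct __attribute__((packed)) ndp16 {	/* usb_cdc_ncm_ndp16 */
		uint32_t dwSignature;
		uint16_t wLength, wNextNdpIndex;
		struct dpe16 dpe16[];		/* flexible array, size 0 */
	};

	#define MAX_DPE 22
	#define NDP16_HEADER_SIZE (sizeof(struct ndp16) + \
				   MAX_DPE * sizeof(struct dpe16))
	#define NCM_HEADER_SIZE   (sizeof(struct nth16) + NDP16_HEADER_SIZE)

	_Static_assert(sizeof(struct nth16) == 12, "NTH16 is 12 bytes");
	_Static_assert(NDP16_HEADER_SIZE == 96, "NDP16 + 22 DPEs is 96 bytes");
	_Static_assert(NCM_HEADER_SIZE == 108, "matches the old (12 + 96)");

	int main(void)
	{
		printf("NCM header size: %zu\n", NCM_HEADER_SIZE);
		return 0;
	}

This 108-byte figure is what the new bounds check uses: any DPE whose datagram index points below IPHETH_NCM_HEADER_SIZE is rejected as a length error.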
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index 1341374a4588a0..616ecc38d1726c 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
+ 	if (likely(cpu < tq_number))
+ 		tq = &adapter->tx_queue[cpu];
+ 	else
+-		tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
++		tq = &adapter->tx_queue[cpu % tq_number];
+ 
+ 	return tq;
+ }
+@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 	u32 buf_size;
+ 	u32 dw2;
+ 
++	spin_lock_irq(&tq->tx_lock);
+ 	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ 	dw2 |= xdpf->len;
+ 	ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 
+ 	if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
+ 		tq->stats.tx_ring_full++;
++		spin_unlock_irq(&tq->tx_lock);
+ 		return -ENOSPC;
+ 	}
+ 
+@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
+ 					       xdpf->data, buf_size,
+ 					       DMA_TO_DEVICE);
+-		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
++		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
++			spin_unlock_irq(&tq->tx_lock);
+ 			return -EFAULT;
++		}
+ 		tbi->map_type |= VMXNET3_MAP_SINGLE;
+ 	} else { /* XDP buffer from page pool */
+ 		page = virt_to_page(xdpf->data);
+@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ 	dma_wmb();
+ 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ 						  VMXNET3_TXD_GEN);
++	spin_unlock_irq(&tq->tx_lock);
+ 
+ 	/* No need to handle the case when tx_num_deferred doesn't reach
+ 	 * threshold. Backend driver at hypervisor side will poll and reset
+@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ {
+ 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
+ 	struct vmxnet3_tx_queue *tq;
++	struct netdev_queue *nq;
+ 	int i;
+ 
+ 	if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
+@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ 	if (tq->stopped)
+ 		return -ENETDOWN;
+ 
++	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
++
++	__netif_tx_lock(nq, smp_processor_id());
+ 	for (i = 0; i < n; i++) {
+ 		if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
+ 			tq->stats.xdp_xmit_err++;
+@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
+ 		}
+ 	}
+ 	tq->stats.xdp_xmit += i;
++	__netif_tx_unlock(nq);
+ 
+ 	return i;
+ }
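The queue-selection change above is worth a closer look: reciprocal_scale(x, n) maps x into [0, n) as (x * n) >> 32, which spreads 32-bit hash values fairly but collapses small integers, so for any realistic CPU id it returned 0 and all fallback XDP traffic piled onto queue 0. Plain modulo round-robins correctly. A quick userspace demonstration, re-implementing the kernel helper:

	#include <stdint.h>
	#include <stdio.h>

	/* userspace copy of the kernel's reciprocal_scale() */
	static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
	{
		return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
	}

	int main(void)
	{
		uint32_t tq_number = 4;

		/* the fallback path only runs for cpu >= tq_number */
		for (uint32_t cpu = 4; cpu < 12; cpu++)
			printf("cpu %2u -> scale %u, mod %u\n", cpu,
			       reciprocal_scale(cpu, tq_number), /* always 0 */
			       cpu % tq_number);                 /* 0,1,2,3,... */
		return 0;
	}

The surrounding hunks also serialize the descriptor ring with tq->tx_lock and take the netdev tx queue lock around the xmit batch, closing the race between XDP transmit and the regular stack path on the shared queue.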
+diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+index c9980c0193d1d7..43ea87e981f421 100644
+--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
++++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+@@ -1562,7 +1562,8 @@ ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
+ 			 le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
+ 	len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
+ 					htt_stats_buf->ac_mu_mimo_brpoll,
+-					ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n");
++					ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
++					"\n\n");
+ 
+ 	stats_req->buf_len = len;
+ }
+@@ -1590,7 +1591,7 @@ ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
+ 			 le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
+ 	len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
+ 					htt_stats_buf->ax_mu_mimo_brpoll,
+-					ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
++					ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
+ 	len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ 			 le32_to_cpu(htt_stats_buf->ax_basic_trigger));
+ 	len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index ef2736fb5f53fd..e8639ad8761a2e 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -4378,6 +4378,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	if (sta) {
+ 		ahsta = ath12k_sta_to_ahsta(sta);
++
+ 		/* For an ML STA Pairwise key is same for all associated link Stations,
+ 		 * hence do set key for all link STAs which are active.
+ 		 */
+@@ -4400,41 +4401,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 				if (ret)
+ 					break;
+ 			}
+-		} else {
+-			arsta = &ahsta->deflink;
+-			arvif = arsta->arvif;
+-			if (WARN_ON(!arvif)) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
+ 
+-			ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+-		}
+-	} else {
+-		if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+-			link_id = key->link_id;
+-			arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+-		} else {
+-			link_id = 0;
+-			arvif = &ahvif->deflink;
++			return 0;
+ 		}
+ 
+-		if (!arvif || !arvif->is_created) {
+-			cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+-			if (!cache)
+-				return -ENOSPC;
++		arsta = &ahsta->deflink;
++		arvif = arsta->arvif;
++		if (WARN_ON(!arvif))
++			return -EINVAL;
+ 
+-			ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
++		ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
++		if (ret)
++			return ret;
+ 
++		return 0;
++	}
++
++	if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
++		link_id = key->link_id;
++		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
++	} else {
++		link_id = 0;
++		arvif = &ahvif->deflink;
++	}
++
++	if (!arvif || !arvif->is_created) {
++		cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
++		if (!cache)
++			return -ENOSPC;
++
++		ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
++		if (ret)
+ 			return ret;
+-		}
+ 
+-		ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
++		return 0;
+ 	}
+ 
+-out:
++	ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
++	if (ret)
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index da72fd2d541ff7..20ab9b1eea2836 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -540,6 +540,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
+ 	struct ethhdr *eh;
+ 	u16 type;
+ 
++	if (!ifp) {
++		brcmu_pkt_buf_free_skb(txp);
++		return;
++	}
++
+ 	eh = (struct ethhdr *)(txp->data);
+ 	type = ntohs(eh->h_proto);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+index c1f18e2fe540d3..1681ad00f82ecd 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+@@ -99,13 +99,13 @@ int brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ 	/* Set board-type to the first string of the machine compatible prop */
+ 	root = of_find_node_by_path("/");
+ 	if (root && err) {
+-		char *board_type;
++		char *board_type = NULL;
+ 		const char *tmp;
+ 
+-		of_property_read_string_index(root, "compatible", 0, &tmp);
+-
+ 		/* get rid of '/' in the compatible string to be able to find the FW */
+-		board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++		if (!of_property_read_string_index(root, "compatible", 0, &tmp))
++			board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
++
+ 		if (!board_type) {
+ 			of_node_put(root);
+ 			return 0;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+index d69879e1bd870c..d362c4337616b4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+@@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
+ 				break;
+ 		}
+ 
++		if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
++			return;
++
+ 		params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
+ 		params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
+ 		params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
+diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
+index 64c1233142451a..a3052684b341f2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/Makefile
++++ b/drivers/net/wireless/intel/iwlwifi/Makefile
+@@ -11,7 +11,7 @@ iwlwifi-objs		+= pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+ iwlwifi-objs		+= pcie/trans-gen2.o pcie/tx-gen2.o
+ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
+ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
++iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
+ iwlwifi-objs		+= iwl-dbg-tlv.o
+ iwlwifi-objs		+= iwl-trans.o
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+new file mode 100644
+index 00000000000000..ab7c0f8d54f425
+--- /dev/null
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+@@ -0,0 +1,167 @@
++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
++/*
++ * Copyright (C) 2024 Intel Corporation
++ */
++#include <linux/module.h>
++#include <linux/stringify.h>
++#include "iwl-config.h"
++#include "iwl-prph.h"
++#include "fw/api/txq.h"
++
++/* Highest firmware API version supported */
++#define IWL_DR_UCODE_API_MAX	96
++
++/* Lowest firmware API version supported */
++#define IWL_DR_UCODE_API_MIN	96
++
++/* NVM versions */
++#define IWL_DR_NVM_VERSION		0x0a1d
++
++/* Memory offsets and lengths */
++#define IWL_DR_DCCM_OFFSET		0x800000 /* LMAC1 */
++#define IWL_DR_DCCM_LEN			0x10000 /* LMAC1 */
++#define IWL_DR_DCCM2_OFFSET		0x880000
++#define IWL_DR_DCCM2_LEN		0x8000
++#define IWL_DR_SMEM_OFFSET		0x400000
++#define IWL_DR_SMEM_LEN			0xD0000
++
++#define IWL_DR_A_PE_A_FW_PRE		"iwlwifi-dr-a0-pe-a0"
++#define IWL_BR_A_PET_A_FW_PRE		"iwlwifi-br-a0-petc-a0"
++#define IWL_BR_A_PE_A_FW_PRE		"iwlwifi-br-a0-pe-a0"
++
++#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
++	IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
++#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
++	IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
++#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
++	IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
++
++static const struct iwl_base_params iwl_dr_base_params = {
++	.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
++	.num_of_queues = 512,
++	.max_tfd_queue_size = 65536,
++	.shadow_ram_support = true,
++	.led_compensation = 57,
++	.wd_timeout = IWL_LONG_WD_TIMEOUT,
++	.max_event_log_size = 512,
++	.shadow_reg_enable = true,
++	.pcie_l1_allowed = true,
++};
++
++#define IWL_DEVICE_DR_COMMON						\
++	.ucode_api_max = IWL_DR_UCODE_API_MAX,			\
++	.ucode_api_min = IWL_DR_UCODE_API_MIN,			\
++	.led_mode = IWL_LED_RF_STATE,					\
++	.nvm_hw_section_num = 10,					\
++	.non_shared_ant = ANT_B,					\
++	.dccm_offset = IWL_DR_DCCM_OFFSET,				\
++	.dccm_len = IWL_DR_DCCM_LEN,					\
++	.dccm2_offset = IWL_DR_DCCM2_OFFSET,				\
++	.dccm2_len = IWL_DR_DCCM2_LEN,				\
++	.smem_offset = IWL_DR_SMEM_OFFSET,				\
++	.smem_len = IWL_DR_SMEM_LEN,					\
++	.apmg_not_supported = true,					\
++	.trans.mq_rx_supported = true,					\
++	.vht_mu_mimo_supported = true,					\
++	.mac_addr_from_csr = 0x30,					\
++	.nvm_ver = IWL_DR_NVM_VERSION,				\
++	.trans.rf_id = true,						\
++	.trans.gen2 = true,						\
++	.nvm_type = IWL_NVM_EXT,					\
++	.dbgc_supported = true,						\
++	.min_umac_error_event_table = 0xD0000,				\
++	.d3_debug_data_base_addr = 0x401000,				\
++	.d3_debug_data_length = 60 * 1024,				\
++	.mon_smem_regs = {						\
++		.write_ptr = {						\
++			.addr = LDBG_M2S_BUF_WPTR,			\
++			.mask = LDBG_M2S_BUF_WPTR_VAL_MSK,		\
++		},							\
++		.cycle_cnt = {						\
++			.addr = LDBG_M2S_BUF_WRAP_CNT,			\
++			.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,		\
++		},							\
++	},								\
++	.trans.umac_prph_offset = 0x300000,				\
++	.trans.device_family = IWL_DEVICE_FAMILY_DR,			\
++	.trans.base_params = &iwl_dr_base_params,			\
++	.min_txq_size = 128,						\
++	.gp2_reg_addr = 0xd02c68,					\
++	.min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,			\
++	.mon_dram_regs = {						\
++		.write_ptr = {						\
++			.addr = DBGC_CUR_DBGBUF_STATUS,			\
++			.mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,	\
++		},							\
++		.cycle_cnt = {						\
++			.addr = DBGC_DBGBUF_WRAP_AROUND,		\
++			.mask = 0xffffffff,				\
++		},							\
++		.cur_frag = {						\
++			.addr = DBGC_CUR_DBGBUF_STATUS,			\
++			.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,		\
++		},							\
++	},								\
++	.mon_dbgi_regs = {						\
++		.write_ptr = {						\
++			.addr = DBGI_SRAM_FIFO_POINTERS,		\
++			.mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,	\
++		},							\
++	}
++
++#define IWL_DEVICE_DR							\
++	IWL_DEVICE_DR_COMMON,						\
++	.uhb_supported = true,						\
++	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
++	.num_rbds = IWL_NUM_RBDS_DR_EHT,				\
++	.ht_params = &iwl_22000_ht_params
++
++/*
++ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
++ * A-MPDU, with additional overhead to account for processing time.
++ */
++#define IWL_NUM_RBDS_DR_EHT		(512 * 16)
++
++const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
++	.device_family = IWL_DEVICE_FAMILY_DR,
++	.base_params = &iwl_dr_base_params,
++	.mq_rx_supported = true,
++	.rf_id = true,
++	.gen2 = true,
++	.integrated = true,
++	.umac_prph_offset = 0x300000,
++	.xtal_latency = 12000,
++	.low_latency_xtal = true,
++	.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
++};
++
++const char iwl_dr_name[] = "Intel(R) TBD Dr device";
++
++const struct iwl_cfg iwl_cfg_dr = {
++	.fw_name_mac = "dr",
++	IWL_DEVICE_DR,
++};
++
++const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
++	.device_family = IWL_DEVICE_FAMILY_DR,
++	.base_params = &iwl_dr_base_params,
++	.mq_rx_supported = true,
++	.rf_id = true,
++	.gen2 = true,
++	.integrated = true,
++	.umac_prph_offset = 0x300000,
++	.xtal_latency = 12000,
++	.low_latency_xtal = true,
++	.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
++};
++
++const char iwl_br_name[] = "Intel(R) TBD Br device";
++
++const struct iwl_cfg iwl_cfg_br = {
++	.fw_name_mac = "br",
++	IWL_DEVICE_DR,
++};
++
++MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 0bc32291815e1b..a26c5573d20916 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -108,7 +108,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 				    size_t expected_size)
+ {
+ 	union acpi_object *obj;
+-	int ret = 0;
++	int ret;
+ 
+ 	obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
+ 	if (IS_ERR(obj)) {
+@@ -123,8 +123,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 	} else if (obj->type == ACPI_TYPE_BUFFER) {
+ 		__le64 le_value = 0;
+ 
+-		if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+-			return -EINVAL;
++		if (WARN_ON_ONCE(expected_size > sizeof(le_value))) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
+ 		/* if the buffer size doesn't match the expected size */
+ 		if (obj->buffer.length != expected_size)
+@@ -145,8 +147,9 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+ 	}
+ 
+ 	IWL_DEBUG_DEV_RADIO(dev,
+-			    "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+-			    func, ret);
++			    "ACPI: DSM method evaluated: func=%d, value=%lld\n",
++			    func, *value);
++	ret = 0;
+ out:
+ 	ACPI_FREE(obj);
+ 	return ret;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 17721bb47e2511..89744dbedb4a5a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -38,6 +38,7 @@ enum iwl_device_family {
+ 	IWL_DEVICE_FAMILY_AX210,
+ 	IWL_DEVICE_FAMILY_BZ,
+ 	IWL_DEVICE_FAMILY_SC,
++	IWL_DEVICE_FAMILY_DR,
+ };
+ 
+ /*
+@@ -424,6 +425,8 @@ struct iwl_cfg {
+ #define IWL_CFG_MAC_TYPE_SC2		0x49
+ #define IWL_CFG_MAC_TYPE_SC2F		0x4A
+ #define IWL_CFG_MAC_TYPE_BZ_W		0x4B
++#define IWL_CFG_MAC_TYPE_BR		0x4C
++#define IWL_CFG_MAC_TYPE_DR		0x4D
+ 
+ #define IWL_CFG_RF_TYPE_TH		0x105
+ #define IWL_CFG_RF_TYPE_TH1		0x108
+@@ -434,6 +437,7 @@ struct iwl_cfg {
+ #define IWL_CFG_RF_TYPE_GF		0x10D
+ #define IWL_CFG_RF_TYPE_FM		0x112
+ #define IWL_CFG_RF_TYPE_WH		0x113
++#define IWL_CFG_RF_TYPE_PE		0x114
+ 
+ #define IWL_CFG_RF_ID_TH		0x1
+ #define IWL_CFG_RF_ID_TH1		0x1
+@@ -506,6 +510,8 @@ extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
+ extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
++extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
+ extern const char iwl9162_name[];
+ extern const char iwl9260_name[];
+ extern const char iwl9260_1_name[];
+@@ -551,6 +557,8 @@ extern const char iwl_mtp_name[];
+ extern const char iwl_sc_name[];
+ extern const char iwl_sc2_name[];
+ extern const char iwl_sc2f_name[];
++extern const char iwl_dr_name[];
++extern const char iwl_br_name[];
+ #if IS_ENABLED(CONFIG_IWLDVM)
+ extern const struct iwl_cfg iwl5300_agn_cfg;
+ extern const struct iwl_cfg iwl5100_agn_cfg;
+@@ -658,6 +666,8 @@ extern const struct iwl_cfg iwl_cfg_gl;
+ extern const struct iwl_cfg iwl_cfg_sc;
+ extern const struct iwl_cfg iwl_cfg_sc2;
+ extern const struct iwl_cfg iwl_cfg_sc2f;
++extern const struct iwl_cfg iwl_cfg_dr;
++extern const struct iwl_cfg iwl_cfg_br;
+ #endif /* CONFIG_IWLMVM */
+ 
+ #endif /* __IWL_CONFIG_H__ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 8fb2aa28224212..9dd0e0a51ce5cc 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -540,6 +540,9 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ 	{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
++
++/* Dr devices */
++	{IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
+ #endif /* CONFIG_IWLMVM */
+ 
+ 	{0}
+@@ -1182,6 +1185,19 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_cfg_sc2f, iwl_sc2f_name),
++/* Dr */
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_dr, iwl_dr_name),
++
++/* Br */
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++		      iwl_cfg_br, iwl_br_name),
+ #endif /* CONFIG_IWLMVM */
+ };
+ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index bfdbc15abaa9a7..928e0b07a9bf18 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -2,9 +2,14 @@
+ /* Copyright (C) 2020 MediaTek Inc. */
+ 
+ #include <linux/firmware.h>
++#include <linux/moduleparam.h>
+ #include "mt7915.h"
+ #include "eeprom.h"
+ 
++static bool enable_6ghz;
++module_param(enable_6ghz, bool, 0644);
++MODULE_PARM_DESC(enable_6ghz, "Enable 6 GHz instead of 5 GHz on hardware that supports both");
++
+ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
+ {
+ 	struct mt76_dev *mdev = &dev->mt76;
+@@ -170,8 +175,20 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+ 			phy->mt76->cap.has_6ghz = true;
+ 			return;
+ 		case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+-			phy->mt76->cap.has_5ghz = true;
+-			phy->mt76->cap.has_6ghz = true;
++			if (enable_6ghz) {
++				phy->mt76->cap.has_6ghz = true;
++				u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
++						 MT_EE_V2_BAND_SEL_6GHZ,
++						 MT_EE_WIFI_CONF0_BAND_SEL);
++			} else {
++				phy->mt76->cap.has_5ghz = true;
++				u8p_replace_bits(&eeprom[MT_EE_WIFI_CONF + band],
++						 MT_EE_V2_BAND_SEL_5GHZ,
++						 MT_EE_WIFI_CONF0_BAND_SEL);
++			}
++			/* force to buffer mode */
++			dev->flash_mode = true;
++
+ 			return;
+ 		default:
+ 			phy->mt76->cap.has_2ghz = true;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 77d82ccd73079d..bc983ab10b0c7a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -1239,14 +1239,14 @@ int mt7915_register_device(struct mt7915_dev *dev)
+ 	if (ret)
+ 		goto unreg_dev;
+ 
+-	ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+-
+ 	if (phy2) {
+ 		ret = mt7915_register_ext_phy(dev, phy2);
+ 		if (ret)
+ 			goto unreg_thermal;
+ 	}
+ 
++	ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
++
+ 	dev->recovery.hw_init_done = true;
+ 
+ 	ret = mt7915_init_debugfs(&dev->phy);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index 8aa4f0203208ab..e3459295ad884e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
+ 	/* Netgear, Inc. [A8000,AXE3000] */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++	/* TP-Link TXE50UH */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
++		.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	{ },
+ };
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+index c269942b3f4ab1..af8d17b9e012ca 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
+@@ -197,9 +197,9 @@ enum rtl8821a_h2c_cmd {
+ 
+ /* _MEDIA_STATUS_RPT_PARM_CMD1 */
+ #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value)	\
+-	u8p_replace_bits(__cmd + 1, __value, BIT(0))
++	u8p_replace_bits(__cmd, __value, BIT(0))
+ #define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value)	\
+-	u8p_replace_bits(__cmd + 1, __value, BIT(1))
++	u8p_replace_bits(__cmd, __value, BIT(1))
+ 
+ /* AP_OFFLOAD */
+ #define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value)	\
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index cd09fb6f7b8b33..65c7acea41aff4 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -510,12 +510,12 @@ struct rtw_5g_txpwr_idx {
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
+ 	struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
+-};
++} __packed;
+ 
+ struct rtw_txpwr_idx {
+ 	struct rtw_2g_txpwr_idx pwr_idx_2g;
+ 	struct rtw_5g_txpwr_idx pwr_idx_5g;
+-};
++} __packed;
+ 
+ struct rtw_channel_params {
+ 	u8 center_chan;
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+index a19b94d022ee64..1d232adbdd7e31 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+@@ -903,7 +903,7 @@ static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ 		rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ 		rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ 		rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
+-				 GENMASK(31, 20), 0x0);
++				 GENMASK(31, 30), 0x0);
+ 		rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
+ 		rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
+ 		break;
+@@ -1198,9 +1198,9 @@ static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
+ 	rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ 	rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ 	rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+-	rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
++	rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
+ 	rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+-	rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
++	rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ 	rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+ 
+ 	/* LOK setting */
+@@ -1372,7 +1372,7 @@ void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
+ 		return;
+ 
+ 	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
+-	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]);
++	tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_Y]);
+ 	rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
+ 	rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
+ 			 BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+index e93bfce994bf82..a99af527c92cfb 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+@@ -47,7 +47,7 @@ struct rtw8723xe_efuse {
+ 	u8 device_id[2];
+ 	u8 sub_vendor_id[2];
+ 	u8 sub_device_id[2];
+-};
++} __packed;
+ 
+ struct rtw8723xu_efuse {
+ 	u8 res4[48];                    /* 0xd0 */
+@@ -56,12 +56,12 @@ struct rtw8723xu_efuse {
+ 	u8 usb_option;                  /* 0x104 */
+ 	u8 res5[2];			/* 0x105 */
+ 	u8 mac_addr[ETH_ALEN];          /* 0x107 */
+-};
++} __packed;
+ 
+ struct rtw8723xs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+ 	u8 mac_addr[ETH_ALEN];		/* 0x11a */
+-};
++} __packed;
+ 
+ struct rtw8723x_efuse {
+ 	__le16 rtl_id;
+@@ -96,7 +96,7 @@ struct rtw8723x_efuse {
+ 		struct rtw8723xu_efuse u;
+ 		struct rtw8723xs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ #define RTW8723X_IQK_ADDA_REG_NUM	16
+ #define RTW8723X_IQK_MAC8_REG_NUM	3
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+index 7a33ebd612eda6..954e93c8020d81 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+@@ -27,7 +27,7 @@ struct rtw8821cu_efuse {
+ 	u8 res11[0xcf];
+ 	u8 package_type;		/* 0x1fb */
+ 	u8 res12[0x4];
+-};
++} __packed;
+ 
+ struct rtw8821ce_efuse {
+ 	u8 mac_addr[ETH_ALEN];		/* 0xd0 */
+@@ -47,7 +47,8 @@ struct rtw8821ce_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 res4[3];
+@@ -63,7 +64,7 @@ struct rtw8821ce_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8821cs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+@@ -101,7 +102,7 @@ struct rtw8821c_efuse {
+ 		struct rtw8821cu_efuse u;
+ 		struct rtw8821cs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ static inline void
+ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+index 0514958fb57c36..9fca9ba67c90f1 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+@@ -27,7 +27,7 @@ struct rtw8822bu_efuse {
+ 	u8 res11[0xcf];
+ 	u8 package_type;		/* 0x1fb */
+ 	u8 res12[0x4];
+-};
++} __packed;
+ 
+ struct rtw8822be_efuse {
+ 	u8 mac_addr[ETH_ALEN];		/* 0xd0 */
+@@ -47,7 +47,8 @@ struct rtw8822be_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 res4[3];
+@@ -63,7 +64,7 @@ struct rtw8822be_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8822bs_efuse {
+ 	u8 res4[0x4a];			/* 0xd0 */
+@@ -103,7 +104,7 @@ struct rtw8822b_efuse {
+ 		struct rtw8822bu_efuse u;
+ 		struct rtw8822bs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ static inline void
+ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+index e2b383d633cd23..fc62b67a15f216 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+@@ -14,7 +14,7 @@ struct rtw8822cu_efuse {
+ 	u8 res1[3];
+ 	u8 mac_addr[ETH_ALEN];		/* 0x157 */
+ 	u8 res2[0x3d];
+-};
++} __packed;
+ 
+ struct rtw8822cs_efuse {
+ 	u8 res0[0x4a];			/* 0x120 */
+@@ -39,7 +39,8 @@ struct rtw8822ce_efuse {
+ 	u8 ltr_en:1;
+ 	u8 res1:2;
+ 	u8 obff:2;
+-	u8 res2:3;
++	u8 res2_1:1;
++	u8 res2_2:2;
+ 	u8 obff_cap:2;
+ 	u8 res3:4;
+ 	u8 class_code[3];
+@@ -55,7 +56,7 @@ struct rtw8822ce_efuse {
+ 	u8 res6:1;
+ 	u8 port_t_power_on_value:5;
+ 	u8 res7;
+-};
++} __packed;
+ 
+ struct rtw8822c_efuse {
+ 	__le16 rtl_id;
+@@ -102,7 +103,7 @@ struct rtw8822c_efuse {
+ 		struct rtw8822cu_efuse u;
+ 		struct rtw8822cs_efuse s;
+ 	};
+-};
++} __packed;
+ 
+ enum rtw8822c_dpk_agc_phase {
+ 	RTW_DPK_GAIN_CHECK,
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 799230eb5f16ff..e024061bdbf707 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -1192,6 +1192,8 @@ static void rtw_sdio_indicate_tx_status(struct rtw_dev *rtwdev,
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_hw *hw = rtwdev->hw;
+ 
++	skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
++
+ 	/* enqueue to wait for tx report */
+ 	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ 		rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index f923bec03d410f..c3a027735d0f97 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -2516,7 +2516,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
+ 				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
+ }
+ 
+-static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
++static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
+ {
+ 	if (pwr_up)
+ 		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
+@@ -2825,6 +2825,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
+ {
+ 	const struct rtw89_pci_info *info = rtwdev->pci_info;
+ 
++	rtw89_pci_power_wake(rtwdev, false);
++
+ 	if (rtwdev->chip->chip_id == RTL8852A) {
+ 		/* ltr sw trigger */
+ 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
+@@ -2867,7 +2869,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
+ 		return ret;
+ 	}
+ 
+-	rtw89_pci_power_wake(rtwdev, true);
++	rtw89_pci_power_wake_ax(rtwdev, true);
+ 	rtw89_pci_autoload_hang(rtwdev);
+ 	rtw89_pci_l12_vmain(rtwdev);
+ 	rtw89_pci_gen2_force_ib(rtwdev);
+@@ -2912,6 +2914,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
+ 	return 0;
+ }
+ 
++static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
++{
++	rtw89_pci_power_wake_ax(rtwdev, false);
++
++	return 0;
++}
++
+ int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
+ {
+ 	u32 val;
+@@ -4325,7 +4334,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+ 					    B_AX_RDU_INT},
+ 
+ 	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
+-	.mac_pre_deinit = NULL,
++	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
+ 	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
+ 
+ 	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
+@@ -4343,6 +4352,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+ 	.l1ss_set = rtw89_pci_l1ss_set_ax,
+ 
+ 	.disable_eq = rtw89_pci_disable_eq_ax,
++	.power_wake = rtw89_pci_power_wake_ax,
+ };
+ EXPORT_SYMBOL(rtw89_pci_gen_ax);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index b68e2d82eea90d..d52db4ca1b9979 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -1290,6 +1290,7 @@ struct rtw89_pci_gen_def {
+ 	void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
+ 
+ 	void (*disable_eq)(struct rtw89_dev *rtwdev);
++	void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
+ };
+ 
+ #define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \
+@@ -1805,4 +1806,12 @@ static inline void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
+ 	gen_def->disable_eq(rtwdev);
+ }
+ 
++static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
++{
++	const struct rtw89_pci_info *info = rtwdev->pci_info;
++	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
++
++	gen_def->power_wake(rtwdev, pwr_up);
++}
++
+ #endif
+diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
+index 34154506f5d467..cd39eebe818615 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
++++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
+@@ -691,5 +691,6 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
+ 	.l1ss_set = rtw89_pci_l1ss_set_be,
+ 
+ 	.disable_eq = rtw89_pci_disable_eq_be,
++	.power_wake = _patch_pcie_power_wake_be,
+ };
+ EXPORT_SYMBOL(rtw89_pci_gen_be);
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
+index f24aca663cf008..b9171f6ccae07d 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.c
++++ b/drivers/net/wireless/realtek/rtw89/phy.c
+@@ -4058,7 +4058,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
+ 
+ 	if (!force && cfo->crystal_cap == crystal_cap)
+ 		return;
+-	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
+ 	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
+ 		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ 		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+@@ -4181,7 +4180,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
+ 					     s32 curr_cfo)
+ {
+ 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+-	s8 crystal_cap = cfo->crystal_cap;
++	int crystal_cap = cfo->crystal_cap;
+ 	s32 cfo_abs = abs(curr_cfo);
+ 	int sign;
+ 
+@@ -4202,15 +4201,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
+ 	}
+ 	sign = curr_cfo > 0 ? 1 : -1;
+ 	if (cfo_abs > CFO_TRK_STOP_TH_4)
+-		crystal_cap += 7 * sign;
++		crystal_cap += 3 * sign;
+ 	else if (cfo_abs > CFO_TRK_STOP_TH_3)
+-		crystal_cap += 5 * sign;
+-	else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ 		crystal_cap += 3 * sign;
++	else if (cfo_abs > CFO_TRK_STOP_TH_2)
++		crystal_cap += 1 * sign;
+ 	else if (cfo_abs > CFO_TRK_STOP_TH_1)
+ 		crystal_cap += 1 * sign;
+ 	else
+ 		return;
++
++	crystal_cap = clamp(crystal_cap, 0, 127);
+ 	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
+ 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ 		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
+diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
+index c683f4d7d29b0a..bc6912b3a2fbaf 100644
+--- a/drivers/net/wireless/realtek/rtw89/phy.h
++++ b/drivers/net/wireless/realtek/rtw89/phy.h
+@@ -57,7 +57,7 @@
+ #define CFO_TRK_STOP_TH_4 (30 << 2)
+ #define CFO_TRK_STOP_TH_3 (20 << 2)
+ #define CFO_TRK_STOP_TH_2 (10 << 2)
+-#define CFO_TRK_STOP_TH_1 (00 << 2)
++#define CFO_TRK_STOP_TH_1 (03 << 2)
+ #define CFO_TRK_STOP_TH (2 << 2)
+ #define CFO_SW_COMP_FINE_TUNE (2 << 2)
+ #define CFO_PERIOD_CNT 15
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+index 04517bd3325a2a..a066977af0be5c 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+@@ -6,6 +6,7 @@
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
+ #include <linux/module.h>
++#include <linux/suspend.h>
+ #include <net/rtnetlink.h>
+ 
+ #include "iosm_ipc_imem.h"
+@@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2");
+ /* WWAN GUID */
+ static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
+ 				       0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
++static bool pci_registered;
+ 
+ static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
+ {
+@@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = {
+ 	},
+ 	.id_table = iosm_ipc_ids,
+ };
+-module_pci_driver(iosm_ipc_driver);
+ 
+ int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ 		      size_t size, dma_addr_t *mapping, int direction)
+@@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
+ 	IPC_CB(skb)->mapping = 0;
+ 	dev_kfree_skb(skb);
+ }
++
++static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
++{
++	if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
++		if (pci_registered) {
++			pci_unregister_driver(&iosm_ipc_driver);
++			pci_registered = false;
++		}
++	} else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
++		if (!pci_registered) {
++			int ret;
++
++			ret = pci_register_driver(&iosm_ipc_driver);
++			if (ret) {
++				pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
++				       ret);
++			} else {
++				pci_registered = true;
++			}
++		}
++	}
++
++	return 0;
++}
++
++static struct notifier_block pm_notifier = {
++	.notifier_call = pm_notify,
++};
++
++static int __init iosm_ipc_driver_init(void)
++{
++	int ret;
++
++	ret = pci_register_driver(&iosm_ipc_driver);
++	if (ret)
++		return ret;
++
++	pci_registered = true;
++
++	register_pm_notifier(&pm_notifier);
++
++	return 0;
++}
++module_init(iosm_ipc_driver_init);
++
++static void __exit iosm_ipc_driver_exit(void)
++{
++	unregister_pm_notifier(&pm_notifier);
++
++	if (pci_registered)
++		pci_unregister_driver(&iosm_ipc_driver);
++}
++module_exit(iosm_ipc_driver_exit);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 12e7ae1f99e208..46e04b30f6425c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1695,7 +1695,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
+ 
+ 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
+ 			&result);
+-	if (status < 0)
++
++	/*
++	 * It's either a kernel error or the host observed a lost
++	 * connection. In either case it's not possible to communicate
++	 * with the controller, and thus we enter the error code path.
++	 */
++	if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
+ 		return status;
+ 
+ 	/*
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index b81af7919e94c4..682234da2fabe0 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2080,7 +2080,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+ 		nvme_fc_complete_rq(rq);
+ 
+ check_error:
+-	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
++	if (terminate_assoc &&
++	    nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
+ 		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
+ }
+ 
+@@ -2534,6 +2535,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+ static void
+ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ {
++	enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
++
+ 	/*
+ 	 * if an error (io timeout, etc) while (re)connecting, the remote
+ 	 * port requested terminating of the association (disconnect_ls)
+@@ -2541,7 +2544,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ 	 * the controller.  Abort any ios on the association and let the
+ 	 * create_association error path resolve things.
+ 	 */
+-	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
++	if (state == NVME_CTRL_CONNECTING) {
+ 		__nvme_fc_abort_outstanding_ios(ctrl, true);
+ 		set_bit(ASSOC_FAILED, &ctrl->flags);
+ 		dev_warn(ctrl->ctrl.device,
+@@ -2551,7 +2554,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ 	}
+ 
+ 	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
+-	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
++	if (state != NVME_CTRL_LIVE)
+ 		return;
+ 
+ 	dev_warn(ctrl->ctrl.device,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index e2634f437f33cb..99c2983dbe6c87 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3148,7 +3148,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 		 * because of high power consumption (> 2 Watt) in s2idle
+ 		 * sleep. Only some boards with Intel CPU are affected.
+ 		 */
+-		if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++		if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
++		    dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
++		    dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ 		    dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
+index b68a9e5f1ea395..3a41b9ab0f13c4 100644
+--- a/drivers/nvme/host/sysfs.c
++++ b/drivers/nvme/host/sysfs.c
+@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
+ 	return a->mode;
+ }
+ 
+-const struct attribute_group nvme_tls_attrs_group = {
++static const struct attribute_group nvme_tls_attrs_group = {
+ 	.attrs		= nvme_tls_attrs,
+ 	.is_visible	= nvme_tls_attrs_are_visible,
+ };
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index fa89b0549c36c1..7b70635373fd82 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -915,6 +915,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
+ 		goto out;
+ 	}
+ 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
++	kfree(id);
+ out:
+ 	nvmet_req_complete(req, status);
+ }
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index d6494dfc20a732..845540b57e68cb 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1790,6 +1790,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si
+ 		return -EINVAL;
+ 
+ 	if (cell->bit_offset || cell->nbits) {
++		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
++			return -EINVAL;
+ 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+ 		if (IS_ERR(buf))
+ 			return PTR_ERR(buf);
+diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
+index 1ba49449769874..ca6dd71d8a2e29 100644
+--- a/drivers/nvmem/imx-ocotp-ele.c
++++ b/drivers/nvmem/imx-ocotp-ele.c
+@@ -71,13 +71,15 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 	u32 *buf;
+ 	void *p;
+ 	int i;
++	u8 skipbytes;
+ 
+-	index = offset;
+-	num_bytes = round_up(bytes, 4);
+-	count = num_bytes >> 2;
++	if (offset + bytes > priv->data->size)
++		bytes = priv->data->size - offset;
+ 
+-	if (count > ((priv->data->size >> 2) - index))
+-		count = (priv->data->size >> 2) - index;
++	index = offset >> 2;
++	skipbytes = offset - (index << 2);
++	num_bytes = round_up(bytes + skipbytes, 4);
++	count = num_bytes >> 2;
+ 
+ 	p = kzalloc(num_bytes, GFP_KERNEL);
+ 	if (!p)
+@@ -100,7 +102,7 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 			*buf++ = readl_relaxed(reg + (i << 2));
+ 	}
+ 
+-	memcpy(val, (u8 *)p, bytes);
++	memcpy(val, ((u8 *)p) + skipbytes, bytes);
+ 
+ 	mutex_unlock(&priv->lock);
+ 
+@@ -109,6 +111,26 @@ static int imx_ocotp_reg_read(void *context, unsigned int offset, void *val, siz
+ 	return 0;
+ };
+ 
++static int imx_ocotp_cell_pp(void *context, const char *id, int index,
++			     unsigned int offset, void *data, size_t bytes)
++{
++	u8 *buf = data;
++	int i;
++
++	/* Deal with some post processing of nvmem cell data */
++	if (id && !strcmp(id, "mac-address"))
++		for (i = 0; i < bytes / 2; i++)
++			swap(buf[i], buf[bytes - i - 1]);
++
++	return 0;
++}
++
++static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
++					 struct nvmem_cell_info *cell)
++{
++	cell->read_post_process = imx_ocotp_cell_pp;
++}
++
+ static int imx_ele_ocotp_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -131,10 +153,12 @@ static int imx_ele_ocotp_probe(struct platform_device *pdev)
+ 	priv->config.owner = THIS_MODULE;
+ 	priv->config.size = priv->data->size;
+ 	priv->config.reg_read = priv->data->reg_read;
+-	priv->config.word_size = 4;
++	priv->config.word_size = 1;
+ 	priv->config.stride = 1;
+ 	priv->config.priv = priv;
+ 	priv->config.read_only = true;
++	priv->config.add_legacy_fixed_of_cells = true;
++	priv->config.fixup_dt_cell_info = imx_ocotp_fixup_dt_cell_info;
+ 	mutex_init(&priv->lock);
+ 
+ 	nvmem = devm_nvmem_register(dev, &priv->config);
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index 9aa8f42faa4c93..4f1cca6eab71e1 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -144,6 +144,7 @@ static int sdam_probe(struct platform_device *pdev)
+ 	sdam->sdam_config.owner = THIS_MODULE;
+ 	sdam->sdam_config.add_legacy_fixed_of_cells = true;
+ 	sdam->sdam_config.stride = 1;
++	sdam->sdam_config.size = sdam->size;
+ 	sdam->sdam_config.word_size = 1;
+ 	sdam->sdam_config.reg_read = sdam_read;
+ 	sdam->sdam_config.reg_write = sdam_write;
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 8770004d9b085f..5c0663066a7f38 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -200,17 +200,15 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
+ 
+ static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+ {
+-	u64 end = start;
+-
+ 	if (overflows_type(start, r->start))
+ 		return -EOVERFLOW;
+-	if (size && check_add_overflow(end, size - 1, &end))
+-		return -EOVERFLOW;
+-	if (overflows_type(end, r->end))
+-		return -EOVERFLOW;
+ 
+ 	r->start = start;
+-	r->end = end;
++
++	if (!size)
++		r->end = wrapping_sub(typeof(r->end), r->start, 1);
++	else if (size && check_add_overflow(r->start, size - 1, &r->end))
++		return -EOVERFLOW;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 6f5abea2462a32..dc57401c72eadb 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -894,10 +894,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
+ 	/* The path could begin with an alias */
+ 	if (*path != '/') {
+ 		int len;
+-		const char *p = separator;
++		const char *p = strchrnul(path, '/');
+ 
+-		if (!p)
+-			p = strchrnul(path, '/');
++		if (separator && separator < p)
++			p = separator;
+ 		len = p - path;
+ 
+ 		/* of_aliases must not be NULL */
+@@ -1546,7 +1546,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 		 * specifier into the out_args structure, keeping the
+ 		 * bits specified in <list>-map-pass-thru.
+ 		 */
+-		match_array = map - new_size;
+ 		for (i = 0; i < new_size; i++) {
+ 			__be32 val = *(map - new_size + i);
+ 
+@@ -1555,6 +1554,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
+ 				val |= cpu_to_be32(out_args->args[i]) & pass[i];
+ 			}
+ 
++			initial_match_array[i] = val;
+ 			out_args->args[i] = be32_to_cpu(val);
+ 		}
+ 		out_args->args_count = list_size = new_size;
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index b47559f11f079c..e2da88d7706ab3 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -263,6 +263,11 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
+ 			       uname);
+ 			continue;
+ 		}
++
++		if (len > t_len)
++			pr_warn("%s() ignores %d regions in node '%s'\n",
++				__func__, len / t_len - 1, uname);
++
+ 		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ 		size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ 
+@@ -410,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
+ 
+ 	prop = of_get_flat_dt_prop(node, "alignment", &len);
+ 	if (prop) {
+-		if (len != dt_root_addr_cells * sizeof(__be32)) {
++		if (len != dt_root_size_cells * sizeof(__be32)) {
+ 			pr_err("invalid alignment property in '%s' node.\n",
+ 				uname);
+ 			return -EINVAL;
+ 		}
+-		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
++		align = dt_mem_next_cell(dt_root_size_cells, &prop);
+ 	}
+ 
+ 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index f3ac7d46a855b2..44a617d54b15f1 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -222,19 +222,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ 		return -EINVAL;
+ 
+-	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+-
+-	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+-		type = PCIE_ATU_TYPE_MEM;
+-	else
+-		type = PCIE_ATU_TYPE_IO;
++	/*
++	 * Certain EPF drivers dynamically change the physical address of a BAR
++	 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
++	 * calling clear_bar() would clear the BAR's PCI address assigned by the
++	 * host).
++	 */
++	if (ep->epf_bar[bar]) {
++		/*
++		 * We can only dynamically change a BAR if the new BAR size and
++		 * BAR flags do not differ from the existing configuration.
++		 */
++		if (ep->epf_bar[bar]->barno != bar ||
++		    ep->epf_bar[bar]->size != size ||
++		    ep->epf_bar[bar]->flags != flags)
++			return -EINVAL;
+ 
+-	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+-	if (ret)
+-		return ret;
++		/*
++		 * When dynamically changing a BAR, skip writing the BAR reg, as
++		 * that would clear the BAR's PCI address assigned by the host.
++		 */
++		goto config_atu;
++	}
+ 
+-	if (ep->epf_bar[bar])
+-		return 0;
++	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ 
+ 	dw_pcie_dbi_ro_wr_en(pci);
+ 
+@@ -246,9 +257,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ 	}
+ 
+-	ep->epf_bar[bar] = epf_bar;
+ 	dw_pcie_dbi_ro_wr_dis(pci);
+ 
++config_atu:
++	if (!(flags & PCI_BASE_ADDRESS_SPACE))
++		type = PCIE_ATU_TYPE_MEM;
++	else
++		type = PCIE_ATU_TYPE_IO;
++
++	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
++	if (ret)
++		return ret;
++
++	ep->epf_bar[bar] = epf_bar;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
+index 8fa2797d4169a9..50bc2892a36c54 100644
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
+ 
+ 	mutex_lock(&epf_pf->lock);
+ 	clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
++	epf_vf->epf_pf = NULL;
+ 	list_del(&epf_vf->list);
+ 	mutex_unlock(&epf_pf->lock);
+ }
+diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
+index 1e604fbbda6573..07de59ca2ebfa6 100644
+--- a/drivers/pci/tph.c
++++ b/drivers/pci/tph.c
+@@ -360,7 +360,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
+ 		return err;
+ 	}
+ 
+-	set_ctrl_reg_req_en(pdev, pdev->tph_mode);
++	set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
+ 
+ 	pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
+ 		(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
+diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
+index 3c856d9a4e97ac..843f163e6c339e 100644
+--- a/drivers/perf/fsl_imx9_ddr_perf.c
++++ b/drivers/perf/fsl_imx9_ddr_perf.c
+@@ -63,8 +63,21 @@
+ 
+ static DEFINE_IDA(ddr_ida);
+ 
++/*
++ * V1 supports 1 read transaction, 1 write transaction and 1 read beats
++ * event, corresponding respectively to counters 2, 3 and 4.
++ */
++#define DDR_PERF_AXI_FILTER_V1		0x1
++
++/*
++ * V2 supports 1 read beats and 3 write beats events, corresponding
++ * respectively to counters 2-5.
++ */
++#define DDR_PERF_AXI_FILTER_V2		0x2
++
+ struct imx_ddr_devtype_data {
+ 	const char *identifier;		/* system PMU identifier for userspace */
++	unsigned int filter_ver;	/* AXI filter version */
+ };
+ 
+ struct ddr_pmu {
+@@ -83,24 +96,27 @@ struct ddr_pmu {
+ 
+ static const struct imx_ddr_devtype_data imx91_devtype_data = {
+ 	.identifier = "imx91",
++	.filter_ver = DDR_PERF_AXI_FILTER_V1
+ };
+ 
+ static const struct imx_ddr_devtype_data imx93_devtype_data = {
+ 	.identifier = "imx93",
++	.filter_ver = DDR_PERF_AXI_FILTER_V1
+ };
+ 
+ static const struct imx_ddr_devtype_data imx95_devtype_data = {
+ 	.identifier = "imx95",
++	.filter_ver = DDR_PERF_AXI_FILTER_V2
+ };
+ 
+-static inline bool is_imx93(struct ddr_pmu *pmu)
++static inline bool axi_filter_v1(struct ddr_pmu *pmu)
+ {
+-	return pmu->devtype_data == &imx93_devtype_data;
++	return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
+ }
+ 
+-static inline bool is_imx95(struct ddr_pmu *pmu)
++static inline bool axi_filter_v2(struct ddr_pmu *pmu)
+ {
+-	return pmu->devtype_data == &imx95_devtype_data;
++	return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
+ }
+ 
+ static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+@@ -155,7 +171,7 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
+ struct imx9_pmu_events_attr {
+ 	struct device_attribute attr;
+ 	u64 id;
+-	const void *devtype_data;
++	const struct imx_ddr_devtype_data *devtype_data;
+ };
+ 
+ static ssize_t ddr_pmu_event_show(struct device *dev,
+@@ -307,7 +323,8 @@ ddr_perf_events_attrs_is_visible(struct kobject *kobj,
+ 	if (!eattr->devtype_data)
+ 		return attr->mode;
+ 
+-	if (eattr->devtype_data != ddr_pmu->devtype_data)
++	if (eattr->devtype_data != ddr_pmu->devtype_data &&
++	    eattr->devtype_data->filter_ver != ddr_pmu->devtype_data->filter_ver)
+ 		return 0;
+ 
+ 	return attr->mode;
+@@ -624,11 +641,11 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
+ 	hwc->idx = counter;
+ 	hwc->state |= PERF_HES_STOPPED;
+ 
+-	if (is_imx93(pmu))
++	if (axi_filter_v1(pmu))
+ 		/* read trans, write trans, read beat */
+ 		imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+ 
+-	if (is_imx95(pmu))
++	if (axi_filter_v2(pmu))
+ 		/* write beat, read beat2, read beat1, read beat */
+ 		imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+ 
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 1df9cec2873fff..515c38de445a92 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -157,7 +157,7 @@
+ #define PWPR_REGWE_B		BIT(5)	/* OEN Register Write Enable, known only in RZ/V2H(P) */
+ 
+ #define PM_MASK			0x03
+-#define PFC_MASK		0x07
++#define PFC_MASK		0x0f
+ #define IEN_MASK		0x01
+ #define IOLH_MASK		0x03
+ #define SR_MASK			0x01
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
+index bbedd980ec6723..3ab5878b69d1be 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
+@@ -1272,7 +1272,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
+ 
+ 	ret = platform_get_irq_optional(pdev, 0);
+ 	if (ret < 0 && ret != -ENXIO)
+-		return ret;
++		goto err_put_banks;
+ 	if (ret > 0)
+ 		drvdata->irq = ret;
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index d09baa3d3d902e..ac4f8ab45bdc0c 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -95,6 +95,7 @@ enum acer_wmi_event_ids {
+ 	WMID_HOTKEY_EVENT = 0x1,
+ 	WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5,
+ 	WMID_GAMING_TURBO_KEY_EVENT = 0x7,
++	WMID_AC_EVENT = 0x8,
+ };
+ 
+ enum acer_wmi_predator_v4_sys_info_command {
+@@ -393,6 +394,20 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
+ 	.gpu_fans = 1,
+ };
+ 
++static struct quirk_entry quirk_acer_predator_ph16_72 = {
++	.turbo = 1,
++	.cpu_fans = 1,
++	.gpu_fans = 1,
++	.predator_v4 = 1,
++};
++
++static struct quirk_entry quirk_acer_predator_pt14_51 = {
++	.turbo = 1,
++	.cpu_fans = 1,
++	.gpu_fans = 1,
++	.predator_v4 = 1,
++};
++
+ static struct quirk_entry quirk_acer_predator_v4 = {
+ 	.predator_v4 = 1,
+ };
+@@ -564,6 +579,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_travelmate_2490,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Nitro AN515-58",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Nitro AN515-58"),
++		},
++		.driver_data = &quirk_acer_predator_v4,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "Acer Predator PH315-53",
+@@ -591,6 +615,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_predator_v4,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Predator PH16-72",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-72"),
++		},
++		.driver_data = &quirk_acer_predator_ph16_72,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "Acer Predator PH18-71",
+@@ -600,6 +633,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
+ 		},
+ 		.driver_data = &quirk_acer_predator_v4,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "Acer Predator PT14-51",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Predator PT14-51"),
++		},
++		.driver_data = &quirk_acer_predator_pt14_51,
++	},
+ 	{
+ 		.callback = set_force_caps,
+ 		.ident = "Acer Aspire Switch 10E SW3-016",
+@@ -2280,6 +2322,9 @@ static void acer_wmi_notify(union acpi_object *obj, void *context)
+ 		if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
+ 			acer_thermal_profile_change();
+ 		break;
++	case WMID_AC_EVENT:
++		/* We ignore AC events here */
++		break;
+ 	default:
+ 		pr_warn("Unknown function number - %d - %d\n",
+ 			return_value.function, return_value.key_num);
+diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
+index d881b2cfcdfcfb..09fff213b0911c 100644
+--- a/drivers/platform/x86/intel/int3472/discrete.c
++++ b/drivers/platform/x86/intel/int3472/discrete.c
+@@ -336,6 +336,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
+ 	struct int3472_cldb cldb;
+ 	int ret;
+ 
++	if (!adev)
++		return -ENODEV;
++
+ 	ret = skl_int3472_fill_cldb(adev, &cldb);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
+diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
+index 1e107fd49f828c..81ac4c69196309 100644
+--- a/drivers/platform/x86/intel/int3472/tps68470.c
++++ b/drivers/platform/x86/intel/int3472/tps68470.c
+@@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
+ 	int ret;
+ 	int i;
+ 
++	if (!adev)
++		return -ENODEV;
++
+ 	n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ 	if (n_consumers < 0)
+ 		return n_consumers;
+diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
+index bcf3a0c356ea1b..3bc7fd8e1e1972 100644
+--- a/drivers/platform/x86/serdev_helpers.h
++++ b/drivers/platform/x86/serdev_helpers.h
+@@ -35,7 +35,7 @@ get_serdev_controller(const char *serial_ctrl_hid,
+ 	ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ 	if (!ctrl_adev) {
+ 		pr_err("error could not get %s/%s serial-ctrl adev\n",
+-		       serial_ctrl_hid, serial_ctrl_uid);
++		       serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ 		return ERR_PTR(-ENODEV);
+ 	}
+ 
+@@ -43,7 +43,7 @@ get_serdev_controller(const char *serial_ctrl_hid,
+ 	ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ 	if (!ctrl_dev) {
+ 		pr_err("error could not get %s/%s serial-ctrl physical node\n",
+-		       serial_ctrl_hid, serial_ctrl_uid);
++		       serial_ctrl_hid, serial_ctrl_uid ?: "*");
+ 		ctrl_dev = ERR_PTR(-ENODEV);
+ 		goto put_ctrl_adev;
+ 	}
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 77a36e7bddd54e..1a1edd87122d3d 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -217,6 +217,11 @@ static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
+ 		return info->gettime64(info, ts);
+ }
+ 
++static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
++{
++	return -EOPNOTSUPP;
++}
++
+ static void ptp_aux_kworker(struct kthread_work *work)
+ {
+ 	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+@@ -294,6 +299,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ 			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
+ 	}
+ 
++	if (!ptp->info->enable)
++		ptp->info->enable = ptp_enable;
++
+ 	if (ptp->info->do_aux_work) {
+ 		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+ 		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
+index c1f2287b8e9748..12821b4bbf9756 100644
+--- a/drivers/pwm/pwm-microchip-core.c
++++ b/drivers/pwm/pwm-microchip-core.c
+@@ -327,7 +327,7 @@ static int mchp_core_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *
+ 		 * mchp_core_pwm_calc_period().
+ 		 * The period is locked and we cannot change this, so we abort.
+ 		 */
+-		if (hw_period_steps == MCHPCOREPWM_PERIOD_STEPS_MAX)
++		if (hw_period_steps > MCHPCOREPWM_PERIOD_STEPS_MAX)
+ 			return -EINVAL;
+ 
+ 		prescale = hw_prescale;
+diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
+index 9ae2e831456d57..3260dd512491e8 100644
+--- a/drivers/remoteproc/omap_remoteproc.c
++++ b/drivers/remoteproc/omap_remoteproc.c
+@@ -37,6 +37,10 @@
+ 
+ #include <linux/platform_data/dmtimer-omap.h>
+ 
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++#include <asm/dma-iommu.h>
++#endif
++
+ #include "omap_remoteproc.h"
+ #include "remoteproc_internal.h"
+ 
+@@ -1323,6 +1327,19 @@ static int omap_rproc_probe(struct platform_device *pdev)
+ 	/* All existing OMAP IPU and DSP processors have an MMU */
+ 	rproc->has_iommu = true;
+ 
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++	/*
++	 * Throw away the ARM DMA mapping that we'll never use, so it doesn't
++	 * interfere with the core rproc->domain and we get the right DMA ops.
++	 */
++	if (pdev->dev.archdata.mapping) {
++		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
++
++		arm_iommu_detach_device(&pdev->dev);
++		arm_iommu_release_mapping(mapping);
++	}
++#endif
++
+ 	ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
+index af1abb69d1e324..17f68c25dc292d 100644
+--- a/drivers/rtc/rtc-zynqmp.c
++++ b/drivers/rtc/rtc-zynqmp.c
+@@ -318,8 +318,8 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	/* Getting the rtc_clk info */
+-	xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk");
++	/* Getting the rtc info */
++	xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc");
+ 	if (IS_ERR(xrtcdev->rtc_clk)) {
+ 		if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER)
+ 			dev_warn(&pdev->dev, "Device clock not found.\n");
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 15066c112817a8..cb95b7b12051da 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -4098,6 +4098,8 @@ struct qla_hw_data {
+ 		uint32_t	npiv_supported		:1;
+ 		uint32_t	pci_channel_io_perm_failure	:1;
+ 		uint32_t	fce_enabled		:1;
++		uint32_t	user_enabled_fce	:1;
++		uint32_t	fce_dump_buf_alloced	:1;
+ 		uint32_t	fac_supported		:1;
+ 
+ 		uint32_t	chip_reset_done		:1;
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index a1545dad0c0ce2..08273520c77793 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -409,26 +409,31 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
+ 
+ 	mutex_lock(&ha->fce_mutex);
+ 
+-	seq_puts(s, "FCE Trace Buffer\n");
+-	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+-	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
+-	seq_puts(s, "FCE Enable Registers\n");
+-	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+-	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+-	    ha->fce_mb[5], ha->fce_mb[6]);
+-
+-	fce = (uint32_t *) ha->fce;
+-	fce_start = (unsigned long long) ha->fce_dma;
+-	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+-		if (cnt % 8 == 0)
+-			seq_printf(s, "\n%llx: ",
+-			    (unsigned long long)((cnt * 4) + fce_start));
+-		else
+-			seq_putc(s, ' ');
+-		seq_printf(s, "%08x", *fce++);
+-	}
++	if (ha->flags.user_enabled_fce) {
++		seq_puts(s, "FCE Trace Buffer\n");
++		seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
++		seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
++		seq_puts(s, "FCE Enable Registers\n");
++		seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
++			   ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
++			   ha->fce_mb[5], ha->fce_mb[6]);
++
++		fce = (uint32_t *)ha->fce;
++		fce_start = (unsigned long long)ha->fce_dma;
++		for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
++			if (cnt % 8 == 0)
++				seq_printf(s, "\n%llx: ",
++					   (unsigned long long)((cnt * 4) + fce_start));
++			else
++				seq_putc(s, ' ');
++			seq_printf(s, "%08x", *fce++);
++		}
+ 
+-	seq_puts(s, "\nEnd\n");
++		seq_puts(s, "\nEnd\n");
++	} else {
++		seq_puts(s, "FCE Trace is currently not enabled\n");
++		seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
++	}
+ 
+ 	mutex_unlock(&ha->fce_mutex);
+ 
+@@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+ 	struct qla_hw_data *ha = vha->hw;
+ 	int rval;
+ 
+-	if (ha->flags.fce_enabled)
++	if (ha->flags.fce_enabled || !ha->fce)
+ 		goto out;
+ 
+ 	mutex_lock(&ha->fce_mutex);
+@@ -488,11 +493,88 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+ 	return single_release(inode, file);
+ }
+ 
++static ssize_t
++qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
++		      size_t count, loff_t *pos)
++{
++	struct seq_file *s = file->private_data;
++	struct scsi_qla_host *vha = s->private;
++	struct qla_hw_data *ha = vha->hw;
++	char *buf;
++	int rc = 0;
++	unsigned long enable;
++
++	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
++	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
++		ql_dbg(ql_dbg_user, vha, 0xd034,
++		       "this adapter does not support FCE.");
++		return -EINVAL;
++	}
++
++	buf = memdup_user_nul(buffer, count);
++	if (IS_ERR(buf)) {
++		ql_dbg(ql_dbg_user, vha, 0xd037,
++		    "fail to copy user buffer.");
++		return PTR_ERR(buf);
++	}
++
++	enable = kstrtoul(buf, 0, 0);
++	rc = count;
++
++	mutex_lock(&ha->fce_mutex);
++
++	if (enable) {
++		if (ha->flags.user_enabled_fce) {
++			mutex_unlock(&ha->fce_mutex);
++			goto out_free;
++		}
++		ha->flags.user_enabled_fce = 1;
++		if (!ha->fce) {
++			rc = qla2x00_alloc_fce_trace(vha);
++			if (rc) {
++				ha->flags.user_enabled_fce = 0;
++				mutex_unlock(&ha->fce_mutex);
++				goto out_free;
++			}
++
++			/* adjust fw dump buffer to take this feature into account */
++			if (!ha->flags.fce_dump_buf_alloced)
++				qla2x00_alloc_fw_dump(vha);
++		}
++
++		if (!ha->flags.fce_enabled)
++			qla_enable_fce_trace(vha);
++
++		ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE.\n");
++	} else {
++		if (!ha->flags.user_enabled_fce) {
++			mutex_unlock(&ha->fce_mutex);
++			goto out_free;
++		}
++		ha->flags.user_enabled_fce = 0;
++		if (ha->flags.fce_enabled) {
++			qla2x00_disable_fce_trace(vha, NULL, NULL);
++			ha->flags.fce_enabled = 0;
++		}
++
++		qla2x00_free_fce_trace(ha);
++		/* no need to re-adjust fw dump buffer */
++
++		ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE.\n");
++	}
++
++	mutex_unlock(&ha->fce_mutex);
++out_free:
++	kfree(buf);
++	return rc;
++}
++
+ static const struct file_operations dfs_fce_ops = {
+ 	.open		= qla2x00_dfs_fce_open,
+ 	.read		= seq_read,
+ 	.llseek		= seq_lseek,
+ 	.release	= qla2x00_dfs_fce_release,
++	.write		= qla2x00_dfs_fce_write,
+ };
+ 
+ static int
+@@ -626,8 +708,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
+ 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ 		goto out;
+-	if (!ha->fce)
+-		goto out;
+ 
+ 	if (qla2x00_dfs_root)
+ 		goto create_dir;
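
The write handler above makes the FCE trace buffer a purely opt-in facility: nothing is allocated until userspace asks for it. A minimal userspace sketch for driving the new node, assuming it appears under the usual qla2xxx debugfs directory (the directory name depends on the SCSI host number):

/* Sketch only: toggles the qla2xxx FCE trace via debugfs. The path is an
 * assumption for illustration; substitute the real host directory. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *node = "/sys/kernel/debug/qla2xxx/qla2xxx_0/fce";
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" allocates and enables the trace; "0" disables and frees it. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Writing "0" walks the same mutex-protected path in reverse: tracing is disabled and the DMA buffer is freed.
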
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index cededfda9d0e31..e556f57c91af62 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -11,6 +11,9 @@
+ /*
+  * Global Function Prototypes in qla_init.c source file.
+  */
++int  qla2x00_alloc_fce_trace(scsi_qla_host_t *);
++void qla2x00_free_fce_trace(struct qla_hw_data *ha);
++void qla_enable_fce_trace(scsi_qla_host_t *);
+ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+ extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 31fc6a0eca3e80..79cdfec2bca356 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2681,7 +2681,7 @@ qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+ 	return rval;
+ }
+ 
+-static void qla_enable_fce_trace(scsi_qla_host_t *vha)
++void qla_enable_fce_trace(scsi_qla_host_t *vha)
+ {
+ 	int rval;
+ 	struct qla_hw_data *ha = vha->hw;
+@@ -3717,25 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
+ 	return rval;
+ }
+ 
+-static void
+-qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
++int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ {
+ 	dma_addr_t tc_dma;
+ 	void *tc;
+ 	struct qla_hw_data *ha = vha->hw;
+ 
+ 	if (!IS_FWI2_CAPABLE(ha))
+-		return;
++		return -EINVAL;
+ 
+ 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+-		return;
++		return -EINVAL;
+ 
+ 	if (ha->fce) {
+ 		ql_dbg(ql_dbg_init, vha, 0x00bd,
+ 		       "%s: FCE Mem is already allocated.\n",
+ 		       __func__);
+-		return;
++		return -EIO;
+ 	}
+ 
+ 	/* Allocate memory for Fibre Channel Event Buffer. */
+@@ -3745,7 +3744,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ 		ql_log(ql_log_warn, vha, 0x00be,
+ 		       "Unable to allocate (%d KB) for FCE.\n",
+ 		       FCE_SIZE / 1024);
+-		return;
++		return -ENOMEM;
+ 	}
+ 
+ 	ql_dbg(ql_dbg_init, vha, 0x00c0,
+@@ -3754,6 +3753,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
+ 	ha->fce_dma = tc_dma;
+ 	ha->fce = tc;
+ 	ha->fce_bufs = FCE_NUM_BUFFERS;
++	return 0;
++}
++
++void qla2x00_free_fce_trace(struct qla_hw_data *ha)
++{
++	if (!ha->fce)
++		return;
++	dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma);
++	ha->fce = NULL;
++	ha->fce_dma = 0;
+ }
+ 
+ static void
+@@ -3844,9 +3853,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ 		if (ha->tgt.atio_ring)
+ 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ 
+-		qla2x00_alloc_fce_trace(vha);
+-		if (ha->fce)
++		if (ha->fce) {
+ 			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
++			ha->flags.fce_dump_buf_alloced = 1;
++		}
+ 		qla2x00_alloc_eft_trace(vha);
+ 		if (ha->eft)
+ 			eft_size = EFT_SIZE;
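
qla2x00_free_fce_trace() above is deliberately idempotent: test the pointer, free, then clear it so a second call is a no-op. The same pattern reduced to standalone C (all names hypothetical):

#include <stdlib.h>

struct trace_buf {
	void *mem;
	size_t size;
};

/* Safe to call any number of times; the pointer is cleared after freeing. */
static void trace_buf_free(struct trace_buf *tb)
{
	if (!tb->mem)
		return;
	free(tb->mem);
	tb->mem = NULL;
	tb->size = 0;
}

int main(void)
{
	struct trace_buf tb = { malloc(64), 64 };

	trace_buf_free(&tb);
	trace_buf_free(&tb);	/* harmless no-op */
	return 0;
}
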
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 0cc6a0f77b0922..5f9b107ae267f1 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -868,13 +868,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
+ 				case 0x1a: /* start stop unit in progress */
+ 				case 0x1b: /* sanitize in progress */
+ 				case 0x1d: /* configuration in progress */
+-				case 0x24: /* depopulation in progress */
+-				case 0x25: /* depopulation restore in progress */
+ 					action = ACTION_DELAYED_RETRY;
+ 					break;
+ 				case 0x0a: /* ALUA state transition */
+ 					action = ACTION_DELAYED_REPREP;
+ 					break;
++				/*
++				 * Depopulation might take many hours,
++				 * so it is not worth retrying.
++				 */
++				case 0x24: /* depopulation in progress */
++				case 0x25: /* depopulation restore in progress */
++					fallthrough;
+ 				default:
+ 					action = ACTION_FAIL;
+ 					break;
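
The scsi_lib hunk moves ASC 0x24/0x25 out of the delayed-retry group by stacking their case labels onto the failing default arm. The label-grouping idiom in a compilable miniature (codes abbreviated, names hypothetical):

#include <stdio.h>

enum action { ACTION_FAIL, ACTION_DELAYED_RETRY };

static enum action classify(unsigned char asc)
{
	switch (asc) {
	case 0x04:	/* not ready, becoming ready */
	case 0x1d:	/* configuration in progress */
		return ACTION_DELAYED_RETRY;
	/* Depopulation can take hours; retrying is pointless. */
	case 0x24:
	case 0x25:
	default:
		return ACTION_FAIL;
	}
}

int main(void)
{
	printf("0x1d -> %d, 0x24 -> %d\n", classify(0x1d), classify(0x24));
	return 0;
}
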
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index e8ef27d7ef6181..ebbd50ec0cda51 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1030,6 +1030,11 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
+ 			retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
+ 		break;
+ 	}
++	if (STp->first_tur) {
++		/* Don't set pos_unknown right after device recognition */
++		STp->pos_unknown = 0;
++		STp->first_tur = 0;
++	}
+ 
+ 	if (SRpnt != NULL)
+ 		st_release_request(SRpnt);
+@@ -4328,6 +4333,7 @@ static int st_probe(struct device *dev)
+ 	blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
+ 	tpnt->long_timeout = ST_LONG_TIMEOUT;
+ 	tpnt->try_dio = try_direct_io;
++	tpnt->first_tur = 1;
+ 
+ 	for (i = 0; i < ST_NBR_MODES; i++) {
+ 		STm = &(tpnt->modes[i]);
+diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
+index 7a68eaba7e810c..1aaaf5369a40fc 100644
+--- a/drivers/scsi/st.h
++++ b/drivers/scsi/st.h
+@@ -170,6 +170,7 @@ struct scsi_tape {
+ 	unsigned char rew_at_close;  /* rewind necessary at close */
+ 	unsigned char inited;
+ 	unsigned char cleaning_req;  /* cleaning requested? */
++	unsigned char first_tur;     /* first TEST UNIT READY */
+ 	int block_size;
+ 	int min_block;
+ 	int max_block;
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index b3c588b102d900..b8186feccdf5aa 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1800,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 
+ 	length = scsi_bufflen(scmnd);
+ 	payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
++	payload->range.len = 0;
+ 	payload_sz = 0;
+ 
+ 	if (scsi_sg_count(scmnd)) {
+diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
+index 2a1adcb87d4e4b..f54c966138b5b8 100644
+--- a/drivers/soc/mediatek/mtk-devapc.c
++++ b/drivers/soc/mediatek/mtk-devapc.c
+@@ -273,23 +273,31 @@ static int mtk_devapc_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 
+ 	devapc_irq = irq_of_parse_and_map(node, 0);
+-	if (!devapc_irq)
+-		return -EINVAL;
++	if (!devapc_irq) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
+-	if (IS_ERR(ctx->infra_clk))
+-		return -EINVAL;
++	if (IS_ERR(ctx->infra_clk)) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ 			       IRQF_TRIGGER_NONE, "devapc", ctx);
+ 	if (ret)
+-		return ret;
++		goto err;
+ 
+ 	platform_set_drvdata(pdev, ctx);
+ 
+ 	start_devapc(ctx);
+ 
+ 	return 0;
++
++err:
++	iounmap(ctx->infra_base);
++	return ret;
+ }
+ 
+ static void mtk_devapc_remove(struct platform_device *pdev)
+@@ -297,6 +305,7 @@ static void mtk_devapc_remove(struct platform_device *pdev)
+ 	struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+ 
+ 	stop_devapc(ctx);
++	iounmap(ctx->infra_base);
+ }
+ 
+ static struct platform_driver mtk_devapc_driver = {
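
The mtk-devapc fix converts the early returns into goto err so the ioremap'ed region is always unmapped on a failed probe. The classic single-unwind-point shape, as a self-contained sketch with malloc/free standing in for ioremap/iounmap:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_irq, void **out)
{
	int ret = 0;
	void *base = malloc(4096);	/* stands in for the mapping */

	if (!base)
		return -1;
	if (fail_irq) {
		ret = -1;
		goto err;
	}
	*out = base;	/* success: caller owns the mapping */
	return 0;
err:
	free(base);	/* single unwind point, mirrors iounmap() in the fix */
	return ret;
}

int main(void)
{
	void *m = NULL;

	printf("failing probe: %d\n", setup(1, &m));
	if (setup(0, &m) == 0)
		free(m);
	return 0;
}
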
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 32c3bc887cefb8..1560db00a01248 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -3004,6 +3004,7 @@ static const struct llcc_slice_config x1e80100_data[] = {
+ 		.fixed_size = true,
+ 		.bonus_ways = 0xfff,
+ 		.cache_mode = 0,
++		.activate_on_init = true,
+ 	}, {
+ 		.usecase_id = LLCC_CAMEXP0,
+ 		.slice_id = 4,
+diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
+index d9bfac6c54fb8a..cc5be8019b6a3f 100644
+--- a/drivers/soc/qcom/smem_state.c
++++ b/drivers/soc/qcom/smem_state.c
+@@ -112,7 +112,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ 
+ 	if (args.args_count != 1) {
+ 		dev_err(dev, "invalid #qcom,smem-state-cells\n");
+-		return ERR_PTR(-EINVAL);
++		state = ERR_PTR(-EINVAL);
++		goto put;
+ 	}
+ 
+ 	state = of_node_to_state(args.np);
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index 62fadfe44a09f8..d49c55979bb308 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -796,7 +796,7 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
+ 	if (!qs->attr.soc_id || !qs->attr.revision)
+ 		return -ENOMEM;
+ 
+-	if (offsetof(struct socinfo, serial_num) <= item_size) {
++	if (offsetofend(struct socinfo, serial_num) <= item_size) {
+ 		qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ 							"%u",
+ 							le32_to_cpu(info->serial_num));
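
The socinfo change swaps offsetof() for offsetofend(): the SMEM item must contain the end of serial_num, not merely its first byte, before the field may be read. A userspace demonstration of the difference (offsetofend is open-coded because it is a kernel helper; the struct is a stand-in):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct socinfo_like {
	uint32_t fmt;
	uint32_t id;
	uint32_t serial_num;
};

int main(void)
{
	/* Item ends 2 bytes into serial_num. */
	size_t item_size = offsetof(struct socinfo_like, serial_num) + 2;

	/* Wrong: passes even though only 2 of the field's 4 bytes exist. */
	printf("offsetof check:    %s\n",
	       offsetof(struct socinfo_like, serial_num) <= item_size ?
	       "reads field" : "skips field");
	/* Right: requires the whole field to fit inside the item. */
	printf("offsetofend check: %s\n",
	       offsetofend(struct socinfo_like, serial_num) <= item_size ?
	       "reads field" : "skips field");
	return 0;
}
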
+diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
+index d8c53cec7f37ad..dd5256e5aae1ae 100644
+--- a/drivers/soc/samsung/exynos-pmu.c
++++ b/drivers/soc/samsung/exynos-pmu.c
+@@ -126,7 +126,7 @@ static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
+ 		if (ret)
+ 			return ret;
+ 	}
+-	return ret;
++	return 0;
+ }
+ 
+ static bool tensor_is_atomic(unsigned int reg)
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 316bce577081f1..8887f6bd41c7d8 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -138,11 +138,15 @@
+ #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
+ #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC)
+ 
++#define ATMEL_QSPI_TIMEOUT		1000	/* ms */
++
+ struct atmel_qspi_caps {
+ 	bool has_qspick;
+ 	bool has_ricr;
+ };
+ 
++struct atmel_qspi_ops;
++
+ struct atmel_qspi {
+ 	void __iomem		*regs;
+ 	void __iomem		*mem;
+@@ -150,13 +154,22 @@ struct atmel_qspi {
+ 	struct clk		*qspick;
+ 	struct platform_device	*pdev;
+ 	const struct atmel_qspi_caps *caps;
++	const struct atmel_qspi_ops *ops;
+ 	resource_size_t		mmap_size;
+ 	u32			pending;
++	u32			irq_mask;
+ 	u32			mr;
+ 	u32			scr;
+ 	struct completion	cmd_completion;
+ };
+ 
++struct atmel_qspi_ops {
++	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
++		       u32 *offset);
++	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
++			u32 offset);
++};
++
+ struct atmel_qspi_mode {
+ 	u8 cmd_buswidth;
+ 	u8 addr_buswidth;
+@@ -404,10 +417,67 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ 	return 0;
+ }
+ 
++static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
++{
++	int err = 0;
++	u32 sr;
++
++	/* Poll INSTRuction End status */
++	sr = atmel_qspi_read(aq, QSPI_SR);
++	if ((sr & irq_mask) == irq_mask)
++		return 0;
++
++	/* Wait for INSTRuction End interrupt */
++	reinit_completion(&aq->cmd_completion);
++	aq->pending = sr & irq_mask;
++	aq->irq_mask = irq_mask;
++	atmel_qspi_write(irq_mask, aq, QSPI_IER);
++	if (!wait_for_completion_timeout(&aq->cmd_completion,
++					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
++		err = -ETIMEDOUT;
++	atmel_qspi_write(irq_mask, aq, QSPI_IDR);
++
++	return err;
++}
++
++static int atmel_qspi_transfer(struct spi_mem *mem,
++			       const struct spi_mem_op *op, u32 offset)
++{
++	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
++
++	/* Skip to the final steps if there is no data */
++	if (!op->data.nbytes)
++		return atmel_qspi_wait_for_completion(aq,
++						      QSPI_SR_CMD_COMPLETED);
++
++	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
++	(void)atmel_qspi_read(aq, QSPI_IFR);
++
++	/* Send/Receive data */
++	if (op->data.dir == SPI_MEM_DATA_IN) {
++		memcpy_fromio(op->data.buf.in, aq->mem + offset,
++			      op->data.nbytes);
++
++		/* Synchronize AHB and APB accesses again */
++		rmb();
++	} else {
++		memcpy_toio(aq->mem + offset, op->data.buf.out,
++			    op->data.nbytes);
++
++		/* Synchronize AHB and APB accesses again */
++		wmb();
++	}
++
++	/* Release the chip-select */
++	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
++
++	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
++}
++
+ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ {
+ 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+-	u32 sr, offset;
++	u32 offset;
+ 	int err;
+ 
+ 	/*
+@@ -416,46 +486,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+ 	 * when the flash memories overrun the controller's memory space.
+ 	 */
+ 	if (op->addr.val + op->data.nbytes > aq->mmap_size)
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
++
++	if (op->addr.nbytes > 4)
++		return -EOPNOTSUPP;
+ 
+ 	err = pm_runtime_resume_and_get(&aq->pdev->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = atmel_qspi_set_cfg(aq, op, &offset);
++	err = aq->ops->set_cfg(aq, op, &offset);
+ 	if (err)
+ 		goto pm_runtime_put;
+ 
+-	/* Skip to the final steps if there is no data */
+-	if (op->data.nbytes) {
+-		/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+-		(void)atmel_qspi_read(aq, QSPI_IFR);
+-
+-		/* Send/Receive data */
+-		if (op->data.dir == SPI_MEM_DATA_IN)
+-			memcpy_fromio(op->data.buf.in, aq->mem + offset,
+-				      op->data.nbytes);
+-		else
+-			memcpy_toio(aq->mem + offset, op->data.buf.out,
+-				    op->data.nbytes);
+-
+-		/* Release the chip-select */
+-		atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+-	}
+-
+-	/* Poll INSTRuction End status */
+-	sr = atmel_qspi_read(aq, QSPI_SR);
+-	if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+-		goto pm_runtime_put;
+-
+-	/* Wait for INSTRuction End interrupt */
+-	reinit_completion(&aq->cmd_completion);
+-	aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+-	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
+-	if (!wait_for_completion_timeout(&aq->cmd_completion,
+-					 msecs_to_jiffies(1000)))
+-		err = -ETIMEDOUT;
+-	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
++	err = aq->ops->transfer(mem, op, offset);
+ 
+ pm_runtime_put:
+ 	pm_runtime_mark_last_busy(&aq->pdev->dev);
+@@ -599,12 +643,17 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+ 		return IRQ_NONE;
+ 
+ 	aq->pending |= pending;
+-	if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
++	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
+ 		complete(&aq->cmd_completion);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
++static const struct atmel_qspi_ops atmel_qspi_ops = {
++	.set_cfg = atmel_qspi_set_cfg,
++	.transfer = atmel_qspi_transfer,
++};
++
+ static int atmel_qspi_probe(struct platform_device *pdev)
+ {
+ 	struct spi_controller *ctrl;
+@@ -629,6 +678,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
+ 
+ 	init_completion(&aq->cmd_completion);
+ 	aq->pdev = pdev;
++	aq->ops = &atmel_qspi_ops;
+ 
+ 	/* Map the registers */
+ 	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
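
The atmel-quadspi refactor routes exec_op() through an atmel_qspi_ops table so future controller revisions can supply their own set_cfg/transfer pair. The shape of that indirection, reduced to standalone C (all names hypothetical):

#include <stdio.h>

struct ctrl;

struct ctrl_ops {
	int (*set_cfg)(struct ctrl *c, unsigned int op);
	int (*transfer)(struct ctrl *c, unsigned int op);
};

struct ctrl {
	const struct ctrl_ops *ops;
	const char *name;
};

static int v1_set_cfg(struct ctrl *c, unsigned int op)
{
	printf("%s: cfg op %u\n", c->name, op);
	return 0;
}

static int v1_transfer(struct ctrl *c, unsigned int op)
{
	printf("%s: xfer op %u\n", c->name, op);
	return 0;
}

static const struct ctrl_ops v1_ops = {
	.set_cfg = v1_set_cfg,
	.transfer = v1_transfer,
};

/* exec_op() never cares which revision it is driving. */
static int exec_op(struct ctrl *c, unsigned int op)
{
	int err = c->ops->set_cfg(c, op);

	return err ? err : c->ops->transfer(c, op);
}

int main(void)
{
	struct ctrl c = { &v1_ops, "v1" };

	return exec_op(&c, 3);
}

exec_op() stays revision-agnostic; only the ops table bound at probe time differs.
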
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 924b803af44003..0050d6253c05d1 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -165,6 +165,7 @@ struct sci_port {
+ static struct sci_port sci_ports[SCI_NPORTS];
+ static unsigned long sci_ports_in_use;
+ static struct uart_driver sci_uart_driver;
++static bool sci_uart_earlycon;
+ 
+ static inline struct sci_port *
+ to_sci_port(struct uart_port *uart)
+@@ -3450,6 +3451,7 @@ static int sci_probe_single(struct platform_device *dev,
+ static int sci_probe(struct platform_device *dev)
+ {
+ 	struct plat_sci_port *p;
++	struct resource *res;
+ 	struct sci_port *sp;
+ 	unsigned int dev_id;
+ 	int ret;
+@@ -3479,6 +3481,26 @@ static int sci_probe(struct platform_device *dev)
+ 	}
+ 
+ 	sp = &sci_ports[dev_id];
++
++	/*
++	 * In case:
++	 * - the probed port alias is zero (i.e., the one used by earlycon), and
++	 * - the earlycon is still active (e.g., "earlycon keep_bootcon" in
++	 *   bootargs),
++	 *
++	 * defer the probe of this serial port. This is a debug scenario and
++	 * the user must be aware of it.
++	 *
++	 * Except when the probed port is the same as the earlycon port.
++	 */
++
++	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
++	if (!res)
++		return -ENODEV;
++
++	if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start)
++		return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n");
++
+ 	platform_set_drvdata(dev, sp);
+ 
+ 	ret = sci_probe_single(dev, dev_id, p, sp);
+@@ -3562,7 +3584,7 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
+ 			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
+ #endif
+ #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+-static struct plat_sci_port port_cfg __initdata;
++static struct plat_sci_port port_cfg;
+ 
+ static int __init early_console_setup(struct earlycon_device *device,
+ 				      int type)
+@@ -3575,6 +3597,7 @@ static int __init early_console_setup(struct earlycon_device *device,
+ 	port_cfg.type = type;
+ 	sci_ports[0].cfg = &port_cfg;
+ 	sci_ports[0].params = sci_probe_regmap(&port_cfg);
++	sci_uart_earlycon = true;
+ 	port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
+ 	sci_serial_out(&sci_ports[0].port, SCSCR,
+ 		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index beb151be4d3287..92ec51870d1daf 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -287,7 +287,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
+ 				continue;
+ 		}
+ 
+-		if (uart_handle_sysrq_char(port, data))
++		if (uart_prepare_sysrq_char(port, data))
+ 			continue;
+ 
+ 		if (is_rxbs_support) {
+@@ -495,7 +495,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+ 	    !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
+ 		cdns_uart_handle_rx(dev_id, isrstatus);
+ 
+-	uart_port_unlock(port);
++	uart_unlock_and_check_sysrq(port);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -1380,9 +1380,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
+ 	unsigned int imr, ctrl;
+ 	int locked = 1;
+ 
+-	if (port->sysrq)
+-		locked = 0;
+-	else if (oops_in_progress)
++	if (oops_in_progress)
+ 		locked = uart_port_trylock_irqsave(port, &flags);
+ 	else
+ 		uart_port_lock_irqsave(port, &flags);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 564341f1a74f3f..0bd6544e30a6b3 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -192,6 +192,20 @@ int set_selection_user(const struct tiocl_selection __user *sel,
+ 	if (copy_from_user(&v, sel, sizeof(*sel)))
+ 		return -EFAULT;
+ 
++	/*
++	 * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
++	 * use without CAP_SYS_ADMIN as they do not modify the selection.
++	 */
++	switch (v.sel_mode) {
++	case TIOCL_SELCLEAR:
++	case TIOCL_SELPOINTER:
++	case TIOCL_SELMOUSEREPORT:
++		break;
++	default:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++	}
++
+ 	return set_selection_kernel(&v, tty);
+ }
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 96842ce817af47..be5564ed8c018a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3345,8 +3345,6 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ 
+ 	switch (type) {
+ 	case TIOCL_SETSEL:
+-		if (!capable(CAP_SYS_ADMIN))
+-			return -EPERM;
+ 		return set_selection_user(param, tty);
+ 	case TIOCL_PASTESEL:
+ 		if (!capable(CAP_SYS_ADMIN))
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 9c26e8767515b2..d4a628169a51a3 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10292,16 +10292,6 @@ int ufshcd_system_thaw(struct device *dev)
+ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+ #endif /* CONFIG_PM_SLEEP  */
+ 
+-/**
+- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+- * @hba: pointer to Host Bus Adapter (HBA)
+- */
+-void ufshcd_dealloc_host(struct ufs_hba *hba)
+-{
+-	scsi_host_put(hba->host);
+-}
+-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+-
+ /**
+  * ufshcd_set_dma_mask - Set dma mask based on the controller
+  *			 addressing capability
+@@ -10320,12 +10310,26 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+ 	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+ }
+ 
++/**
++ * ufshcd_devres_release - devres cleanup handler, invoked during release of
++ *			   hba->dev
++ * @host: pointer to SCSI host
++ */
++static void ufshcd_devres_release(void *host)
++{
++	scsi_host_put(host);
++}
++
+ /**
+  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+  * @dev: pointer to device handle
+  * @hba_handle: driver private handle
+  *
+  * Return: 0 on success, non-zero value on failure.
++ *
++ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
++ * keeps track of its allocations using devres and deallocates everything on
++ * device removal automatically.
+  */
+ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ {
+@@ -10347,6 +10351,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ 		err = -ENOMEM;
+ 		goto out_error;
+ 	}
++
++	err = devm_add_action_or_reset(dev, ufshcd_devres_release,
++				       host);
++	if (err)
++		return dev_err_probe(dev, err,
++				     "failed to add ufshcd dealloc action\n");
++
+ 	host->nr_maps = HCTX_TYPE_POLL + 1;
+ 	hba = shost_priv(host);
+ 	hba->host = host;
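
With scsi_host_put() registered as a devres action, every ufshcd error path can simply return and the put still happens when the device goes away. A toy action list showing the same idea in plain C (names hypothetical; the kernel's devres does this per device, in reverse registration order):

#include <stdio.h>
#include <stdlib.h>

struct action {
	void (*release)(void *data);
	void *data;
	struct action *next;
};

static struct action *actions;

/* Analogue of devm_add_action_or_reset(): run the release immediately
 * if registration fails, so the caller never has to unwind by hand. */
static int add_action(void (*release)(void *), void *data)
{
	struct action *a = malloc(sizeof(*a));

	if (!a) {
		release(data);
		return -1;
	}
	a->release = release;
	a->data = data;
	a->next = actions;
	actions = a;
	return 0;
}

/* Analogue of device teardown: run actions in reverse registration order. */
static void release_all(void)
{
	while (actions) {
		struct action *a = actions;

		actions = a->next;
		a->release(a->data);
		free(a);
	}
}

static void put_host(void *host)
{
	printf("releasing %s\n", (const char *)host);
}

int main(void)
{
	add_action(put_host, "host");
	release_all();
	return 0;
}
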
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 91e94fe990b4a5..d305889f450b36 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -155,8 +155,9 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ {
+ 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ 	union ufs_crypto_cap_entry cap;
+-	bool config_enable =
+-		cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
++
++	if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE))
++		return qcom_ice_evict_key(host->ice, slot);
+ 
+ 	/* Only AES-256-XTS has been tested so far. */
+ 	cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
+@@ -164,14 +165,11 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
+ 	    cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
+ 		return -EOPNOTSUPP;
+ 
+-	if (config_enable)
+-		return qcom_ice_program_key(host->ice,
+-					    QCOM_ICE_CRYPTO_ALG_AES_XTS,
+-					    QCOM_ICE_CRYPTO_KEY_SIZE_256,
+-					    cfg->crypto_key,
+-					    cfg->data_unit_size, slot);
+-	else
+-		return qcom_ice_evict_key(host->ice, slot);
++	return qcom_ice_program_key(host->ice,
++				    QCOM_ICE_CRYPTO_ALG_AES_XTS,
++				    QCOM_ICE_CRYPTO_KEY_SIZE_256,
++				    cfg->crypto_key,
++				    cfg->data_unit_size, slot);
+ }
+ 
+ #else
+diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
+index ea39c5d5b8cf15..9cfcaad23cf920 100644
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
+ 	pm_runtime_forbid(&pdev->dev);
+ 	pm_runtime_get_noresume(&pdev->dev);
+ 	ufshcd_remove(hba);
+-	ufshcd_dealloc_host(hba);
+ }
+ 
+ /**
+@@ -605,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	err = ufshcd_init(hba, mmio_base, pdev->irq);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "Initialization failed\n");
+-		ufshcd_dealloc_host(hba);
+ 		return err;
+ 	}
+ 
+diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
+index 505572d4fa878c..ffe5d1d2b21588 100644
+--- a/drivers/ufs/host/ufshcd-pltfrm.c
++++ b/drivers/ufs/host/ufshcd-pltfrm.c
+@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	struct device *dev = &pdev->dev;
+ 
+ 	mmio_base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(mmio_base)) {
+-		err = PTR_ERR(mmio_base);
+-		goto out;
+-	}
++	if (IS_ERR(mmio_base))
++		return PTR_ERR(mmio_base);
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0) {
+-		err = irq;
+-		goto out;
+-	}
++	if (irq < 0)
++		return irq;
+ 
+ 	err = ufshcd_alloc_host(dev, &hba);
+ 	if (err) {
+ 		dev_err(dev, "Allocation failed\n");
+-		goto out;
++		return err;
+ 	}
+ 
+ 	hba->vops = vops;
+@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	if (err) {
+ 		dev_err(dev, "%s: clock parse failed %d\n",
+ 				__func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 	err = ufshcd_parse_regulator_info(hba);
+ 	if (err) {
+ 		dev_err(dev, "%s: regulator init failed %d\n",
+ 				__func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	ufshcd_init_lanes_per_dir(hba);
+@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 	err = ufshcd_parse_operating_points(hba);
+ 	if (err) {
+ 		dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	err = ufshcd_init(hba, mmio_base, irq);
+ 	if (err) {
+ 		dev_err_probe(dev, err, "Initialization failed with error %d\n",
+ 			      err);
+-		goto dealloc_host;
++		return err;
+ 	}
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
+ 	return 0;
+-
+-dealloc_host:
+-	ufshcd_dealloc_host(hba);
+-out:
+-	return err;
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+ 
+@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+ 	ufshcd_remove(hba);
+-	ufshcd_dealloc_host(hba);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ }
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 48dee166e5d89c..7b23631f47449b 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -245,7 +245,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
+ {
+ 	struct f_uas *fu = cmd->fu;
+ 	struct se_cmd *se_cmd = &cmd->se_cmd;
+-	struct usb_gadget *gadget = fuas_to_gadget(fu);
+ 	int ret;
+ 
+ 	init_completion(&cmd->write_complete);
+@@ -256,22 +255,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!gadget->sg_supported) {
+-		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
+-		if (!cmd->data_buf)
+-			return -ENOMEM;
+-
+-		fu->bot_req_out->buf = cmd->data_buf;
+-	} else {
+-		fu->bot_req_out->buf = NULL;
+-		fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
+-		fu->bot_req_out->sg = se_cmd->t_data_sg;
+-	}
+-
+-	fu->bot_req_out->complete = usbg_data_write_cmpl;
+-	fu->bot_req_out->length = se_cmd->data_length;
+-	fu->bot_req_out->context = cmd;
+-
+ 	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
+ 	if (ret)
+ 		goto cleanup;
+@@ -973,6 +956,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
+ 	return;
+ 
+ cleanup:
++	target_put_sess_cmd(se_cmd);
+ 	transport_generic_free_cmd(&cmd->se_cmd, 0);
+ }
+ 
+@@ -1065,7 +1049,7 @@ static void usbg_cmd_work(struct work_struct *work)
+ 
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+-			TCM_UNSUPPORTED_SCSI_OPCODE, 1);
++			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+ }
+ 
+ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+@@ -1193,7 +1177,7 @@ static void bot_cmd_work(struct work_struct *work)
+ 
+ out:
+ 	transport_send_check_condition_and_sense(se_cmd,
+-				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
++				TCM_UNSUPPORTED_SCSI_OPCODE, 0);
+ }
+ 
+ static int bot_submit_command(struct f_uas *fu,
+@@ -1969,43 +1953,39 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
+ 	bot_intf_desc.bInterfaceNumber = iface;
+ 	uasp_intf_desc.bInterfaceNumber = iface;
+ 	fu->iface = iface;
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
+-			&uasp_bi_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 
+ 	fu->ep_in = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
+-			&uasp_bo_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_out = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
+-			&uasp_status_in_ep_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_status = ep;
+ 
+-	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
+-			&uasp_cmd_comp_desc);
++	ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
+ 	if (!ep)
+ 		goto ep_fail;
+ 	fu->ep_cmd = ep;
+ 
+ 	/* Assume endpoint addresses are the same for both speeds */
+-	uasp_bi_desc.bEndpointAddress =	uasp_ss_bi_desc.bEndpointAddress;
+-	uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
++	uasp_bi_desc.bEndpointAddress =	uasp_fs_bi_desc.bEndpointAddress;
++	uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
+ 	uasp_status_desc.bEndpointAddress =
+-		uasp_ss_status_desc.bEndpointAddress;
+-	uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+-
+-	uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+-	uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+-	uasp_fs_status_desc.bEndpointAddress =
+-		uasp_ss_status_desc.bEndpointAddress;
+-	uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
++		uasp_fs_status_desc.bEndpointAddress;
++	uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
++
++	uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
++	uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
++	uasp_ss_status_desc.bEndpointAddress =
++		uasp_fs_status_desc.bEndpointAddress;
++	uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
+ 
+ 	ret = usb_assign_descriptors(f, uasp_fs_function_desc,
+ 			uasp_hs_function_desc, uasp_ss_function_desc,
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 3bf1043cd7957c..d63c2d266d0735 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -393,6 +393,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -477,6 +482,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
++	if (off >= reg->size)
++		return -EINVAL;
++
++	count = min_t(size_t, count, reg->size - off);
++
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 390808ce935d50..b5b5ca1a44f70b 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -478,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm,
+ 	 * 28 bits (256 MB) is way more than reasonable in this case.
+ 	 * If some top bits are set we have probable binary corruption.
+ 	*/
+-	if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
++	if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) {
+ 		pr_err("bad header\n");
+ 		ret = -ENOEXEC;
+ 		goto err;
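
The binfmt_flat check ORs all the header fields together before one shift test, so adding relocs to the OR extends the 256 MB sanity limit to it for free. In isolation:

#include <stdio.h>
#include <stdint.h>

/* Reject if any field has bits set at or above 2^28 (256 MB). */
static int header_sane(uint32_t text, uint32_t data, uint32_t bss,
		       uint32_t stack, uint32_t relocs)
{
	return ((text | data | bss | stack | relocs) >> 28) == 0;
}

int main(void)
{
	printf("%d\n", header_sane(0x1000, 0x2000, 0, 0x4000, 16));
	printf("%d\n", header_sane(0x1000, 0x2000, 0, 0x4000, 1u << 29));
	return 0;
}
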
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 185985a337b30f..5d2613b16cd24f 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1563,6 +1563,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
+ 
+ 		if (!p->skip_locking) {
+ 			btrfs_unlock_up_safe(p, parent_level + 1);
++			btrfs_maybe_reset_lockdep_class(root, tmp);
+ 			tmp_locked = true;
+ 			btrfs_tree_read_lock(tmp);
+ 			btrfs_release_path(p);
+@@ -1606,6 +1607,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
+ 
+ 	if (!p->skip_locking) {
+ 		ASSERT(ret == -EAGAIN);
++		btrfs_maybe_reset_lockdep_class(root, tmp);
+ 		tmp_locked = true;
+ 		btrfs_tree_read_lock(tmp);
+ 		btrfs_release_path(p);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 14e27473c5bcea..4d7c7a296d2d1f 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -224,7 +224,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ 	if (args->drop_cache)
+ 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
+ 
+-	if (args->start >= inode->disk_i_size && !args->replace_extent)
++	if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
+ 		modify_tree = 0;
+ 
+ 	update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 30eceaf829a7ea..4aca7475fd82c3 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -1229,6 +1229,18 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
+ 	 */
+ 	if (WARN_ON_ONCE(len >= ordered->num_bytes))
+ 		return ERR_PTR(-EINVAL);
++	/*
++	 * If our ordered extent had an error there's no point in continuing.
++	 * The error may have come from a transaction abort done either by this
++	 * task or some other concurrent task, and the transaction abort path
++	 * iterates over all existing ordered extents and sets the flag
++	 * BTRFS_ORDERED_IOERR on them.
++	 */
++	if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
++		const int fs_error = BTRFS_FS_ERROR(fs_info);
++
++		return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
++	}
+ 	/* We cannot split partially completed ordered extents. */
+ 	if (ordered->bytes_left) {
+ 		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 993b5e803699ec..5ab51781d0e4f6 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1915,8 +1915,11 @@ int btrfs_qgroup_cleanup_dropped_subvolume(struct btrfs_fs_info *fs_info, u64 su
+ 	/*
+ 	 * It's squota and the subvolume still has numbers needed for future
+ 	 * accounting, in this case we can not delete it.  Just skip it.
++	 *
++	 * Or the qgroup has already been removed by a qgroup rescan. In both
++	 * cases it is safe to ignore the error.
+ 	 */
+-	if (ret == -EBUSY)
++	if (ret == -EBUSY || ret == -ENOENT)
+ 		ret = 0;
+ 	return ret;
+ }
+diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
+index 9ffc79f250fbb2..10781c015ee8de 100644
+--- a/fs/btrfs/raid-stripe-tree.c
++++ b/fs/btrfs/raid-stripe-tree.c
+@@ -13,12 +13,13 @@
+ #include "volumes.h"
+ #include "print-tree.h"
+ 
+-static void btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
++static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
+ 					       struct btrfs_path *path,
+ 					       const struct btrfs_key *oldkey,
+ 					       u64 newlen, u64 frontpad)
+ {
+-	struct btrfs_stripe_extent *extent;
++	struct btrfs_root *stripe_root = trans->fs_info->stripe_root;
++	struct btrfs_stripe_extent *extent, *newitem;
+ 	struct extent_buffer *leaf;
+ 	int slot;
+ 	size_t item_size;
+@@ -27,23 +28,38 @@ static void btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans,
+ 		.type = BTRFS_RAID_STRIPE_KEY,
+ 		.offset = newlen,
+ 	};
++	int ret;
+ 
+ 	ASSERT(oldkey->type == BTRFS_RAID_STRIPE_KEY);
+ 
+ 	leaf = path->nodes[0];
+ 	slot = path->slots[0];
+ 	item_size = btrfs_item_size(leaf, slot);
++
++	newitem = kzalloc(item_size, GFP_NOFS);
++	if (!newitem)
++		return -ENOMEM;
++
+ 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+ 
+ 	for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) {
+ 		struct btrfs_raid_stride *stride = &extent->strides[i];
+ 		u64 phys;
+ 
+-		phys = btrfs_raid_stride_physical(leaf, stride);
+-		btrfs_set_raid_stride_physical(leaf, stride, phys + frontpad);
++		phys = btrfs_raid_stride_physical(leaf, stride) + frontpad;
++		btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys);
+ 	}
+ 
+-	btrfs_set_item_key_safe(trans, path, &newkey);
++	ret = btrfs_del_item(trans, stripe_root, path);
++	if (ret)
++		goto out;
++
++	btrfs_release_path(path);
++	ret = btrfs_insert_item(trans, stripe_root, &newkey, newitem, item_size);
++
++out:
++	kfree(newitem);
++	return ret;
+ }
+ 
+ int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index db8b42f674b7c8..ab2de2d1b2beed 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4405,8 +4405,18 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ 		WARN_ON(!first_cow && level == 0);
+ 
+ 		node = rc->backref_cache.path[level];
+-		BUG_ON(node->bytenr != buf->start &&
+-		       node->new_bytenr != buf->start);
++
++		/*
++		 * If node->bytenr != buf->start and node->new_bytenr !=
++		 * buf->start then we've got the wrong backref node for what we
++		 * expected to see here and the cache is incorrect.
++		 */
++		if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) {
++			btrfs_err(fs_info,
++"bytenr %llu was found but our backref cache was expecting %llu or %llu",
++				  buf->start, node->bytenr, node->new_bytenr);
++			return -EUCLEAN;
++		}
+ 
+ 		btrfs_backref_drop_node_buffer(node);
+ 		atomic_inc(&cow->refs);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index dc0b837efd5dfb..914a22cc950b5d 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -274,8 +274,10 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
+ 	cur_trans = fs_info->running_transaction;
+ 	if (cur_trans) {
+ 		if (TRANS_ABORTED(cur_trans)) {
++			const int abort_error = cur_trans->aborted;
++
+ 			spin_unlock(&fs_info->trans_lock);
+-			return cur_trans->aborted;
++			return abort_error;
+ 		}
+ 		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
+ 			spin_unlock(&fs_info->trans_lock);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 785fe489ef4b8e..ae37f0e24c996c 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -5690,18 +5690,18 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
+ 			 *
+ 			 * All the other cases                       --> mismatch
+ 			 */
++			bool path_matched = true;
+ 			char *first = strstr(_tpath, auth->match.path);
+-			if (first != _tpath) {
+-				if (free_tpath)
+-					kfree(_tpath);
+-				return 0;
++			if (first != _tpath ||
++			    (tlen > len && _tpath[len] != '/')) {
++				path_matched = false;
+ 			}
+ 
+-			if (tlen > len && _tpath[len] != '/') {
+-				if (free_tpath)
+-					kfree(_tpath);
++			if (free_tpath)
++				kfree(_tpath);
++
++			if (!path_matched)
+ 				return 0;
+-			}
+ 		}
+ 	}
+ 
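The ceph change computes a path_matched verdict first and frees the temporary path exactly once afterwards, rather than duplicating the kfree in each mismatch branch. A compilable sketch of that compute-then-free shape (names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns 1 when 'path' matches prefix 'want' on a '/' boundary. */
static int path_prefix_match(const char *path, const char *want)
{
	char *dup = strdup(path);	/* stand-in for the maybe-allocated _tpath */
	size_t len = strlen(want);
	int matched = 1;

	if (!dup)
		return 0;
	if (strncmp(dup, want, len) != 0 ||
	    (strlen(dup) > len && dup[len] != '/'))
		matched = 0;

	free(dup);	/* one free, no matter which way the test went */
	return matched;
}

int main(void)
{
	printf("%d %d %d\n",
	       path_prefix_match("a/b/c", "a/b"),
	       path_prefix_match("a/bc", "a/b"),
	       path_prefix_match("a/b", "a/b"));
	return 0;
}
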
+diff --git a/fs/exec.c b/fs/exec.c
+index 98cb7ba9983c7f..b1f6b47ad20e17 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1341,7 +1341,28 @@ int begin_new_exec(struct linux_binprm * bprm)
+ 		set_dumpable(current->mm, SUID_DUMP_USER);
+ 
+ 	perf_event_exec();
+-	__set_task_comm(me, kbasename(bprm->filename), true);
++
++	/*
++	 * If the original filename was empty, alloc_bprm() made up a path
++	 * that will probably not be useful to admins running ps or similar.
++	 * Let's fix it up to be something reasonable.
++	 */
++	if (bprm->comm_from_dentry) {
++		/*
++		 * Hold RCU lock to keep the name from being freed behind our back.
++		 * Use acquire semantics to make sure the terminating NUL from
++		 * __d_alloc() is seen.
++		 *
++		 * Note, we're deliberately sloppy here. We don't need to care about
++		 * detecting a concurrent rename and just want a terminated name.
++		 */
++		rcu_read_lock();
++		__set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name),
++				true);
++		rcu_read_unlock();
++	} else {
++		__set_task_comm(me, kbasename(bprm->filename), true);
++	}
+ 
+ 	/* An exec changes our domain. We are no longer part of the thread
+ 	   group */
+@@ -1517,11 +1538,13 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
+ 	if (fd == AT_FDCWD || filename->name[0] == '/') {
+ 		bprm->filename = filename->name;
+ 	} else {
+-		if (filename->name[0] == '\0')
++		if (filename->name[0] == '\0') {
+ 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
+-		else
++			bprm->comm_from_dentry = 1;
++		} else {
+ 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
+ 						  fd, filename->name);
++		}
+ 		if (!bprm->fdpath)
+ 			goto out_free;
+ 
+diff --git a/fs/namespace.c b/fs/namespace.c
+index eac057e56948d1..cb08db40fc039f 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -5037,26 +5037,29 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
+ {
+ 	struct vfsmount *mnt = s->mnt;
+ 	struct super_block *sb = mnt->mnt_sb;
++	size_t start = seq->count;
+ 	int err;
+ 
+-	if (sb->s_op->show_options) {
+-		size_t start = seq->count;
++	err = security_sb_show_options(seq, sb);
++	if (err)
++		return err;
+ 
++	if (sb->s_op->show_options) {
+ 		err = sb->s_op->show_options(seq, mnt->mnt_root);
+ 		if (err)
+ 			return err;
++	}
+ 
+-		if (unlikely(seq_has_overflowed(seq)))
+-			return -EAGAIN;
++	if (unlikely(seq_has_overflowed(seq)))
++		return -EAGAIN;
+ 
+-		if (seq->count == start)
+-			return 0;
++	if (seq->count == start)
++		return 0;
+ 
+-		/* skip leading comma */
+-		memmove(seq->buf + start, seq->buf + start + 1,
+-			seq->count - start - 1);
+-		seq->count--;
+-	}
++	/* skip leading comma */
++	memmove(seq->buf + start, seq->buf + start + 1,
++		seq->count - start - 1);
++	seq->count--;
+ 
+ 	return 0;
+ }
+@@ -5137,39 +5140,45 @@ static int statmount_string(struct kstatmount *s, u64 flag)
+ 	size_t kbufsize;
+ 	struct seq_file *seq = &s->seq;
+ 	struct statmount *sm = &s->sm;
+-	u32 start = seq->count;
++	u32 start, *offp;
++
++	/* Reserve an empty string at the beginning for any unset offsets */
++	if (!seq->count)
++		seq_putc(seq, 0);
++
++	start = seq->count;
+ 
+ 	switch (flag) {
+ 	case STATMOUNT_FS_TYPE:
+-		sm->fs_type = start;
++		offp = &sm->fs_type;
+ 		ret = statmount_fs_type(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_ROOT:
+-		sm->mnt_root = start;
++		offp = &sm->mnt_root;
+ 		ret = statmount_mnt_root(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_POINT:
+-		sm->mnt_point = start;
++		offp = &sm->mnt_point;
+ 		ret = statmount_mnt_point(s, seq);
+ 		break;
+ 	case STATMOUNT_MNT_OPTS:
+-		sm->mnt_opts = start;
++		offp = &sm->mnt_opts;
+ 		ret = statmount_mnt_opts(s, seq);
+ 		break;
+ 	case STATMOUNT_OPT_ARRAY:
+-		sm->opt_array = start;
++		offp = &sm->opt_array;
+ 		ret = statmount_opt_array(s, seq);
+ 		break;
+ 	case STATMOUNT_OPT_SEC_ARRAY:
+-		sm->opt_sec_array = start;
++		offp = &sm->opt_sec_array;
+ 		ret = statmount_opt_sec_array(s, seq);
+ 		break;
+ 	case STATMOUNT_FS_SUBTYPE:
+-		sm->fs_subtype = start;
++		offp = &sm->fs_subtype;
+ 		statmount_fs_subtype(s, seq);
+ 		break;
+ 	case STATMOUNT_SB_SOURCE:
+-		sm->sb_source = start;
++		offp = &sm->sb_source;
+ 		ret = statmount_sb_source(s, seq);
+ 		break;
+ 	default:
+@@ -5197,6 +5206,7 @@ static int statmount_string(struct kstatmount *s, u64 flag)
+ 
+ 	seq->buf[seq->count++] = '\0';
+ 	sm->mask |= flag;
++	*offp = start;
+ 	return 0;
+ }
+ 
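statmount_string() now stores the string offset through offp only after the helper succeeded, so a failed branch can never publish a dangling offset. The commit-outputs-last pattern in miniature:

#include <stdio.h>

static int parse_even(int v, int *out)
{
	if (v & 1)
		return -1;	/* failure: *out is left untouched */
	*out = v / 2;		/* commit the result only on success */
	return 0;
}

int main(void)
{
	int out = -99;

	printf("rc=%d out=%d\n", parse_even(7, &out), out);
	printf("rc=%d out=%d\n", parse_even(8, &out), out);
	return 0;
}
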
+diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
+index e8624f5c7fcc66..8878b46589ff20 100644
+--- a/fs/netfs/read_collect.c
++++ b/fs/netfs/read_collect.c
+@@ -258,17 +258,18 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was
+ 	 */
+ 	if (!subreq->consumed &&
+ 	    !prev_donated &&
+-	    !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
+-	    subreq->start == prev->start + prev->len) {
++	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
+ 		prev = list_prev_entry(subreq, rreq_link);
+-		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
+-		subreq->start += subreq->len;
+-		subreq->len = 0;
+-		subreq->transferred = 0;
+-		trace_netfs_donate(rreq, subreq, prev, subreq->len,
+-				   netfs_trace_donate_to_prev);
+-		trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
+-		goto remove_subreq_locked;
++		if (subreq->start == prev->start + prev->len) {
++			WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
++			subreq->start += subreq->len;
++			subreq->len = 0;
++			subreq->transferred = 0;
++			trace_netfs_donate(rreq, subreq, prev, subreq->len,
++					   netfs_trace_donate_to_prev);
++			trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
++			goto remove_subreq_locked;
++		}
+ 	}
+ 
+ 	/* If we can't donate down the chain, donate up the chain instead. */
+diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
+index 54d5004fec1826..e72f5e67483422 100644
+--- a/fs/netfs/read_pgpriv2.c
++++ b/fs/netfs/read_pgpriv2.c
+@@ -181,16 +181,17 @@ void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+ 			break;
+ 
+ 		folioq_unmark3(folioq, slot);
+-		if (!folioq->marks3) {
++		while (!folioq->marks3) {
+ 			folioq = folioq->next;
+ 			if (!folioq)
+-				break;
++				goto end_of_queue;
+ 		}
+ 
+ 		slot = __ffs(folioq->marks3);
+ 		folio = folioq_folio(folioq, slot);
+ 	}
+ 
++end_of_queue:
+ 	netfs_issue_write(wreq, &wreq->io_streams[1]);
+ 	smp_wmb(); /* Write lists before ALL_QUEUED. */
+ 	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index 0eb20012792f07..d3f76101ad4b91 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -170,7 +170,8 @@ config ROOT_NFS
+ 
+ config NFS_FSCACHE
+ 	bool "Provide NFS client caching support"
+-	depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
++	depends on NFS_FS
++	select NETFS_SUPPORT
+ 	select FSCACHE
+ 	help
+ 	  Say Y here if you want NFS data to be cached locally on disc through
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index f78115c6c2c12a..a1cfe4cc60c4b1 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -847,6 +847,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 	struct nfs4_pnfs_ds *ds;
+ 	u32 ds_idx;
+ 
++	if (NFS_SERVER(pgio->pg_inode)->flags &
++			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
++		pgio->pg_maxretrans = io_maxretrans;
+ retry:
+ 	pnfs_generic_pg_check_layout(pgio, req);
+ 	/* Use full layout for now */
+@@ -860,6 +863,8 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 		if (!pgio->pg_lseg)
+ 			goto out_nolseg;
+ 	}
++	/* Reset wb_nio, since getting layout segment was successful */
++	req->wb_nio = 0;
+ 
+ 	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
+ 	if (!ds) {
+@@ -876,14 +881,24 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ 	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+ 
+ 	pgio->pg_mirror_idx = ds_idx;
+-
+-	if (NFS_SERVER(pgio->pg_inode)->flags &
+-			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+-		pgio->pg_maxretrans = io_maxretrans;
+ 	return;
+ out_nolseg:
+-	if (pgio->pg_error < 0)
+-		return;
++	if (pgio->pg_error < 0) {
++		if (pgio->pg_error != -EAGAIN)
++			return;
++		/* Retry getting layout segment if lower layer returned -EAGAIN */
++		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
++			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
++				pgio->pg_error = -ETIMEDOUT;
++			else
++				pgio->pg_error = -EIO;
++			return;
++		}
++		pgio->pg_error = 0;
++		/* Sleep for 1 second before retrying */
++		ssleep(1);
++		goto retry;
++	}
+ out_mds:
+ 	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
+ 			0, NFS4_MAX_UINT64, IOMODE_READ,
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 53fac037611c05..e8adf62d3484a1 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -5760,15 +5760,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	struct nfs4_stateowner *so = resp->cstate.replay_owner;
+ 	struct svc_rqst *rqstp = resp->rqstp;
+ 	const struct nfsd4_operation *opdesc = op->opdesc;
+-	int post_err_offset;
++	unsigned int op_status_offset;
+ 	nfsd4_enc encoder;
+-	__be32 *p;
+ 
+-	p = xdr_reserve_space(xdr, 8);
+-	if (!p)
++	if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT)
++		goto release;
++	op_status_offset = xdr->buf->len;
++	if (!xdr_reserve_space(xdr, XDR_UNIT))
+ 		goto release;
+-	*p++ = cpu_to_be32(op->opnum);
+-	post_err_offset = xdr->buf->len;
+ 
+ 	if (op->opnum == OP_ILLEGAL)
+ 		goto status;
+@@ -5809,20 +5808,21 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 		 * bug if we had to do this on a non-idempotent op:
+ 		 */
+ 		warn_on_nonidempotent_op(op);
+-		xdr_truncate_encode(xdr, post_err_offset);
++		xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
+ 	}
+ 	if (so) {
+-		int len = xdr->buf->len - post_err_offset;
++		int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
+ 
+ 		so->so_replay.rp_status = op->status;
+ 		so->so_replay.rp_buflen = len;
+-		read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
++		read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
+ 						so->so_replay.rp_buf, len);
+ 	}
+ status:
+ 	op->status = nfsd4_map_status(op->status,
+ 				      resp->cstate.minorversion);
+-	*p = op->status;
++	write_bytes_to_xdr_buf(xdr->buf, op_status_offset,
++			       &op->status, XDR_UNIT);
+ release:
+ 	if (opdesc && opdesc->op_release)
+ 		opdesc->op_release(&op->u);
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 23f3a75edd5016..81abb58dcbd880 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1188,7 +1188,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 			if (size) {
+ 				if (phys && blkphy << blkbits == phys + size) {
+ 					/* The current extent goes on */
+-					size += n << blkbits;
++					size += (u64)n << blkbits;
+ 				} else {
+ 					/* Terminate the current extent */
+ 					ret = fiemap_fill_next_extent(
+@@ -1201,14 +1201,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 					flags = FIEMAP_EXTENT_MERGED;
+ 					logical = blkoff << blkbits;
+ 					phys = blkphy << blkbits;
+-					size = n << blkbits;
++					size = (u64)n << blkbits;
+ 				}
+ 			} else {
+ 				/* Start a new extent */
+ 				flags = FIEMAP_EXTENT_MERGED;
+ 				logical = blkoff << blkbits;
+ 				phys = blkphy << blkbits;
+-				size = n << blkbits;
++				size = (u64)n << blkbits;
+ 			}
+ 			blkoff += n;
+ 		}
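
The nilfs2 fix casts n to u64 before shifting; without it, n << blkbits is evaluated in 32 bits and silently wraps for large extents. Demonstrated standalone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int n = 1u << 25;	/* 32M blocks */
	unsigned int blkbits = 12;	/* 4 KiB block size */

	uint64_t wrong = n << blkbits;			/* 32-bit shift wraps to 0 */
	uint64_t right = (uint64_t)n << blkbits;	/* 128 GiB, as intended */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
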
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index c79b4291777f63..1e87554f6f4104 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -2340,7 +2340,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+ 			mlog(ML_ERROR, "found superblock with incorrect block "
+ 			     "size bits: found %u, should be 9, 10, 11, or 12\n",
+ 			     blksz_bits);
+-		} else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
++		} else if ((1 << blksz_bits) != blksz) {
+ 			mlog(ML_ERROR, "found superblock with incorrect block "
+ 			     "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
+ 		} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
+diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
+index d4c5fdcfa1e464..f5cf2255dc0972 100644
+--- a/fs/ocfs2/symlink.c
++++ b/fs/ocfs2/symlink.c
+@@ -65,7 +65,7 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+-		return status;
++		goto out;
+ 	}
+ 
+ 	fe = (struct ocfs2_dinode *) bh->b_data;
+@@ -76,9 +76,10 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
+ 	memcpy(kaddr, link, len + 1);
+ 	kunmap_atomic(kaddr);
+ 	SetPageUptodate(page);
++out:
+ 	unlock_page(page);
+ 	brelse(bh);
+-	return 0;
++	return status;
+ }
+ 
+ const struct address_space_operations ocfs2_fast_symlink_aops = {
+diff --git a/fs/pidfs.c b/fs/pidfs.c
+index 618abb1fa1b84c..96d8aeeebb4351 100644
+--- a/fs/pidfs.c
++++ b/fs/pidfs.c
+@@ -190,6 +190,37 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long
+ 	return 0;
+ }
+ 
++static bool pidfs_ioctl_valid(unsigned int cmd)
++{
++	switch (cmd) {
++	case FS_IOC_GETVERSION:
++	case PIDFD_GET_CGROUP_NAMESPACE:
++	case PIDFD_GET_IPC_NAMESPACE:
++	case PIDFD_GET_MNT_NAMESPACE:
++	case PIDFD_GET_NET_NAMESPACE:
++	case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
++	case PIDFD_GET_TIME_NAMESPACE:
++	case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
++	case PIDFD_GET_UTS_NAMESPACE:
++	case PIDFD_GET_USER_NAMESPACE:
++	case PIDFD_GET_PID_NAMESPACE:
++		return true;
++	}
++
++	/* Extensible ioctls require some more careful checks. */
++	switch (_IOC_NR(cmd)) {
++	case _IOC_NR(PIDFD_GET_INFO):
++		/*
++		 * Try to prevent performing a pidfd ioctl when someone
++		 * erronously mistook the file descriptor for a pidfd.
++		 * This is not perfect but will catch most cases.
++		 */
++		return (_IOC_TYPE(cmd) == _IOC_TYPE(PIDFD_GET_INFO));
++	}
++
++	return false;
++}
++
+ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ 	struct task_struct *task __free(put_task) = NULL;
+@@ -198,6 +229,9 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	struct ns_common *ns_common = NULL;
+ 	struct pid_namespace *pid_ns;
+ 
++	if (!pidfs_ioctl_valid(cmd))
++		return -ENOIOCTLCMD;
++
+ 	task = get_pid_task(pid, PIDTYPE_PID);
+ 	if (!task)
+ 		return -ESRCH;
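
pidfs_ioctl_valid() allow-lists the fixed commands, and for the extensible PIDFD_GET_INFO it compares only the _IOC_NR()/_IOC_TYPE() fields so that larger future versions of the struct still match. How those fields decompose, in a short userspace check (the 0xFF type and nr 11 are illustrative, not the real pidfd values):

#include <stdio.h>
#include <linux/ioctl.h>

struct info_v1 { unsigned long mask; };
struct info_v2 { unsigned long mask; unsigned long extra; };

#define GET_INFO_V1 _IOWR(0xFF, 11, struct info_v1)
#define GET_INFO_V2 _IOWR(0xFF, 11, struct info_v2)

int main(void)
{
	/* The size field differs, but type and number match, so both
	 * versions pass a check written against _IOC_TYPE()/_IOC_NR(). */
	printf("type: %#x vs %#x\n",
	       (unsigned)_IOC_TYPE(GET_INFO_V1),
	       (unsigned)_IOC_TYPE(GET_INFO_V2));
	printf("nr:   %u vs %u\n",
	       (unsigned)_IOC_NR(GET_INFO_V1),
	       (unsigned)_IOC_NR(GET_INFO_V2));
	printf("size: %u vs %u\n",
	       (unsigned)_IOC_SIZE(GET_INFO_V1),
	       (unsigned)_IOC_SIZE(GET_INFO_V2));
	return 0;
}
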
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 55ed3510d2bbbe..d6a0369caa931e 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -500,7 +500,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 		 * a program is not able to use ptrace(2) in that case. It is
+ 		 * safe because the task has stopped executing permanently.
+ 		 */
+-		if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
++		if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) {
+ 			if (try_get_task_stack(task)) {
+ 				eip = KSTK_EIP(task);
+ 				esp = KSTK_ESP(task);
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 6e63abe461fd2e..cf53503e001e14 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -326,7 +326,7 @@ struct smb_version_operations {
+ 	int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
+ 	void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ 				 struct cifsInodeInfo *cinode, __u32 oplock,
+-				 unsigned int epoch, bool *purge_cache);
++				 __u16 epoch, bool *purge_cache);
+ 	/* process transaction2 response */
+ 	bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ 			     char *, int);
+@@ -521,12 +521,12 @@ struct smb_version_operations {
+ 	/* if we can do cache read operations */
+ 	bool (*is_read_op)(__u32);
+ 	/* set oplock level for the inode */
+-	void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
+-				 bool *);
++	void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch,
++				 bool *purge_cache);
+ 	/* create lease context buffer for CREATE request */
+ 	char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
+ 	/* parse lease context buffer and return oplock/epoch info */
+-	__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
++	__u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey);
+ 	ssize_t (*copychunk_range)(const unsigned int,
+ 			struct cifsFileInfo *src_file,
+ 			struct cifsFileInfo *target_file,
+@@ -1422,7 +1422,7 @@ struct cifs_fid {
+ 	__u8 create_guid[16];
+ 	__u32 access;
+ 	struct cifs_pending_open *pending_open;
+-	unsigned int epoch;
++	__u16 epoch;
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	__u64 mid;
+ #endif /* CIFS_DEBUG2 */
+@@ -1455,7 +1455,7 @@ struct cifsFileInfo {
+ 	bool oplock_break_cancelled:1;
+ 	bool status_file_deleted:1; /* file has been deleted */
+ 	bool offload:1; /* offload final part of _put to a wq */
+-	unsigned int oplock_epoch; /* epoch from the lease break */
++	__u16 oplock_epoch; /* epoch from the lease break */
+ 	__u32 oplock_level; /* oplock/lease level from the lease break */
+ 	int count;
+ 	spinlock_t file_info_lock; /* protects four flag/count fields above */
+@@ -1552,7 +1552,7 @@ struct cifsInodeInfo {
+ 	spinlock_t	open_file_lock;	/* protects openFileList */
+ 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
+ 	unsigned int oplock;		/* oplock/lease level we have */
+-	unsigned int epoch;		/* used to track lease state changes */
++	__u16 epoch;		/* used to track lease state changes */
+ #define CIFS_INODE_PENDING_OPLOCK_BREAK   (0) /* oplock break in progress */
+ #define CIFS_INODE_PENDING_WRITERS	  (1) /* Writes in progress */
+ #define CIFS_INODE_FLAG_UNUSED		  (2) /* Unused flag */
+diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
+index 864b194dbaa0a0..1822493dd0842e 100644
+--- a/fs/smb/client/dir.c
++++ b/fs/smb/client/dir.c
+@@ -627,7 +627,7 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
+ 		goto mknod_out;
+ 	}
+ 
+-	trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
++	trace_smb3_mknod_enter(xid, tcon->tid, tcon->ses->Suid, full_path);
+ 
+ 	rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
+ 					       full_path, mode,
+@@ -635,9 +635,9 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
+ 
+ mknod_out:
+ 	if (rc)
+-		trace_smb3_mknod_err(xid,  tcon->ses->Suid, tcon->tid, rc);
++		trace_smb3_mknod_err(xid,  tcon->tid, tcon->ses->Suid, rc);
+ 	else
+-		trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
++		trace_smb3_mknod_done(xid, tcon->tid, tcon->ses->Suid);
+ 
+ 	free_dentry_path(page);
+ 	free_xid(xid);
+diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
+index db3695eddcf9d5..c70f4961c4eb78 100644
+--- a/fs/smb/client/smb1ops.c
++++ b/fs/smb/client/smb1ops.c
+@@ -377,7 +377,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+ static void
+ cifs_downgrade_oplock(struct TCP_Server_Info *server,
+ 		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	cifs_set_oplock_level(cinode, oplock);
+ }
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index b935c1a62d10cf..7dfd3eb3847b33 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -298,8 +298,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_query_info_compound_enter(xid, ses->Suid,
+-							     tcon->tid, full_path);
++			trace_smb3_query_info_compound_enter(xid, tcon->tid,
++							     ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_POSIX_QUERY_INFO:
+ 			rqst[num_rqst].rq_iov = &vars->qi_iov;
+@@ -334,18 +334,18 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_posix_query_info_compound_enter(xid, ses->Suid,
+-								   tcon->tid, full_path);
++			trace_smb3_posix_query_info_compound_enter(xid, tcon->tid,
++								   ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_DELETE:
+-			trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_MKDIR:
+ 			/*
+ 			 * Directories are created through parameters in the
+ 			 * SMB2_open() call.
+ 			 */
+-			trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_RMDIR:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -363,7 +363,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			smb2_set_next_command(tcon, &rqst[num_rqst]);
+ 			smb2_set_related(&rqst[num_rqst++]);
+-			trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_EOF:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -398,7 +398,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_set_eof_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_INFO:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -429,8 +429,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_info_compound_enter(xid, ses->Suid,
+-							   tcon->tid, full_path);
++			trace_smb3_set_info_compound_enter(xid, tcon->tid,
++							   ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_RENAME:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -469,7 +469,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_rename_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_HARDLINK:
+ 			rqst[num_rqst].rq_iov = &vars->si_iov[0];
+@@ -496,7 +496,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			smb2_set_next_command(tcon, &rqst[num_rqst]);
+ 			smb2_set_related(&rqst[num_rqst++]);
+-			trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
++			trace_smb3_hardlink_enter(xid, tcon->tid, ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_SET_REPARSE:
+ 			rqst[num_rqst].rq_iov = vars->io_iov;
+@@ -523,8 +523,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_set_reparse_compound_enter(xid, ses->Suid,
+-							      tcon->tid, full_path);
++			trace_smb3_set_reparse_compound_enter(xid, tcon->tid,
++							      ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_GET_REPARSE:
+ 			rqst[num_rqst].rq_iov = vars->io_iov;
+@@ -549,8 +549,8 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				goto finished;
+ 			}
+ 			num_rqst++;
+-			trace_smb3_get_reparse_compound_enter(xid, ses->Suid,
+-							      tcon->tid, full_path);
++			trace_smb3_get_reparse_compound_enter(xid, tcon->tid,
++							      ses->Suid, full_path);
+ 			break;
+ 		case SMB2_OP_QUERY_WSL_EA:
+ 			rqst[num_rqst].rq_iov = &vars->ea_iov;
+@@ -663,11 +663,11 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 			}
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			if (rc)
+-				trace_smb3_query_info_compound_err(xid,  ses->Suid,
+-								   tcon->tid, rc);
++				trace_smb3_query_info_compound_err(xid,  tcon->tid,
++								   ses->Suid, rc);
+ 			else
+-				trace_smb3_query_info_compound_done(xid, ses->Suid,
+-								    tcon->tid);
++				trace_smb3_query_info_compound_done(xid, tcon->tid,
++								    ses->Suid);
+ 			break;
+ 		case SMB2_OP_POSIX_QUERY_INFO:
+ 			idata = in_iov[i].iov_base;
+@@ -690,15 +690,15 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			if (rc)
+-				trace_smb3_posix_query_info_compound_err(xid,  ses->Suid,
+-									 tcon->tid, rc);
++				trace_smb3_posix_query_info_compound_err(xid,  tcon->tid,
++									 ses->Suid, rc);
+ 			else
+-				trace_smb3_posix_query_info_compound_done(xid, ses->Suid,
+-									  tcon->tid);
++				trace_smb3_posix_query_info_compound_done(xid, tcon->tid,
++									  ses->Suid);
+ 			break;
+ 		case SMB2_OP_DELETE:
+ 			if (rc)
+-				trace_smb3_delete_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc);
+ 			else {
+ 				/*
+ 				 * If dentry (hence, inode) is NULL, lease break is going to
+@@ -706,59 +706,59 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				 */
+ 				if (inode)
+ 					cifs_mark_open_handles_for_deleted_file(inode, full_path);
+-				trace_smb3_delete_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_delete_done(xid, tcon->tid, ses->Suid);
+ 			}
+ 			break;
+ 		case SMB2_OP_MKDIR:
+ 			if (rc)
+-				trace_smb3_mkdir_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_mkdir_done(xid, tcon->tid, ses->Suid);
+ 			break;
+ 		case SMB2_OP_HARDLINK:
+ 			if (rc)
+-				trace_smb3_hardlink_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_hardlink_err(xid,  tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_hardlink_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_RENAME:
+ 			if (rc)
+-				trace_smb3_rename_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_rename_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_rename_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_rename_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_RMDIR:
+ 			if (rc)
+-				trace_smb3_rmdir_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_EOF:
+ 			if (rc)
+-				trace_smb3_set_eof_err(xid,  ses->Suid, tcon->tid, rc);
++				trace_smb3_set_eof_err(xid, tcon->tid, ses->Suid, rc);
+ 			else
+-				trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid);
++				trace_smb3_set_eof_done(xid, tcon->tid, ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_INFO:
+ 			if (rc)
+-				trace_smb3_set_info_compound_err(xid,  ses->Suid,
+-								 tcon->tid, rc);
++				trace_smb3_set_info_compound_err(xid,  tcon->tid,
++								 ses->Suid, rc);
+ 			else
+-				trace_smb3_set_info_compound_done(xid, ses->Suid,
+-								  tcon->tid);
++				trace_smb3_set_info_compound_done(xid, tcon->tid,
++								  ses->Suid);
+ 			SMB2_set_info_free(&rqst[num_rqst++]);
+ 			break;
+ 		case SMB2_OP_SET_REPARSE:
+ 			if (rc) {
+-				trace_smb3_set_reparse_compound_err(xid,  ses->Suid,
+-								    tcon->tid, rc);
++				trace_smb3_set_reparse_compound_err(xid, tcon->tid,
++								    ses->Suid, rc);
+ 			} else {
+-				trace_smb3_set_reparse_compound_done(xid, ses->Suid,
+-								     tcon->tid);
++				trace_smb3_set_reparse_compound_done(xid, tcon->tid,
++								     ses->Suid);
+ 			}
+ 			SMB2_ioctl_free(&rqst[num_rqst++]);
+ 			break;
+@@ -771,18 +771,18 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				rbuf = reparse_buf_ptr(iov);
+ 				if (IS_ERR(rbuf)) {
+ 					rc = PTR_ERR(rbuf);
+-					trace_smb3_set_reparse_compound_err(xid,  ses->Suid,
+-									    tcon->tid, rc);
++					trace_smb3_get_reparse_compound_err(xid, tcon->tid,
++									    ses->Suid, rc);
+ 				} else {
+ 					idata->reparse.tag = le32_to_cpu(rbuf->ReparseTag);
+-					trace_smb3_set_reparse_compound_done(xid, ses->Suid,
+-									     tcon->tid);
++					trace_smb3_get_reparse_compound_done(xid, tcon->tid,
++									     ses->Suid);
+ 				}
+ 				memset(iov, 0, sizeof(*iov));
+ 				resp_buftype[i + 1] = CIFS_NO_BUFFER;
+ 			} else {
+-				trace_smb3_set_reparse_compound_err(xid, ses->Suid,
+-								    tcon->tid, rc);
++				trace_smb3_get_reparse_compound_err(xid, tcon->tid,
++								    ses->Suid, rc);
+ 			}
+ 			SMB2_ioctl_free(&rqst[num_rqst++]);
+ 			break;
+@@ -799,11 +799,11 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 				}
+ 			}
+ 			if (!rc) {
+-				trace_smb3_query_wsl_ea_compound_done(xid, ses->Suid,
+-								      tcon->tid);
++				trace_smb3_query_wsl_ea_compound_done(xid, tcon->tid,
++								      ses->Suid);
+ 			} else {
+-				trace_smb3_query_wsl_ea_compound_err(xid, ses->Suid,
+-								     tcon->tid, rc);
++				trace_smb3_query_wsl_ea_compound_err(xid, tcon->tid,
++								     ses->Suid, rc);
+ 			}
+ 			SMB2_query_info_free(&rqst[num_rqst++]);
+ 			break;
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 9790ff2cc5b32d..a588f6b3f3b6a5 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3904,22 +3904,22 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+ static void
+ smb2_downgrade_oplock(struct TCP_Server_Info *server,
+ 		      struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+ }
+ 
+ static void
+ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache);
++		       __u16 epoch, bool *purge_cache);
+ 
+ static void
+ smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ 		       struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
++		       __u16 epoch, bool *purge_cache)
+ {
+ 	unsigned int old_state = cinode->oplock;
+-	unsigned int old_epoch = cinode->epoch;
++	__u16 old_epoch = cinode->epoch;
+ 	unsigned int new_state;
+ 
+ 	if (epoch > old_epoch) {
+@@ -3939,7 +3939,7 @@ smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ 
+ static void
+ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	oplock &= 0xFF;
+ 	cinode->lease_granted = false;
+@@ -3963,7 +3963,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ 
+ static void
+ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		       unsigned int epoch, bool *purge_cache)
++		       __u16 epoch, bool *purge_cache)
+ {
+ 	char message[5] = {0};
+ 	unsigned int new_oplock = 0;
+@@ -4000,7 +4000,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ 
+ static void
+ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+-		      unsigned int epoch, bool *purge_cache)
++		      __u16 epoch, bool *purge_cache)
+ {
+ 	unsigned int old_oplock = cinode->oplock;
+ 
+@@ -4114,7 +4114,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
+ }
+ 
+ static __u8
+-smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
+ {
+ 	struct create_lease *lc = (struct create_lease *)buf;
+ 
+@@ -4125,7 +4125,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
+ }
+ 
+ static __u8
+-smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
++smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key)
+ {
+ 	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
+ 
+@@ -5077,6 +5077,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ {
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	struct cifs_open_parms oparms;
++	struct cifs_open_info_data idata;
+ 	struct cifs_io_parms io_parms = {};
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ 	struct cifs_fid fid;
+@@ -5146,10 +5147,20 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 			     CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+ 	oparms.fid = &fid;
+ 
+-	rc = server->ops->open(xid, &oparms, &oplock, NULL);
++	rc = server->ops->open(xid, &oparms, &oplock, &idata);
+ 	if (rc)
+ 		goto out;
+ 
++	/*
++	 * Check if the server honored the ATTR_SYSTEM flag set via the
++	 * CREATE_OPTION_SPECIAL option. If not, the server does not support
++	 * ATTR_SYSTEM and the newly created file is not SFU compatible, which means the call failed.
++	 */
++	if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) {
++		rc = -EOPNOTSUPP;
++		goto out_close;
++	}
++
+ 	if (type_len + data_len > 0) {
+ 		io_parms.pid = current->tgid;
+ 		io_parms.tcon = tcon;
+@@ -5164,8 +5175,18 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ 					     iov, ARRAY_SIZE(iov)-1);
+ 	}
+ 
++out_close:
+ 	server->ops->close(xid, tcon, &fid);
+ 
++	/*
++	 * If CREATE was successful but either setting ATTR_SYSTEM or writing
++	 * the type/data information failed, then remove the intermediate
++	 * object created by CREATE. Otherwise the intermediate empty object
++	 * stays on the server.
++	 */
++	if (rc)
++		server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL);
++
+ out:
+ 	kfree(symname_utf16);
+ 	return rc;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 0577556f0a4118..10eca164c8386f 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2329,7 +2329,7 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
+ 
+ int smb2_parse_contexts(struct TCP_Server_Info *server,
+ 			struct kvec *rsp_iov,
+-			unsigned int *epoch,
++			__u16 *epoch,
+ 			char *lease_key, __u8 *oplock,
+ 			struct smb2_file_all_info *buf,
+ 			struct create_posix_rsp *posix)
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index 09349fa8da039a..51d890f74e36f3 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -282,7 +282,7 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+ 					enum securityEnum);
+ int smb2_parse_contexts(struct TCP_Server_Info *server,
+ 			struct kvec *rsp_iov,
+-			unsigned int *epoch,
++			__u16 *epoch,
+ 			char *lease_key, __u8 *oplock,
+ 			struct smb2_file_all_info *buf,
+ 			struct create_posix_rsp *posix);
+diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
+index befaf42b84cc34..e9aa92d0278998 100644
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -626,6 +626,9 @@ ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
+ 	struct ksmbd_spnego_authen_request *req;
+ 	struct ksmbd_spnego_authen_response *resp;
+ 
++	if (blob_len > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
+ 			blob_len + 1);
+ 	if (!msg)
+@@ -805,6 +808,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
+ 	struct ksmbd_rpc_command *req;
+ 	struct ksmbd_rpc_command *resp;
+ 
++	if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+ 	if (!msg)
+ 		return NULL;
+@@ -853,6 +859,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
+ 	struct ksmbd_rpc_command *req;
+ 	struct ksmbd_rpc_command *resp;
+ 
++	if (payload_sz > KSMBD_IPC_MAX_PAYLOAD)
++		return NULL;
++
+ 	msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
+ 	if (!msg)
+ 		return NULL;
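
The three ksmbd guards added above share one shape: bound the peer-controlled payload size before it reaches ipc_msg_alloc(sizeof(hdr) + payload_sz + 1), so the size arithmetic can neither wrap nor request an oversized allocation. A minimal sketch of the pattern, with a hypothetical cap and header type standing in for the ksmbd ones:

#include <stdlib.h>

#define MAX_PAYLOAD	(64 * 1024)	/* illustrative cap, not the real KSMBD_IPC_MAX_PAYLOAD */

struct msg_hdr { unsigned int type; };

static void *msg_alloc(size_t payload_sz)
{
	/* Reject the untrusted size before it enters the addition below. */
	if (payload_sz > MAX_PAYLOAD)
		return NULL;

	return calloc(1, sizeof(struct msg_hdr) + payload_sz + 1);
}
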
+diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c
+index 265c424498933e..56803863b26c33 100644
+--- a/fs/xfs/xfs_exchrange.c
++++ b/fs/xfs/xfs_exchrange.c
+@@ -326,22 +326,6 @@ xfs_exchrange_mappings(
+  * successfully but before locks are dropped.
+  */
+ 
+-/* Verify that we have security clearance to perform this operation. */
+-static int
+-xfs_exchange_range_verify_area(
+-	struct xfs_exchrange	*fxr)
+-{
+-	int			ret;
+-
+-	ret = remap_verify_area(fxr->file1, fxr->file1_offset, fxr->length,
+-			true);
+-	if (ret)
+-		return ret;
+-
+-	return remap_verify_area(fxr->file2, fxr->file2_offset, fxr->length,
+-			true);
+-}
+-
+ /*
+  * Performs necessary checks before doing a range exchange, having stabilized
+  * mutable inode attributes via i_rwsem.
+@@ -352,11 +336,13 @@ xfs_exchange_range_checks(
+ 	unsigned int		alloc_unit)
+ {
+ 	struct inode		*inode1 = file_inode(fxr->file1);
++	loff_t			size1 = i_size_read(inode1);
+ 	struct inode		*inode2 = file_inode(fxr->file2);
++	loff_t			size2 = i_size_read(inode2);
+ 	uint64_t		allocmask = alloc_unit - 1;
+ 	int64_t			test_len;
+ 	uint64_t		blen;
+-	loff_t			size1, size2, tmp;
++	loff_t			tmp;
+ 	int			error;
+ 
+ 	/* Don't touch certain kinds of inodes */
+@@ -365,24 +351,25 @@ xfs_exchange_range_checks(
+ 	if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
+ 		return -ETXTBSY;
+ 
+-	size1 = i_size_read(inode1);
+-	size2 = i_size_read(inode2);
+-
+ 	/* Ranges cannot start after EOF. */
+ 	if (fxr->file1_offset > size1 || fxr->file2_offset > size2)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * If the caller said to exchange to EOF, we set the length of the
+-	 * request large enough to cover everything to the end of both files.
+-	 */
+ 	if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) {
++		/*
++		 * If the caller said to exchange to EOF, we set the length of
++		 * the request large enough to cover everything to the end of
++		 * both files.
++		 */
+ 		fxr->length = max_t(int64_t, size1 - fxr->file1_offset,
+ 					     size2 - fxr->file2_offset);
+-
+-		error = xfs_exchange_range_verify_area(fxr);
+-		if (error)
+-			return error;
++	} else {
++		/*
++		 * Otherwise we require both ranges to end within EOF.
++		 */
++		if (fxr->file1_offset + fxr->length > size1 ||
++		    fxr->file2_offset + fxr->length > size2)
++			return -EINVAL;
+ 	}
+ 
+ 	/*
+@@ -398,15 +385,6 @@ xfs_exchange_range_checks(
+ 	    check_add_overflow(fxr->file2_offset, fxr->length, &tmp))
+ 		return -EINVAL;
+ 
+-	/*
+-	 * We require both ranges to end within EOF, unless we're exchanging
+-	 * to EOF.
+-	 */
+-	if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) &&
+-	    (fxr->file1_offset + fxr->length > size1 ||
+-	     fxr->file2_offset + fxr->length > size2))
+-		return -EINVAL;
+-
+ 	/*
+ 	 * Make sure we don't hit any file size limits.  If we hit any size
+ 	 * limits such that test_length was adjusted, we abort the whole
+@@ -744,6 +722,7 @@ xfs_exchange_range(
+ {
+ 	struct inode		*inode1 = file_inode(fxr->file1);
+ 	struct inode		*inode2 = file_inode(fxr->file2);
++	loff_t			check_len = fxr->length;
+ 	int			ret;
+ 
+ 	BUILD_BUG_ON(XFS_EXCHANGE_RANGE_ALL_FLAGS &
+@@ -776,14 +755,18 @@ xfs_exchange_range(
+ 		return -EBADF;
+ 
+ 	/*
+-	 * If we're not exchanging to EOF, we can check the areas before
+-	 * stabilizing both files' i_size.
++	 * If we're exchanging to EOF we can't calculate the length until taking
++	 * the iolock.  Pass a 0 length to remap_verify_area similar to the
++	 * FICLONE and FICLONERANGE ioctls that support cloning to EOF as well.
+ 	 */
+-	if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)) {
+-		ret = xfs_exchange_range_verify_area(fxr);
+-		if (ret)
+-			return ret;
+-	}
++	if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)
++		check_len = 0;
++	ret = remap_verify_area(fxr->file1, fxr->file1_offset, check_len, true);
++	if (ret)
++		return ret;
++	ret = remap_verify_area(fxr->file2, fxr->file2_offset, check_len, true);
++	if (ret)
++		return ret;
+ 
+ 	/* Update cmtime if the fd/inode don't forbid it. */
+ 	if (!(fxr->file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1))
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index c8ad2606f928b2..1ff514b6c03506 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1404,8 +1404,11 @@ xfs_inactive(
+ 		goto out;
+ 
+ 	/* Try to clean out the cow blocks if there are any. */
+-	if (xfs_inode_has_cow_data(ip))
+-		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
++	if (xfs_inode_has_cow_data(ip)) {
++		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
++		if (error)
++			goto out;
++	}
+ 
+ 	if (VFS_I(ip)->i_nlink != 0) {
+ 		/*
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 50fa3ef89f6c98..d61460309a7830 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -976,10 +976,8 @@ xfs_dax_write_iomap_end(
+ 	if (!xfs_is_cow_inode(ip))
+ 		return 0;
+ 
+-	if (!written) {
+-		xfs_reflink_cancel_cow_range(ip, pos, length, true);
+-		return 0;
+-	}
++	if (!written)
++		return xfs_reflink_cancel_cow_range(ip, pos, length, true);
+ 
+ 	return xfs_reflink_end_cow(ip, pos, written);
+ }
+diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
+index e3fa43291f449d..1e2b25e204cb52 100644
+--- a/include/drm/drm_connector.h
++++ b/include/drm/drm_connector.h
+@@ -2001,8 +2001,11 @@ struct drm_connector {
+ 	struct drm_encoder *encoder;
+ 
+ #define MAX_ELD_BYTES	128
+-	/** @eld: EDID-like data, if present */
++	/** @eld: EDID-like data, if present, protected by @eld_mutex */
+ 	uint8_t eld[MAX_ELD_BYTES];
++	/** @eld_mutex: protection for concurrent access to @eld */
++	struct mutex eld_mutex;
++
+ 	/** @latency_present: AV delay info from ELD, if found */
+ 	bool latency_present[2];
+ 	/**
+diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
+index 70775748d243b0..15fa9b6865f448 100644
+--- a/include/drm/drm_utils.h
++++ b/include/drm/drm_utils.h
+@@ -12,8 +12,12 @@
+ 
+ #include <linux/types.h>
+ 
++struct drm_edid;
++
+ int drm_get_panel_orientation_quirk(int width, int height);
+ 
++int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid);
++
+ signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
+ 
+ #endif
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index e6c00e860951ae..3305c849abd66a 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -42,7 +42,9 @@ struct linux_binprm {
+ 		 * Set when errors can no longer be returned to the
+ 		 * original userspace.
+ 		 */
+-		point_of_no_return:1;
++		point_of_no_return:1,
++		/* Set when "comm" must come from the dentry. */
++		comm_from_dentry:1;
+ 	struct file *executable; /* Executable to pass to the interpreter */
+ 	struct file *interpreter;
+ 	struct file *file;
+diff --git a/include/linux/call_once.h b/include/linux/call_once.h
+new file mode 100644
+index 00000000000000..6261aa0b3fb00d
+--- /dev/null
++++ b/include/linux/call_once.h
+@@ -0,0 +1,45 @@
++#ifndef _LINUX_CALL_ONCE_H
++#define _LINUX_CALL_ONCE_H
++
++#include <linux/types.h>
++#include <linux/mutex.h>
++
++#define ONCE_NOT_STARTED 0
++#define ONCE_RUNNING     1
++#define ONCE_COMPLETED   2
++
++struct once {
++	atomic_t state;
++	struct mutex lock;
++};
++
++static inline void __once_init(struct once *once, const char *name,
++			       struct lock_class_key *key)
++{
++	atomic_set(&once->state, ONCE_NOT_STARTED);
++	__mutex_init(&once->lock, name, key);
++}
++
++#define once_init(once)							\
++do {									\
++	static struct lock_class_key __key;				\
++	__once_init((once), #once, &__key);				\
++} while (0)
++
++static inline void call_once(struct once *once, void (*cb)(struct once *))
++{
++	/* Pairs with atomic_set_release() below.  */
++	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
++		return;
++
++	guard(mutex)(&once->lock);
++	WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
++	if (atomic_read(&once->state) != ONCE_NOT_STARTED)
++		return;
++
++	atomic_set(&once->state, ONCE_RUNNING);
++	cb(once);
++	atomic_set_release(&once->state, ONCE_COMPLETED);
++}
++
++#endif /* _LINUX_CALL_ONCE_H */
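
The new header implements a mutex-serialized run-once primitive: once the callback has completed, the acquire load makes call_once() a single atomic read, and atomic_set_release() publishes the callback's side effects to those later readers. A minimal usage sketch; init_hw(), ensure_hw_init() and example_init() are hypothetical callers, not part of this patch:

#include <linux/call_once.h>
#include <linux/init.h>

static struct once hw_once;

/* Runs at most once, however many CPUs race into ensure_hw_init(). */
static void init_hw(struct once *once)
{
	/* expensive one-time setup goes here */
}

static void ensure_hw_init(void)
{
	call_once(&hw_once, init_hw);
}

static int __init example_init(void)
{
	once_init(&hw_once);	/* initialize state and the lockdep class */
	ensure_hw_init();
	return 0;
}
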
+diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
+index c3b4b7ed7c163f..84a5045f80f36f 100644
+--- a/include/linux/hrtimer_defs.h
++++ b/include/linux/hrtimer_defs.h
+@@ -125,6 +125,7 @@ struct hrtimer_cpu_base {
+ 	ktime_t				softirq_expires_next;
+ 	struct hrtimer			*softirq_next_timer;
+ 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
++	call_single_data_t		csd;
+ } ____cacheline_aligned;
+ 
+ 
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index ed945f42e064af..0ea8c9887429ff 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -537,7 +537,7 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
+  *
+  * Return: jiffies value
+  */
+-#define secs_to_jiffies(_secs) ((_secs) * HZ)
++#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
+ 
+ extern unsigned long __usecs_to_jiffies(const unsigned int u);
+ #if !(USEC_PER_SEC % HZ)
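
The added cast changes the type of the macro's result, not its value: with an int argument the old expansion had type int, while the new one is unsigned long, the type jiffies arithmetic expects, so call sites no longer see a signed int that can sign-extend or trip signedness warnings. A userspace illustration of the type change (HZ hard-coded here purely for the demo):

#include <stdio.h>

#define HZ 1000	/* illustrative; the real value is config-dependent */
#define secs_to_jiffies_old(_secs) ((_secs) * HZ)
#define secs_to_jiffies_new(_secs) (unsigned long)((_secs) * HZ)
#define TYPE_NAME(x) _Generic((x), int: "int", unsigned long: "unsigned long", default: "other")

int main(void)
{
	printf("old: %s\n", TYPE_NAME(secs_to_jiffies_old(5)));	/* int */
	printf("new: %s\n", TYPE_NAME(secs_to_jiffies_new(5)));	/* unsigned long */
	return 0;
}
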
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 401439bb21e3e6..b0b38744c4b055 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -963,6 +963,15 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
+ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+ {
+ 	int num_vcpus = atomic_read(&kvm->online_vcpus);
++
++	/*
++	 * Explicitly verify the target vCPU is online, as the anti-speculation
++	 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
++	 * index, clamping the index to 0 would return vCPU0, not NULL.
++	 */
++	if (i >= num_vcpus)
++		return NULL;
++
+ 	i = array_index_nospec(i, num_vcpus);
+ 
+ 	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
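
The comment in this hunk generalizes: array_index_nospec() only limits what the CPU may do speculatively, and architecturally it clamps an out-of-range index to a valid one (here 0, i.e. vCPU0) instead of rejecting it, so an explicit bounds check must come first. A sketch of the combined pattern on a hypothetical table type:

#include <linux/atomic.h>
#include <linux/nospec.h>

struct item;

struct table {
	atomic_t nr_items;		/* number of valid entries */
	struct item *items[64];
};

static struct item *lookup_item(struct table *t, int i)
{
	int nr = atomic_read(&t->nr_items);

	/* Architectural check: reject bad indices outright... */
	if (i < 0 || i >= nr)
		return NULL;

	/* ...then clamp what speculation may use. */
	i = array_index_nospec(i, nr);
	return t->items[i];
}
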
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index ea48eb879a0f52..fed666c5bd1632 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -691,7 +691,6 @@ struct mlx5_timer {
+ 	struct timecounter         tc;
+ 	u32                        nominal_c_mult;
+ 	unsigned long              overflow_period;
+-	struct delayed_work        overflow_work;
+ };
+ 
+ struct mlx5_clock {
+diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
+index 365e119bebaa23..783e2a336861b7 100644
+--- a/include/linux/platform_data/x86/asus-wmi.h
++++ b/include/linux/platform_data/x86/asus-wmi.h
+@@ -184,6 +184,11 @@ static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"),
++		},
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1e6324f0d4efda..24e48af7e8f74a 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -851,7 +851,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ }
+ 
+ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
+-				  __u64 bytes, __u32 packets)
++				  __u64 bytes, __u64 packets)
+ {
+ 	u64_stats_update_begin(&bstats->syncp);
+ 	u64_stats_add(&bstats->bytes, bytes);
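
Widening the packets parameter matters because a __u32 parameter truncates a 64-bit batch count modulo 2^32 at the call boundary, silently, before the value ever reaches the u64 stats counter. A deliberately exaggerated userspace illustration of the effect:

#include <stdint.h>
#include <stdio.h>

static void update_u32(uint64_t *total, uint32_t packets) { *total += packets; }
static void update_u64(uint64_t *total, uint64_t packets) { *total += packets; }

int main(void)
{
	uint64_t a = 0, b = 0;
	uint64_t batch = 0x100000005ULL;	/* a count above 2^32 */

	update_u32(&a, batch);	/* implicit truncation: adds only 5 */
	update_u64(&b, batch);	/* adds the full count */
	printf("u32 param: %llu, u64 param: %llu\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}
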
+diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
+index 9705b2a98e49e1..510c88bfabd433 100644
+--- a/include/rv/da_monitor.h
++++ b/include/rv/da_monitor.h
+@@ -14,6 +14,7 @@
+ #include <rv/automata.h>
+ #include <linux/rv.h>
+ #include <linux/bug.h>
++#include <linux/sched.h>
+ 
+ #ifdef CONFIG_RV_REACTORS
+ 
+@@ -324,10 +325,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk)
+ static void da_monitor_reset_all_##name(void)							\
+ {												\
+ 	struct task_struct *g, *p;								\
++	int cpu;										\
+ 												\
+ 	read_lock(&tasklist_lock);								\
+ 	for_each_process_thread(g, p)								\
+ 		da_monitor_reset_##name(da_get_monitor_##name(p));				\
++	for_each_present_cpu(cpu)								\
++		da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu)));			\
+ 	read_unlock(&tasklist_lock);								\
+ }												\
+ 												\
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 27c23873c88115..ee4030f2e99da8 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -218,6 +218,7 @@
+ 	EM(rxrpc_conn_get_conn_input,		"GET inp-conn") \
+ 	EM(rxrpc_conn_get_idle,			"GET idle    ") \
+ 	EM(rxrpc_conn_get_poke_abort,		"GET pk-abort") \
++	EM(rxrpc_conn_get_poke_secured,		"GET secured ") \
+ 	EM(rxrpc_conn_get_poke_timer,		"GET poke    ") \
+ 	EM(rxrpc_conn_get_service_conn,		"GET svc-conn") \
+ 	EM(rxrpc_conn_new_client,		"NEW client  ") \
+diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
+index efe5de6ce208a1..aaa4f3bc688b57 100644
+--- a/include/uapi/drm/amdgpu_drm.h
++++ b/include/uapi/drm/amdgpu_drm.h
+@@ -411,13 +411,20 @@ struct drm_amdgpu_gem_userptr {
+ /* GFX12 and later: */
+ #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT			0
+ #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK			0x7
+-/* These are DCC recompression setting for memory management: */
++/* These are DCC recompression settings for memory management: */
+ #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT	3
+ #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK	0x3 /* 0:64B, 1:128B, 2:256B */
+ #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT		5
+ #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK		0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
+ #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT		8
+ #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK		0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
++/* When clearing the buffer or moving it from VRAM to GTT, don't compress, and set DCC metadata
++ * to uncompressed. Set when parts of an allocation bypass DCC and read raw data. */
++#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT	14
++#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK	0x1
++/* bit gap */
++#define AMDGPU_TILING_GFX12_SCANOUT_SHIFT			63
++#define AMDGPU_TILING_GFX12_SCANOUT_MASK			0x1
+ 
+ /* Set/Get helpers for tiling flags. */
+ #define AMDGPU_TILING_SET(field, value) \
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index a4206723f50333..5a199f3d4a26a2 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -519,6 +519,7 @@
+ #define KEY_NOTIFICATION_CENTER	0x1bc	/* Show/hide the notification center */
+ #define KEY_PICKUP_PHONE	0x1bd	/* Answer incoming call */
+ #define KEY_HANGUP_PHONE	0x1be	/* Decline incoming call */
++#define KEY_LINK_PHONE		0x1bf   /* AL Phone Syncing */
+ 
+ #define KEY_DEL_EOL		0x1c0
+ #define KEY_DEL_EOS		0x1c1
+diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
+index 34810f6ae2b5a9..78747b24bd0fbc 100644
+--- a/include/uapi/linux/iommufd.h
++++ b/include/uapi/linux/iommufd.h
+@@ -868,6 +868,7 @@ enum iommu_hwpt_pgfault_perm {
+  * @pasid: Process Address Space ID
+  * @grpid: Page Request Group Index
+  * @perm: Combination of enum iommu_hwpt_pgfault_perm
++ * @__reserved: Must be 0.
+  * @addr: Fault address
+  * @length: a hint of how much data the requestor is expecting to fetch. For
+  *          example, if the PRI initiator knows it is going to do a 10MB
+@@ -883,7 +884,8 @@ struct iommu_hwpt_pgfault {
+ 	__u32 pasid;
+ 	__u32 grpid;
+ 	__u32 perm;
+-	__u64 addr;
++	__u32 __reserved;
++	__aligned_u64 addr;
+ 	__u32 length;
+ 	__u32 cookie;
+ };
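
The explicit __reserved field plus __aligned_u64 pins down the UAPI layout: after an odd number of __u32 members, a bare __u64 picks up 4 bytes of implicit padding on 64-bit ABIs but none on i386, where u64 alignment is only 4, so the old struct had different offsets for 32-bit and 64-bit userspace. A userspace sketch of the idea, mirroring the fields shown above (the real UAPI struct uses the __u32/__aligned_u64 types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pgfault_old {		/* implicit padding before addr on x86-64 only */
	uint32_t flags, dev_id, pasid, grpid, perm;
	uint64_t addr;
	uint32_t length, cookie;
};

struct pgfault_new {		/* padding explicit, alignment forced: same on every ABI */
	uint32_t flags, dev_id, pasid, grpid, perm;
	uint32_t reserved;
	uint64_t addr __attribute__((aligned(8)));
	uint32_t length, cookie;
};

int main(void)
{
	printf("old: addr at %zu, size %zu\n",
	       offsetof(struct pgfault_old, addr), sizeof(struct pgfault_old));
	printf("new: addr at %zu, size %zu\n",
	       offsetof(struct pgfault_new, addr), sizeof(struct pgfault_new));
	return 0;
}
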
+diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
+index 5a43c23f53bfbf..ff47b6f0ba0f5f 100644
+--- a/include/uapi/linux/raid/md_p.h
++++ b/include/uapi/linux/raid/md_p.h
+@@ -233,7 +233,7 @@ struct mdp_superblock_1 {
+ 	char	set_name[32];	/* set and interpreted by user-space */
+ 
+ 	__le64	ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0*/
+-	__le32	level;		/* 0,1,4,5 */
++	__le32	level;		/* 0,1,4,5, -1 (linear) */
+ 	__le32	layout;		/* only for raid5 and raid10 currently */
+ 	__le64	size;		/* used size of component devices, in 512byte sectors */
+ 
+diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
+index 7be89a4906e73e..a893010735fbad 100644
+--- a/include/uapi/linux/raid/md_u.h
++++ b/include/uapi/linux/raid/md_u.h
+@@ -103,6 +103,8 @@ typedef struct mdu_array_info_s {
+ 
+ } mdu_array_info_t;
+ 
++#define LEVEL_LINEAR		(-1)
++
+ /* we need a value for 'no level specified' and 0
+  * means 'raid0', so we need something else.  This is
+  * for internal use only
+diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
+index e594abe5d05fed..f0c6111160e7af 100644
+--- a/include/ufs/ufs.h
++++ b/include/ufs/ufs.h
+@@ -386,8 +386,8 @@ enum {
+ 
+ /* Possible values for dExtendedUFSFeaturesSupport */
+ enum {
+-	UFS_DEV_LOW_TEMP_NOTIF		= BIT(4),
+-	UFS_DEV_HIGH_TEMP_NOTIF		= BIT(5),
++	UFS_DEV_HIGH_TEMP_NOTIF		= BIT(4),
++	UFS_DEV_LOW_TEMP_NOTIF		= BIT(5),
+ 	UFS_DEV_EXT_TEMP_NOTIF		= BIT(6),
+ 	UFS_DEV_HPB_SUPPORT		= BIT(7),
+ 	UFS_DEV_WRITE_BOOSTER_SUP	= BIT(8),
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 74e5b9960c5466..82b2d2b25c23b7 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1297,7 +1297,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+ void ufshcd_enable_irq(struct ufs_hba *hba);
+ void ufshcd_disable_irq(struct ufs_hba *hba);
+ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+-void ufshcd_dealloc_host(struct ufs_hba *);
+ int ufshcd_hba_enable(struct ufs_hba *hba);
+ int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+ int ufshcd_link_recovery(struct ufs_hba *hba);
+diff --git a/io_uring/net.c b/io_uring/net.c
+index c6cd38cc5dc4eb..b01bf900e3b940 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1709,6 +1709,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 	int ret;
+ 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ 
++	if (unlikely(req->flags & REQ_F_FAIL)) {
++		ret = -ECONNRESET;
++		goto out;
++	}
++
+ 	file_flags = force_nonblock ? O_NONBLOCK : 0;
+ 
+ 	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index bced9edd52335a..2c28c4c9307160 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -273,6 +273,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ 				return IOU_POLL_REISSUE;
+ 			}
+ 		}
++		if (unlikely(req->cqe.res & EPOLLERR))
++			req_set_fail(req);
+ 		if (req->apoll_events & EPOLLONESHOT)
+ 			return IOU_POLL_DONE;
+ 
+@@ -315,8 +317,10 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
+ 
+ 	ret = io_poll_check_events(req, ts);
+ 	if (ret == IOU_POLL_NO_ACTION) {
++		io_kbuf_recycle(req, 0);
+ 		return;
+ 	} else if (ret == IOU_POLL_REQUEUE) {
++		io_kbuf_recycle(req, 0);
+ 		__io_poll_execute(req, 0);
+ 		return;
+ 	}
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 77f56674aaa99a..4f02345b764fdd 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -18803,7 +18803,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 				 * match caller reference state when it exits.
+ 				 */
+ 				err = check_resource_leak(env, exception_exit, !env->cur_state->curframe,
+-							  "BPF_EXIT instruction");
++							  "BPF_EXIT instruction in main prog");
+ 				if (err)
+ 					return err;
+ 
+diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
+index 5d58b2c0ef98bc..bcb1b9fea58807 100644
+--- a/kernel/locking/test-ww_mutex.c
++++ b/kernel/locking/test-ww_mutex.c
+@@ -404,7 +404,7 @@ static inline u32 prandom_u32_below(u32 ceil)
+ static int *get_random_order(int count)
+ {
+ 	int *order;
+-	int n, r, tmp;
++	int n, r;
+ 
+ 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
+ 	if (!order)
+@@ -415,11 +415,8 @@ static int *get_random_order(int count)
+ 
+ 	for (n = count - 1; n > 1; n--) {
+ 		r = prandom_u32_below(n + 1);
+-		if (r != n) {
+-			tmp = order[n];
+-			order[n] = order[r];
+-			order[r] = tmp;
+-		}
++		if (r != n)
++			swap(order[n], order[r]);
+ 	}
+ 
+ 	return order;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index f446a06b4da8ca..07668433644b8a 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -523,7 +523,7 @@ static struct latched_seq clear_seq = {
+ /* record buffer */
+ #define LOG_ALIGN __alignof__(unsigned long)
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+-#define LOG_BUF_LEN_MAX (u32)(1 << 31)
++#define LOG_BUF_LEN_MAX ((u32)1 << 31)
+ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
+ static char *log_buf = __log_buf;
+ static u32 log_buf_len = __LOG_BUF_LEN;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e0fd8069c60e64..ffceb5ff4c5c37 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -766,13 +766,15 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ 	if (static_key_false((&paravirt_steal_rq_enabled))) {
+-		steal = paravirt_steal_clock(cpu_of(rq));
++		u64 prev_steal;
++
++		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
+ 		steal -= rq->prev_steal_time_rq;
+ 
+ 		if (unlikely(steal > delta))
+ 			steal = delta;
+ 
+-		rq->prev_steal_time_rq += steal;
++		rq->prev_steal_time_rq = prev_steal;
+ 		delta -= steal;
+ 	}
+ #endif
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 8800679b508d9f..7d0a05660e5efc 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5372,6 +5372,15 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+ static void set_delayed(struct sched_entity *se)
+ {
+ 	se->sched_delayed = 1;
++
++	/*
++	 * A delayed se of a cfs_rq has no tasks queued on it.
++	 * Do not adjust h_nr_runnable since dequeue_entities()
++	 * will account it for blocked tasks.
++	 */
++	if (!entity_is_task(se))
++		return;
++
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
+@@ -5384,6 +5393,16 @@ static void set_delayed(struct sched_entity *se)
+ static void clear_delayed(struct sched_entity *se)
+ {
+ 	se->sched_delayed = 0;
++
++	/*
++	 * A delayed se of a cfs_rq has no tasks queued on it.
++	 * Do not adjust h_nr_runnable since a dequeue has
++	 * already accounted for it or an enqueue of a task
++	 * below it will account for it in enqueue_task_fair().
++	 */
++	if (!entity_is_task(se))
++		return;
++
+ 	for_each_sched_entity(se) {
+ 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ 
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 385d48293a5fa1..0cd1f8b5a102ee 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -749,6 +749,15 @@ static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
+ 	if (WARN_ON_ONCE(!fprog))
+ 		return false;
+ 
++	/* Our single exception to filtering. */
++#ifdef __NR_uretprobe
++#ifdef SECCOMP_ARCH_COMPAT
++	if (sd->arch == SECCOMP_ARCH_NATIVE)
++#endif
++		if (sd->nr == __NR_uretprobe)
++			return true;
++#endif
++
+ 	for (pc = 0; pc < fprog->len; pc++) {
+ 		struct sock_filter *insn = &fprog->filter[pc];
+ 		u16 code = insn->code;
+@@ -1023,6 +1032,9 @@ static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
+  */
+ static const int mode1_syscalls[] = {
+ 	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
++#ifdef __NR_uretprobe
++	__NR_uretprobe,
++#endif
+ 	-1, /* negative terminated */
+ };
+ 
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 030426c8c944e0..36dd2f5c30da1e 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -58,6 +58,8 @@
+ #define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+ #define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+ 
++static void retrigger_next_event(void *arg);
++
+ /*
+  * The timer bases:
+  *
+@@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ 			.clockid = CLOCK_TAI,
+ 			.get_time = &ktime_get_clocktai,
+ 		},
+-	}
++	},
++	.csd = CSD_INIT(retrigger_next_event, NULL)
+ };
+ 
+ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+@@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+ 	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
+ };
+ 
++static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
++{
++	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
++		return true;
++	else
++		return likely(base->online);
++}
++
+ /*
+  * Functions and macros which are different for UP/SMP systems are kept in a
+  * single place
+@@ -183,27 +194,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
+ }
+ 
+ /*
+- * We do not migrate the timer when it is expiring before the next
+- * event on the target cpu. When high resolution is enabled, we cannot
+- * reprogram the target cpu hardware and we would cause it to fire
+- * late. To keep it simple, we handle the high resolution enabled and
+- * disabled case similar.
++ * Check if the elected target is suitable considering its next
++ * event and the hotplug state of the current CPU.
++ *
++ * If the elected target is remote and its next event is after the timer
++ * to queue, then a remote reprogram is necessary. However there is no
++ * guarantee the IPI handling the operation would arrive in time to meet
++ * the high resolution deadline. In this case the local CPU becomes a
++ * preferred target, unless it is offline.
++ *
++ * High and low resolution modes are handled the same way for simplicity.
+  *
+  * Called with cpu_base->lock of target cpu held.
+  */
+-static int
+-hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
++static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base,
++				    struct hrtimer_cpu_base *new_cpu_base,
++				    struct hrtimer_cpu_base *this_cpu_base)
+ {
+ 	ktime_t expires;
+ 
++	/*
++	 * The local CPU clockevent can be reprogrammed. Also get_target_base()
++	 * guarantees it is online.
++	 */
++	if (new_cpu_base == this_cpu_base)
++		return true;
++
++	/*
++	 * The offline local CPU can't be the default target if the
++	 * next remote target event is after this timer. Keep the
++	 * elected new base. An IPI will be issued to reprogram
++	 * it as a last resort.
++	 */
++	if (!hrtimer_base_is_online(this_cpu_base))
++		return true;
++
+ 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+-	return expires < new_base->cpu_base->expires_next;
++
++	return expires >= new_base->cpu_base->expires_next;
+ }
+ 
+-static inline
+-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+-					 int pinned)
++static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned)
+ {
++	if (!hrtimer_base_is_online(base)) {
++		int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER));
++
++		return &per_cpu(hrtimer_bases, cpu);
++	}
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ 	if (static_branch_likely(&timers_migration_enabled) && !pinned)
+ 		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+@@ -254,8 +292,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ 		raw_spin_unlock(&base->cpu_base->lock);
+ 		raw_spin_lock(&new_base->cpu_base->lock);
+ 
+-		if (new_cpu_base != this_cpu_base &&
+-		    hrtimer_check_target(timer, new_base)) {
++		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base,
++					     this_cpu_base)) {
+ 			raw_spin_unlock(&new_base->cpu_base->lock);
+ 			raw_spin_lock(&base->cpu_base->lock);
+ 			new_cpu_base = this_cpu_base;
+@@ -264,8 +302,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ 		}
+ 		WRITE_ONCE(timer->base, new_base);
+ 	} else {
+-		if (new_cpu_base != this_cpu_base &&
+-		    hrtimer_check_target(timer, new_base)) {
++		if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) {
+ 			new_cpu_base = this_cpu_base;
+ 			goto again;
+ 		}
+@@ -716,8 +753,6 @@ static inline int hrtimer_is_hres_enabled(void)
+ 	return hrtimer_hres_enabled;
+ }
+ 
+-static void retrigger_next_event(void *arg);
+-
+ /*
+  * Switch to high resolution mode
+  */
+@@ -1206,6 +1241,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 				    u64 delta_ns, const enum hrtimer_mode mode,
+ 				    struct hrtimer_clock_base *base)
+ {
++	struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
+ 	struct hrtimer_clock_base *new_base;
+ 	bool force_local, first;
+ 
+@@ -1217,9 +1253,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	 * and enforce reprogramming after it is queued no matter whether
+ 	 * it is the new first expiring timer again or not.
+ 	 */
+-	force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
++	force_local = base->cpu_base == this_cpu_base;
+ 	force_local &= base->cpu_base->next_timer == timer;
+ 
++	/*
++	 * Don't force local queuing if this enqueue happens on an unplugged
++	 * CPU after hrtimer_cpu_dying() has been invoked.
++	 */
++	force_local &= this_cpu_base->online;
++
+ 	/*
+ 	 * Remove an active timer from the queue. In case it is not queued
+ 	 * on the current CPU, make sure that remove_hrtimer() updates the
+@@ -1249,8 +1291,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	}
+ 
+ 	first = enqueue_hrtimer(timer, new_base, mode);
+-	if (!force_local)
+-		return first;
++	if (!force_local) {
++		/*
++		 * If the current CPU base is online, then the timer is
++		 * never queued on a remote CPU if it would be the first
++		 * expiring timer there.
++		 */
++		if (hrtimer_base_is_online(this_cpu_base))
++			return first;
++
++		/*
++		 * Timer was enqueued remote because the current base is
++		 * already offline. If the timer is the first to expire,
++		 * kick the remote CPU to reprogram the clock event.
++		 */
++		if (first) {
++			struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;
++
++			smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
++		}
++		return 0;
++	}
+ 
+ 	/*
+ 	 * Timer was forced to stay on the current CPU to avoid
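
When a timer is enqueued remotely because the local CPU is already offline, the enqueueing CPU cannot reprogram the remote clockevent itself; it instead fires the per-base csd added to hrtimer_cpu_base, and smp_call_function_single_async() runs retrigger_next_event() on the target CPU. The general shape of such an asynchronous cross-CPU call, with a placeholder work function:

#include <linux/smp.h>

static void remote_work(void *arg)
{
	/* runs in IPI context on the target CPU */
}

static call_single_data_t remote_csd = CSD_INIT(remote_work, NULL);

static void kick_cpu(int cpu)
{
	/*
	 * Queue remote_work() on @cpu and return immediately; the csd
	 * must stay allocated (and not be re-queued) until the callback
	 * has finished running.
	 */
	smp_call_function_single_async(cpu, &remote_csd);
}
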
+diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
+index 066c9ddca4ec66..0707f1ef05f7ef 100644
+--- a/kernel/time/timer_migration.c
++++ b/kernel/time/timer_migration.c
+@@ -1677,6 +1677,9 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+ 
+ 	} while (i < tmigr_hierarchy_levels);
+ 
++	/* Assert single root */
++	WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
++
+ 	while (i > 0) {
+ 		group = stack[--i];
+ 
+@@ -1718,7 +1721,12 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+ 		WARN_ON_ONCE(top == 0);
+ 
+ 		lvllist = &tmigr_level_list[top];
+-		if (group->num_children == 1 && list_is_singular(lvllist)) {
++
++		/*
++		 * A newly created root level should have accounted for the
++		 * upcoming CPU's child group and pre-accounted the old root.
++		 */
++		if (group->num_children == 2 && list_is_singular(lvllist)) {
+ 			/*
+ 			 * The target CPU must never do the prepare work, except
+ 			 * on early boot when the boot CPU is the target. Otherwise
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 60210fb5b21103..6b888699f916a1 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4398,8 +4398,13 @@ rb_reserve_next_event(struct trace_buffer *buffer,
+ 	int nr_loops = 0;
+ 	int add_ts_default;
+ 
+-	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
+-	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
++	/*
++	 * The ring buffer does cmpxchg as well as atomic64 operations
++	 * (for which some archs fall back to locking), so make sure
++	 * this is safe in NMI context
++	 */
++	if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
++	     IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
+ 	    (unlikely(in_nmi()))) {
+ 		return NULL;
+ 	}
+@@ -7059,7 +7064,7 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
+ 	}
+ 
+ 	while (p < nr_pages) {
+-		struct page *page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
++		struct page *page;
+ 		int off = 0;
+ 
+ 		if (WARN_ON_ONCE(s >= nr_subbufs)) {
+@@ -7067,6 +7072,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
+ 			goto out;
+ 		}
+ 
++		page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
++
+ 		for (; off < (1 << (subbuf_order)); off++, page++) {
+ 			if (p >= nr_pages)
+ 				break;
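
The second ring-buffer hunk is a validate-before-derive fix: cpu_buffer->subbuf_ids[s] was read to seed virt_to_page() before s was checked against nr_subbufs, an out-of-bounds array read for a bad index. Reduced to its skeleton (the helper is hypothetical):

#include <linux/err.h>
#include <linux/mm.h>

static struct page *subbuf_to_page(unsigned long *ids, int nr_ids, int s)
{
	if (s >= nr_ids)			/* validate the index first... */
		return ERR_PTR(-EINVAL);

	return virt_to_page((void *)ids[s]);	/* ...then read the array */
}
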
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 5504b5e4e7b411..d81fcf51b196e7 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -198,7 +198,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
+ 	 * returning from the function.
+ 	 */
+ 	if (ftrace_graph_notrace_addr(trace->func)) {
+-		*task_var |= TRACE_GRAPH_NOTRACE_BIT;
++		*task_var |= TRACE_GRAPH_NOTRACE;
+ 		/*
+ 		 * Need to return 1 to have the return called
+ 		 * that will clear the NOTRACE bit.
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index b9f96c77527db8..23cbc24ed2922f 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1229,6 +1229,8 @@ static void trace_sched_migrate_callback(void *data, struct task_struct *p, int
+ 	}
+ }
+ 
++static bool monitor_enabled;
++
+ static int register_migration_monitor(void)
+ {
+ 	int ret = 0;
+@@ -1237,16 +1239,25 @@ static int register_migration_monitor(void)
+ 	 * Timerlat thread migration check is only required when running timerlat in user-space.
+ 	 * Thus, enable callback only if timerlat is set with no workload.
+ 	 */
+-	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
++	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options)) {
++		if (WARN_ON_ONCE(monitor_enabled))
++			return 0;
++
+ 		ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++		if (!ret)
++			monitor_enabled = true;
++	}
+ 
+ 	return ret;
+ }
+ 
+ static void unregister_migration_monitor(void)
+ {
+-	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
+-		unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++	if (!monitor_enabled)
++		return;
++
++	unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
++	monitor_enabled = false;
+ }
+ #else
+ static int register_migration_monitor(void)
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index f3d72370587936..bc725add84f468 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1504,7 +1504,7 @@ config LOCKDEP_SMALL
+ config LOCKDEP_BITS
+ 	int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 24
+ 	default 15
+ 	help
+ 	  Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+@@ -1520,7 +1520,7 @@ config LOCKDEP_CHAINS_BITS
+ config LOCKDEP_STACK_TRACE_BITS
+ 	int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 26
+ 	default 19
+ 	help
+ 	  Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+@@ -1528,7 +1528,7 @@ config LOCKDEP_STACK_TRACE_BITS
+ config LOCKDEP_STACK_TRACE_HASH_BITS
+ 	int "Bitsize for STACK_TRACE_HASH_SIZE"
+ 	depends on LOCKDEP && !LOCKDEP_SMALL
+-	range 10 30
++	range 10 26
+ 	default 14
+ 	help
+ 	  Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
+@@ -1536,7 +1536,7 @@ config LOCKDEP_STACK_TRACE_HASH_BITS
+ config LOCKDEP_CIRCULAR_QUEUE_BITS
+ 	int "Bitsize for elements in circular_queue struct"
+ 	depends on LOCKDEP
+-	range 10 30
++	range 10 26
+ 	default 12
+ 	help
+ 	  Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
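
These Kconfig values are bit widths: each table holds 1 << N entries, so the old ceiling of 30 allowed 2^30 ≈ 1.07e9 entries. Assuming on the order of 64 bytes per entry (an illustrative figure; the real element sizes differ per table), that is roughly 64 GiB of static data, far beyond what a kernel image can carry, while the new 24-bit cap bounds the same table near 1 GiB at its maximum and leaves the defaults untouched.
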
+diff --git a/lib/atomic64.c b/lib/atomic64.c
+index caf895789a1ee6..1a72bba36d2430 100644
+--- a/lib/atomic64.c
++++ b/lib/atomic64.c
+@@ -25,15 +25,15 @@
+  * Ensure each lock is in a separate cacheline.
+  */
+ static union {
+-	raw_spinlock_t lock;
++	arch_spinlock_t lock;
+ 	char pad[L1_CACHE_BYTES];
+ } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+ 	[0 ... (NR_LOCKS - 1)] = {
+-		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
++		.lock =  __ARCH_SPIN_LOCK_UNLOCKED,
+ 	},
+ };
+ 
+-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
++static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
+ {
+ 	unsigned long addr = (unsigned long) v;
+ 
+@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+ s64 generic_atomic64_read(const atomic64_t *v)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_read);
+@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
+ void generic_atomic64_set(atomic64_t *v, s64 i)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	v->counter = i;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(generic_atomic64_set);
+ 
+@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
+ void generic_atomic64_##op(s64 a, atomic64_t *v)			\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	v->counter c_op a;						\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_##op);
+ 
+@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
+ s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 	s64 val;							\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	val = (v->counter c_op a);					\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ 	return val;							\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_##op##_return);
+@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
+ s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
+ {									\
+ 	unsigned long flags;						\
+-	raw_spinlock_t *lock = lock_addr(v);				\
++	arch_spinlock_t *lock = lock_addr(v);				\
+ 	s64 val;							\
+ 									\
+-	raw_spin_lock_irqsave(lock, flags);				\
++	local_irq_save(flags);						\
++	arch_spin_lock(lock);						\
+ 	val = v->counter;						\
+ 	v->counter c_op a;						\
+-	raw_spin_unlock_irqrestore(lock, flags);			\
++	arch_spin_unlock(lock);						\
++	local_irq_restore(flags);					\
+ 	return val;							\
+ }									\
+ EXPORT_SYMBOL(generic_atomic64_fetch_##op);
+@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
+ s64 generic_atomic64_dec_if_positive(atomic64_t *v)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter - 1;
+ 	if (val >= 0)
+ 		v->counter = val;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+ s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	if (val == o)
+ 		v->counter = n;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+ s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	v->counter = new;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 	return val;
+ }
+ EXPORT_SYMBOL(generic_atomic64_xchg);
+@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
+ s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+ {
+ 	unsigned long flags;
+-	raw_spinlock_t *lock = lock_addr(v);
++	arch_spinlock_t *lock = lock_addr(v);
+ 	s64 val;
+ 
+-	raw_spin_lock_irqsave(lock, flags);
++	local_irq_save(flags);
++	arch_spin_lock(lock);
+ 	val = v->counter;
+ 	if (val != u)
+ 		v->counter += a;
+-	raw_spin_unlock_irqrestore(lock, flags);
++	arch_spin_unlock(lock);
++	local_irq_restore(flags);
+ 
+ 	return val;
+ }
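For context on the lib/atomic64.c hunks above: on machines without native
64-bit atomics, the generic implementation serialises every operation through
a small, cacheline-padded pool of spinlocks selected by hashing the atomic's
address. Switching from raw_spinlock_t to a bare arch_spinlock_t with
explicit local_irq_save()/arch_spin_lock() strips the primitive down to the
lowest level; a plausible motivation (the changelog is not part of this
patch) is keeping lock debugging and instrumentation out of a helper that
such instrumentation may itself rely on. The hash body elided by the hunks
looks roughly like this (reconstructed from memory of lib/atomic64.c, so
treat it as an approximation):

	static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
	{
		unsigned long addr = (unsigned long) v;

		addr >>= L1_CACHE_SHIFT;		/* drop in-cacheline bits */
		addr ^= (addr >> 8) ^ (addr >> 16);	/* fold in higher bits */
		return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
	}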
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 047397136f15bf..db9bf663661c76 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
+  * Return: The first split location.  The middle split is set in @mid_split.
+  */
+ static inline int mab_calc_split(struct ma_state *mas,
+-	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
++	 struct maple_big_node *bn, unsigned char *mid_split)
+ {
+ 	unsigned char b_end = bn->b_end;
+ 	int split = b_end / 2; /* Assume equal split. */
+-	unsigned char slot_min, slot_count = mt_slots[bn->type];
++	unsigned char slot_count = mt_slots[bn->type];
+ 
+ 	/*
+ 	 * To support gap tracking, all NULL entries are kept together and a node cannot
+@@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas,
+ 		split = b_end / 3;
+ 		*mid_split = split * 2;
+ 	} else {
+-		slot_min = mt_min_slots[bn->type];
+-
+ 		*mid_split = 0;
+-		/*
+-		 * Avoid having a range less than the slot count unless it
+-		 * causes one node to be deficient.
+-		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
+-		 */
+-		while ((split < slot_count - 1) &&
+-		       ((bn->pivot[split] - min) < slot_count - 1) &&
+-		       (b_end - split > slot_min))
+-			split++;
+ 	}
+ 
+ 	/* Avoid ending a node on a NULL entry */
+@@ -2377,7 +2366,7 @@ static inline struct maple_enode
+ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ 	struct maple_big_node *b_node, struct maple_enode **left,
+ 	struct maple_enode **right, struct maple_enode **middle,
+-	unsigned char *mid_split, unsigned long min)
++	unsigned char *mid_split)
+ {
+ 	unsigned char split = 0;
+ 	unsigned char slot_count = mt_slots[b_node->type];
+@@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ 	if (b_node->b_end < slot_count) {
+ 		split = b_node->b_end;
+ 	} else {
+-		split = mab_calc_split(mas, b_node, mid_split, min);
++		split = mab_calc_split(mas, b_node, mid_split);
+ 		*right = mas_new_ma_node(mas, b_node);
+ 	}
+ 
+@@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
+ 		mast->bn->b_end--;
+ 		mast->bn->type = mte_node_type(mast->orig_l->node);
+ 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
+-					&mid_split, mast->orig_l->min);
++					&mid_split);
+ 		mast_set_split_parents(mast, left, middle, right, split,
+ 				       mid_split);
+ 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+@@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+ 		if (mas_push_data(mas, height, &mast, false))
+ 			break;
+ 
+-		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
++		split = mab_calc_split(mas, b_node, &mid_split);
+ 		mast_split_data(&mast, mas, split);
+ 		/*
+ 		 * Usually correct, mab_mas_cp in the above call overwrites
+diff --git a/mm/compaction.c b/mm/compaction.c
+index a2b16b08cbbff7..384e4672998e55 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -630,7 +630,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 		if (PageCompound(page)) {
+ 			const unsigned int order = compound_order(page);
+ 
+-			if (blockpfn + (1UL << order) <= end_pfn) {
++			if ((order <= MAX_PAGE_ORDER) &&
++			    (blockpfn + (1UL << order) <= end_pfn)) {
+ 				blockpfn += (1UL << order) - 1;
+ 				page += (1UL << order) - 1;
+ 				nr_scanned += (1UL << order) - 1;
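The mm/compaction.c hunk hardens isolate_freepages_block() against a racy
read: compound_order() is sampled without the zone lock, so the value can be
transient garbage, and shifting by a bogus order could move blockpfn far
outside the block being scanned. With the added bound, an implausible order
is simply not trusted. Annotated form of the fixed branch:

	const unsigned int order = compound_order(page);	/* racy, unlocked read */

	/* Only step over the compound page if the order is one the
	 * page allocator could actually have produced. */
	if ((order <= MAX_PAGE_ORDER) &&
	    (blockpfn + (1UL << order) <= end_pfn)) {
		blockpfn += (1UL << order) - 1;
		page += (1UL << order) - 1;
		nr_scanned += (1UL << order) - 1;
	}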
+diff --git a/mm/gup.c b/mm/gup.c
+index 3b75e631f36916..569a4d82012d2a 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2323,13 +2323,13 @@ static void pofs_unpin(struct pages_or_folios *pofs)
+ /*
+  * Returns the number of collected folios. Return value is always >= 0.
+  */
+-static unsigned long collect_longterm_unpinnable_folios(
++static void collect_longterm_unpinnable_folios(
+ 		struct list_head *movable_folio_list,
+ 		struct pages_or_folios *pofs)
+ {
+-	unsigned long i, collected = 0;
+ 	struct folio *prev_folio = NULL;
+ 	bool drain_allow = true;
++	unsigned long i;
+ 
+ 	for (i = 0; i < pofs->nr_entries; i++) {
+ 		struct folio *folio = pofs_get_folio(pofs, i);
+@@ -2341,8 +2341,6 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 		if (folio_is_longterm_pinnable(folio))
+ 			continue;
+ 
+-		collected++;
+-
+ 		if (folio_is_device_coherent(folio))
+ 			continue;
+ 
+@@ -2364,8 +2362,6 @@ static unsigned long collect_longterm_unpinnable_folios(
+ 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ 				    folio_nr_pages(folio));
+ 	}
+-
+-	return collected;
+ }
+ 
+ /*
+@@ -2442,11 +2438,9 @@ static long
+ check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
+ {
+ 	LIST_HEAD(movable_folio_list);
+-	unsigned long collected;
+ 
+-	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
+-						       pofs);
+-	if (!collected)
++	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
++	if (list_empty(&movable_folio_list))
+ 		return 0;
+ 
+ 	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
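The mm/gup.c change drops the collected counter because it could be nonzero
even when nothing was actually placed on the movable list (device-coherent
folios are handled in place, and folios that fail isolation are skipped), so
the counter and the list could disagree. Keying the early return on the list
itself ties the decision to what the migration step will really operate on;
the calling pattern after the change, in sketch form:

	LIST_HEAD(movable_folio_list);

	collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
	if (list_empty(&movable_folio_list))
		return 0;	/* nothing was isolated, nothing to migrate */

	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);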
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index eaaec19caa7cee..b2294dc1dd6e13 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1394,8 +1394,7 @@ static unsigned long available_huge_pages(struct hstate *h)
+ 
+ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 				struct vm_area_struct *vma,
+-				unsigned long address, int avoid_reserve,
+-				long chg)
++				unsigned long address, long chg)
+ {
+ 	struct folio *folio = NULL;
+ 	struct mempolicy *mpol;
+@@ -1411,10 +1410,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
+ 		goto err;
+ 
+-	/* If reserves cannot be used, ensure enough pages are in the pool */
+-	if (avoid_reserve && !available_huge_pages(h))
+-		goto err;
+-
+ 	gfp_mask = htlb_alloc_mask(h);
+ 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+ 
+@@ -1430,7 +1425,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
+ 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+ 							nid, nodemask);
+ 
+-	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
++	if (folio && vma_has_reserves(vma, chg)) {
+ 		folio_set_hugetlb_restore_reserve(folio);
+ 		h->resv_huge_pages--;
+ 	}
+@@ -3007,17 +3002,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
+ 		if (gbl_chg < 0)
+ 			goto out_end_reservation;
+-
+-		/*
+-		 * Even though there was no reservation in the region/reserve
+-		 * map, there could be reservations associated with the
+-		 * subpool that can be used.  This would be indicated if the
+-		 * return value of hugepage_subpool_get_pages() is zero.
+-		 * However, if avoid_reserve is specified we still avoid even
+-		 * the subpool reservations.
+-		 */
+-		if (avoid_reserve)
+-			gbl_chg = 1;
+ 	}
+ 
+ 	/* If this allocation is not consuming a reservation, charge it now.
+@@ -3040,7 +3024,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ 	 * from the global free pool (global change).  gbl_chg == 0 indicates
+ 	 * a reservation exists for the allocation.
+ 	 */
+-	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
++	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
+ 	if (!folio) {
+ 		spin_unlock_irq(&hugetlb_lock);
+ 		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+@@ -3289,7 +3273,7 @@ static void __init gather_bootmem_prealloc(void)
+ 		.thread_fn	= gather_bootmem_prealloc_parallel,
+ 		.fn_arg		= NULL,
+ 		.start		= 0,
+-		.size		= num_node_state(N_MEMORY),
++		.size		= nr_node_ids,
+ 		.align		= 1,
+ 		.min_chunk	= 1,
+ 		.max_threads	= num_node_state(N_MEMORY),
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 67fc321db79b7e..102048821c222a 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -21,6 +21,7 @@
+ #include <linux/log2.h>
+ #include <linux/memblock.h>
+ #include <linux/moduleparam.h>
++#include <linux/nodemask.h>
+ #include <linux/notifier.h>
+ #include <linux/panic_notifier.h>
+ #include <linux/random.h>
+@@ -1084,6 +1085,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+ 	 * properties (e.g. reside in DMAable memory).
+ 	 */
+ 	if ((flags & GFP_ZONEMASK) ||
++	    ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
+ 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
+ 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
+ 		return NULL;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 820ba3b5cbfc8f..bb7d61fc4da308 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1689,7 +1689,7 @@ static void kmemleak_scan(void)
+ 			unsigned long phys = object->pointer;
+ 
+ 			if (PHYS_PFN(phys) < min_low_pfn ||
+-			    PHYS_PFN(phys + object->size) >= max_low_pfn)
++			    PHYS_PFN(phys + object->size) > max_low_pfn)
+ 				__paint_it(object, KMEMLEAK_BLACK);
+ 		}
+ 
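The kmemleak one-character fix is an off-by-one on a half-open interval:
phys + object->size is the first byte after the object, so an object ending
exactly at the lowmem boundary has PHYS_PFN(phys + size) == max_low_pfn and
still lies entirely within lowmem; the old ">=" wrongly painted such objects
black. The predicate, spelled out as a standalone helper:

	/* [start_pfn, end_pfn) lies in lowmem iff both bounds hold;
	 * end_pfn is exclusive, so equality with max_low_pfn is fine. */
	static int object_in_lowmem(unsigned long start_pfn, unsigned long end_pfn,
				    unsigned long min_low_pfn, unsigned long max_low_pfn)
	{
		return start_pfn >= min_low_pfn && end_pfn <= max_low_pfn;
	}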
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index b1ec5ece067e16..c22175120f5ddb 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1053,7 +1053,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 	struct folio_batch free_folios;
+ 	LIST_HEAD(ret_folios);
+ 	LIST_HEAD(demote_folios);
+-	unsigned int nr_reclaimed = 0;
++	unsigned int nr_reclaimed = 0, nr_demoted = 0;
+ 	unsigned int pgactivate = 0;
+ 	bool do_demote_pass;
+ 	struct swap_iocb *plug = NULL;
+@@ -1522,8 +1522,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
+ 	/* 'folio_list' is always empty here */
+ 
+ 	/* Migrate folios selected for demotion */
+-	stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
+-	nr_reclaimed += stat->nr_demoted;
++	nr_demoted = demote_folio_list(&demote_folios, pgdat);
++	nr_reclaimed += nr_demoted;
++	stat->nr_demoted += nr_demoted;
+ 	/* Folios that could not be demoted are still in @demote_folios */
+ 	if (!list_empty(&demote_folios)) {
+ 		/* Folios which weren't demoted go back on @folio_list */
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 3d2553dcdb1b3c..46ea0bee2259f8 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -710,12 +710,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+ {
+ 	switch (chan->scid) {
+ 	case L2CAP_CID_ATT:
+-		if (mtu < L2CAP_LE_MIN_MTU)
++		if (mtu && mtu < L2CAP_LE_MIN_MTU)
+ 			return false;
+ 		break;
+ 
+ 	default:
+-		if (mtu < L2CAP_DEFAULT_MIN_MTU)
++		if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU)
+ 			return false;
+ 	}
+ 
+@@ -1888,7 +1888,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ 	chan = l2cap_chan_create();
+ 	if (!chan) {
+ 		sk_free(sk);
+-		sock->sk = NULL;
++		if (sock)
++			sock->sk = NULL;
+ 		return NULL;
+ 	}
+ 
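In the l2cap_sock.c hunks, an MTU of 0 is treated as "not specified" rather
than as a value to range-check, so only an explicit nonzero MTU below the
spec minimum is rejected; the second hunk additionally lets
l2cap_sock_alloc() tolerate a NULL socket on its error path, since it can be
called without one. The validation now reads:

	/* mtu == 0 means "use the default"; only reject explicit
	 * values below the per-channel minimum. */
	if (mtu && mtu < L2CAP_LE_MIN_MTU)
		return false;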
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index de47ad999d7b64..71dda10f6a24f0 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -5519,10 +5519,16 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ {
+ 	struct mgmt_rp_remove_adv_monitor rp;
+ 	struct mgmt_pending_cmd *cmd = data;
+-	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
++	struct mgmt_cp_remove_adv_monitor *cp;
++
++	if (status == -ECANCELED ||
++	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
++		return;
+ 
+ 	hci_dev_lock(hdev);
+ 
++	cp = cmd->param;
++
+ 	rp.monitor_handle = cp->monitor_handle;
+ 
+ 	if (!status)
+@@ -5540,6 +5546,10 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
+ static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
+ {
+ 	struct mgmt_pending_cmd *cmd = data;
++
++	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
++		return -ECANCELED;
++
+ 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
+ 	u16 handle = __le16_to_cpu(cp->monitor_handle);
+ 
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 34bee42e12470c..7609ce2b2c5e2e 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -993,7 +993,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+ 		return rc;
+ 
+ 	/* Nonzero ring with RSS only makes sense if NIC adds them together */
+-	if (cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS &&
++	if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS &&
+ 	    !ops->cap_rss_rxnfc_adds &&
+ 	    ethtool_get_flow_spec_ring(info.fs.ring_cookie))
+ 		return -EINVAL;
+diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
+index 7cb106b590aba1..58df9ad02ce8a1 100644
+--- a/net/ethtool/rss.c
++++ b/net/ethtool/rss.c
+@@ -107,6 +107,8 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
+ 	u32 total_size, indir_bytes;
+ 	u8 *rss_config;
+ 
++	data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
++
+ 	ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
+ 	if (!ctx)
+ 		return -ENOENT;
+@@ -153,7 +155,6 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
+ 		if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
+ 			return -EOPNOTSUPP;
+ 
+-		data->no_key_fields = !ops->rxfh_per_ctx_key;
+ 		return rss_prepare_ctx(request, dev, data, info);
+ 	}
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c472c9a57cf688..a9bb9ce5438eaa 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1141,9 +1141,9 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ 		const int hlen = skb_network_header_len(skb) +
+ 				 sizeof(struct udphdr);
+ 
+-		if (hlen + cork->gso_size > cork->fragsize) {
++		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
+ 			kfree_skb(skb);
+-			return -EINVAL;
++			return -EMSGSIZE;
+ 		}
+ 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ 			kfree_skb(skb);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index b974116152dd3f..3a3c7639d1d615 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1389,9 +1389,9 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ 		const int hlen = skb_network_header_len(skb) +
+ 				 sizeof(struct udphdr);
+ 
+-		if (hlen + cork->gso_size > cork->fragsize) {
++		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
+ 			kfree_skb(skb);
+-			return -EINVAL;
++			return -EMSGSIZE;
+ 		}
+ 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ 			kfree_skb(skb);
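The twin UDP hunks (IPv4 and IPv6) relax the GSO sanity check: the
header-plus-segment bound is now computed with min(datalen, gso_size), so a
send whose whole payload fits in a single segment no longer fails merely
because the configured gso_size would not fit, and the error code becomes
EMSGSIZE instead of EINVAL. A hypothetical userspace demonstration (socket
setup elided; fd, buf and mss are assumptions):

	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef UDP_SEGMENT
	#define UDP_SEGMENT 103			/* from linux/udp.h */
	#endif

	void demo(int fd, const char *buf, int mss)
	{
		int gso = mss + 1;		/* gso_size larger than one MSS */

		setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso, sizeof(gso));
		send(fd, buf, mss, 0);		/* datalen <= MSS: plain UDP, no GSO */
		send(fd, buf, mss + 1, 0);	/* MSS < datalen < gso_size: -EMSGSIZE */
	}

The two new cases in the udpgso selftests further down in this patch exercise
exactly these paths.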
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index bf276eaf933075..7891a537bddd11 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -1385,6 +1385,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 		nd->state = ncsi_dev_state_probe_package;
+ 		break;
+ 	case ncsi_dev_state_probe_package:
++		if (ndp->package_probe_id >= 8) {
++			/* Last package probed, finishing */
++			ndp->flags |= NCSI_DEV_PROBED;
++			break;
++		}
++
+ 		ndp->pending_req_num = 1;
+ 
+ 		nca.type = NCSI_PKT_CMD_SP;
+@@ -1501,13 +1507,8 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
+ 		if (ret)
+ 			goto error;
+ 
+-		/* Probe next package */
++		/* Probe next package after receiving response */
+ 		ndp->package_probe_id++;
+-		if (ndp->package_probe_id >= 8) {
+-			/* Probe finished */
+-			ndp->flags |= NCSI_DEV_PROBED;
+-			break;
+-		}
+ 		nd->state = ncsi_dev_state_probe_package;
+ 		ndp->active_package = NULL;
+ 		break;
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index de175318a3a0f3..082ab66f120b73 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -542,6 +542,8 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
+ 
+ 	pr_debug("pipe created=%d\n", pipe);
+ 
++	if (pipe >= NCI_HCI_MAX_PIPES)
++		pipe = NCI_HCI_INVALID_PIPE;
+ 	return pipe;
+ }
+ 
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 72c65d938a150e..a4a668b88a8f27 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -701,11 +701,9 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct net_device *dev;
+ 	ax25_address *source;
+ 	ax25_uid_assoc *user;
++	int err = -EINVAL;
+ 	int n;
+ 
+-	if (!sock_flag(sk, SOCK_ZAPPED))
+-		return -EINVAL;
+-
+ 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
+ 		return -EINVAL;
+ 
+@@ -718,8 +716,15 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
+ 		return -EINVAL;
+ 
+-	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
+-		return -EADDRNOTAVAIL;
++	lock_sock(sk);
++
++	if (!sock_flag(sk, SOCK_ZAPPED))
++		goto out_release;
++
++	err = -EADDRNOTAVAIL;
++	dev = rose_dev_get(&addr->srose_addr);
++	if (!dev)
++		goto out_release;
+ 
+ 	source = &addr->srose_call;
+ 
+@@ -730,7 +735,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	} else {
+ 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
+ 			dev_put(dev);
+-			return -EACCES;
++			err = -EACCES;
++			goto out_release;
+ 		}
+ 		rose->source_call   = *source;
+ 	}
+@@ -753,8 +759,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	rose_insert_socket(sk);
+ 
+ 	sock_reset_flag(sk, SOCK_ZAPPED);
+-
+-	return 0;
++	err = 0;
++out_release:
++	release_sock(sk);
++	return err;
+ }
+ 
+ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index d0fd37bdcfe9c8..6b036c0564c7a8 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -567,6 +567,7 @@ enum rxrpc_call_flag {
+ 	RXRPC_CALL_EXCLUSIVE,		/* The call uses a once-only connection */
+ 	RXRPC_CALL_RX_IS_IDLE,		/* recvmsg() is idle - send an ACK */
+ 	RXRPC_CALL_RECVMSG_READ_ALL,	/* recvmsg() read all of the received data */
++	RXRPC_CALL_CONN_CHALLENGING,	/* The connection is being challenged */
+ };
+ 
+ /*
+@@ -587,7 +588,6 @@ enum rxrpc_call_state {
+ 	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
+ 	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
+ 	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
+-	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
+ 	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
+ 	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
+ 	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index f9e983a12c1492..e379a2a9375ae0 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -22,7 +22,6 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
+ 	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
+ 	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
+ 	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
+-	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
+ 	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
+ 	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
+ 	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
+@@ -453,17 +452,16 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
+ 	call->cong_tstamp	= skb->tstamp;
+ 
+ 	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
+-	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
++	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ 
+ 	spin_lock(&conn->state_lock);
+ 
+ 	switch (conn->state) {
+ 	case RXRPC_CONN_SERVICE_UNSECURED:
+ 	case RXRPC_CONN_SERVICE_CHALLENGING:
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
++		__set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags);
+ 		break;
+ 	case RXRPC_CONN_SERVICE:
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+ 		break;
+ 
+ 	case RXRPC_CONN_ABORTED:
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 2a1396cd892f30..c4eb7986efddf8 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -222,10 +222,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn)
+  */
+ static void rxrpc_call_is_secure(struct rxrpc_call *call)
+ {
+-	if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
+-		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
++	if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
+ 		rxrpc_notify_socket(call);
+-	}
+ }
+ 
+ /*
+@@ -266,6 +264,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
+ 			 * we've already received the packet, put it on the
+ 			 * front of the queue.
+ 			 */
++			sp->conn = rxrpc_get_connection(conn, rxrpc_conn_get_poke_secured);
+ 			skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
+ 			rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
+ 			skb_queue_head(&conn->local->rx_queue, skb);
+@@ -431,14 +430,16 @@ void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
+ 	if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
+ 		rxrpc_abort_calls(conn);
+ 
+-	switch (skb->mark) {
+-	case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+-		if (conn->state != RXRPC_CONN_SERVICE)
+-			break;
++	if (skb) {
++		switch (skb->mark) {
++		case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
++			if (conn->state != RXRPC_CONN_SERVICE)
++				break;
+ 
+-		for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+-			rxrpc_call_is_secure(conn->channels[loop].call);
+-		break;
++			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
++				rxrpc_call_is_secure(conn->channels[loop].call);
++			break;
++		}
+ 	}
+ 
+ 	/* Process delayed ACKs whose time has come. */
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index 694c4df7a1a31e..88b4aab5a0913a 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -67,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
+ 		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
+ 		INIT_LIST_HEAD(&conn->proc_link);
+ 		INIT_LIST_HEAD(&conn->link);
++		INIT_LIST_HEAD(&conn->attend_link);
+ 		mutex_init(&conn->security_lock);
+ 		mutex_init(&conn->tx_data_alloc_lock);
+ 		skb_queue_head_init(&conn->rx_queue);
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 16d49a861dbb58..6a075a7c190db3 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -573,7 +573,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb
+ 		rxrpc_propose_delay_ACK(call, sp->hdr.serial,
+ 					rxrpc_propose_ack_input_data);
+ 	}
+-	if (notify) {
++	if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags)) {
+ 		trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
+ 		rxrpc_notify_socket(call);
+ 	}
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 6abb8eec1b2b12..708a1484dadacb 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -655,7 +655,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 	} else {
+ 		switch (rxrpc_call_state(call)) {
+ 		case RXRPC_CALL_CLIENT_AWAIT_CONN:
+-		case RXRPC_CALL_SERVER_SECURING:
++		case RXRPC_CALL_SERVER_RECV_REQUEST:
+ 			if (p.command == RXRPC_CMD_SEND_ABORT)
+ 				break;
+ 			fallthrough;
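Taken together, the rxrpc hunks replace the dedicated
RXRPC_CALL_SERVER_SECURING call state with a per-call flag: an incoming call
now enters SERVER_RECV_REQUEST immediately, so DATA packets arriving while
the connection-level security challenge is still in flight are queued instead
of being refused, and recvmsg() wakeups are simply held back until the
challenge completes. A condensed view of the gating across the three files
(names as in the hunks above; this is a sketch, not compilable code):

	/* call_object.c: mark the call while the connection is unsecured */
	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED ||
	    conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
		__set_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags);

	/* input.c: accept and queue data, but delay the wakeup */
	if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
		rxrpc_notify_socket(call);

	/* conn_event.c: challenge done, release the pending wakeup */
	if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
		rxrpc_notify_socket(call);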
+diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
+index b50b2c2cc09bc6..e6bfd39ff33965 100644
+--- a/net/sched/sch_fifo.c
++++ b/net/sched/sch_fifo.c
+@@ -40,6 +40,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+ 	unsigned int prev_backlog;
+ 
++	if (unlikely(READ_ONCE(sch->limit) == 0))
++		return qdisc_drop(skb, sch, to_free);
++
+ 	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
+ 		return qdisc_enqueue_tail(skb, sch);
+ 
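The sch_fifo guard covers a degenerate configuration: with "limit 0" the
condition sch->q.qlen < limit can never hold, and the tail-drop path below it
assumes there is a queued packet to replace. Dropping the incoming skb up
front keeps the qdisc consistent:

	/* A zero limit can hold nothing; drop immediately instead of
	 * running the tail-drop logic against an empty queue. */
	if (unlikely(READ_ONCE(sch->limit) == 0))
		return qdisc_drop(skb, sch, to_free);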
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 71ec9986ed37f4..fdd79d3ccd8ce7 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -749,9 +749,9 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 				if (err != NET_XMIT_SUCCESS) {
+ 					if (net_xmit_drop_count(err))
+ 						qdisc_qstats_drop(sch);
+-					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 					sch->qstats.backlog -= pkt_len;
+ 					sch->q.qlen--;
++					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 				}
+ 				goto tfifo_dequeue;
+ 			}
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 43c3f1c971b8fd..c524421ec65252 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -2293,8 +2293,8 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+ 	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+ 
+ 	/* Verify the supplied size values */
+-	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
+-		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
++	if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX ||
++		     size != keylen + sizeof(struct tipc_aead_key))) {
+ 		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
+ 		goto exit;
+ 	}
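The tipc/crypto.c reorder matters because both operands are unsigned and
attacker-influenced: evaluating size != keylen + sizeof(struct tipc_aead_key)
first lets a huge keylen wrap the addition and slip past the equality test,
after which keylen is used for copying. Checking the bound before doing
arithmetic with the value closes the hole. A standalone illustration of the
wrap:

	#include <stdio.h>

	int main(void)
	{
		unsigned int keylen = 0xfffffff0u;	/* hostile, oversized */
		unsigned int hdr = 24;			/* stand-in for sizeof(...) */

		/* Wraps modulo 2^32 and prints 8, so a crafted "size" of 8
		 * would have satisfied the equality check. */
		printf("keylen + hdr = %u\n", keylen + hdr);
		return 0;
	}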
+diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
+index 347049df556b14..c962029f96e1f1 100644
+--- a/rust/kernel/init.rs
++++ b/rust/kernel/init.rs
+@@ -854,7 +854,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
+     /// use kernel::{types::Opaque, init::pin_init_from_closure};
+     /// #[repr(C)]
+     /// struct RawFoo([u8; 16]);
+-    /// extern {
++    /// extern "C" {
+     ///     fn init_foo(_: *mut RawFoo);
+     /// }
+     ///
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 1d13cecc7cc780..04faf15ed316a9 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -130,7 +130,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
+ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+ KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ KBUILD_CFLAGS += -Wno-enum-compare-conditional
+-KBUILD_CFLAGS += -Wno-enum-enum-conversion
+ endif
+ 
+ endif
+@@ -154,6 +153,10 @@ KBUILD_CFLAGS += -Wno-missing-field-initializers
+ KBUILD_CFLAGS += -Wno-type-limits
+ KBUILD_CFLAGS += -Wno-shift-negative-value
+ 
++ifdef CONFIG_CC_IS_CLANG
++KBUILD_CFLAGS += -Wno-enum-enum-conversion
++endif
++
+ ifdef CONFIG_CC_IS_GCC
+ KBUILD_CFLAGS += -Wno-maybe-uninitialized
+ endif
+diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
+index 2f11c4f9c345a0..13eb8b3901b8fc 100644
+--- a/scripts/gdb/linux/cpus.py
++++ b/scripts/gdb/linux/cpus.py
+@@ -167,7 +167,7 @@ def get_current_task(cpu):
+             var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
+             return per_cpu(var_ptr, cpu).dereference()
+     elif utils.is_target_arch("aarch64"):
+-        current_task_addr = gdb.parse_and_eval("$SP_EL0")
++        current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
+         if (current_task_addr >> 63) != 0:
+             current_task = current_task_addr.cast(task_ptr_type)
+             return current_task.dereference()
+diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
+index 0d00ac3723b5e5..4fd6b6ab3e329d 100644
+--- a/scripts/generate_rust_target.rs
++++ b/scripts/generate_rust_target.rs
+@@ -165,6 +165,18 @@ fn has(&self, option: &str) -> bool {
+         let option = "CONFIG_".to_owned() + option;
+         self.0.contains_key(&option)
+     }
++
++    /// Is the rustc version at least `major.minor.patch`?
++    fn rustc_version_atleast(&self, major: u32, minor: u32, patch: u32) -> bool {
++        let check_version = 100000 * major + 100 * minor + patch;
++        let actual_version = self
++            .0
++            .get("CONFIG_RUSTC_VERSION")
++            .unwrap()
++            .parse::<u32>()
++            .unwrap();
++        check_version <= actual_version
++    }
+ }
+ 
+ fn main() {
+@@ -182,6 +194,9 @@ fn main() {
+         }
+     } else if cfg.has("X86_64") {
+         ts.push("arch", "x86_64");
++        if cfg.rustc_version_atleast(1, 86, 0) {
++            ts.push("rustc-abi", "x86-softfloat");
++        }
+         ts.push(
+             "data-layout",
+             "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
+@@ -215,6 +230,9 @@ fn main() {
+             panic!("32-bit x86 only works under UML");
+         }
+         ts.push("arch", "x86");
++        if cfg.rustc_version_atleast(1, 86, 0) {
++            ts.push("rustc-abi", "x86-softfloat");
++        }
+         ts.push(
+             "data-layout",
+             "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128",
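The new rustc_version_atleast() helper assumes CONFIG_RUSTC_VERSION carries
the kernel's numeric encoding of 100000 * major + 100 * minor + patch. For
the rustc 1.86.0 threshold used in both x86 branches that is 108600, so the
"rustc-abi" = "x86-softfloat" key is emitted exactly when the configured
compiler reports that value or higher. The encoding, for reference:

	/* Same scheme the helper compares against: 1.86.0 -> 108600. */
	static unsigned int rustc_encode(unsigned int major, unsigned int minor,
					 unsigned int patch)
	{
		return 100000 * major + 100 * minor + patch;
	}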
+diff --git a/security/keys/trusted-keys/trusted_dcp.c b/security/keys/trusted-keys/trusted_dcp.c
+index e908c53a803c4b..7b6eb655df0cbf 100644
+--- a/security/keys/trusted-keys/trusted_dcp.c
++++ b/security/keys/trusted-keys/trusted_dcp.c
+@@ -201,12 +201,16 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
+ {
+ 	struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
+ 	int blen, ret;
+-	u8 plain_blob_key[AES_KEYSIZE_128];
++	u8 *plain_blob_key;
+ 
+ 	blen = calc_blob_len(p->key_len);
+ 	if (blen > MAX_BLOB_SIZE)
+ 		return -E2BIG;
+ 
++	plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
++	if (!plain_blob_key)
++		return -ENOMEM;
++
+ 	b->fmt_version = DCP_BLOB_VERSION;
+ 	get_random_bytes(b->nonce, AES_KEYSIZE_128);
+ 	get_random_bytes(plain_blob_key, AES_KEYSIZE_128);
+@@ -229,7 +233,8 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
+ 	ret = 0;
+ 
+ out:
+-	memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
++	memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
++	kfree(plain_blob_key);
+ 
+ 	return ret;
+ }
+@@ -238,7 +243,7 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ {
+ 	struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
+ 	int blen, ret;
+-	u8 plain_blob_key[AES_KEYSIZE_128];
++	u8 *plain_blob_key = NULL;
+ 
+ 	if (b->fmt_version != DCP_BLOB_VERSION) {
+ 		pr_err("DCP blob has bad version: %i, expected %i\n",
+@@ -256,6 +261,12 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ 		goto out;
+ 	}
+ 
++	plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
++	if (!plain_blob_key) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
+ 	ret = decrypt_blob_key(b->blob_key, plain_blob_key);
+ 	if (ret) {
+ 		pr_err("Unable to decrypt blob key: %i\n", ret);
+@@ -271,7 +282,10 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
+ 
+ 	ret = 0;
+ out:
+-	memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
++	if (plain_blob_key) {
++		memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
++		kfree(plain_blob_key);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
+index 25310468bcddff..8e1ffd70b18ab4 100644
+--- a/security/safesetid/securityfs.c
++++ b/security/safesetid/securityfs.c
+@@ -143,6 +143,9 @@ static ssize_t handle_policy_update(struct file *file,
+ 	char *buf, *p, *end;
+ 	int err;
+ 
++	if (len >= KMALLOC_MAX_SIZE)
++		return -EINVAL;
++
+ 	pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL);
+ 	if (!pol)
+ 		return -ENOMEM;
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index 5c7b059a332aac..972664962e8f67 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -2665,7 +2665,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ 
+ 		if (head->w.avail >= head->writebuf_size - 1) {
+ 			const int len = head->writebuf_size * 2;
+-			char *cp = kzalloc(len, GFP_NOFS);
++			char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN);
+ 
+ 			if (!cp) {
+ 				error = -ENOMEM;
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 84393f4f429dff..8923813ce4247d 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -80,7 +80,11 @@ static int compare_input_type(const void *ap, const void *bp)
+ 
+ 	/* In case one has boost and the other one has not,
+ 	   pick the one with boost first. */
+-	return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
++	if (a->has_boost_on_pin != b->has_boost_on_pin)
++		return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
++
++	/* Keep the original order */
++	return a->order - b->order;
+ }
+ 
+ /* Reorder the surround channels
+@@ -400,6 +404,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
+ 	reorder_outputs(cfg->speaker_outs, cfg->speaker_pins);
+ 
+ 	/* sort inputs in the order of AUTO_PIN_* type */
++	for (i = 0; i < cfg->num_inputs; i++)
++		cfg->inputs[i].order = i;
+ 	sort(cfg->inputs, cfg->num_inputs, sizeof(cfg->inputs[0]),
+ 	     compare_input_type, NULL);
+ 
+diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h
+index 579b11beac718e..87af3d8c02f7f6 100644
+--- a/sound/pci/hda/hda_auto_parser.h
++++ b/sound/pci/hda/hda_auto_parser.h
+@@ -37,6 +37,7 @@ struct auto_pin_cfg_item {
+ 	unsigned int is_headset_mic:1;
+ 	unsigned int is_headphone_mic:1; /* Mic-only in headphone jack */
+ 	unsigned int has_boost_on_pin:1;
++	int order;
+ };
+ 
+ struct auto_pin_cfg;
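The hda_auto_parser change works around the kernel's sort() not being stable:
inputs of equal type could swap places from one boot to the next, reshuffling
the resulting controls. Stamping each element with its original index before
sorting and using that index as the final tie-breaker is the standard recipe
for a stable order on top of an unstable sort:

	struct item { int type; int order; };

	static int cmp_stable(const void *ap, const void *bp)
	{
		const struct item *a = ap, *b = bp;

		if (a->type != b->type)
			return a->type - b->type;	/* primary key (small values) */
		return a->order - b->order;		/* original position on ties */
	}

	/* before sorting: for (i = 0; i < n; i++) items[i].order = i; */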
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2d523b53b3d731..6c352602987bac 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7485,6 +7485,16 @@ static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec,
+ 	spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
+ }
+ 
++/*
++ * Clear COEF 0x0d (PCBEEP passthrough) bit 0x40 where BIOS sets it wrongly
++ * at PM resume
++ */
++static void alc283_fixup_dell_hp_resume(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_INIT)
++		alc_write_coef_idx(codec, 0xd, 0x2800);
++}
+ 
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+@@ -7785,6 +7795,7 @@ enum {
+ 	ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
+ 	ALC233_FIXUP_MEDION_MTL_SPK,
+ 	ALC294_FIXUP_BASS_SPEAKER_15,
++	ALC283_FIXUP_DELL_HP_RESUME,
+ };
+ 
+ /* A special fixup for Lenovo C940 and Yoga Duet 7;
+@@ -10117,6 +10128,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc294_fixup_bass_speaker_15,
+ 	},
++	[ALC283_FIXUP_DELL_HP_RESUME] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc283_fixup_dell_hp_resume,
++	},
+ };
+ 
+ static const struct hda_quirk alc269_fixup_tbl[] = {
+@@ -10177,6 +10192,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1028, 0x0604, "Dell Venue 11 Pro 7130", ALC283_FIXUP_DELL_HP_RESUME),
+ 	SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
+@@ -10389,6 +10405,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
++	SND_PCI_QUIRK(0x103c, 0x887c, "HP Laptop 14s-fq1xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED),
+@@ -10889,7 +10906,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ 	HDA_CODEC_QUIRK(0x17aa, 0x386e, "Legion Y9000X 2022 IAH7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x386e, "Yoga Pro 7 14ARP8", ALC285_FIXUP_SPEAKER2_TO_DAC1),
+-	HDA_CODEC_QUIRK(0x17aa, 0x386f, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C),
++	HDA_CODEC_QUIRK(0x17aa, 0x38a8, "Legion Pro 7 16ARX8H", ALC287_FIXUP_TAS2781_I2C), /* this must match before PCI SSID 17aa:386f below */
+ 	SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
+ 	SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
+@@ -10964,6 +10981,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1849, 0x0269, "Positivo Master C6400", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
+ 	SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL),
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index c7590d4989bba5..80352117827914 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -105,7 +105,7 @@ config SND_SOC_AMD_ACP6x
+ config SND_SOC_AMD_YC_MACH
+ 	tristate "AMD YC support for DMIC"
+ 	select SND_SOC_DMIC
+-	depends on SND_SOC_AMD_ACP6x
++	depends on SND_SOC_AMD_ACP6x && ACPI
+ 	help
+ 	  This option enables machine driver for Yellow Carp platform
+ 	  using dmic. ACP IP has PDM Decoder block with DMA controller.
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index ecf57a6cb7c37d..b16587d8f97a89 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -304,6 +304,34 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
++		}
++	},
+ 	{
+ 		.driver_data = &acp6x_card,
+ 		.matches = {
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 5554ad4e7c7877..65e55c46fb0645 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -641,9 +641,10 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "380E")
++			DMI_MATCH(DMI_PRODUCT_NAME, "83HM")
+ 		},
+-		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
++		.driver_data = (void *)(SOC_SDW_SIDECAR_AMPS |
++					SOC_SDW_CODEC_MIC),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
+index 6b442b1014155f..b7874dd42b2a2b 100644
+--- a/sound/soc/renesas/rz-ssi.c
++++ b/sound/soc/renesas/rz-ssi.c
+@@ -414,8 +414,12 @@ static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
+ 	rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
+ 
+ 	/* Cancel all remaining DMA transactions */
+-	if (rz_ssi_is_dma_enabled(ssi))
+-		dmaengine_terminate_async(strm->dma_ch);
++	if (rz_ssi_is_dma_enabled(ssi)) {
++		if (ssi->playback.dma_ch)
++			dmaengine_terminate_async(ssi->playback.dma_ch);
++		if (ssi->capture.dma_ch)
++			dmaengine_terminate_async(ssi->capture.dma_ch);
++	}
+ 
+ 	rz_ssi_set_idle(ssi);
+ 
+@@ -522,6 +526,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
+ 	sample_space = strm->fifo_sample_size;
+ 	ssifsr = rz_ssi_reg_readl(ssi, SSIFSR);
+ 	sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK;
++	if (sample_space < 0)
++		return -EINVAL;
+ 
+ 	/* Only add full frames at a time */
+ 	while (frames_left && (sample_space >= runtime->channels)) {
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 1150455619aa49..88b3ad5a255205 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -38,7 +38,6 @@ static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd,
+ 	switch (ret) {
+ 	case -EPROBE_DEFER:
+ 	case -ENOTSUPP:
+-	case -EINVAL:
+ 		break;
+ 	default:
+ 		dev_err(rtd->dev,
+@@ -986,7 +985,13 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
+ 	}
+ 
+ out:
+-	return soc_pcm_ret(rtd, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ /* PCM prepare ops for non-DPCM streams */
+@@ -998,6 +1003,13 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
+ 	snd_soc_dpcm_mutex_lock(rtd);
+ 	ret = __soc_pcm_prepare(rtd, substream);
+ 	snd_soc_dpcm_mutex_unlock(rtd);
++
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
+ 	return ret;
+ }
+ 
+@@ -2539,7 +2551,13 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ 		be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 	}
+ 
+-	return soc_pcm_ret(fe, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+@@ -2579,7 +2597,13 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
+ 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 	snd_soc_dpcm_mutex_unlock(fe);
+ 
+-	return soc_pcm_ret(fe, ret);
++	/*
++	 * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
++	 *
++	 * We don't want to log an error since we do not want to give userspace a way to do a
++	 * denial-of-service attack on the syslog / diskspace.
++	 */
++	return ret;
+ }
+ 
+ static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 0db2a3e554fb2f..da12aabc1bb856 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -503,6 +503,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 	int ret;
+ 	int i;
+ 
++	if (!w) {
++		dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
++			cpu_dai->name);
++		return -EINVAL;
++	}
++
+ 	ops = hda_dai_get_ops(substream, cpu_dai);
+ 	if (!ops) {
+ 		dev_err(cpu_dai->dev, "DAI widget ops not set\n");
+@@ -582,6 +588,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
+ 	 */
+ 	for_each_rtd_cpu_dais(rtd, i, dai) {
+ 		w = snd_soc_dai_get_widget(dai, substream->stream);
++		if (!w) {
++			dev_err(cpu_dai->dev,
++				"%s widget not found, check amp link num in the topology\n",
++				dai->name);
++			return -EINVAL;
++		}
+ 		ipc4_copier = widget_to_copier(w);
+ 		memcpy(&ipc4_copier->dma_config_tlv[cpu_dai_id], dma_config_tlv,
+ 		       sizeof(*dma_config_tlv));
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index f991785f727e9e..be689f6e10c81e 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -63,6 +63,11 @@ static int sdw_params_stream(struct device *dev,
+ 	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
+ 	struct snd_sof_dai_config_data data = { 0 };
+ 
++	if (!w) {
++		dev_err(dev, "%s widget not found, check amp link num in the topology\n",
++			d->name);
++		return -EINVAL;
++	}
+ 	data.dai_index = (params_data->link_id << 8) | d->id;
+ 	data.dai_data = params_data->alh_stream_id;
+ 	data.dai_node_id = data.dai_data;
+diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
+index ef5c4257844d13..20fe4f72b4afcc 100644
+--- a/tools/perf/bench/epoll-wait.c
++++ b/tools/perf/bench/epoll-wait.c
+@@ -420,7 +420,12 @@ static int cmpworker(const void *p1, const void *p2)
+ 
+ 	struct worker *w1 = (struct worker *) p1;
+ 	struct worker *w2 = (struct worker *) p2;
+-	return w1->tid > w2->tid;
++
++	if (w1->tid > w2->tid)
++		return 1;
++	if (w1->tid < w2->tid)
++		return -1;
++	return 0;
+ }
+ 
+ int bench_epoll_wait(int argc, const char **argv)
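The epoll-wait comparator returned the bare result of ">", i.e. only 0 or 1;
qsort() requires the full negative/zero/positive contract, so the old
ordering was effectively unspecified. Note the fix uses explicit comparisons
rather than w1->tid - w2->tid, since subtraction can overflow for extreme
values. A compact overflow-free idiom for integer keys:

	static int cmp_int(const void *p1, const void *p2)
	{
		int a = *(const int *)p1;
		int b = *(const int *)p2;

		return (a > b) - (a < b);	/* -1, 0 or 1, no overflow */
	}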
+diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
+index fe0f3fa5aab689..8a0fdff899271d 100644
+--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
++++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
+@@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+ int reject_with_rcu_read_lock(void *ctx)
+ {
+ 	bpf_rcu_read_lock();
+@@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
+ int reject_subprog_with_rcu_read_lock(void *ctx)
+ {
+ 	bpf_rcu_read_lock();
+diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
+index 885377e8360775..5269571cf7b57d 100644
+--- a/tools/testing/selftests/bpf/progs/preempt_lock.c
++++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
+@@ -6,7 +6,7 @@
+ #include "bpf_experimental.h"
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_1(struct __sk_buff *ctx)
+ {
+ 	bpf_preempt_disable();
+@@ -14,7 +14,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_2(struct __sk_buff *ctx)
+ {
+ 	bpf_preempt_disable();
+@@ -23,7 +23,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_3(struct __sk_buff *ctx)
+ {
+ 	bpf_preempt_disable();
+@@ -33,7 +33,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
+ {
+ 	bpf_preempt_disable();
+@@ -55,7 +55,7 @@ static __noinline void preempt_enable(void)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
+ {
+ 	preempt_disable();
+@@ -63,7 +63,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
+ {
+ 	preempt_disable();
+@@ -72,7 +72,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
+ }
+ 
+ SEC("?tc")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
+ int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
+ {
+ 	preempt_disable();
+diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+index 3f679de73229f3..25599eac9a7029 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
++++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+@@ -187,7 +187,7 @@ l0_%=:	r6 = r0;					\
+ 
+ SEC("cgroup/skb")
+ __description("spin_lock: test6 missing unlock")
+-__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region")
++__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_spin_lock-ed region")
+ __failure_unpriv __msg_unpriv("")
+ __naked void spin_lock_test6_missing_unlock(void)
+ {
+diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+index ca8a7edff3dda2..27e24e20749ffd 100755
+--- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py
++++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+@@ -252,6 +252,7 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
+         try:
+             # this targets queue 4, which doesn't exist
+             ntuple2 = ethtool_create(cfg, "-N", flow)
++            defer(ethtool, f"-N {cfg.ifname} delete {ntuple2}")
+         except CmdExitFailure:
+             pass
+         else:
+@@ -260,6 +261,7 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True):
+         ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 1 0")
+         # ntuple rule therefore targets queues 1 and 3
+         ntuple2 = ethtool_create(cfg, "-N", flow)
++        defer(ethtool, f"-N {cfg.ifname} delete {ntuple2}")
+         # should replace existing filter
+         ksft_eq(ntuple, ntuple2)
+         _send_traffic_check(cfg, port, ctx_ref, { 'target': (1, 3),
+diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
+index be4a30a0d02aef..9b44a091802cbb 100644
+--- a/tools/testing/selftests/net/ipsec.c
++++ b/tools/testing/selftests/net/ipsec.c
+@@ -227,7 +227,8 @@ static int rtattr_pack(struct nlmsghdr *nh, size_t req_sz,
+ 
+ 	attr->rta_len = RTA_LENGTH(size);
+ 	attr->rta_type = rta_type;
+-	memcpy(RTA_DATA(attr), payload, size);
++	if (payload)
++		memcpy(RTA_DATA(attr), payload, size);
+ 
+ 	return 0;
+ }
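The ipsec.c guard is about C semantics rather than plain defensiveness:
memcpy() with a NULL source is undefined behaviour even when the length is 0,
and an optimizing compiler may then assume the pointer is non-NULL and delete
later checks. Callers here legitimately pass a NULL payload with size 0 to
emit an empty attribute, so the copy is skipped in that case:

	attr->rta_len = RTA_LENGTH(size);
	attr->rta_type = rta_type;
	if (payload)		/* NULL with size == 0: valid empty attribute */
		memcpy(RTA_DATA(attr), payload, size);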
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 414addef9a4514..d240d02fa443a1 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1302,7 +1302,7 @@ int main_loop(void)
+ 		return ret;
+ 
+ 	if (cfg_truncate > 0) {
+-		xdisconnect(fd);
++		shutdown(fd, SHUT_WR);
+ 	} else if (--cfg_repeat > 0) {
+ 		xdisconnect(fd);
+ 
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index 3f2fca02fec53f..36ff28af4b1905 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -102,6 +102,19 @@ struct testcase testcases_v4[] = {
+ 		.gso_len = CONST_MSS_V4,
+ 		.r_num_mss = 1,
+ 	},
++	{
++		/* datalen <= MSS < gso_len: will fall back to no GSO */
++		.tlen = CONST_MSS_V4,
++		.gso_len = CONST_MSS_V4 + 1,
++		.r_num_mss = 0,
++		.r_len_last = CONST_MSS_V4,
++	},
++	{
++		/* MSS < datalen < gso_len: fail */
++		.tlen = CONST_MSS_V4 + 1,
++		.gso_len = CONST_MSS_V4 + 2,
++		.tfail = true,
++	},
+ 	{
+ 		/* send a single MSS + 1B */
+ 		.tlen = CONST_MSS_V4 + 1,
+@@ -205,6 +218,19 @@ struct testcase testcases_v6[] = {
+ 		.gso_len = CONST_MSS_V6,
+ 		.r_num_mss = 1,
+ 	},
++	{
++		/* datalen <= MSS < gso_len: will fall back to no GSO */
++		.tlen = CONST_MSS_V6,
++		.gso_len = CONST_MSS_V6 + 1,
++		.r_num_mss = 0,
++		.r_len_last = CONST_MSS_V6,
++	},
++	{
++		/* MSS < datalen < gso_len: fail */
++		.tlen = CONST_MSS_V6 + 1,
++		.gso_len = CONST_MSS_V6 + 2,
++		.tfail = true
++	},
+ 	{
+ 		/* send a single MSS + 1B */
+ 		.tlen = CONST_MSS_V6 + 1,
+diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c
+index 245e9344932bc4..699a83f538a8e8 100644
+--- a/tools/tracing/rtla/src/osnoise.c
++++ b/tools/tracing/rtla/src/osnoise.c
+@@ -867,7 +867,7 @@ int osnoise_set_workload(struct osnoise_context *context, bool onoff)
+ 
+ 	retval = osnoise_options_set_option("OSNOISE_WORKLOAD", onoff);
+ 	if (retval < 0)
+-		return -1;
++		return -2;
+ 
+ 	context->opt_workload = onoff;
+ 
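The one-character osnoise.c change establishes a return convention that the
timerlat_hist.c and timerlat_top.c hunks below depend on. As implied by the
callers' "retval < -1" tests (reconstructed from this series, not stated in
it):

	/*   0  option written successfully
	 *  -1  OSNOISE_WORKLOAD option not available (old kernel: tolerated)
	 *  -2  option exists but writing it failed (hard error)
	 */
	retval = osnoise_set_workload(context, params->kernel_workload);
	if (retval < -1)
		err_msg("Failed to set OSNOISE_WORKLOAD option\n");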
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 4403cc4eba302a..90b33b0c4391bf 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -1100,12 +1100,15 @@ timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_param
+ 		}
+ 	}
+ 
+-	if (params->user_hist) {
+-		retval = osnoise_set_workload(tool->context, 0);
+-		if (retval) {
+-			err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+-			goto out_err;
+-		}
+-	}
++	/*
++	* Set workload according to type of thread if the kernel supports it.
++	* On kernels without support, user threads will have already failed
++	* on missing timerlat_fd, and kernel threads do not need it.
++	*/
++	retval = osnoise_set_workload(tool->context, params->kernel_workload);
++	if (retval < -1) {
++		err_msg("Failed to set OSNOISE_WORKLOAD option\n");
++		goto out_err;
+ 	}
+ 
+ 	return 0;
+@@ -1146,9 +1149,12 @@ static struct osnoise_tool
+ }
+ 
+ static int stop_tracing;
++static struct trace_instance *hist_inst = NULL;
+ static void stop_hist(int sig)
+ {
+ 	stop_tracing = 1;
++	if (hist_inst)
++		trace_instance_stop(hist_inst);
+ }
+ 
+ /*
+@@ -1195,6 +1201,12 @@ int timerlat_hist_main(int argc, char *argv[])
+ 	}
+ 
+ 	trace = &tool->trace;
++	/*
++	 * Save trace instance into global variable so that SIGINT can stop
++	 * the timerlat tracer.
++	 * Otherwise, rtla could loop indefinitely when overloaded.
++	 */
++	hist_inst = trace;
+ 
+ 	retval = enable_timerlat(trace);
+ 	if (retval) {
+@@ -1363,7 +1375,7 @@ int timerlat_hist_main(int argc, char *argv[])
+ 
+ 	return_value = 0;
+ 
+-	if (trace_is_off(&tool->trace, &record->trace)) {
++	if (trace_is_off(&tool->trace, &record->trace) && !stop_tracing) {
+ 		printf("rtla timerlat hit stop tracing\n");
+ 
+ 		if (!params->no_aa)
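
Stopping the tracer from the handler, not just setting a flag, is what
breaks the potential hang: on an overloaded system the kernel side can
produce trace data faster than rtla consumes it, so the reader loop may
never come back to check stop_tracing. The shape of the pattern
(stand-in names; trace_instance_stop() plays the tracer_off() role):

    #include <signal.h>

    struct tracer;                         /* stands in for trace_instance */
    void tracer_off(struct tracer *t);     /* stands in for trace_instance_stop() */

    static volatile sig_atomic_t stop_requested;
    static struct tracer *active;          /* set once before tracing starts */

    static void on_sigint(int sig)
    {
            stop_requested = 1;
            if (active)                    /* stop the producer so the */
                    tracer_off(active);    /* consumer can drain and exit */
    }
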
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 059b468981e4db..139eb58336c36f 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -851,12 +851,15 @@ timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *
+ 		}
+ 	}
+ 
+-	if (params->user_top) {
+-		retval = osnoise_set_workload(top->context, 0);
+-		if (retval) {
+-			err_msg("Failed to set OSNOISE_WORKLOAD option\n");
+-			goto out_err;
+-		}
++	/*
++	 * Set workload according to type of thread if the kernel supports it.
++	 * On kernels without support, user threads will have already failed
++	 * on missing timerlat_fd, and kernel threads do not need it.
++	 */
++	retval = osnoise_set_workload(top->context, params->kernel_workload);
++	if (retval < -1) {
++		err_msg("Failed to set OSNOISE_WORKLOAD option\n");
++		goto out_err;
+ 	}
+ 
+ 	if (isatty(STDOUT_FILENO) && !params->quiet)
+@@ -900,9 +903,12 @@ static struct osnoise_tool
+ }
+ 
+ static int stop_tracing;
++static struct trace_instance *top_inst = NULL;
+ static void stop_top(int sig)
+ {
+ 	stop_tracing = 1;
++	if (top_inst)
++		trace_instance_stop(top_inst);
+ }
+ 
+ /*
+@@ -950,6 +956,13 @@ int timerlat_top_main(int argc, char *argv[])
+ 	}
+ 
+ 	trace = &top->trace;
++	/*
++	 * Save trace instance into global variable so that SIGINT can stop
++	 * the timerlat tracer.
++	 * Otherwise, rtla could loop indefinitely when overloaded.
++	 */
++	top_inst = trace;
++
+ 
+ 	retval = enable_timerlat(trace);
+ 	if (retval) {
+@@ -1131,7 +1144,7 @@ int timerlat_top_main(int argc, char *argv[])
+ 
+ 	return_value = 0;
+ 
+-	if (trace_is_off(&top->trace, &record->trace)) {
++	if (trace_is_off(&top->trace, &record->trace) && !stop_tracing) {
+ 		printf("rtla timerlat hit stop tracing\n");
+ 
+ 		if (!params->no_aa)
+diff --git a/tools/tracing/rtla/src/trace.c b/tools/tracing/rtla/src/trace.c
+index 170a706248abff..440323a997c621 100644
+--- a/tools/tracing/rtla/src/trace.c
++++ b/tools/tracing/rtla/src/trace.c
+@@ -196,6 +196,14 @@ int trace_instance_start(struct trace_instance *trace)
+ 	return tracefs_trace_on(trace->inst);
+ }
+ 
++/*
++ * trace_instance_stop - stop tracing a given rtla instance
++ */
++int trace_instance_stop(struct trace_instance *trace)
++{
++	return tracefs_trace_off(trace->inst);
++}
++
+ /*
+  * trace_events_free - free a list of trace events
+  */
+diff --git a/tools/tracing/rtla/src/trace.h b/tools/tracing/rtla/src/trace.h
+index c7c92dc9a18a61..76e1b77291ba2a 100644
+--- a/tools/tracing/rtla/src/trace.h
++++ b/tools/tracing/rtla/src/trace.h
+@@ -21,6 +21,7 @@ struct trace_instance {
+ 
+ int trace_instance_init(struct trace_instance *trace, char *tool_name);
+ int trace_instance_start(struct trace_instance *trace);
++int trace_instance_stop(struct trace_instance *trace);
+ void trace_instance_destroy(struct trace_instance *trace);
+ 
+ struct trace_seq *get_trace_seq(void);


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-17 11:28 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-17 11:28 UTC (permalink / raw
  To: gentoo-commits

commit:     7683f271c09c73fe34236499e1ffebb82c77d5df
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 11:28:22 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 11:28:22 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7683f271

Remove redundant patch

Removed:
2980_GCC15-gnu23-to-gnu11-fix.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                         |   4 --
 2980_GCC15-gnu23-to-gnu11-fix.patch | 105 ------------------------------------
 2 files changed, 109 deletions(-)

diff --git a/0000_README b/0000_README
index beaf6885..c7f21cd0 100644
--- a/0000_README
+++ b/0000_README
@@ -87,10 +87,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:  2980_GCC15-gnu23-to-gnu11-fix.patch
-From:   https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
-Desc:   GCC 15 defaults to -std=gnu23. Hack in CSTD_FLAG to pass -std=gnu11 everywhere.
-
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/
 Desc:   libbpf: workaround -Wmaybe-uninitialized false positive

diff --git a/2980_GCC15-gnu23-to-gnu11-fix.patch b/2980_GCC15-gnu23-to-gnu11-fix.patch
deleted file mode 100644
index c74b6180..00000000
--- a/2980_GCC15-gnu23-to-gnu11-fix.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-GCC 15 defaults to -std=gnu23. While most of the kernel builds with -std=gnu11,
-some of it forgets to pass that flag. Hack in CSTD_FLAG to pass -std=gnu11
-everywhere.
-
-https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
---- a/Makefile
-+++ b/Makefile
-@@ -416,6 +416,8 @@ export KCONFIG_CONFIG
- # SHELL used by kbuild
- CONFIG_SHELL := sh
- 
-+CSTD_FLAG := -std=gnu11
-+
- HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
- HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
- HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
-@@ -437,7 +439,7 @@ HOSTRUSTC = rustc
- HOSTPKG_CONFIG	= pkg-config
- 
- KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
--			 -O2 -fomit-frame-pointer -std=gnu11
-+			 -O2 -fomit-frame-pointer $(CSTD_FLAG)
- KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
- KBUILD_USERLDFLAGS := $(USERLDFLAGS)
- 
-@@ -545,7 +547,7 @@ LINUXINCLUDE    := \
- KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
- 
- KBUILD_CFLAGS :=
--KBUILD_CFLAGS += -std=gnu11
-+KBUILD_CFLAGS += $(CSTD_FLAG)
- KBUILD_CFLAGS += -fshort-wchar
- KBUILD_CFLAGS += -funsigned-char
- KBUILD_CFLAGS += -fno-common
-@@ -589,7 +591,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
- export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
- export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
- export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
--export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
-+export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS CSTD_FLAG
- 
- export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
- export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
---- a/arch/arm64/kernel/vdso32/Makefile
-+++ b/arch/arm64/kernel/vdso32/Makefile
-@@ -65,7 +65,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-                -fno-strict-aliasing -fno-common \
-                -Werror-implicit-function-declaration \
-                -Wno-format-security \
--               -std=gnu11
-+               $(CSTD_FLAG)
- VDSO_CFLAGS  += -O2
- # Some useful compiler-dependent flags from top-level Makefile
- VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -47,7 +47,7 @@ endif
- 
- # How to compile the 16-bit code.  Note we always compile for -march=i386;
- # that way we can complain to the user if the CPU is insufficient.
--REALMODE_CFLAGS	:= -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
-+REALMODE_CFLAGS	:= $(CSTD_FLAG) -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
- 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
- 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
---- a/drivers/firmware/efi/libstub/Makefile
-+++ b/drivers/firmware/efi/libstub/Makefile
-@@ -7,7 +7,7 @@
- #
- 
- # non-x86 reuses KBUILD_CFLAGS, x86 does not
--cflags-y			:= $(KBUILD_CFLAGS)
-+cflags-y			:= $(KBUILD_CFLAGS) $(CSTD_FLAG)
- 
- cflags-$(CONFIG_X86_32)		:= -march=i386
- cflags-$(CONFIG_X86_64)		:= -mcmodel=small
-@@ -18,7 +18,7 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
- 				   $(call cc-disable-warning, address-of-packed-member) \
- 				   $(call cc-disable-warning, gnu) \
- 				   -fno-asynchronous-unwind-tables \
--				   $(CLANG_FLAGS)
-+				   $(CLANG_FLAGS) $(CSTD_FLAG)
- 
- # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
- # disable the stackleak plugin
-@@ -42,7 +42,7 @@ KBUILD_CFLAGS			:= $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
- 				   -ffreestanding \
- 				   -fno-stack-protector \
- 				   $(call cc-option,-fno-addrsig) \
--				   -D__DISABLE_EXPORTS
-+				   -D__DISABLE_EXPORTS $(CSTD_FLAG)
- 
- #
- # struct randomization only makes sense for Linux internal types, which the EFI
---- a/arch/x86/boot/compressed/Makefile
-+++ b/arch/x86/boot/compressed/Makefile
-@@ -24,7 +24,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
- # case of cross compiling, as it has the '--target=' flag, which is needed to
- # avoid errors with '-march=i386', and future flags may depend on the target to
- # be valid.
--KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
-+KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) $(CSTD_FLAG)
- KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
- KBUILD_CFLAGS += -Wundef
- KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-17 15:42 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-17 15:42 UTC (permalink / raw
  To: gentoo-commits

commit:     a15f9b326a1c293f4417a303a3651e3bbf6c24f6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 17 15:41:32 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 17 15:41:32 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a15f9b32

Kbuild GCC 15 fixes. Thanks holgerh

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                |  4 ++
 2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch | 94 ++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/0000_README b/0000_README
index c7f21cd0..1c7fc7ef 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
+Patch:  2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
+From:   https://github.com/hhoffstaette/kernel-patches/
Desc:   GCC 15 kbuild fixes
+
 Patch:  2990_libbpf-v2-workaround-Wmaybe-uninitialized-false-pos.patch
 From:   https://lore.kernel.org/bpf/
 Desc:   libbpf: workaround -Wmaybe-uninitialized false positive

diff --git a/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch b/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
new file mode 100644
index 00000000..e55dc3ed
--- /dev/null
+++ b/2980_kbuild-gcc15-gnu23-to-gnu11-fix.patch
@@ -0,0 +1,94 @@
+GCC 15 defaults to -std=gnu23. While most of the kernel builds with -std=gnu11,
+some of it forgets to pass that flag. Hack in CSTD_FLAG to pass -std=gnu11
+everywhere.
+
+https://lore.kernel.org/linux-kbuild/20241119044724.GA2246422@thelio-3990X/
+--- a/Makefile
++++ b/Makefile
+@@ -416,6 +416,8 @@ export KCONFIG_CONFIG
+ # SHELL used by kbuild
+ CONFIG_SHELL := sh
+ 
++CSTD_FLAG := -std=gnu11
++
+ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+ HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+ HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
+@@ -437,7 +439,7 @@ HOSTRUSTC = rustc
+ HOSTPKG_CONFIG	= pkg-config
+ 
+ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+-			 -O2 -fomit-frame-pointer -std=gnu11
++			 -O2 -fomit-frame-pointer $(CSTD_FLAG)
+ KBUILD_USERCFLAGS  := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
+ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ 
+@@ -545,7 +547,7 @@ LINUXINCLUDE    := \
+ KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
+ 
+ KBUILD_CFLAGS :=
+-KBUILD_CFLAGS += -std=gnu11
++KBUILD_CFLAGS += $(CSTD_FLAG)
+ KBUILD_CFLAGS += -fshort-wchar
+ KBUILD_CFLAGS += -funsigned-char
+ KBUILD_CFLAGS += -fno-common
+@@ -589,7 +591,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AW
+ export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+ export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+-export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
++export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS CSTD_FLAG
+ 
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
+ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -65,7 +65,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+                -fno-strict-aliasing -fno-common \
+                -Werror-implicit-function-declaration \
+                -Wno-format-security \
+-               -std=gnu11
++               $(CSTD_FLAG)
+ VDSO_CFLAGS  += -O2
+ # Some useful compiler-dependent flags from top-level Makefile
+ VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -47,7 +47,7 @@ endif
+ 
+ # How to compile the 16-bit code.  Note we always compile for -march=i386;
+ # that way we can complain to the user if the CPU is insufficient.
+-REALMODE_CFLAGS	:= -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
++REALMODE_CFLAGS	:= $(CSTD_FLAG) -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
+ 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+ 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -7,7 +7,7 @@
+ #
+ 
+ # non-x86 reuses KBUILD_CFLAGS, x86 does not
+-cflags-y			:= $(KBUILD_CFLAGS)
++cflags-y			:= $(KBUILD_CFLAGS) $(CSTD_FLAG)
+ 
+ cflags-$(CONFIG_X86_32)		:= -march=i386
+ cflags-$(CONFIG_X86_64)		:= -mcmodel=small
+@@ -18,7 +18,7 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
+ 				   $(call cc-disable-warning, address-of-packed-member) \
+ 				   $(call cc-disable-warning, gnu) \
+ 				   -fno-asynchronous-unwind-tables \
+-				   $(CLANG_FLAGS)
++				   $(CLANG_FLAGS) $(CSTD_FLAG)
+ 
+ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+ # disable the stackleak plugin
+@@ -42,7 +42,7 @@ KBUILD_CFLAGS			:= $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
+ 				   -ffreestanding \
+ 				   -fno-stack-protector \
+ 				   $(call cc-option,-fno-addrsig) \
+-				   -D__DISABLE_EXPORTS
++				   -D__DISABLE_EXPORTS $(CSTD_FLAG)
+ 
+ #
+ # struct randomization only makes sense for Linux internal types, which the EFI
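
For a sense of what the gnu23 default actually breaks, here is a small
standalone example (mine, not from the patch) that builds with
gcc -std=gnu11 but is rejected with -std=gnu23, because C23 makes bool a
keyword and redefines an empty parameter list () to mean (void):

    /* gcc -std=gnu11 -c demo.c  -> compiles (with warnings)
     * gcc -std=gnu23 -c demo.c  -> two hard errors */
    typedef int bool;       /* C23: 'bool' is a keyword, redefinition */

    int takes_anything();   /* C11: unspecified arguments; C23: (void) */

    int caller(void)
    {
            return takes_anything(1, 2);    /* constraint violation in C23 */
    }
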


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-21 13:39 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-21 13:39 UTC (permalink / raw
  To: gentoo-commits

commit:     e78e68986525995b7caf223b6cf6df36644144d6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 21 13:39:40 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 21 13:39:40 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e78e6898

Linux patch 6.13.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1003_linux-6.13.4.patch | 10887 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10891 insertions(+)

diff --git a/0000_README b/0000_README
index 1c7fc7ef..60c36739 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.13.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.13.3
 
+Patch:  1003_linux-6.13.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.4
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1003_linux-6.13.4.patch b/1003_linux-6.13.4.patch
new file mode 100644
index 00000000..cdd2b362
--- /dev/null
+++ b/1003_linux-6.13.4.patch
@@ -0,0 +1,10887 @@
+diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+index f2fd2df68a9ed9..b7241ce975b961 100644
+--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
++++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+@@ -22,7 +22,7 @@ description:
+   Each sub-node is identified using the node's name, with valid values listed
+   for each of the pmics below.
+ 
+-  For mp5496, s1, s2
++  For mp5496, s1, s2, l2, l5
+ 
+   For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
+   l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22
+diff --git a/Documentation/networking/iso15765-2.rst b/Documentation/networking/iso15765-2.rst
+index 0e9d960741783b..37ebb2c417cb44 100644
+--- a/Documentation/networking/iso15765-2.rst
++++ b/Documentation/networking/iso15765-2.rst
+@@ -369,8 +369,8 @@ to their default.
+ 
+   addr.can_family = AF_CAN;
+   addr.can_ifindex = if_nametoindex("can0");
+-  addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
+-  addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
++  addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
++  addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
+ 
+   ret = bind(s, (struct sockaddr *)&addr, sizeof(addr));
+   if (ret < 0)
+diff --git a/Makefile b/Makefile
+index 423d087afad2d1..c436a6e64971d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+@@ -1119,8 +1119,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
+ endif
+ 
+ # Align the bit size of userspace programs with the kernel
+-KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+-KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
++KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
++KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+ 
+ # make the checker run with the right architecture
+ CHECKFLAGS += --arch=$(ARCH)
+@@ -1416,18 +1416,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
+ 	$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+ endif
+ 
+-# Clear a bunch of variables before executing the submake
+-ifeq ($(quiet),silent_)
+-tools_silent=s
+-endif
+-
+ tools/: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
++	$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
+ 
+ tools/%: FORCE
+ 	$(Q)mkdir -p $(objtree)/tools
+-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
++	$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
+ 
+ # ---------------------------------------------------------------------------
+ # Kernel selftest
+diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h
+index 5ca45934fcbb82..72ed913a910f25 100644
+--- a/arch/alpha/include/uapi/asm/ptrace.h
++++ b/arch/alpha/include/uapi/asm/ptrace.h
+@@ -42,6 +42,8 @@ struct pt_regs {
+ 	unsigned long trap_a0;
+ 	unsigned long trap_a1;
+ 	unsigned long trap_a2;
++/* This makes the stack 16-byte aligned as GCC expects */
++	unsigned long __pad0;
+ /* These are saved by PAL-code: */
+ 	unsigned long ps;
+ 	unsigned long pc;
+diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
+index 4cfeae42c79ac7..e9dad60b147f33 100644
+--- a/arch/alpha/kernel/asm-offsets.c
++++ b/arch/alpha/kernel/asm-offsets.c
+@@ -19,9 +19,13 @@ static void __used foo(void)
+ 	DEFINE(TI_STATUS, offsetof(struct thread_info, status));
+ 	BLANK();
+ 
++	DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
+ 	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
+ 	BLANK();
+ 
++	DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
++	BLANK();
++
+ 	DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
+ 	DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
+ }
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index dd26062d75b3c5..f4d41b4538c2e8 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -15,10 +15,6 @@
+ 	.set noat
+ 	.cfi_sections	.debug_frame
+ 
+-/* Stack offsets.  */
+-#define SP_OFF			184
+-#define SWITCH_STACK_SIZE	64
+-
+ .macro	CFI_START_OSF_FRAME	func
+ 	.align	4
+ 	.globl	\func
+@@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
+ CFI_START_OSF_FRAME entMM
+ 	SAVE_ALL
+ /* save $9 - $15 so the inline exception code can manipulate them.  */
+-	subq	$sp, 56, $sp
+-	.cfi_adjust_cfa_offset	56
++	subq	$sp, 64, $sp
++	.cfi_adjust_cfa_offset	64
+ 	stq	$9, 0($sp)
+ 	stq	$10, 8($sp)
+ 	stq	$11, 16($sp)
+@@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
+ 	.cfi_rel_offset	$13, 32
+ 	.cfi_rel_offset	$14, 40
+ 	.cfi_rel_offset	$15, 48
+-	addq	$sp, 56, $19
++	addq	$sp, 64, $19
+ /* handle the fault */
+ 	lda	$8, 0x3fff
+ 	bic	$sp, $8, $8
+@@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
+ 	ldq	$13, 32($sp)
+ 	ldq	$14, 40($sp)
+ 	ldq	$15, 48($sp)
+-	addq	$sp, 56, $sp
++	addq	$sp, 64, $sp
+ 	.cfi_restore	$9
+ 	.cfi_restore	$10
+ 	.cfi_restore	$11
+@@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
+ 	.cfi_restore	$13
+ 	.cfi_restore	$14
+ 	.cfi_restore	$15
+-	.cfi_adjust_cfa_offset	-56
++	.cfi_adjust_cfa_offset	-64
+ /* finish up the syscall as normal.  */
+ 	br	ret_from_sys_call
+ CFI_END_OSF_FRAME entMM
+@@ -382,8 +378,8 @@ entUnaUser:
+ 	.cfi_restore	$0
+ 	.cfi_adjust_cfa_offset	-256
+ 	SAVE_ALL		/* setup normal kernel stack */
+-	lda	$sp, -56($sp)
+-	.cfi_adjust_cfa_offset	56
++	lda	$sp, -64($sp)
++	.cfi_adjust_cfa_offset	64
+ 	stq	$9, 0($sp)
+ 	stq	$10, 8($sp)
+ 	stq	$11, 16($sp)
+@@ -399,7 +395,7 @@ entUnaUser:
+ 	.cfi_rel_offset	$14, 40
+ 	.cfi_rel_offset	$15, 48
+ 	lda	$8, 0x3fff
+-	addq	$sp, 56, $19
++	addq	$sp, 64, $19
+ 	bic	$sp, $8, $8
+ 	jsr	$26, do_entUnaUser
+ 	ldq	$9, 0($sp)
+@@ -409,7 +405,7 @@ entUnaUser:
+ 	ldq	$13, 32($sp)
+ 	ldq	$14, 40($sp)
+ 	ldq	$15, 48($sp)
+-	lda	$sp, 56($sp)
++	lda	$sp, 64($sp)
+ 	.cfi_restore	$9
+ 	.cfi_restore	$10
+ 	.cfi_restore	$11
+@@ -417,7 +413,7 @@ entUnaUser:
+ 	.cfi_restore	$13
+ 	.cfi_restore	$14
+ 	.cfi_restore	$15
+-	.cfi_adjust_cfa_offset	-56
++	.cfi_adjust_cfa_offset	-64
+ 	br	ret_from_sys_call
+ CFI_END_OSF_FRAME entUna
+ 
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index a9a38c80c4a7af..7004397937cfda 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -649,7 +649,7 @@ s_reg_to_mem (unsigned long s_reg)
+ static int unauser_reg_offsets[32] = {
+ 	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
+ 	/* r9 ... r15 are stored in front of regs.  */
+-	-56, -48, -40, -32, -24, -16, -8,
++	-64, -56, -48, -40, -32, -24, -16,	/* padding at -8 */
+ 	R(r16), R(r17), R(r18),
+ 	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
+ 	R(r27), R(r28), R(gp),
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 8c9850437e6744..a9816bbc9f34d3 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ 
+ /* Macro for exception fixup code to access integer registers.  */
+ #define dpf_reg(r)							\
+-	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
+-				 (r) <= 18 ? (r)+10 : (r)-10])
++	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 :	\
++				 (r) <= 18 ? (r)+11 : (r)-10])
+ 
+ asmlinkage void
+ do_page_fault(unsigned long address, unsigned long mmcsr,
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 358c68565bfd06..2b25d671365f29 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -48,7 +48,11 @@ KBUILD_CFLAGS	+= $(CC_FLAGS_NO_FPU) \
+ KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
+ KBUILD_AFLAGS	+= $(compat_vdso)
+ 
++ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
++KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
++else
+ KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
++endif
+ 
+ KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
+ KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)
+diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
+index d9c9218fa1fddc..309942b06c5bc2 100644
+--- a/arch/arm64/kernel/cacheinfo.c
++++ b/arch/arm64/kernel/cacheinfo.c
+@@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
+ 	unsigned int level, idx;
+ 	enum cache_type type;
+ 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+-	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
++	struct cacheinfo *infos = this_cpu_ci->info_list;
+ 
+ 	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+-	     idx < this_cpu_ci->num_leaves; idx++, level++) {
++	     idx < this_cpu_ci->num_leaves; level++) {
+ 		type = get_cache_type(level);
+ 		if (type == CACHE_TYPE_SEPARATE) {
+-			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+-			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
++			if (idx + 1 >= this_cpu_ci->num_leaves)
++				break;
++			ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
++			ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
+ 		} else {
+-			ci_leaf_init(this_leaf++, type, level);
++			ci_leaf_init(&infos[idx++], type, level);
+ 		}
+ 	}
+ 	return 0;
+diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
+index 4ec32e86a8da22..47ad6944f9f088 100644
+--- a/arch/arm64/kernel/vdso/vdso.lds.S
++++ b/arch/arm64/kernel/vdso/vdso.lds.S
+@@ -41,6 +41,7 @@ SECTIONS
+ 	 */
+ 	/DISCARD/	: {
+ 		*(.note.GNU-stack .note.gnu.property)
++		*(.ARM.attributes)
+ 	}
+ 	.note		: { *(.note.*) }		:text	:note
+ 
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index f84c71f04d9ea9..e73326bd3ff7e9 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -162,6 +162,7 @@ SECTIONS
+ 	/DISCARD/ : {
+ 		*(.interp .dynamic)
+ 		*(.dynsym .dynstr .hash .gnu.hash)
++		*(.ARM.attributes)
+ 	}
+ 
+ 	. = KIMAGE_VADDR;
+diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
+index 86d5d90ebefe5b..4f09121417818d 100644
+--- a/arch/loongarch/kernel/genex.S
++++ b/arch/loongarch/kernel/genex.S
+@@ -18,16 +18,19 @@
+ 
+ 	.align	5
+ SYM_FUNC_START(__arch_cpu_idle)
+-	/* start of rollback region */
+-	LONG_L	t0, tp, TI_FLAGS
+-	nop
+-	andi	t0, t0, _TIF_NEED_RESCHED
+-	bnez	t0, 1f
+-	nop
+-	nop
+-	nop
++	/* start of idle interrupt region */
++	ori	t0, zero, CSR_CRMD_IE
++	/* idle instruction needs irq enabled */
++	csrxchg	t0, t0, LOONGARCH_CSR_CRMD
++	/*
++	 * If an interrupt lands here, between enabling interrupts above and
++	 * going idle on the next instruction, we must *NOT* go idle since the
++	 * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
++	 * reprogramming. Fall through -- see handle_vint() below -- and have
++	 * the idle loop take care of things.
++	 */
+ 	idle	0
+-	/* end of rollback region */
++	/* end of idle interrupt region */
+ 1:	jr	ra
+ SYM_FUNC_END(__arch_cpu_idle)
+ 
+@@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
+ 	UNWIND_HINT_UNDEFINED
+ 	BACKUP_T0T1
+ 	SAVE_ALL
+-	la_abs	t1, __arch_cpu_idle
++	la_abs	t1, 1b
+ 	LONG_L	t0, sp, PT_ERA
+-	/* 32 byte rollback region */
+-	ori	t0, t0, 0x1f
+-	xori	t0, t0, 0x1f
++	/* 3 instructions idle interrupt region */
++	ori	t0, t0, 0b1100
+ 	bne	t0, t1, 1f
+ 	LONG_S	t0, sp, PT_ERA
+ 1:	move	a0, sp
+diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
+index 0b5dd2faeb90b8..54b247d8cdb695 100644
+--- a/arch/loongarch/kernel/idle.c
++++ b/arch/loongarch/kernel/idle.c
+@@ -11,7 +11,6 @@
+ 
+ void __cpuidle arch_cpu_idle(void)
+ {
+-	raw_local_irq_enable();
+-	__arch_cpu_idle(); /* idle instruction needs irq enabled */
++	__arch_cpu_idle();
+ 	raw_local_irq_disable();
+ }
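
The change closes a classic idle race: with the old code an interrupt
arriving after the TIF_NEED_RESCHED check but before the idle
instruction was lost, and the CPU went to sleep on a stale decision.
Enabling interrupts only inside an architected region that handle_vint()
recognizes (and whose idle instruction it can skip by adjusting the
return address) makes check-and-sleep effectively atomic. The contract,
sketched at the C level with stubbed primitives (not LoongArch code):

    /* Stubs for illustration only. */
    void hw_irq_enable(void);       /* csrxchg on CSR.CRMD in the real code */
    void wait_for_interrupt(void);  /* the 'idle 0' instruction */

    static void safe_idle(void)
    {
            /*
             * IRQs are enabled immediately before the wait, inside a
             * region the interrupt entry code knows about: an IRQ that
             * lands between the two calls is steered past the wait, so
             * the CPU never sleeps on a stale need-resched decision.
             */
            hw_irq_enable();
            wait_for_interrupt();
    }
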
+diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
+index 1ef8c63835351b..de8fa5a8a825cd 100644
+--- a/arch/loongarch/kernel/reset.c
++++ b/arch/loongarch/kernel/reset.c
+@@ -33,7 +33,7 @@ void machine_halt(void)
+ 	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+ 
+@@ -53,7 +53,7 @@ void machine_power_off(void)
+ #endif
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+ 
+@@ -74,6 +74,6 @@ void machine_restart(char *command)
+ 		acpi_reboot();
+ 
+ 	while (true) {
+-		__arch_cpu_idle();
++		__asm__ __volatile__("idle 0" : : : "memory");
+ 	}
+ }
+diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
+index 396fed2665a517..034402e0948c93 100644
+--- a/arch/loongarch/kvm/main.c
++++ b/arch/loongarch/kvm/main.c
+@@ -285,9 +285,9 @@ int kvm_arch_enable_virtualization_cpu(void)
+ 	 * TOE=0:       Trap on Exception.
+ 	 * TIT=0:       Trap on Timer.
+ 	 */
+-	if (env & CSR_GCFG_GCIP_ALL)
++	if (env & CSR_GCFG_GCIP_SECURE)
+ 		gcfg |= CSR_GCFG_GCI_SECURE;
+-	if (env & CSR_GCFG_MATC_ROOT)
++	if (env & CSR_GCFG_MATP_ROOT)
+ 		gcfg |= CSR_GCFG_MATC_ROOT;
+ 
+ 	write_csr_gcfg(gcfg);
+diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
+index a5e84b403c3b34..df309ae4045dee 100644
+--- a/arch/loongarch/lib/csum.c
++++ b/arch/loongarch/lib/csum.c
+@@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+ 	const u64 *ptr;
+ 	u64 data, sum64 = 0;
+ 
+-	if (unlikely(len == 0))
++	if (unlikely(len <= 0))
+ 		return 0;
+ 
+ 	offset = (unsigned long)buff & 7;
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index 1aa0cb097c9c9d..7b9a5ea9cad9d3 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -75,7 +75,7 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
+ 	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
+ 	cascade_virq = msi_data->cascade_array[srs]->virq;
+ 
+-	seq_printf(p, " fsl-msi-%d", cascade_virq);
++	seq_printf(p, "fsl-msi-%d", cascade_virq);
+ }
+ 
+ 
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 857afbc4828f0c..39a481ec4a402d 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -331,6 +331,17 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ 	return rc;
+ }
+ 
++static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev)
++{
++	struct pci_dev *pdev;
++
++	pdev = zpci_iov_find_parent_pf(zbus, zdev);
++	if (!pdev)
++		return true;
++	pci_dev_put(pdev);
++	return false;
++}
++
+ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ {
+ 	bool topo_is_tid = zdev->tid_avail;
+@@ -345,6 +356,15 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+ 
+ 	topo = topo_is_tid ? zdev->tid : zdev->pchid;
+ 	zbus = zpci_bus_get(topo, topo_is_tid);
++	/*
++	 * An isolated VF gets its own domain/bus even if there exists
++	 * a matching domain/bus already
++	 */
++	if (zbus && zpci_bus_is_isolated_vf(zbus, zdev)) {
++		zpci_bus_put(zbus);
++		zbus = NULL;
++	}
++
+ 	if (!zbus) {
+ 		zbus = zpci_bus_alloc(topo, topo_is_tid);
+ 		if (!zbus)
+diff --git a/arch/s390/pci/pci_iov.c b/arch/s390/pci/pci_iov.c
+index ead062bf2b41cc..191e56a623f62c 100644
+--- a/arch/s390/pci/pci_iov.c
++++ b/arch/s390/pci/pci_iov.c
+@@ -60,18 +60,35 @@ static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, in
+ 	return 0;
+ }
+ 
+-int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
++/**
++ * zpci_iov_find_parent_pf - Find the parent PF, if any, of the given function
++ * @zbus:	The bus that the PCI function is on, or would be added on
++ * @zdev:	The PCI function
++ *
++ * Finds the parent PF, if it exists and is configured, of the given PCI function
++ * and increments its refcount. The PF is searched for on the provided bus so the
++ * caller has to ensure that this is the correct bus to search. This function may
++ * be used before adding the PCI function to a zbus.
++ *
++ * Return: Pointer to the struct pci_dev of the parent PF or NULL if it is not
++ * found. If the function is not a VF or has no RequesterID information,
++ * NULL is returned as well.
++ */
++struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+ {
+-	int i, cand_devfn;
+-	struct zpci_dev *zdev;
++	int i, vfid, devfn, cand_devfn;
+ 	struct pci_dev *pdev;
+-	int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
+-	int rc = 0;
+ 
+ 	if (!zbus->multifunction)
+-		return 0;
+-
+-	/* If the parent PF for the given VF is also configured in the
++		return NULL;
++	/* Non-VFs and VFs without RID available don't have a parent */
++	if (!zdev->vfn || !zdev->rid_available)
++		return NULL;
++	/* Linux vfid starts at 0, vfn at 1 */
++	vfid = zdev->vfn - 1;
++	devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
++	/*
++	 * If the parent PF for the given VF is also configured in the
+ 	 * instance, it must be on the same zbus.
+ 	 * We can then identify the parent PF by checking what
+ 	 * devfn the VF would have if it belonged to that PF using the PF's
+@@ -85,15 +102,26 @@ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn
+ 			if (!pdev)
+ 				continue;
+ 			cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+-			if (cand_devfn == virtfn->devfn) {
+-				rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
+-				/* balance pci_get_slot() */
+-				pci_dev_put(pdev);
+-				break;
+-			}
++			if (cand_devfn == devfn)
++				return pdev;
+ 			/* balance pci_get_slot() */
+ 			pci_dev_put(pdev);
+ 		}
+ 	}
++	return NULL;
++}
++
++int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
++{
++	struct zpci_dev *zdev = to_zpci(virtfn);
++	struct pci_dev *pdev_pf;
++	int rc = 0;
++
++	pdev_pf = zpci_iov_find_parent_pf(zbus, zdev);
++	if (pdev_pf) {
++		/* Linux' vfids start at 0 while zdev->vfn starts at 1 */
++		rc = zpci_iov_link_virtfn(pdev_pf, virtfn, zdev->vfn - 1);
++		pci_dev_put(pdev_pf);
++	}
+ 	return rc;
+ }
+diff --git a/arch/s390/pci/pci_iov.h b/arch/s390/pci/pci_iov.h
+index e3fa4e77fc867a..d2c2793eb0f348 100644
+--- a/arch/s390/pci/pci_iov.h
++++ b/arch/s390/pci/pci_iov.h
+@@ -19,6 +19,8 @@ void zpci_iov_map_resources(struct pci_dev *pdev);
+ 
+ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
+ 
++struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev);
++
+ #else /* CONFIG_PCI_IOV */
+ static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+ 
+@@ -28,5 +30,10 @@ static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *v
+ {
+ 	return 0;
+ }
++
++static inline struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
++{
++	return NULL;
++}
+ #endif /* CONFIG_PCI_IOV */
+ #endif /* __S390_PCI_IOV_h */
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 30bdc0a87dc854..3a67ba8aa62dcc 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -191,7 +191,15 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
+ int arch_dup_task_struct(struct task_struct *dst,
+ 			 struct task_struct *src)
+ {
+-	memcpy(dst, src, arch_task_struct_size);
++	/* init_task is not dynamically sized (missing FPU state) */
++	if (unlikely(src == &init_task)) {
++		memcpy(dst, src, sizeof(init_task));
++		memset((void *)dst + sizeof(init_task), 0,
++		       arch_task_struct_size - sizeof(init_task));
++	} else {
++		memcpy(dst, src, arch_task_struct_size);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
+index f683cfc9e51a54..e2f8f156402f50 100644
+--- a/arch/um/os-Linux/skas/process.c
++++ b/arch/um/os-Linux/skas/process.c
+@@ -181,6 +181,10 @@ extern char __syscall_stub_start[];
+ 
+ static int stub_exe_fd;
+ 
++#ifndef CLOSE_RANGE_CLOEXEC
++#define CLOSE_RANGE_CLOEXEC	(1U << 2)
++#endif
++
+ static int userspace_tramp(void *stack)
+ {
+ 	char *const argv[] = { "uml-userspace", NULL };
+@@ -202,8 +206,12 @@ static int userspace_tramp(void *stack)
+ 	init_data.stub_data_fd = phys_mapping(uml_to_phys(stack), &offset);
+ 	init_data.stub_data_offset = MMAP_OFFSET(offset);
+ 
+-	/* Set CLOEXEC on all FDs and then unset on all memory related FDs */
+-	close_range(0, ~0U, CLOSE_RANGE_CLOEXEC);
++	/*
++	 * Avoid leaking unneeded FDs to the stub by setting CLOEXEC on all FDs
++	 * and then unsetting it on all memory related FDs.
++	 * This is not strictly necessary from a safety perspective.
++	 */
++	syscall(__NR_close_range, 0, ~0U, CLOSE_RANGE_CLOEXEC);
+ 
+ 	fcntl(init_data.stub_data_fd, F_SETFD, 0);
+ 	for (iomem = iomem_regions; iomem; iomem = iomem->next)
+@@ -224,7 +232,9 @@ static int userspace_tramp(void *stack)
+ 	if (ret != sizeof(init_data))
+ 		exit(4);
+ 
+-	execveat(stub_exe_fd, "", argv, NULL, AT_EMPTY_PATH);
++	/* Raw execveat for compatibility with older libc versions */
++	syscall(__NR_execveat, stub_exe_fd, (unsigned long)"",
++		(unsigned long)argv, NULL, AT_EMPTY_PATH);
+ 
+ 	exit(5);
+ }
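
Both raw syscalls keep the stub usable on older userspaces: glibc only
grew close_range() and execveat() wrappers in 2.34, while the syscall
numbers themselves are much older. The same trick in isolation (a
sketch; AT_EMPTY_PATH needs _GNU_SOURCE):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* execveat(2) via syscall(2): run the program behind fd without
     * depending on the libc wrapper. */
    static void exec_fd(int fd, char *const argv[])
    {
            syscall(__NR_execveat, fd, "", argv, (char *const *)0,
                    AT_EMPTY_PATH);
    }
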
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index ef6cfea9df7333..c2fb8fe86a4559 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2593,7 +2593,8 @@ config MITIGATION_IBPB_ENTRY
+ 	depends on CPU_SUP_AMD && X86_64
+ 	default y
+ 	help
+-	  Compile the kernel with support for the retbleed=ibpb mitigation.
++	  Compile the kernel with support for the retbleed=ibpb and
++	  spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
+ 
+ config MITIGATION_IBRS_ENTRY
+ 	bool "Enable IBRS on kernel entry"
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 99c590da0ae241..b1855a46b2adf6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4887,20 +4887,22 @@ static inline bool intel_pmu_broken_perf_cap(void)
+ 
+ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
+ {
+-	unsigned int sub_bitmaps, eax, ebx, ecx, edx;
++	unsigned int cntr, fixed_cntr, ecx, edx;
++	union cpuid35_eax eax;
++	union cpuid35_ebx ebx;
+ 
+-	cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
++	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
+ 
+-	if (ebx & ARCH_PERFMON_EXT_UMASK2)
++	if (ebx.split.umask2)
+ 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
+-	if (ebx & ARCH_PERFMON_EXT_EQ)
++	if (ebx.split.eq)
+ 		pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
+ 
+-	if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
++	if (eax.split.cntr_subleaf) {
+ 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
+-			    &eax, &ebx, &ecx, &edx);
+-		pmu->cntr_mask64 = eax;
+-		pmu->fixed_cntr_mask64 = ebx;
++			    &cntr, &fixed_cntr, &ecx, &edx);
++		pmu->cntr_mask64 = cntr;
++		pmu->fixed_cntr_mask64 = fixed_cntr;
+ 	}
+ 
+ 	if (!intel_pmu_broken_perf_cap()) {
+@@ -4923,11 +4925,6 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
+ 	else
+ 		pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
+ 
+-	if (pmu->intel_cap.pebs_output_pt_available)
+-		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+-	else
+-		pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
+-
+ 	intel_pmu_check_event_constraints(pmu->event_constraints,
+ 					  pmu->cntr_mask64,
+ 					  pmu->fixed_cntr_mask64,
+@@ -5005,9 +5002,6 @@ static bool init_hybrid_pmu(int cpu)
+ 
+ 	pr_info("%s PMU driver: ", pmu->name);
+ 
+-	if (pmu->intel_cap.pebs_output_pt_available)
+-		pr_cont("PEBS-via-PT ");
+-
+ 	pr_cont("\n");
+ 
+ 	x86_pmu_show_pmu_cap(&pmu->pmu);
+@@ -5030,8 +5024,11 @@ static void intel_pmu_cpu_starting(int cpu)
+ 
+ 	init_debug_store_on_cpu(cpu);
+ 	/*
+-	 * Deal with CPUs that don't clear their LBRs on power-up.
++	 * Deal with CPUs that don't clear their LBRs on power-up, and that may
++	 * even boot with LBRs enabled.
+ 	 */
++	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
++		msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
+ 	intel_pmu_lbr_reset();
+ 
+ 	cpuc->lbr_sel = NULL;
+@@ -6362,11 +6359,9 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
+ 		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+ 		if (pmu->pmu_type & hybrid_small_tiny) {
+ 			pmu->intel_cap.perf_metrics = 0;
+-			pmu->intel_cap.pebs_output_pt_available = 1;
+ 			pmu->mid_ack = true;
+ 		} else if (pmu->pmu_type & hybrid_big) {
+ 			pmu->intel_cap.perf_metrics = 1;
+-			pmu->intel_cap.pebs_output_pt_available = 0;
+ 			pmu->late_ack = true;
+ 		}
+ 	}
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 6ba6549f26fac6..cb0eca73478995 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2544,7 +2544,15 @@ void __init intel_ds_init(void)
+ 			}
+ 			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+ 
+-			if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
++			/*
++			 * The PEBS-via-PT is not supported on hybrid platforms,
++			 * because not all CPUs of a hybrid machine support it.
++			 * The global x86_pmu.intel_cap, which only contains the
++			 * common capabilities, is used to check the availability
++			 * of the feature. The per-PMU pebs_output_pt_available
++			 * in a hybrid machine should be ignored.
++			 */
++			if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ 				pr_cont("PEBS-via-PT, ");
+ 				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ 			}
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 5aff7222e40fac..14dbed976e42a7 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -48,6 +48,7 @@ KVM_X86_OP(set_idt)
+ KVM_X86_OP(get_gdt)
+ KVM_X86_OP(set_gdt)
+ KVM_X86_OP(sync_dirty_debug_regs)
++KVM_X86_OP(set_dr6)
+ KVM_X86_OP(set_dr7)
+ KVM_X86_OP(cache_reg)
+ KVM_X86_OP(get_rflags)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c1043f8c9b0312..76bfeb03c041ad 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1674,6 +1674,7 @@ struct kvm_x86_ops {
+ 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
++	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
+ 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index ce4677b8b7356c..3b496cdcb74b3c 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -37,6 +37,8 @@ typedef struct {
+ 	 */
+ 	atomic64_t tlb_gen;
+ 
++	unsigned long next_trim_cpumask;
++
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+ 	struct rw_semaphore	ldt_usr_sem;
+ 	struct ldt_struct	*ldt;
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 2886cb668d7fae..795fdd53bd0a6d 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ 
+ 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+ 	atomic64_set(&mm->context.tlb_gen, 0);
++	mm->context.next_trim_cpumask = jiffies + HZ;
+ 
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+ 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 3ae84c3b8e6dba..61e991507353eb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -395,7 +395,8 @@
+ #define MSR_IA32_PASID_VALID		BIT_ULL(31)
+ 
+ /* DEBUGCTLMSR bits (others vary by model): */
+-#define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
++#define DEBUGCTLMSR_LBR_BIT		0	     /* last branch recording */
++#define DEBUGCTLMSR_LBR			(1UL <<  DEBUGCTLMSR_LBR_BIT)
+ #define DEBUGCTLMSR_BTF_SHIFT		1
+ #define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
+ #define DEBUGCTLMSR_BUS_LOCK_DETECT	(1UL <<  2)
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index d95f902acc5211..f996a32ca78800 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -187,11 +187,33 @@ union cpuid10_edx {
+  * detection/enumeration details:
+  */
+ #define ARCH_PERFMON_EXT_LEAF			0x00000023
+-#define ARCH_PERFMON_EXT_UMASK2			0x1
+-#define ARCH_PERFMON_EXT_EQ			0x2
+-#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT	0x1
+ #define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
+ 
++union cpuid35_eax {
++	struct {
++		unsigned int	leaf0:1;
++		/* Counters Sub-Leaf */
++		unsigned int    cntr_subleaf:1;
++		/* Auto Counter Reload Sub-Leaf */
++		unsigned int    acr_subleaf:1;
++		/* Events Sub-Leaf */
++		unsigned int    events_subleaf:1;
++		unsigned int	reserved:28;
++	} split;
++	unsigned int            full;
++};
++
++union cpuid35_ebx {
++	struct {
++		/* UnitMask2 Supported */
++		unsigned int    umask2:1;
++		/* EQ-bit Supported */
++		unsigned int    eq:1;
++		unsigned int	reserved:30;
++	} split;
++	unsigned int            full;
++};
++
+ /*
+  * Intel Architectural LBR CPUID detection/enumeration details:
+  */
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 69e79fff41b800..02fc2aa06e9e0e 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -222,6 +222,7 @@ struct flush_tlb_info {
+ 	unsigned int		initiating_cpu;
+ 	u8			stride_shift;
+ 	u8			freed_tables;
++	u8			trim_cpumask;
+ };
+ 
+ void flush_tlb_local(void);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 47a01d4028f60e..5fba44a4f988c0 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1115,6 +1115,8 @@ static void __init retbleed_select_mitigation(void)
+ 
+ 	case RETBLEED_MITIGATION_IBPB:
+ 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
++		mitigate_smt = true;
+ 
+ 		/*
+ 		 * IBPB on entry already obviates the need for
+@@ -1124,9 +1126,6 @@ static void __init retbleed_select_mitigation(void)
+ 		setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ 		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ 
+-		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+-		mitigate_smt = true;
+-
+ 		/*
+ 		 * There is no need for RSB filling: entry_ibpb() ensures
+ 		 * all predictions, including the RSB, are invalidated,
+@@ -2643,6 +2642,7 @@ static void __init srso_select_mitigation(void)
+ 		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ 			if (has_microcode) {
+ 				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 				srso_mitigation = SRSO_MITIGATION_IBPB;
+ 
+ 				/*
+@@ -2652,6 +2652,13 @@ static void __init srso_select_mitigation(void)
+ 				 */
+ 				setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ 				setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
++				/*
++				 * There is no need for RSB filling: entry_ibpb() ensures
++				 * all predictions, including the RSB, are invalidated,
++				 * regardless of IBPB implementation.
++				 */
++				setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ 			}
+ 		} else {
+ 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
+@@ -2659,8 +2666,8 @@ static void __init srso_select_mitigation(void)
+ 		break;
+ 
+ 	case SRSO_CMD_IBPB_ON_VMEXIT:
+-		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
+-			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
++		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
++			if (has_microcode) {
+ 				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ 				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+ 
+@@ -2672,8 +2679,8 @@ static void __init srso_select_mitigation(void)
+ 				setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ 			}
+ 		} else {
+-			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
+-                }
++			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
++		}
+ 		break;
+ 	default:
+ 		break;
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 4f0a94346d0094..44c88537448c74 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -2226,6 +2226,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
+ 	u32 vector;
+ 	bool all_cpus;
+ 
++	if (!lapic_in_kernel(vcpu))
++		return HV_STATUS_INVALID_HYPERCALL_INPUT;
++
+ 	if (hc->code == HVCALL_SEND_IPI) {
+ 		if (!hc->fast) {
+ 			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
+@@ -2852,7 +2855,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ 			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ 			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ 			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+-			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
++			if (!vcpu || lapic_in_kernel(vcpu))
++				ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ 			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ 			if (evmcs_ver)
+ 				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 74c45519309030..e102505735a7bc 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5524,7 +5524,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ 	union kvm_mmu_page_role root_role;
+ 
+ 	/* NPT requires CR0.PG=1. */
+-	WARN_ON_ONCE(cpu_role.base.direct);
++	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
+ 
+ 	root_role = cpu_role.base;
+ 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index b708bdf7eaffd2..ee37e3ebc04389 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -646,6 +646,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 	u32 pause_count12;
+ 	u32 pause_thresh12;
+ 
++	nested_svm_transition_tlb_flush(vcpu);
++
++	/* Enter Guest-Mode */
++	enter_guest_mode(vcpu);
++
+ 	/*
+ 	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
+ 	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
+@@ -762,11 +767,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
+ 		}
+ 	}
+ 
+-	nested_svm_transition_tlb_flush(vcpu);
+-
+-	/* Enter Guest-Mode */
+-	enter_guest_mode(vcpu);
+-
+ 	/*
+ 	 * Merge guest and host intercepts - must be called with vcpu in
+ 	 * guest-mode to take effect.
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 21dacd31277922..68704e035d7cbd 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1995,11 +1995,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
+ 	svm->asid = sd->next_asid++;
+ }
+ 
+-static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
++static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
+ {
+-	struct vmcb *vmcb = svm->vmcb;
++	struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+ 
+-	if (svm->vcpu.arch.guest_state_protected)
++	if (vcpu->arch.guest_state_protected)
+ 		return;
+ 
+ 	if (unlikely(value != vmcb->save.dr6)) {
+@@ -4236,10 +4236,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
+ 	 * of a #DB.
+ 	 */
+-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+-		svm_set_dr6(svm, vcpu->arch.dr6);
+-	else
+-		svm_set_dr6(svm, DR6_ACTIVE_LOW);
++	if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
++		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
+ 
+ 	clgi();
+ 	kvm_load_guest_xsave_state(vcpu);
+@@ -5036,6 +5034,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_idt = svm_set_idt,
+ 	.get_gdt = svm_get_gdt,
+ 	.set_gdt = svm_set_gdt,
++	.set_dr6 = svm_set_dr6,
+ 	.set_dr7 = svm_set_dr7,
+ 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
+ 	.cache_reg = svm_cache_reg,
+diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
+index 92d35cc6cd15d8..7972f2152b3684 100644
+--- a/arch/x86/kvm/vmx/main.c
++++ b/arch/x86/kvm/vmx/main.c
+@@ -61,6 +61,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
+ 	.set_idt = vmx_set_idt,
+ 	.get_gdt = vmx_get_gdt,
+ 	.set_gdt = vmx_set_gdt,
++	.set_dr6 = vmx_set_dr6,
+ 	.set_dr7 = vmx_set_dr7,
+ 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ 	.cache_reg = vmx_cache_reg,
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 01abcdcbbf70ab..aebd45d43ebbe3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -5644,6 +5644,12 @@ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+ 	set_debugreg(DR6_RESERVED, 6);
+ }
+ 
++void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
++{
++	lockdep_assert_irqs_disabled();
++	set_debugreg(vcpu->arch.dr6, 6);
++}
++
+ void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+ {
+ 	vmcs_writel(GUEST_DR7, val);
+@@ -7428,10 +7434,6 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ 		vmx->loaded_vmcs->host_state.cr4 = cr4;
+ 	}
+ 
+-	/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
+-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+-		set_debugreg(vcpu->arch.dr6, 6);
+-
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+ 	 * corresponding interruptibility bits in the guest state. Otherwise
+ 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
+diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
+index 48dc76bf0ec03a..4aba200f435d42 100644
+--- a/arch/x86/kvm/vmx/x86_ops.h
++++ b/arch/x86/kvm/vmx/x86_ops.h
+@@ -74,6 +74,7 @@ void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
++void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val);
+ void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
+ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
+ void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 23bf088fc4ae1e..030310b26c6933 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10953,6 +10953,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		set_debugreg(vcpu->arch.eff_db[1], 1);
+ 		set_debugreg(vcpu->arch.eff_db[2], 2);
+ 		set_debugreg(vcpu->arch.eff_db[3], 3);
++		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
++		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
++			kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
+ 	} else if (unlikely(hw_breakpoint_active())) {
+ 		set_debugreg(0, 7);
+ 	}
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index a2becb85bea796..90a9e474091314 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -893,9 +893,36 @@ static void flush_tlb_func(void *info)
+ 			nr_invalidate);
+ }
+ 
+-static bool tlb_is_not_lazy(int cpu, void *data)
++static bool should_flush_tlb(int cpu, void *data)
+ {
+-	return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
++	struct flush_tlb_info *info = data;
++
++	/* Lazy TLB will get flushed at the next context switch. */
++	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
++		return false;
++
++	/* No mm means kernel memory flush. */
++	if (!info->mm)
++		return true;
++
++	/* The target mm is loaded, and the CPU is not lazy. */
++	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
++		return true;
++
++	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
++	if (info->trim_cpumask)
++		return true;
++
++	return false;
++}
++
++static bool should_trim_cpumask(struct mm_struct *mm)
++{
++	if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
++		WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
++		return true;
++	}
++	return false;
+ }
+ 
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+@@ -929,7 +956,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
+ 	if (info->freed_tables)
+ 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
+ 	else
+-		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
++		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
+ 				(void *)info, 1, cpumask);
+ }
+ 
+@@ -980,6 +1007,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+ 	info->freed_tables	= freed_tables;
+ 	info->new_tlb_gen	= new_tlb_gen;
+ 	info->initiating_cpu	= smp_processor_id();
++	info->trim_cpumask	= 0;
+ 
+ 	return info;
+ }
+@@ -1022,6 +1050,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	 * flush_tlb_func_local() directly in this case.
+ 	 */
+ 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
++		info->trim_cpumask = should_trim_cpumask(mm);
+ 		flush_tlb_multi(mm_cpumask(mm), info);
+ 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+ 		lockdep_assert_irqs_enabled();
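
should_trim_cpumask() above rate-limits the cpumask trimming to roughly once per second per mm via a time_after() check on a cached deadline. The same shape in plain C, using wall-clock seconds as a stand-in for jiffies + HZ:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static time_t next_trim;

static bool should_trim(void)
{
	time_t now = time(NULL);

	if (now >= next_trim) {		/* kernel: time_after(jiffies, next) */
		next_trim = now + 1;	/* kernel: jiffies + HZ */
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("attempt %d: trim=%d\n", i, should_trim());
	return 0;
}

Racing callers are tolerable here: the kernel version uses READ_ONCE/WRITE_ONCE because an occasional extra trim is harmless, so no stronger synchronization is needed.
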
+diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
+index 76eaeb93928cce..eb1cdadc8a61dc 100644
+--- a/arch/x86/um/os-Linux/registers.c
++++ b/arch/x86/um/os-Linux/registers.c
+@@ -18,6 +18,7 @@
+ #include <registers.h>
+ #include <sys/mman.h>
+ 
++static unsigned long ptrace_regset;
+ unsigned long host_fp_size;
+ 
+ int get_fp_registers(int pid, unsigned long *regs)
+@@ -27,7 +28,7 @@ int get_fp_registers(int pid, unsigned long *regs)
+ 		.iov_len = host_fp_size,
+ 	};
+ 
+-	if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
++	if (ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov) < 0)
+ 		return -errno;
+ 	return 0;
+ }
+@@ -39,7 +40,7 @@ int put_fp_registers(int pid, unsigned long *regs)
+ 		.iov_len = host_fp_size,
+ 	};
+ 
+-	if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
++	if (ptrace(PTRACE_SETREGSET, pid, ptrace_regset, &iov) < 0)
+ 		return -errno;
+ 	return 0;
+ }
+@@ -58,9 +59,23 @@ int arch_init_registers(int pid)
+ 		return -ENOMEM;
+ 
+ 	/* GDB has x86_xsave_length, which uses x86_cpuid_count */
+-	ret = ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov);
++	ptrace_regset = NT_X86_XSTATE;
++	ret = ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov);
+ 	if (ret)
+ 		ret = -errno;
++
++	if (ret == -ENODEV) {
++#ifdef CONFIG_X86_32
++		ptrace_regset = NT_PRXFPREG;
++#else
++		ptrace_regset = NT_PRFPREG;
++#endif
++		iov.iov_len = 2 * 1024 * 1024;
++		ret = ptrace(PTRACE_GETREGSET, pid, ptrace_regset, &iov);
++		if (ret)
++			ret = -errno;
++	}
++
+ 	munmap(iov.iov_base, 2 * 1024 * 1024);
+ 
+ 	host_fp_size = iov.iov_len;
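
The UML change above introduces a one-shot fallback: probe NT_X86_XSTATE first and, if the host answers -ENODEV, retry with the classic FP regset. A rough userspace sketch of that probe-then-fallback, assuming a glibc host where <elf.h> provides the NT_* constants:

#include <errno.h>
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

static long ptrace_regset = NT_X86_XSTATE;

static int read_fp(int pid, struct iovec *iov)
{
	if (ptrace(PTRACE_GETREGSET, pid, (void *)ptrace_regset, iov) == 0)
		return 0;
	if (errno == ENODEV) {
		/* Host lacks XSTATE: drop to the classic FP regset once;
		 * later calls reuse the cached regset, as the patch does. */
		ptrace_regset = NT_PRFPREG;
		if (ptrace(PTRACE_GETREGSET, pid, (void *)ptrace_regset, iov) == 0)
			return 0;
	}
	return -errno;
}

int main(void)
{
	/* Exercising read_fp() needs a ptrace-attached child; this stub
	 * only shows the fallback shape. */
	(void)read_fp;
	return 0;
}
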
+diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
+index 75087e85b6fdb7..2934e170b0fe0b 100644
+--- a/arch/x86/um/signal.c
++++ b/arch/x86/um/signal.c
+@@ -187,7 +187,12 @@ static int copy_sc_to_user(struct sigcontext __user *to,
+ 	 * Put magic/size values for userspace. We do not bother to verify them
+ 	 * later on, however, userspace needs them should it try to read the
+ 	 * XSTATE data. And ptrace does not fill in these parts.
++	 *
++	 * Skip this if we do not have an XSTATE frame.
+ 	 */
++	if (host_fp_size <= sizeof(to_fp64->fpstate))
++		return 0;
++
+ 	BUILD_BUG_ON(sizeof(int) != FP_XSTATE_MAGIC2_SIZE);
+ #ifdef CONFIG_X86_32
+ 	__put_user(offsetof(struct _fpstate_32, _fxsr_env) +
+@@ -367,11 +372,13 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
+ 	int err = 0, sig = ksig->sig;
+ 	unsigned long fp_to;
+ 
+-	frame = (struct rt_sigframe __user *)
+-		round_down(stack_top - sizeof(struct rt_sigframe), 16);
++	frame = (void __user *)stack_top - sizeof(struct rt_sigframe);
+ 
+ 	/* Add required space for math frame */
+-	frame = (struct rt_sigframe __user *)((unsigned long)frame - math_size);
++	frame = (void __user *)((unsigned long)frame - math_size);
++
++	/* ABI requires 16 byte boundary alignment */
++	frame = (void __user *)round_down((unsigned long)frame, 16);
+ 
+ 	/* Subtract 128 for a red zone and 8 for proper alignment */
+ 	frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
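
The reordering above matters because round_down() must run only after both the signal frame and the variable-size math frame have been reserved; otherwise the fpstate area can end up misaligned when host_fp_size is not a multiple of 16. A worked example of the layout arithmetic, with invented sizes standing in for the real structures:

#include <stdio.h>

static unsigned long round_down_ul(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}

int main(void)
{
	unsigned long stack_top = 0x7ffff0000000UL;
	unsigned long frame_size = 1000;	/* stand-in for sizeof(struct rt_sigframe) */
	unsigned long math_size = 832;		/* stand-in for the XSTATE frame */
	unsigned long frame;

	frame = stack_top - frame_size;		/* reserve the sigframe */
	frame -= math_size;			/* then the math frame */
	frame = round_down_ul(frame, 16);	/* align only after both */
	frame -= 128 + 8;			/* red zone + alignment slot */

	/* 136 below a 16-byte boundary leaves frame % 16 == 8, the offset
	 * the x86-64 ABI expects at a call-like entry point. */
	printf("frame = %#lx, frame %% 16 = %lu\n", frame, frame % 16);
	return 0;
}
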
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 55a4996d0c04f1..d078de2c952b37 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -111,6 +111,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+  */
+ static DEFINE_SPINLOCK(xen_reservation_lock);
+ 
++/* Protected by xen_reservation_lock. */
++#define MIN_CONTIG_ORDER 9 /* 2MB */
++static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
++static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
++static unsigned long *discontig_frames __refdata = discontig_frames_early;
++static bool discontig_frames_dyn;
++
++static int alloc_discontig_frames(unsigned int order)
++{
++	unsigned long *new_array, *old_array;
++	unsigned int old_order;
++	unsigned long flags;
++
++	BUG_ON(order < MIN_CONTIG_ORDER);
++	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);
++
++	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
++						      order - MIN_CONTIG_ORDER);
++	if (!new_array)
++		return -ENOMEM;
++
++	spin_lock_irqsave(&xen_reservation_lock, flags);
++
++	old_order = discontig_frames_order;
++
++	if (order > discontig_frames_order || !discontig_frames_dyn) {
++		if (!discontig_frames_dyn)
++			old_array = NULL;
++		else
++			old_array = discontig_frames;
++
++		discontig_frames = new_array;
++		discontig_frames_order = order;
++		discontig_frames_dyn = true;
++	} else {
++		old_array = new_array;
++	}
++
++	spin_unlock_irqrestore(&xen_reservation_lock, flags);
++
++	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);
++
++	return 0;
++}
++
+ /*
+  * Note about cr3 (pagetable base) values:
+  *
+@@ -781,6 +826,7 @@ void xen_mm_pin_all(void)
+ {
+ 	struct page *page;
+ 
++	spin_lock(&init_mm.page_table_lock);
+ 	spin_lock(&pgd_lock);
+ 
+ 	list_for_each_entry(page, &pgd_list, lru) {
+@@ -791,6 +837,7 @@ void xen_mm_pin_all(void)
+ 	}
+ 
+ 	spin_unlock(&pgd_lock);
++	spin_unlock(&init_mm.page_table_lock);
+ }
+ 
+ static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+@@ -812,6 +859,9 @@ static void __init xen_after_bootmem(void)
+ 	SetPagePinned(virt_to_page(level3_user_vsyscall));
+ #endif
+ 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
++
++	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
++		BUG();
+ }
+ 
+ static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+@@ -887,6 +937,7 @@ void xen_mm_unpin_all(void)
+ {
+ 	struct page *page;
+ 
++	spin_lock(&init_mm.page_table_lock);
+ 	spin_lock(&pgd_lock);
+ 
+ 	list_for_each_entry(page, &pgd_list, lru) {
+@@ -898,6 +949,7 @@ void xen_mm_unpin_all(void)
+ 	}
+ 
+ 	spin_unlock(&pgd_lock);
++	spin_unlock(&init_mm.page_table_lock);
+ }
+ 
+ static void xen_enter_mmap(struct mm_struct *mm)
+@@ -2199,10 +2251,6 @@ void __init xen_init_mmu_ops(void)
+ 	memset(dummy_mapping, 0xff, PAGE_SIZE);
+ }
+ 
+-/* Protected by xen_reservation_lock. */
+-#define MAX_CONTIG_ORDER 9 /* 2MB */
+-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+-
+ #define VOID_PTE (mfn_pte(0, __pgprot(0)))
+ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+ 				unsigned long *in_frames,
+@@ -2319,18 +2367,25 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ 				 unsigned int address_bits,
+ 				 dma_addr_t *dma_handle)
+ {
+-	unsigned long *in_frames = discontig_frames, out_frame;
++	unsigned long *in_frames, out_frame;
+ 	unsigned long  flags;
+ 	int            success;
+ 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
+ 
+-	if (unlikely(order > MAX_CONTIG_ORDER))
+-		return -ENOMEM;
++	if (unlikely(order > discontig_frames_order)) {
++		if (!discontig_frames_dyn)
++			return -ENOMEM;
++
++		if (alloc_discontig_frames(order))
++			return -ENOMEM;
++	}
+ 
+ 	memset((void *) vstart, 0, PAGE_SIZE << order);
+ 
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+ 
++	in_frames = discontig_frames;
++
+ 	/* 1. Zap current PTEs, remembering MFNs. */
+ 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+ 
+@@ -2354,12 +2409,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ 
+ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+ {
+-	unsigned long *out_frames = discontig_frames, in_frame;
++	unsigned long *out_frames, in_frame;
+ 	unsigned long  flags;
+ 	int success;
+ 	unsigned long vstart;
+ 
+-	if (unlikely(order > MAX_CONTIG_ORDER))
++	if (unlikely(order > discontig_frames_order))
+ 		return;
+ 
+ 	vstart = (unsigned long)phys_to_virt(pstart);
+@@ -2367,6 +2422,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+ 
+ 	spin_lock_irqsave(&xen_reservation_lock, flags);
+ 
++	out_frames = discontig_frames;
++
+ 	/* 1. Find start MFN of contiguous extent. */
+ 	in_frame = virt_to_mfn((void *)vstart);
+ 
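
Taken together, the mmu_pv.c hunks replace the fixed 2MB discontig_frames scratch array with one that can grow on demand: allocate the candidate array outside the lock, swap the pointer under xen_reservation_lock only if it is still the larger one, and free whichever array lost the race after dropping the lock. A compact pthread model of that grow-and-swap (sizes and names simplified):

#include <pthread.h>
#include <stdlib.h>

#define MIN_ORDER 9

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *frames;
static unsigned int frames_order = MIN_ORDER;

static int grow_frames(unsigned int order)
{
	unsigned long *new_arr, *old_arr;

	new_arr = malloc(sizeof(*new_arr) << order);
	if (!new_arr)
		return -1;

	pthread_mutex_lock(&lock);
	if (order > frames_order || !frames) {
		old_arr = frames;	/* may be NULL on first growth */
		frames = new_arr;
		frames_order = order;
	} else {
		old_arr = new_arr;	/* someone grew it first: discard ours */
	}
	pthread_mutex_unlock(&lock);

	free(old_arr);			/* never free inside the lock */
	return 0;
}

int main(void)
{
	return grow_frames(MIN_ORDER + 1);
}
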
+diff --git a/block/partitions/mac.c b/block/partitions/mac.c
+index c80183156d6802..b02530d9862970 100644
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -53,13 +53,25 @@ int mac_partition(struct parsed_partitions *state)
+ 	}
+ 	secsize = be16_to_cpu(md->block_size);
+ 	put_dev_sector(sect);
++
++	/*
++	 * If the "block size" is not a power of 2, things get weird - we might
++	 * end up with a partition straddling a sector boundary, so we wouldn't
++	 * be able to read a partition entry with read_part_sector().
++	 * Real block sizes are probably (?) powers of two, so just require
++	 * that.
++	 */
++	if (!is_power_of_2(secsize))
++		return -1;
+ 	datasize = round_down(secsize, 512);
+ 	data = read_part_sector(state, datasize / 512, &sect);
+ 	if (!data)
+ 		return -1;
+ 	partoffset = secsize % 512;
+-	if (partoffset + sizeof(*part) > datasize)
++	if (partoffset + sizeof(*part) > datasize) {
++		put_dev_sector(sect);
+ 		return -1;
++	}
+ 	part = (struct mac_partition *) (data + partoffset);
+ 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ 		put_dev_sector(sect);
+@@ -112,8 +124,8 @@ int mac_partition(struct parsed_partitions *state)
+ 				int i, l;
+ 
+ 				goodness++;
+-				l = strlen(part->name);
+-				if (strcmp(part->name, "/") == 0)
++				l = strnlen(part->name, sizeof(part->name));
++				if (strncmp(part->name, "/", sizeof(part->name)) == 0)
+ 					goodness++;
+ 				for (i = 0; i <= l - 4; ++i) {
+ 					if (strncasecmp(part->name + i, "root",
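
Both mac.c fixes are defensive-parsing patterns: reject a block size that is not a power of two before using it for sector arithmetic, and treat the fixed-width on-disk name field as possibly unterminated by switching to strnlen()/strncmp(). The two checks in isolation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_pow2(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	/* Fixed-width field with no NUL terminator, as it may appear on disk. */
	char name[8] = { 'r', 'o', 'o', 't', 'f', 's', '0', '1' };

	printf("secsize 512: %d, secsize 520: %d\n", is_pow2(512), is_pow2(520));
	/* strnlen never walks past the end of the field. */
	printf("bounded name length = %zu\n", strnlen(name, sizeof(name)));
	return 0;
}
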
+diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
+index 3561553eff8b5e..70f8290b659de5 100644
+--- a/drivers/acpi/arm64/gtdt.c
++++ b/drivers/acpi/arm64/gtdt.c
+@@ -163,7 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
+ {
+ 	void *platform_timer;
+ 	struct acpi_table_gtdt *gtdt;
+-	int cnt = 0;
++	u32 cnt = 0;
+ 
+ 	gtdt = container_of(table, struct acpi_table_gtdt, header);
+ 	acpi_gtdt_desc.gtdt = gtdt;
+@@ -188,13 +188,17 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
+ 		cnt++;
+ 
+ 	if (cnt != gtdt->platform_timer_count) {
++		cnt = min(cnt, gtdt->platform_timer_count);
++		pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt);
++	}
++
++	if (!cnt) {
+ 		acpi_gtdt_desc.platform_timer = NULL;
+-		pr_err(FW_BUG "invalid timer data.\n");
+-		return -EINVAL;
++		return 0;
+ 	}
+ 
+ 	if (platform_timer_count)
+-		*platform_timer_count = gtdt->platform_timer_count;
++		*platform_timer_count = cnt;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index cb45ef5240dab6..068c1612660bc0 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -407,6 +407,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ 					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ 	},
++	{
++		/* Vexia Edu Atla 10 tablet 5V version */
++		.matches = {
++			/* Having all 3 of these not set is somewhat unique */
++			DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++	},
+ 	{
+ 		/* Vexia Edu Atla 10 tablet 9V version */
+ 		.matches = {
+diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
+index 0bcd81389a29f8..978613407ea3cf 100644
+--- a/drivers/base/regmap/regmap-irq.c
++++ b/drivers/base/regmap/regmap-irq.c
+@@ -906,6 +906,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ 	kfree(d->wake_buf);
+ 	kfree(d->mask_buf_def);
+ 	kfree(d->mask_buf);
++	kfree(d->main_status_buf);
+ 	kfree(d->status_buf);
+ 	kfree(d->status_reg_buf);
+ 	if (d->config_buf) {
+@@ -981,6 +982,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+ 	kfree(d->wake_buf);
+ 	kfree(d->mask_buf_def);
+ 	kfree(d->mask_buf);
++	kfree(d->main_status_buf);
+ 	kfree(d->status_reg_buf);
+ 	kfree(d->status_buf);
+ 	if (d->config_buf) {
+diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
+index 2b79952f3628de..091ffe3e14954a 100644
+--- a/drivers/bluetooth/btintel_pcie.c
++++ b/drivers/bluetooth/btintel_pcie.c
+@@ -1320,6 +1320,10 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 			if (opcode == 0xfc01)
+ 				btintel_pcie_inject_cmd_complete(hdev, opcode);
+ 		}
++		/* Firmware raises alive interrupt on HCI_OP_RESET */
++		if (opcode == HCI_OP_RESET)
++			data->gp0_received = false;
++
+ 		hdev->stat.cmd_tx++;
+ 		break;
+ 	case HCI_ACLDATA_PKT:
+@@ -1357,7 +1361,6 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
+ 			   opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ 			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ 		if (opcode == HCI_OP_RESET) {
+-			data->gp0_received = false;
+ 			ret = wait_event_timeout(data->gp0_wait_q,
+ 						 data->gp0_received,
+ 						 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
+index 6276551d79680e..1e57ebfb76229a 100644
+--- a/drivers/bus/moxtet.c
++++ b/drivers/bus/moxtet.c
+@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
+ 
+ 	id = moxtet->modules[pos->idx];
+ 
+-	seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
++	seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ 		   pos->bit);
+ }
+ 
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index f6d04eb40af94d..f71057c2cf9043 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -727,12 +727,12 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ 		pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ 		return -EOPNOTSUPP;
+ 	}
+-	mutex_lock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
++
+ 	ret = amd_pstate_cpu_boost_update(policy, state);
+ 	WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
+ 	policy->boost_enabled = !ret ? state : false;
+ 	refresh_frequency_limits(policy);
+-	mutex_unlock(&amd_pstate_driver_lock);
+ 
+ 	return ret;
+ }
+@@ -809,24 +809,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+ 
+ static void amd_pstate_update_limits(unsigned int cpu)
+ {
+-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	struct cpufreq_policy *policy = NULL;
+ 	struct amd_cpudata *cpudata;
+ 	u32 prev_high = 0, cur_high = 0;
+ 	int ret;
+ 	bool highest_perf_changed = false;
+ 
++	if (!amd_pstate_prefcore)
++		return;
++
++	policy = cpufreq_cpu_get(cpu);
+ 	if (!policy)
+ 		return;
+ 
+ 	cpudata = policy->driver_data;
+ 
+-	if (!amd_pstate_prefcore)
+-		return;
++	guard(mutex)(&amd_pstate_driver_lock);
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
+ 	ret = amd_get_highest_perf(cpu, &cur_high);
+-	if (ret)
+-		goto free_cpufreq_put;
++	if (ret) {
++		cpufreq_cpu_put(policy);
++		return;
++	}
+ 
+ 	prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ 	highest_perf_changed = (prev_high != cur_high);
+@@ -836,14 +840,11 @@ static void amd_pstate_update_limits(unsigned int cpu)
+ 		if (cur_high < CPPC_MAX_PERF)
+ 			sched_set_itmt_core_prio((int)cur_high, cpu);
+ 	}
+-
+-free_cpufreq_put:
+ 	cpufreq_cpu_put(policy);
+ 
+ 	if (!highest_perf_changed)
+ 		cpufreq_update_policy(cpu);
+ 
+-	mutex_unlock(&amd_pstate_driver_lock);
+ }
+ 
+ /*
+@@ -1172,11 +1173,11 @@ static ssize_t store_energy_performance_preference(
+ 	if (ret < 0)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&amd_pstate_limits_lock);
++	guard(mutex)(&amd_pstate_limits_lock);
++
+ 	ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+-	mutex_unlock(&amd_pstate_limits_lock);
+ 
+-	return ret ?: count;
++	return ret ? ret : count;
+ }
+ 
+ static ssize_t show_energy_performance_preference(
+@@ -1340,13 +1341,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
+ static ssize_t status_show(struct device *dev,
+ 			   struct device_attribute *attr, char *buf)
+ {
+-	ssize_t ret;
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
+-	ret = amd_pstate_show_status(buf);
+-	mutex_unlock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
+ 
+-	return ret;
++	return amd_pstate_show_status(buf);
+ }
+ 
+ static ssize_t status_store(struct device *a, struct device_attribute *b,
+@@ -1355,9 +1353,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
+ 	char *p = memchr(buf, '\n', count);
+ 	int ret;
+ 
+-	mutex_lock(&amd_pstate_driver_lock);
++	guard(mutex)(&amd_pstate_driver_lock);
+ 	ret = amd_pstate_update_status(buf, p ? p - buf : count);
+-	mutex_unlock(&amd_pstate_driver_lock);
+ 
+ 	return ret < 0 ? ret : count;
+ }
+@@ -1605,25 +1602,17 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+ 
+ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+ {
+-	struct cppc_perf_ctrls perf_ctrls;
+-	u64 value, max_perf;
++	u64 max_perf;
+ 	int ret;
+ 
+ 	ret = amd_pstate_cppc_enable(true);
+ 	if (ret)
+ 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+ 
+-	value = READ_ONCE(cpudata->cppc_req_cached);
+ 	max_perf = READ_ONCE(cpudata->highest_perf);
+ 
+-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+-		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+-	} else {
+-		perf_ctrls.max_perf = max_perf;
+-		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+-		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+-		cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+-	}
++	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
++	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+ }
+ 
+ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+@@ -1632,56 +1621,26 @@ static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+ 
+ 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+ 
+-	if (cppc_state == AMD_PSTATE_ACTIVE) {
+-		amd_pstate_epp_reenable(cpudata);
+-		cpudata->suspended = false;
+-	}
++	amd_pstate_epp_reenable(cpudata);
++	cpudata->suspended = false;
+ 
+ 	return 0;
+ }
+ 
+-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+-{
+-	struct amd_cpudata *cpudata = policy->driver_data;
+-	struct cppc_perf_ctrls perf_ctrls;
+-	int min_perf;
+-	u64 value;
+-
+-	min_perf = READ_ONCE(cpudata->lowest_perf);
+-	value = READ_ONCE(cpudata->cppc_req_cached);
+-
+-	mutex_lock(&amd_pstate_limits_lock);
+-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+-		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+-
+-		/* Set max perf same as min perf */
+-		value &= ~AMD_CPPC_MAX_PERF(~0L);
+-		value |= AMD_CPPC_MAX_PERF(min_perf);
+-		value &= ~AMD_CPPC_MIN_PERF(~0L);
+-		value |= AMD_CPPC_MIN_PERF(min_perf);
+-		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+-	} else {
+-		perf_ctrls.desired_perf = 0;
+-		perf_ctrls.min_perf = min_perf;
+-		perf_ctrls.max_perf = min_perf;
+-		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+-		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+-		cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+-	}
+-	mutex_unlock(&amd_pstate_limits_lock);
+-}
+-
+ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+ {
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+-
+-	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
++	int min_perf;
+ 
+ 	if (cpudata->suspended)
+ 		return 0;
+ 
+-	if (cppc_state == AMD_PSTATE_ACTIVE)
+-		amd_pstate_epp_offline(policy);
++	min_perf = READ_ONCE(cpudata->lowest_perf);
++
++	guard(mutex)(&amd_pstate_limits_lock);
++
++	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
++	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
+ 
+ 	return 0;
+ }
+@@ -1711,13 +1670,11 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+ 	struct amd_cpudata *cpudata = policy->driver_data;
+ 
+ 	if (cpudata->suspended) {
+-		mutex_lock(&amd_pstate_limits_lock);
++		guard(mutex)(&amd_pstate_limits_lock);
+ 
+ 		/* enable amd pstate from suspend state*/
+ 		amd_pstate_epp_reenable(cpudata);
+ 
+-		mutex_unlock(&amd_pstate_limits_lock);
+-
+ 		cpudata->suspended = false;
+ 	}
+ 
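
Most of the amd-pstate churn above is a conversion from explicit mutex_lock()/mutex_unlock() pairs to guard(mutex)(), the kernel's scope-based lock guard: the lock is released automatically on every return path, which is what lets the error-path unlocks and the free_cpufreq_put label disappear. The kernel macro is built on compiler cleanup attributes; a plain-C sketch of the same mechanism using pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define guard_mutex(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static int show_status(void)
{
	guard_mutex(&driver_lock);
	/* Every return path from here on drops the lock automatically,
	 * mirroring how status_show() above lost its unlock call. */
	return 42;
}

int main(void)
{
	printf("status = %d\n", show_status());
	return 0;
}
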
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 60c64b81d2c32c..1992d1176c7ed1 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -937,13 +937,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
+ 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+ 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
+-		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
++		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
++		     EFI_MEMORY_RUNTIME))
+ 		snprintf(pos, size, "|attr=0x%016llx]",
+ 			 (unsigned long long)attr);
+ 	else
+ 		snprintf(pos, size,
+-			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
++			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
++			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
+ 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
+ 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
+ 			 attr & EFI_MEMORY_SP			? "SP"  : "",
+diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
+index c41e7b2091cdd1..8ad3efb9b1ff16 100644
+--- a/drivers/firmware/efi/libstub/randomalloc.c
++++ b/drivers/firmware/efi/libstub/randomalloc.c
+@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ 	if (md->type != EFI_CONVENTIONAL_MEMORY)
+ 		return 0;
+ 
++	if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
++		return 0;
++
+ 	if (efi_soft_reserve_enabled() &&
+ 	    (md->attribute & EFI_MEMORY_SP))
+ 		return 0;
+diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
+index d694bcfa1074e9..bf676dd127a143 100644
+--- a/drivers/firmware/efi/libstub/relocate.c
++++ b/drivers/firmware/efi/libstub/relocate.c
+@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ 		if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ 			continue;
+ 
++		if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
++			continue;
++
+ 		if (efi_soft_reserve_enabled() &&
+ 		    (desc->attribute & EFI_MEMORY_SP))
+ 			continue;
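
Both EFI stub allocators now skip descriptors flagged EFI_MEMORY_HOT_PLUGGABLE, so the kernel image and randomized allocations never land in memory that firmware may later offline. The eligibility test reduces to a small predicate; the attribute bit values below follow recent UEFI specifications but should be treated as illustrative rather than authoritative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_CONVENTIONAL_MEMORY		7
#define EFI_MEMORY_SP			(1ULL << 18)
#define EFI_MEMORY_HOT_PLUGGABLE	(1ULL << 20)	/* assumed bit position */

struct desc { uint32_t type; uint64_t attribute; };

static bool eligible(const struct desc *d, bool soft_reserve)
{
	if (d->type != EFI_CONVENTIONAL_MEMORY)
		return false;
	if (d->attribute & EFI_MEMORY_HOT_PLUGGABLE)
		return false;
	if (soft_reserve && (d->attribute & EFI_MEMORY_SP))
		return false;
	return true;
}

int main(void)
{
	struct desc d = { EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_HOT_PLUGGABLE };

	printf("eligible: %d\n", eligible(&d, true));
	return 0;
}
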
+diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
+index 2b4c2826f57251..3f10b23ec941b5 100644
+--- a/drivers/firmware/qcom/qcom_scm-smc.c
++++ b/drivers/firmware/qcom/qcom_scm-smc.c
+@@ -173,6 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 		smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+ 
+ 	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
++		if (!mempool)
++			return -EINVAL;
++
+ 		args_virt = qcom_tzmem_alloc(mempool,
+ 					     SCM_SMC_N_EXT_ARGS * sizeof(u64),
+ 					     flag);
+diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
+index 5321ef98f4427d..64908f1a5e7f9b 100644
+--- a/drivers/gpio/gpio-bcm-kona.c
++++ b/drivers/gpio/gpio-bcm-kona.c
+@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
+ struct bcm_kona_gpio_bank {
+ 	int id;
+ 	int irq;
++	/*
++	 * Used to keep track of lock/unlock operations for each GPIO in the
++	 * bank.
++	 *
++	 * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
++	 * unlock count for all GPIOs is 0 by default. Each unlock increments
++	 * the counter, and each lock decrements the counter.
++	 *
++	 * The lock function only locks the GPIO once its unlock counter is
++	 * down to 0. This is necessary because the GPIO is unlocked in two
++	 * places in this driver: once for requested GPIOs, and once for
++	 * requested IRQs. Since it is possible for a GPIO to be requested
++	 * as both a GPIO and an IRQ, we need to ensure that we don't lock it
++	 * too early.
++	 */
++	u8 gpio_unlock_count[GPIO_PER_BANK];
+ 	/* Used in the interrupt handler */
+ 	struct bcm_kona_gpio *kona_gpio;
+ };
+@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
+ 	u32 val;
+ 	unsigned long flags;
+ 	int bank_id = GPIO_BANK(gpio);
++	int bit = GPIO_BIT(gpio);
++	struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
+ 
+-	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
++	if (bank->gpio_unlock_count[bit] == 0) {
++		dev_err(kona_gpio->gpio_chip.parent,
++			"Unbalanced locks for GPIO %u\n", gpio);
++		return;
++	}
+ 
+-	val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+-	val |= BIT(gpio);
+-	bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++	if (--bank->gpio_unlock_count[bit] == 0) {
++		raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+-	raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++		val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
++		val |= BIT(bit);
++		bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++
++		raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++	}
+ }
+ 
+ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
+@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
+ 	u32 val;
+ 	unsigned long flags;
+ 	int bank_id = GPIO_BANK(gpio);
++	int bit = GPIO_BIT(gpio);
++	struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
+ 
+-	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
++	if (bank->gpio_unlock_count[bit] == 0) {
++		raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+-	val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+-	val &= ~BIT(gpio);
+-	bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
++		val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
++		val &= ~BIT(bit);
++		bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ 
+-	raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++		raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
++	}
++
++	++bank->gpio_unlock_count[bit];
+ }
+ 
+ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
+@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
+ 
+ 	kona_gpio = irq_data_get_irq_chip_data(d);
+ 	reg_base = kona_gpio->reg_base;
++
+ 	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+ 	val = readl(reg_base + GPIO_INT_MASK(bank_id));
+@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
+ 
+ 	kona_gpio = irq_data_get_irq_chip_data(d);
+ 	reg_base = kona_gpio->reg_base;
++
+ 	raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ 
+ 	val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
+@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
+ static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
+ {
+ 	struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
++	unsigned int gpio = d->hwirq;
+ 
+-	return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
++	/*
++	 * We need to unlock the GPIO before any other operations are performed
++	 * on the relevant GPIO configuration registers
++	 */
++	bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
++
++	return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
+ }
+ 
+ static void bcm_kona_gpio_irq_relres(struct irq_data *d)
+ {
+ 	struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
++	unsigned int gpio = d->hwirq;
++
++	/* Once we no longer use it, lock the GPIO again */
++	bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
+ 
+-	gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
++	gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
+ }
+ 
+ static struct irq_chip bcm_gpio_irq_chip = {
+@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
+ 		bank->irq = platform_get_irq(pdev, i);
+ 		bank->kona_gpio = kona_gpio;
+ 		if (bank->irq < 0) {
+-			dev_err(dev, "Couldn't get IRQ for bank %d", i);
++			dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
+ 			ret = -ENOENT;
+ 			goto err_irq_domain;
+ 		}
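
The heart of the gpio-bcm-kona change is the per-GPIO unlock counter: a pin can now be unlocked once by the GPIO request path and once more by the IRQ request path, and the hardware write-lock is only restored when the count drops back to zero. Stripped of the register accesses, the counting logic looks like this:

#include <stdio.h>

static unsigned int unlock_count;
static int hw_locked = 1;

static void gpio_unlock(void)
{
	if (unlock_count == 0)
		hw_locked = 0;		/* first user actually unlocks */
	unlock_count++;
}

static void gpio_lock(void)
{
	if (unlock_count == 0) {
		fprintf(stderr, "unbalanced lock\n");
		return;
	}
	if (--unlock_count == 0)
		hw_locked = 1;		/* last user re-locks the hardware */
}

int main(void)
{
	gpio_unlock();	/* GPIO requested */
	gpio_unlock();	/* IRQ requested on the same line */
	gpio_lock();	/* IRQ released: one user still holds it */
	printf("locked after one release: %d\n", hw_locked);
	gpio_lock();
	printf("locked after both releases: %d\n", hw_locked);
	return 0;
}
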
+diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
+index 75a3633ceddbb8..222279a9d82b2d 100644
+--- a/drivers/gpio/gpio-stmpe.c
++++ b/drivers/gpio/gpio-stmpe.c
+@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 		[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
+ 		[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
+ 	};
+-	int i, j;
++	int ret, i, j;
+ 
+ 	/*
+ 	 * STMPE1600: to be able to get IRQ from pins,
+@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 	 * GPSR or GPCR registers
+ 	 */
+ 	if (stmpe->partnum == STMPE1600) {
+-		stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+-		stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++		ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
++		if (ret < 0) {
++			dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
++			goto err;
++		}
++		ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
++		if (ret < 0) {
++			dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
++			goto err;
++		}
+ 	}
+ 
+ 	for (i = 0; i < CACHE_NR_REGS; i++) {
+@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
+ 		}
+ 	}
+ 
++err:
+ 	mutex_unlock(&stmpe_gpio->irq_lock);
+ }
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 1f9fe50bba0058..f7746c57ba76a7 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1689,6 +1689,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_wake = "PNP0C50:00@8",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from GPIO 11
++		 * Found in BIOS 1.04
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_interrupt = "AMDI0030:00@11",
++		},
++	},
+ 	{} /* Terminating entry */
+ };
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 679ed764cb143c..ca2f58a2cd45e7 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -904,13 +904,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
+ 	}
+ 
+ 	if (gc->ngpio == 0) {
+-		chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
++		dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (gc->ngpio > FASTPATH_NGPIO)
+-		chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
+-			gc->ngpio, FASTPATH_NGPIO);
++		dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
++			 gc->ngpio, FASTPATH_NGPIO);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 448f9e742983f3..75c0f64602ed94 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -3790,9 +3790,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
+ 		if (err == -ENODEV) {
+ 			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ 			err = 0;
+-			goto out;
++		} else {
++			dev_err(adev->dev, "fail to initialize cap microcode\n");
+ 		}
+-		dev_err(adev->dev, "fail to initialize cap microcode\n");
++		goto out;
+ 	}
+ 
+ 	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index bd595b1db15f27..1d538e874140c6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -298,7 +298,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
+ 	return 0;
+ 
+ free_gang_ctx_bo:
+-	amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo);
++	amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
+ cleanup:
+ 	uninit_queue(*q);
+ 	*q = NULL;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 21bd635bcdfc15..c0b98749dde707 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -609,7 +609,8 @@ static int smu_sys_set_pp_table(void *handle,
+ 		return -EIO;
+ 	}
+ 
+-	if (!smu_table->hardcode_pptable) {
++	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
++		kfree(smu_table->hardcode_pptable);
+ 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ 		if (!smu_table->hardcode_pptable)
+ 			return -ENOMEM;
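
The smu fix above turns a stale-cache bug into a "reallocate when the cached buffer is too small" pattern: because kfree() of a NULL pointer is safe, the first-allocation and regrow cases collapse into one branch. A generic standalone version, with invented names:

#include <stdlib.h>
#include <string.h>

struct cache { void *buf; size_t cap; };

static int cache_store(struct cache *c, const void *data, size_t size)
{
	if (!c->buf || c->cap < size) {
		free(c->buf);			/* safe on NULL */
		c->buf = calloc(1, size);
		if (!c->buf)
			return -1;
		c->cap = size;
	}
	memcpy(c->buf, data, size);
	return 0;
}

int main(void)
{
	struct cache c = { 0 };
	char small[16] = "short", big[64] = "much longer table";

	cache_store(&c, small, sizeof(small));
	cache_store(&c, big, sizeof(big));	/* forces the regrow */
	free(c.buf);
	return 0;
}
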
+diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
+index 6ee51003de3ce6..9fa13da513d24e 100644
+--- a/drivers/gpu/drm/display/drm_dp_helper.c
++++ b/drivers/gpu/drm/display/drm_dp_helper.c
+@@ -2421,7 +2421,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+ {
+ 	u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
+ 
+-	switch (bpp_increment_dpcd) {
++	switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
+ 	case DP_DSC_BITS_PER_PIXEL_1_16:
+ 		return 16;
+ 	case DP_DSC_BITS_PER_PIXEL_1_8:
+diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+index 5c397a2df70e28..5d27e1c733c527 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
+ 		return PTR_ERR(ppgtt);
+ 
+ 	if (!ppgtt->vm.allocate_va_range)
+-		goto err_ppgtt_cleanup;
++		goto ppgtt_vm_put;
+ 
+ 	/*
+ 	 * While we only allocate the page tables here and so we could
+@@ -236,7 +236,7 @@ static int igt_ppgtt_alloc(void *arg)
+ 			goto retry;
+ 	}
+ 	i915_gem_ww_ctx_fini(&ww);
+-
++ppgtt_vm_put:
+ 	i915_vm_put(&ppgtt->vm);
+ 	return err;
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+index e084406ebb0711..4f110be6b750d3 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+@@ -391,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
+ 		.type = INTF_DP,
+ 		.controller_id = MSM_DP_CONTROLLER_2,
+ 		.prog_fetch_lines_worst_case = 24,
+-		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+-		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
++		.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
++		.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ 	}, {
+ 		.name = "intf_7", .id = INTF_7,
+ 		.base = 0x3b000, .len = 0x280,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+index 16f144cbc0c986..8ff496082902b1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+@@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
+ 	if (!conn_state || !conn_state->connector) {
+ 		DPU_ERROR("invalid connector state\n");
+ 		return -EINVAL;
+-	} else if (conn_state->connector->status != connector_status_connected) {
+-		DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
+-		return -EINVAL;
+ 	}
+ 
+ 	crtc = conn_state->crtc;
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index fba78193127dee..f775638d239a5c 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -787,8 +787,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 			goto out;
+ 
+ 		if (!submit->cmd[i].size ||
+-			((submit->cmd[i].size + submit->cmd[i].offset) >
+-				obj->size / 4)) {
++		    (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ 			SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ 			ret = -EINVAL;
+ 			goto out;
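
The msm change above swaps an open-coded addition for size_add(), which saturates rather than wraps, so a crafted size/offset pair can no longer overflow past the obj->size / 4 bound. A userspace equivalent reaches the same outcome with a checked-add builtin that reports wraparound instead of hiding it:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool cmd_in_bounds(size_t size, size_t offset, size_t limit)
{
	size_t sum;

	if (__builtin_add_overflow(size, offset, &sum))
		return false;		/* saturation/overflow: always out of bounds */
	return sum <= limit;
}

int main(void)
{
	printf("%d\n", cmd_in_bounds((size_t)-1, 2, 1024));	/* 0: would wrap */
	printf("%d\n", cmd_in_bounds(100, 24, 1024));		/* 1: fits */
	return 0;
}
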
+diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
+index 0b3fbee3d37a8a..44f5c72d46c3f9 100644
+--- a/drivers/gpu/drm/panthor/panthor_drv.c
++++ b/drivers/gpu/drm/panthor/panthor_drv.c
+@@ -802,6 +802,7 @@ static void panthor_query_group_priorities_info(struct drm_file *file,
+ {
+ 	int prio;
+ 
++	memset(arg, 0, sizeof(*arg));
+ 	for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
+ 		if (!group_priority_permit(file, prio))
+ 			arg->allowed_mask |= BIT(prio);
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+index 8180625d5866d1..be4ffc0ab14fee 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
+ 	for (timeout = 10; timeout > 0; --timeout) {
+ 		if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
+ 		    (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
+-		    (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
++		    (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
+ 			break;
+ 
+ 		usleep_range(1000, 2000);
+diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+index f8114d11f2d158..a6b276f1d6ee15 100644
+--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
++++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+@@ -142,7 +142,6 @@
+ 
+ #define CLOCKSET1			0x101c
+ #define CLOCKSET1_LOCK_PHY		(1 << 17)
+-#define CLOCKSET1_LOCK			(1 << 16)
+ #define CLOCKSET1_CLKSEL		(1 << 8)
+ #define CLOCKSET1_CLKINSEL_EXTAL	(0 << 2)
+ #define CLOCKSET1_CLKINSEL_DIG		(1 << 2)
+diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+index b99217b4e05d7d..90c6269ccd2920 100644
+--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
++++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+@@ -311,11 +311,11 @@ int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
+ 	dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
+ 
+ 	/*
+-	 * The RZ DU uses the VSP1 for memory access, and is limited
+-	 * to frame sizes of 1920x1080.
++	 * The RZ DU was designed to support a frame size of 1920x1200 (landscape)
++	 * or 1200x1920 (portrait).
+ 	 */
+ 	dev->mode_config.max_width = 1920;
+-	dev->mode_config.max_height = 1080;
++	dev->mode_config.max_height = 1920;
+ 
+ 	rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
+ 
+diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+index 4ba869e0e794c7..cbd9584af32995 100644
+--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+@@ -70,10 +70,17 @@ static int light_up_connector(struct kunit *test,
+ 	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+ 
++retry:
+ 	conn_state = drm_atomic_get_connector_state(state, connector);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ 
+ 	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
++	if (ret == -EDEADLK) {
++		drm_atomic_state_clear(state);
++		ret = drm_modeset_backoff(ctx);
++		if (!ret)
++			goto retry;
++	}
+ 	KUNIT_EXPECT_EQ(test, ret, 0);
+ 
+ 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index 1ad711f8d2a8bf..45f22ead3e61d3 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -700,7 +700,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+ {
+ 	dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+ 
+-	/* clear the irqstatus for newly enabled irqs */
++	/* clear the irqstatus for irqs that will be enabled */
+ 	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+ 
+ 	dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+@@ -708,6 +708,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+ 
+ 	dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+ 
++	/* clear the irqstatus for irqs that were disabled */
++	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
++
+ 	/* flush posted write */
+ 	dispc_k2g_read_irqenable(dispc);
+ }
+@@ -780,24 +783,20 @@ static
+ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+ {
+ 	unsigned int i;
+-	u32 top_clear = 0;
+ 
+ 	for (i = 0; i < dispc->feat->num_vps; ++i) {
+-		if (clearmask & DSS_IRQ_VP_MASK(i)) {
++		if (clearmask & DSS_IRQ_VP_MASK(i))
+ 			dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+-			top_clear |= BIT(i);
+-		}
+ 	}
+ 	for (i = 0; i < dispc->feat->num_planes; ++i) {
+-		if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
++		if (clearmask & DSS_IRQ_PLANE_MASK(i))
+ 			dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+-			top_clear |= BIT(4 + i);
+-		}
+ 	}
+ 	if (dispc->feat->subrev == DISPC_K2G)
+ 		return;
+ 
+-	dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
++	/* always clear the top level irqstatus */
++	dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
+ 
+ 	/* Flush posted writes */
+ 	dispc_read(dispc, DISPC_IRQSTATUS);
+@@ -843,7 +842,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ 
+ 	old_mask = dispc_k3_read_irqenable(dispc);
+ 
+-	/* clear the irqstatus for newly enabled irqs */
++	/* clear the irqstatus for irqs that will be enabled */
+ 	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+ 
+ 	for (i = 0; i < dispc->feat->num_vps; ++i) {
+@@ -868,6 +867,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ 	if (main_disable)
+ 		dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+ 
++	/* clear the irqstatus for irqs that were disabled */
++	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
++
+ 	/* Flush posted writes */
+ 	dispc_read(dispc, DISPC_IRQENABLE_SET);
+ }
+@@ -2767,8 +2769,12 @@ static void dispc_init_errata(struct dispc_device *dispc)
+  */
+ static void dispc_softreset_k2g(struct dispc_device *dispc)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&dispc->tidss->wait_lock, flags);
+ 	dispc_set_irqenable(dispc, 0);
+ 	dispc_read_and_clear_irqstatus(dispc);
++	spin_unlock_irqrestore(&dispc->tidss->wait_lock, flags);
+ 
+ 	for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ 		VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
+index 604334ef526a04..d053dbb9d28c5d 100644
+--- a/drivers/gpu/drm/tidss/tidss_irq.c
++++ b/drivers/gpu/drm/tidss/tidss_irq.c
+@@ -60,7 +60,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
+ 	unsigned int id;
+ 	dispc_irq_t irqstatus;
+ 
++	spin_lock(&tidss->wait_lock);
+ 	irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
++	spin_unlock(&tidss->wait_lock);
+ 
+ 	for (id = 0; id < tidss->num_crtcs; id++) {
+ 		struct drm_crtc *crtc = tidss->crtcs[id];
+diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
+index ecf06e8e9fbccb..c49abb90954d49 100644
+--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
++++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
+@@ -384,6 +384,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ 	struct drm_v3d_perfmon_destroy *req = data;
++	struct v3d_dev *v3d = v3d_priv->v3d;
+ 	struct v3d_perfmon *perfmon;
+ 
+ 	mutex_lock(&v3d_priv->perfmon.lock);
+@@ -393,6 +394,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
+ 	if (!perfmon)
+ 		return -EINVAL;
+ 
++	/* If the active perfmon is being destroyed, stop it first */
++	if (perfmon == v3d->active_perfmon)
++		v3d_perfmon_stop(v3d, perfmon, false);
++
+ 	v3d_perfmon_put(perfmon);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+index 6d31573ed1765f..a79ad2da070c21 100644
+--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+@@ -41,14 +41,6 @@
+ 
+ #define OAG_OABUFFER		XE_REG(0xdb08)
+ #define  OABUFFER_SIZE_MASK	REG_GENMASK(5, 3)
+-#define  OABUFFER_SIZE_128K	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
+-#define  OABUFFER_SIZE_256K	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
+-#define  OABUFFER_SIZE_512K	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
+-#define  OABUFFER_SIZE_1M	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
+-#define  OABUFFER_SIZE_2M	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
+-#define  OABUFFER_SIZE_4M	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
+-#define  OABUFFER_SIZE_8M	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
+-#define  OABUFFER_SIZE_16M	REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
+ #define  OAG_OABUFFER_MEMORY_SELECT		REG_BIT(0) /* 0: PPGTT, 1: GGTT */
+ 
+ #define OAG_OACONTROL				XE_REG(0xdaf4)
+@@ -67,6 +59,7 @@
+ #define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
+ #define  OAG_OA_DEBUG_DISABLE_MMIO_TRG			REG_BIT(14)
+ #define  OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL	REG_BIT(13)
++#define  OAG_OA_DEBUG_BUF_SIZE_SELECT			REG_BIT(12)
+ #define  OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL	REG_BIT(8)
+ #define  OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL	REG_BIT(7)
+ #define  OAG_OA_DEBUG_INCLUDE_CLK_RATIO			REG_BIT(6)
+diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
+index 22f0f1a6dfd55d..e8eaeb46460612 100644
+--- a/drivers/gpu/drm/xe/xe_drm_client.c
++++ b/drivers/gpu/drm/xe/xe_drm_client.c
+@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
+ 	XE_WARN_ON(bo->client);
+ 	XE_WARN_ON(!list_empty(&bo->client_link));
+ 
+-	spin_lock(&client->bos_lock);
+ 	bo->client = xe_drm_client_get(client);
++	spin_lock(&client->bos_lock);
+ 	list_add_tail(&bo->client_link, &client->bos_list);
+ 	spin_unlock(&client->bos_lock);
+ }
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index d8af82dcdce4b7..913f6ba606370b 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -90,6 +90,8 @@ struct xe_oa_open_param {
+ 	struct drm_xe_sync __user *syncs_user;
+ 	int num_syncs;
+ 	struct xe_sync_entry *syncs;
++	size_t oa_buffer_size;
++	int wait_num_reports;
+ };
+ 
+ struct xe_oa_config_bo {
+@@ -234,11 +236,9 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
+ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
+ {
+ 	u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
++	u32 tail, hw_tail, partial_report_size, available;
+ 	int report_size = stream->oa_buffer.format->size;
+-	u32 tail, hw_tail;
+ 	unsigned long flags;
+-	bool pollin;
+-	u32 partial_report_size;
+ 
+ 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+ 
+@@ -282,12 +282,12 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
+ 
+ 	stream->oa_buffer.tail = tail;
+ 
+-	pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
+-				 stream->oa_buffer.head) >= report_size;
++	available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
++	stream->pollin = available >= stream->wait_num_reports * report_size;
+ 
+ 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+ 
+-	return pollin;
++	return stream->pollin;
+ }
+ 
+ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
+@@ -295,10 +295,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
+ 	struct xe_oa_stream *stream =
+ 		container_of(hrtimer, typeof(*stream), poll_check_timer);
+ 
+-	if (xe_oa_buffer_check_unlocked(stream)) {
+-		stream->pollin = true;
++	if (xe_oa_buffer_check_unlocked(stream))
+ 		wake_up(&stream->poll_wq);
+-	}
+ 
+ 	hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
+ 
+@@ -397,11 +395,19 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
+ 
+ static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
+ {
+-	struct xe_mmio *mmio = &stream->gt->mmio;
+ 	u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
+-	u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
++	int size_exponent = __ffs(stream->oa_buffer.bo->size);
++	u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
++	struct xe_mmio *mmio = &stream->gt->mmio;
+ 	unsigned long flags;
+ 
++	/*
++	 * If oa buffer size is more than 16MB (exponent greater than 24), the
++	 * oa buffer size field is multiplied by 8 in xe_oa_enable_metric_set.
++	 */
++	oa_buf |= REG_FIELD_PREP(OABUFFER_SIZE_MASK,
++		size_exponent > 24 ? size_exponent - 20 : size_exponent - 17);
++
+ 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+ 
+ 	xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
+@@ -863,15 +869,12 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
+ 	xe_file_put(stream->xef);
+ }
+ 
+-static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
++static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
+ {
+ 	struct xe_bo *bo;
+ 
+-	BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
+-	BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);
+-
+ 	bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
+-				  XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
++				  size, ttm_bo_type_kernel,
+ 				  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
+ 	if (IS_ERR(bo))
+ 		return PTR_ERR(bo);
+@@ -1049,6 +1052,13 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
+ 			     0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ }
+ 
++static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
++{
++	return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
++			     stream->oa_buffer.bo->size > SZ_16M ?
++			     OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
++}
++
+ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
+ {
+ 	struct xe_mmio *mmio = &stream->gt->mmio;
+@@ -1081,6 +1091,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
+ 	xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
+ 			_MASKED_BIT_ENABLE(oa_debug) |
+ 			oag_report_ctx_switches(stream) |
++			oag_buf_size_select(stream) |
+ 			oag_configure_mmio_trigger(stream, true));
+ 
+ 	xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
+@@ -1222,6 +1233,28 @@ static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+ 	return 0;
+ }
+ 
++static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
++					 struct xe_oa_open_param *param)
++{
++	if (!is_power_of_2(value) || value < SZ_128K || value > SZ_128M) {
++		drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value);
++		return -EINVAL;
++	}
++	param->oa_buffer_size = value;
++	return 0;
++}
++
++static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
++					   struct xe_oa_open_param *param)
++{
++	if (!value) {
++		drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value);
++		return -EINVAL;
++	}
++	param->wait_num_reports = value;
++	return 0;
++}
++
+ static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
+ 				    struct xe_oa_open_param *param)
+ {
+@@ -1242,6 +1275,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
+ 	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+ 	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ 	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++	[DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
++	[DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
+ };
+ 
+ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
+@@ -1256,6 +1291,8 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
+ 	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
+ 	[DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ 	[DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
++	[DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
++	[DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
+ };
+ 
+ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+@@ -1515,7 +1552,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
+ 
+ static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
+ {
+-	struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
++	struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
+ 	void __user *uaddr = (void __user *)arg;
+ 
+ 	if (copy_to_user(uaddr, &info, sizeof(info)))
+@@ -1601,7 +1638,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
+ 	}
+ 
+ 	/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
+-	if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
++	if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
+ 		drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
+ 		return -EINVAL;
+ 	}
+@@ -1732,6 +1769,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 	stream->periodic = param->period_exponent > 0;
+ 	stream->period_exponent = param->period_exponent;
+ 	stream->no_preempt = param->no_preempt;
++	stream->wait_num_reports = param->wait_num_reports;
+ 
+ 	stream->xef = xe_file_get(param->xef);
+ 	stream->num_syncs = param->num_syncs;
+@@ -1745,9 +1783,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 	if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
+ 	    stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
+ 		stream->oa_buffer.circ_size =
+-			XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
++			param->oa_buffer_size -
++			param->oa_buffer_size % stream->oa_buffer.format->size;
+ 	else
+-		stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;
++		stream->oa_buffer.circ_size = param->oa_buffer_size;
+ 
+ 	if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
+ 		/* If we don't find the context offset, just return error */
+@@ -1790,7 +1829,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 		goto err_fw_put;
+ 	}
+ 
+-	ret = xe_oa_alloc_oa_buffer(stream);
++	ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size);
+ 	if (ret)
+ 		goto err_fw_put;
+ 
+@@ -2087,6 +2126,17 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
+ 	}
+ 
++	if (!param.oa_buffer_size)
++		param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
++
++	if (!param.wait_num_reports)
++		param.wait_num_reports = 1;
++	if (param.wait_num_reports > param.oa_buffer_size / f->size) {
++		drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports);
++		ret = -EINVAL;
++		goto err_exec_q;
++	}
++
+ 	ret = xe_oa_parse_syncs(oa, &param);
+ 	if (ret)
+ 		goto err_exec_q;
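
For context on the open-time validation added in the hunk above, here is a
minimal standalone sketch of the same checks: a zero buffer size falls back
to the 16 MiB default, wait_num_reports defaults to 1, and the wait
threshold may not exceed the number of reports that fit in the chosen
buffer. The names (validate_oa_params, the 256-byte report size) are
illustrative, not the driver's real structures; only the arithmetic
mirrors the patch.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_OA_BUFFER_SIZE (16u << 20)	/* SZ_16M */

static int validate_oa_params(uint32_t *buf_size, uint32_t *wait_reports,
			      uint32_t report_size)
{
	if (!*buf_size)
		*buf_size = DEFAULT_OA_BUFFER_SIZE;
	if (!*wait_reports)
		*wait_reports = 1;
	/* cannot wait for more reports than the buffer can ever hold */
	if (*wait_reports > *buf_size / report_size)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	uint32_t size = 0, wait = 0;

	if (validate_oa_params(&size, &wait, 256) == 0)
		printf("buf=%u wait=%u\n", size, wait);
	return 0;
}
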
+diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
+index fea9d981e414fa..2dcd3b9562e977 100644
+--- a/drivers/gpu/drm/xe/xe_oa_types.h
++++ b/drivers/gpu/drm/xe/xe_oa_types.h
+@@ -15,7 +15,7 @@
+ #include "regs/xe_reg_defs.h"
+ #include "xe_hw_engine_types.h"
+ 
+-#define XE_OA_BUFFER_SIZE SZ_16M
++#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
+ 
+ enum xe_oa_report_header {
+ 	HDR_32_BIT = 0,
+@@ -218,6 +218,9 @@ struct xe_oa_stream {
+ 	/** @pollin: Whether there is data available to read */
+ 	bool pollin;
+ 
++	/** @wait_num_reports: Number of reports to wait for before signalling pollin */
++	int wait_num_reports;
++
+ 	/** @periodic: Whether periodic sampling is currently enabled */
+ 	bool periodic;
+ 
+diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
+index 170ae72d1a7bb7..1bdffe6315d547 100644
+--- a/drivers/gpu/drm/xe/xe_query.c
++++ b/drivers/gpu/drm/xe/xe_query.c
+@@ -670,7 +670,9 @@ static int query_oa_units(struct xe_device *xe,
+ 			du->oa_unit_id = u->oa_unit_id;
+ 			du->oa_unit_type = u->type;
+ 			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
+-			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS;
++			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
++					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
++					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
+ 
+ 			j = 0;
+ 			for_each_hw_engine(hwe, gt, hwe_id) {
+diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
+index 30a3cfbaaa094d..4ff023b5d040de 100644
+--- a/drivers/gpu/drm/xe/xe_trace_bo.h
++++ b/drivers/gpu/drm/xe/xe_trace_bo.h
+@@ -55,8 +55,8 @@ TRACE_EVENT(xe_bo_move,
+ 	    TP_STRUCT__entry(
+ 		     __field(struct xe_bo *, bo)
+ 		     __field(size_t, size)
+-		     __field(u32, new_placement)
+-		     __field(u32, old_placement)
++		     __string(new_placement_name, xe_mem_type_to_name[new_placement])
++		     __string(old_placement_name, xe_mem_type_to_name[old_placement])
+ 		     __string(device_id, __dev_name_bo(bo))
+ 		     __field(bool, move_lacks_source)
+ 			),
+@@ -64,15 +64,15 @@ TRACE_EVENT(xe_bo_move,
+ 	    TP_fast_assign(
+ 		   __entry->bo      = bo;
+ 		   __entry->size = bo->size;
+-		   __entry->new_placement = new_placement;
+-		   __entry->old_placement = old_placement;
++		   __assign_str(new_placement_name);
++		   __assign_str(old_placement_name);
+ 		   __assign_str(device_id);
+ 		   __entry->move_lacks_source = move_lacks_source;
+ 		   ),
+ 	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ 		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+-		      xe_mem_type_to_name[__entry->old_placement],
+-		      xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
++		      __get_str(old_placement_name),
++		      __get_str(new_placement_name), __get_str(device_id))
+ );
+ 
+ DECLARE_EVENT_CLASS(xe_vma,
+diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+index 423856cc18d400..d414421f8c131e 100644
+--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
++++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
+ 	return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
+ }
+ 
++static u32 get_wopcm_size(struct xe_device *xe)
++{
++	u32 wopcm_size;
++	u64 val;
++
++	val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
++	val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
++
++	switch (val) {
++	case 0x5 ... 0x6:
++		val--;
++		fallthrough;
++	case 0x0 ... 0x3:
++		wopcm_size = (1U << val) * SZ_1M;
++		break;
++	default:
++		WARN(1, "Missing case wopcm_size=%llx\n", val);
++		wopcm_size = 0;
++	}
++
++	return wopcm_size;
++}
++
+ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ {
+ 	struct xe_tile *tile = xe_device_get_root_tile(xe);
+ 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+-	u64 stolen_size;
++	u64 stolen_size, wopcm_size;
+ 	u64 tile_offset;
+ 	u64 tile_size;
+ 
+@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ 	if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
+ 		return 0;
+ 
++	/* Carve out the top of DSM as it contains the reserved WOPCM region */
++	wopcm_size = get_wopcm_size(xe);
++	if (drm_WARN_ON(&xe->drm, !wopcm_size))
++		return 0;
++
+ 	stolen_size = tile_size - mgr->stolen_base;
++	stolen_size -= wopcm_size;
+ 
+ 	/* Verify usage fits in the actual resource available */
+ 	if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
+@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ 	return ALIGN_DOWN(stolen_size, SZ_1M);
+ }
+ 
+-static u32 get_wopcm_size(struct xe_device *xe)
+-{
+-	u32 wopcm_size;
+-	u64 val;
+-
+-	val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
+-	val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+-
+-	switch (val) {
+-	case 0x5 ... 0x6:
+-		val--;
+-		fallthrough;
+-	case 0x0 ... 0x3:
+-		wopcm_size = (1U << val) * SZ_1M;
+-		break;
+-	default:
+-		WARN(1, "Missing case wopcm_size=%llx\n", val);
+-		wopcm_size = 0;
+-	}
+-
+-	return wopcm_size;
+-}
+-
+ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
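
The relocated get_wopcm_size() decode is compact enough to model outside
the kernel. Below is a standalone mirror of the switch, assuming the same
GCC/Clang case-range extension the kernel source uses; the MMIO read and
register field extraction are replaced by a plain parameter:

#include <stdint.h>
#include <stdio.h>

/* Field decode: 0x0-0x3 -> 1, 2, 4, 8 MiB; 0x5 -> 16 MiB (via val--
 * then 1 << 4) and 0x6 -> 32 MiB; anything else is unknown and maps
 * to 0 so the caller can bail out. */
static uint32_t decode_wopcm_size(uint64_t val)
{
	switch (val) {
	case 0x5 ... 0x6:	/* case ranges: GCC/Clang extension */
		val--;
		/* fall through */
	case 0x0 ... 0x3:
		return (1u << val) * (1u << 20);
	default:
		return 0;
	}
}

int main(void)
{
	for (uint64_t v = 0; v <= 7; v++)
		printf("field %llu -> %u bytes\n",
		       (unsigned long long)v, decode_wopcm_size(v));
	return 0;
}
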
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+index 25c5dc61ee88b2..56a261a40ea3c9 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+@@ -2190,7 +2190,7 @@ static int zynqmp_dp_rate_get(void *data, u64 *val)
+ 	struct zynqmp_dp *dp = data;
+ 
+ 	mutex_lock(&dp->lock);
+-	*val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000;
++	*val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
+ 	mutex_unlock(&dp->lock);
+ 	return 0;
+ }
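
The zynqmp_dp one-liner is a classic 32-bit multiply overflow. A short
sketch of why the ULL suffix matters, assuming a representative 810000
return from drm_dp_bw_code_to_link_rate() for an 8.1 Gb/s link (the exact
units are beside the point here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int link_rate = 810000;

	/* 810000 * 10000 needs 34 bits; a 32-bit product wraps */
	uint32_t wrapped = (uint32_t)link_rate * 10000u;

	/* the ULL constant promotes the whole multiply to 64 bits */
	uint64_t full = link_rate * 10000ULL;

	printf("wrapped=%u full=%llu\n", wrapped,
	       (unsigned long long)full);
	return 0;
}
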
+diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
+index 7b1d091f3c090c..46cae925b09592 100644
+--- a/drivers/gpu/host1x/dev.c
++++ b/drivers/gpu/host1x/dev.c
+@@ -619,6 +619,8 @@ static int host1x_probe(struct platform_device *pdev)
+ 		goto free_contexts;
+ 	}
+ 
++	mutex_init(&host->intr_mutex);
++
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
+index b3285dd101804c..f77a678949e96b 100644
+--- a/drivers/gpu/host1x/intr.c
++++ b/drivers/gpu/host1x/intr.c
+@@ -104,8 +104,6 @@ int host1x_intr_init(struct host1x *host)
+ 	unsigned int id;
+ 	int i, err;
+ 
+-	mutex_init(&host->intr_mutex);
+-
+ 	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
+ 		struct host1x_syncpt *syncpt = &host->syncpt[id];
+ 
+diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
+index 6ece56b850fc02..56e858066c3c31 100644
+--- a/drivers/hid/hid-corsair-void.c
++++ b/drivers/hid/hid-corsair-void.c
+@@ -553,7 +553,7 @@ static void corsair_void_battery_remove_work_handler(struct work_struct *work)
+ static void corsair_void_battery_add_work_handler(struct work_struct *work)
+ {
+ 	struct corsair_void_drvdata *drvdata;
+-	struct power_supply_config psy_cfg;
++	struct power_supply_config psy_cfg = {};
+ 	struct power_supply *new_supply;
+ 
+ 	drvdata = container_of(work, struct corsair_void_drvdata,
+@@ -726,6 +726,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
+ 	if (drvdata->battery)
+ 		power_supply_unregister(drvdata->battery);
+ 
++	cancel_delayed_work_sync(&drvdata->delayed_status_work);
+ 	cancel_delayed_work_sync(&drvdata->delayed_firmware_work);
+ 	sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
+ }
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 82900857bfd87c..e50887a6d22c24 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1679,9 +1679,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 		break;
+ 	}
+ 
+-	if (suffix)
++	if (suffix) {
+ 		hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ 						 "%s %s", hdev->name, suffix);
++		if (!hi->input->name)
++			return -ENOMEM;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
+index 6439913372a8a9..5f8518f6f5ac74 100644
+--- a/drivers/hid/hid-steam.c
++++ b/drivers/hid/hid-steam.c
+@@ -313,6 +313,7 @@ struct steam_device {
+ 	u16 rumble_left;
+ 	u16 rumble_right;
+ 	unsigned int sensor_timestamp_us;
++	struct work_struct unregister_work;
+ };
+ 
+ static int steam_recv_report(struct steam_device *steam,
+@@ -1072,6 +1073,31 @@ static void steam_mode_switch_cb(struct work_struct *work)
+ 	}
+ }
+ 
++static void steam_work_unregister_cb(struct work_struct *work)
++{
++	struct steam_device *steam = container_of(work, struct steam_device,
++							unregister_work);
++	unsigned long flags;
++	bool connected;
++	bool opened;
++
++	spin_lock_irqsave(&steam->lock, flags);
++	opened = steam->client_opened;
++	connected = steam->connected;
++	spin_unlock_irqrestore(&steam->lock, flags);
++
++	if (connected) {
++		if (opened) {
++			steam_sensors_unregister(steam);
++			steam_input_unregister(steam);
++		} else {
++			steam_set_lizard_mode(steam, lizard_mode);
++			steam_input_register(steam);
++			steam_sensors_register(steam);
++		}
++	}
++}
++
+ static bool steam_is_valve_interface(struct hid_device *hdev)
+ {
+ 	struct hid_report_enum *rep_enum;
+@@ -1117,8 +1143,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
+ 	steam->client_opened++;
+ 	spin_unlock_irqrestore(&steam->lock, flags);
+ 
+-	steam_sensors_unregister(steam);
+-	steam_input_unregister(steam);
++	schedule_work(&steam->unregister_work);
+ 
+ 	return 0;
+ }
+@@ -1135,11 +1160,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
+ 	connected = steam->connected && !steam->client_opened;
+ 	spin_unlock_irqrestore(&steam->lock, flags);
+ 
+-	if (connected) {
+-		steam_set_lizard_mode(steam, lizard_mode);
+-		steam_input_register(steam);
+-		steam_sensors_register(steam);
+-	}
++	schedule_work(&steam->unregister_work);
+ }
+ 
+ static int steam_client_ll_raw_request(struct hid_device *hdev,
+@@ -1231,6 +1252,7 @@ static int steam_probe(struct hid_device *hdev,
+ 	INIT_LIST_HEAD(&steam->list);
+ 	INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
+ 	steam->sensor_timestamp_us = 0;
++	INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
+ 
+ 	/*
+ 	 * With the real steam controller interface, do not connect hidraw.
+@@ -1291,6 +1313,7 @@ static int steam_probe(struct hid_device *hdev,
+ 	cancel_work_sync(&steam->work_connect);
+ 	cancel_delayed_work_sync(&steam->mode_switch);
+ 	cancel_work_sync(&steam->rumble_work);
++	cancel_work_sync(&steam->unregister_work);
+ 
+ 	return ret;
+ }
+@@ -1306,6 +1329,8 @@ static void steam_remove(struct hid_device *hdev)
+ 
+ 	cancel_delayed_work_sync(&steam->mode_switch);
+ 	cancel_work_sync(&steam->work_connect);
++	cancel_work_sync(&steam->rumble_work);
++	cancel_work_sync(&steam->unregister_work);
+ 	hid_destroy_device(steam->client_hdev);
+ 	steam->client_hdev = NULL;
+ 	steam->client_opened = 0;
+@@ -1592,7 +1617,7 @@ static void steam_do_deck_input_event(struct steam_device *steam,
+ 
+ 	if (!(b9 & BIT(6)) && steam->did_mode_switch) {
+ 		steam->did_mode_switch = false;
+-		cancel_delayed_work_sync(&steam->mode_switch);
++		cancel_delayed_work(&steam->mode_switch);
+ 	} else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
+ 		steam->did_mode_switch = true;
+ 		schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
+diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
+index 6c3e758bbb09e3..3b81468a1df297 100644
+--- a/drivers/hid/hid-thrustmaster.c
++++ b/drivers/hid/hid-thrustmaster.c
+@@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
+ 	b_ep = ep->desc.bEndpointAddress;
+ 
+ 	/* Are the expected endpoints present? */
+-	u8 ep_addr[1] = {b_ep};
++	u8 ep_addr[2] = {b_ep, 0};
+ 
+ 	if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ 		hid_err(hdev, "Unexpected non-int endpoint\n");
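
The thrustmaster fix works because usb_check_int_endpoints() expects a
zero-terminated list of endpoint addresses. This stand-in scanner
(check_endpoints is hypothetical, not the USB core function) shows the
shape the caller must provide, and why a one-element array without the
terminator reads past its end:

#include <stdint.h>
#include <stdio.h>

static int check_endpoints(const uint8_t *ep_addrs)
{
	/* walks until a 0 byte -- the caller must supply it */
	for (; *ep_addrs; ep_addrs++)
		printf("checking endpoint 0x%02x\n", *ep_addrs);
	return 1;
}

int main(void)
{
	uint8_t ok[2] = { 0x81, 0 };	/* terminated, as in the fix */

	return !check_endpoints(ok);
}
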
+diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
+index 831b760c66ea72..d4afbbd2780797 100644
+--- a/drivers/hid/hid-winwing.c
++++ b/drivers/hid/hid-winwing.c
+@@ -106,6 +106,8 @@ static int winwing_init_led(struct hid_device *hdev,
+ 						"%s::%s",
+ 						dev_name(&input->dev),
+ 						info->led_name);
++		if (!led->cdev.name)
++			return -ENOMEM;
+ 
+ 		ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
+ 		if (ret)
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 3c6011a48dabe7..6e084c2074141b 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -944,16 +944,6 @@ void vmbus_initiate_unload(bool crash)
+ 		vmbus_wait_for_unload();
+ }
+ 
+-static void check_ready_for_resume_event(void)
+-{
+-	/*
+-	 * If all the old primary channels have been fixed up, then it's safe
+-	 * to resume.
+-	 */
+-	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
+-		complete(&vmbus_connection.ready_for_resume_event);
+-}
+-
+ static void vmbus_setup_channel_state(struct vmbus_channel *channel,
+ 				      struct vmbus_channel_offer_channel *offer)
+ {
+@@ -1109,8 +1099,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
+ 
+ 		/* Add the channel back to the array of channels. */
+ 		vmbus_channel_map_relid(oldchannel);
+-		check_ready_for_resume_event();
+-
+ 		mutex_unlock(&vmbus_connection.channel_mutex);
+ 		return;
+ 	}
+@@ -1296,13 +1284,28 @@ EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+ 
+ /*
+  * vmbus_onoffers_delivered -
+- * This is invoked when all offers have been delivered.
++ * The CHANNELMSG_ALLOFFERS_DELIVERED message arrives after all
++ * boot-time offers are delivered. A boot-time offer is for the primary
++ * channel for any virtual hardware configured in the VM at the time it boots.
++ * Boot-time offers include offers for physical devices assigned to the VM
++ * via Hyper-V's Discrete Device Assignment (DDA) functionality that are
++ * handled as virtual PCI devices in Linux (e.g., NVMe devices and GPUs).
++ * Boot-time offers do not include offers for VMBus sub-channels. Because
++ * devices can be hot-added to the VM after it is booted, additional channel
++ * offers that aren't boot-time offers can be received at any time after the
++ * all-offers-delivered message.
+  *
+- * Nothing to do here.
++ * SR-IOV NIC Virtual Functions (VFs) assigned to a VM are not considered
++ * to be assigned to the VM at boot-time, and offers for VFs may occur after
++ * the all-offers-delivered message. VFs are optional accelerators to the
++ * synthetic VMBus NIC and are effectively hot-added only after the VMBus
++ * NIC channel is opened (once it knows the guest can support it, via the
++ * sriov bit in the netvsc protocol).
+  */
+ static void vmbus_onoffers_delivered(
+ 			struct vmbus_channel_message_header *hdr)
+ {
++	complete(&vmbus_connection.all_offers_delivered_event);
+ }
+ 
+ /*
+@@ -1578,7 +1581,8 @@ void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
+ }
+ 
+ /*
+- * vmbus_request_offers - Send a request to get all our pending offers.
++ * vmbus_request_offers - Send a request to get all our pending offers
++ * and wait for all boot-time offers to arrive.
+  */
+ int vmbus_request_offers(void)
+ {
+@@ -1596,6 +1600,10 @@ int vmbus_request_offers(void)
+ 
+ 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+ 
++	/*
++	 * This REQUESTOFFERS message will result in the host sending an all
++	 * offers delivered message after all the boot-time offers are sent.
++	 */
+ 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
+ 			     true);
+ 
+@@ -1607,6 +1615,29 @@ int vmbus_request_offers(void)
+ 		goto cleanup;
+ 	}
+ 
++	/*
++	 * Wait for the host to send all boot-time offers.
++	 * Keeping it as a best-effort mechanism, where a warning is
++	 * printed if a timeout occurs, and execution is resumed.
++	 */
++	if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
++					 secs_to_jiffies(60))) {
++		pr_warn("timed out waiting for all boot-time offers to be delivered.\n");
++	}
++
++	/*
++	 * Flush handling of offer messages (which may initiate work on
++	 * other work queues).
++	 */
++	flush_workqueue(vmbus_connection.work_queue);
++
++	/*
++	 * Flush workqueue for processing the incoming offers. Subchannel
++	 * offers and their processing can happen later, so there is no need to
++	 * flush that workqueue here.
++	 */
++	flush_workqueue(vmbus_connection.handle_primary_chan_wq);
++
+ cleanup:
+ 	kfree(msginfo);
+ 
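
The new wait in vmbus_request_offers() is a best-effort completion with a
60-second timeout: warn on expiry, then carry on. As a userspace analogue
only -- POSIX threads standing in for the kernel's completion API, with
all names chosen for illustration -- the pattern looks like this:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int all_offers_delivered;

static void *host_side(void *arg)
{
	(void)arg;
	sleep(1);			/* host finishes sending offers */
	pthread_mutex_lock(&lock);
	all_offers_delivered = 1;	/* complete() analogue */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;
	int err = 0;

	pthread_create(&t, NULL, host_side, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 60;		/* secs_to_jiffies(60) analogue */

	pthread_mutex_lock(&lock);
	while (!all_offers_delivered && !err)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (err)
		fprintf(stderr, "timed out waiting for boot-time offers\n");
	/* execution resumes either way -- best effort, as in the patch */
	pthread_join(t, NULL);
	return 0;
}
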
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index f001ae880e1dbe..8351360bba1617 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -34,8 +34,8 @@ struct vmbus_connection vmbus_connection = {
+ 
+ 	.ready_for_suspend_event = COMPLETION_INITIALIZER(
+ 				  vmbus_connection.ready_for_suspend_event),
+-	.ready_for_resume_event	= COMPLETION_INITIALIZER(
+-				  vmbus_connection.ready_for_resume_event),
++	.all_offers_delivered_event = COMPLETION_INITIALIZER(
++				  vmbus_connection.all_offers_delivered_event),
+ };
+ EXPORT_SYMBOL_GPL(vmbus_connection);
+ 
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 52cb744b4d7fde..e4058af987316e 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -287,18 +287,10 @@ struct vmbus_connection {
+ 	struct completion ready_for_suspend_event;
+ 
+ 	/*
+-	 * The number of primary channels that should be "fixed up"
+-	 * upon resume: these channels are re-offered upon resume, and some
+-	 * fields of the channel offers (i.e. child_relid and connection_id)
+-	 * can change, so the old offermsg must be fixed up, before the resume
+-	 * callbacks of the VSC drivers start to further touch the channels.
++	 * Completed once the host has offered all boot-time channels.
++	 * Note that some channels may still be under process on a workqueue.
+ 	 */
+-	atomic_t nr_chan_fixup_on_resume;
+-	/*
+-	 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
+-	 * drop to zero.
+-	 */
+-	struct completion ready_for_resume_event;
++	struct completion all_offers_delivered_event;
+ };
+ 
+ 
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 2892b8da20a5e2..bf5608a7405610 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2427,11 +2427,6 @@ static int vmbus_bus_suspend(struct device *dev)
+ 	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
+ 		wait_for_completion(&vmbus_connection.ready_for_suspend_event);
+ 
+-	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+-		pr_err("Can not suspend due to a previous failed resuming\n");
+-		return -EBUSY;
+-	}
+-
+ 	mutex_lock(&vmbus_connection.channel_mutex);
+ 
+ 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+@@ -2456,17 +2451,12 @@ static int vmbus_bus_suspend(struct device *dev)
+ 			pr_err("Sub-channel not deleted!\n");
+ 			WARN_ON_ONCE(1);
+ 		}
+-
+-		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
+ 	}
+ 
+ 	mutex_unlock(&vmbus_connection.channel_mutex);
+ 
+ 	vmbus_initiate_unload(false);
+ 
+-	/* Reset the event for the next resume. */
+-	reinit_completion(&vmbus_connection.ready_for_resume_event);
+-
+ 	return 0;
+ }
+ 
+@@ -2502,14 +2492,8 @@ static int vmbus_bus_resume(struct device *dev)
+ 	if (ret != 0)
+ 		return ret;
+ 
+-	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
+-
+ 	vmbus_request_offers();
+ 
+-	if (wait_for_completion_timeout(
+-		&vmbus_connection.ready_for_resume_event, secs_to_jiffies(10)) == 0)
+-		pr_err("Some vmbus device is missing after suspending?\n");
+-
+ 	/* Reset the event for the next suspend. */
+ 	reinit_completion(&vmbus_connection.ready_for_suspend_event);
+ 
+diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
+index 90dee3ec552097..77da199c7413e6 100644
+--- a/drivers/i3c/master/Kconfig
++++ b/drivers/i3c/master/Kconfig
+@@ -57,3 +57,14 @@ config MIPI_I3C_HCI
+ 
+ 	  This driver can also be built as a module.  If so, the module will be
+ 	  called mipi-i3c-hci.
++
++config MIPI_I3C_HCI_PCI
++	tristate "MIPI I3C Host Controller Interface PCI support"
++	depends on MIPI_I3C_HCI
++	depends on PCI
++	help
++	  Support for MIPI I3C Host Controller Interface compatible hardware
++	  on the PCI bus.
++
++	  This driver can also be built as a module. If so, the module will be
++	  called mipi-i3c-hci-pci.
+diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
+index 1f8cd5c48fdef3..e3d3ef757035f0 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/Makefile
++++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
+@@ -5,3 +5,4 @@ mipi-i3c-hci-y				:= core.o ext_caps.o pio.o dma.o \
+ 					   cmd_v1.o cmd_v2.o \
+ 					   dat_v1.o dct_v1.o \
+ 					   hci_quirks.o
++obj-$(CONFIG_MIPI_I3C_HCI_PCI)		+= mipi-i3c-hci-pci.o
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index e8e56a8d20573f..491dfe70b66002 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -758,9 +758,26 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
+ 			complete(&rh->op_done);
+ 
+ 		if (status & INTR_TRANSFER_ABORT) {
++			u32 ring_status;
++
+ 			dev_notice_ratelimited(&hci->master.dev,
+ 				"ring %d: Transfer Aborted\n", i);
+ 			mipi_i3c_hci_resume(hci);
++			ring_status = rh_reg_read(RING_STATUS);
++			if (!(ring_status & RING_STATUS_RUNNING) &&
++			    status & INTR_TRANSFER_COMPLETION &&
++			    status & INTR_TRANSFER_ERR) {
++				/*
++				 * Ring stop followed by run is an Intel
++				 * specific required quirk after resuming the
++				 * halted controller. Do it only when the ring
++				 * is not in running state after a transfer
++				 * error.
++				 */
++				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
++				rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
++							   RING_CTRL_RUN_STOP);
++			}
+ 		}
+ 		if (status & INTR_WARN_INS_STOP_MODE)
+ 			dev_warn_ratelimited(&hci->master.dev,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+new file mode 100644
+index 00000000000000..c6c3a3ec11eae3
+--- /dev/null
++++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+@@ -0,0 +1,148 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCI glue code for MIPI I3C HCI driver
++ *
++ * Copyright (C) 2024 Intel Corporation
++ *
++ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
++ */
++#include <linux/acpi.h>
++#include <linux/idr.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++
++struct mipi_i3c_hci_pci_info {
++	int (*init)(struct pci_dev *pci);
++};
++
++#define INTEL_PRIV_OFFSET		0x2b0
++#define INTEL_PRIV_SIZE			0x28
++#define INTEL_PRIV_RESETS		0x04
++#define INTEL_PRIV_RESETS_RESET		BIT(0)
++#define INTEL_PRIV_RESETS_RESET_DONE	BIT(1)
++
++static DEFINE_IDA(mipi_i3c_hci_pci_ida);
++
++static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
++{
++	unsigned long timeout;
++	void __iomem *priv;
++
++	priv = devm_ioremap(&pci->dev,
++			    pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
++			    INTEL_PRIV_SIZE);
++	if (!priv)
++		return -ENOMEM;
++
++	/* Assert reset, wait for completion and release reset */
++	writel(0, priv + INTEL_PRIV_RESETS);
++	timeout = jiffies + msecs_to_jiffies(10);
++	while (!(readl(priv + INTEL_PRIV_RESETS) &
++		 INTEL_PRIV_RESETS_RESET_DONE)) {
++		if (time_after(jiffies, timeout))
++			break;
++		cpu_relax();
++	}
++	writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
++
++	return 0;
++}
++
++static struct mipi_i3c_hci_pci_info intel_info = {
++	.init = mipi_i3c_hci_pci_intel_init,
++};
++
++static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
++				  const struct pci_device_id *id)
++{
++	struct mipi_i3c_hci_pci_info *info;
++	struct platform_device *pdev;
++	struct resource res[2];
++	int dev_id, ret;
++
++	ret = pcim_enable_device(pci);
++	if (ret)
++		return ret;
++
++	pci_set_master(pci);
++
++	memset(&res, 0, sizeof(res));
++
++	res[0].flags = IORESOURCE_MEM;
++	res[0].start = pci_resource_start(pci, 0);
++	res[0].end = pci_resource_end(pci, 0);
++
++	res[1].flags = IORESOURCE_IRQ;
++	res[1].start = pci->irq;
++	res[1].end = pci->irq;
++
++	dev_id = ida_alloc(&mipi_i3c_hci_pci_ida, GFP_KERNEL);
++	if (dev_id < 0)
++		return dev_id;
++
++	pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
++	if (!pdev)
++		return -ENOMEM;
++
++	pdev->dev.parent = &pci->dev;
++	device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
++
++	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
++	if (ret)
++		goto err;
++
++	info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
++	if (info && info->init) {
++		ret = info->init(pci);
++		if (ret)
++			goto err;
++	}
++
++	ret = platform_device_add(pdev);
++	if (ret)
++		goto err;
++
++	pci_set_drvdata(pci, pdev);
++
++	return 0;
++
++err:
++	platform_device_put(pdev);
++	ida_free(&mipi_i3c_hci_pci_ida, dev_id);
++	return ret;
++}
++
++static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
++{
++	struct platform_device *pdev = pci_get_drvdata(pci);
++	int dev_id = pdev->id;
++
++	platform_device_unregister(pdev);
++	ida_free(&mipi_i3c_hci_pci_ida, dev_id);
++}
++
++static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
++	/* Panther Lake-H */
++	{ PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
++	{ PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
++	/* Panther Lake-P */
++	{ PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
++	{ PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
++	{ },
++};
++MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
++
++static struct pci_driver mipi_i3c_hci_pci_driver = {
++	.name = "mipi_i3c_hci_pci",
++	.id_table = mipi_i3c_hci_pci_devices,
++	.probe = mipi_i3c_hci_pci_probe,
++	.remove = mipi_i3c_hci_pci_remove,
++};
++
++module_pci_driver(mipi_i3c_hci_pci_driver);
++
++MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@intel.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MIPI I3C HCI driver on PCI bus");
+diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
+index ad225823e6f2fe..45a4564c670c01 100644
+--- a/drivers/infiniband/hw/efa/efa_main.c
++++ b/drivers/infiniband/hw/efa/efa_main.c
+@@ -470,7 +470,6 @@ static void efa_ib_device_remove(struct efa_dev *dev)
+ 	ibdev_info(&dev->ibdev, "Unregister ib device\n");
+ 	ib_unregister_device(&dev->ibdev);
+ 	efa_destroy_eqs(dev);
+-	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
+ 	efa_release_doorbell_bar(dev);
+ }
+ 
+@@ -643,12 +642,14 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
+ 	return ERR_PTR(err);
+ }
+ 
+-static void efa_remove_device(struct pci_dev *pdev)
++static void efa_remove_device(struct pci_dev *pdev,
++			      enum efa_regs_reset_reason_types reset_reason)
+ {
+ 	struct efa_dev *dev = pci_get_drvdata(pdev);
+ 	struct efa_com_dev *edev;
+ 
+ 	edev = &dev->edev;
++	efa_com_dev_reset(edev, reset_reason);
+ 	efa_com_admin_destroy(edev);
+ 	efa_free_irq(dev, &dev->admin_irq);
+ 	efa_disable_msix(dev);
+@@ -676,7 +677,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ 
+ err_remove_device:
+-	efa_remove_device(pdev);
++	efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR);
+ 	return err;
+ }
+ 
+@@ -685,7 +686,7 @@ static void efa_remove(struct pci_dev *pdev)
+ 	struct efa_dev *dev = pci_get_drvdata(pdev);
+ 
+ 	efa_ib_device_remove(dev);
+-	efa_remove_device(pdev);
++	efa_remove_device(pdev, EFA_REGS_RESET_NORMAL);
+ }
+ 
+ static void efa_shutdown(struct pci_dev *pdev)
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index fdb0357e0bb91a..903b426c9f8934 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -175,6 +175,7 @@
+ #define CONTROL_GAM_EN		25
+ #define CONTROL_GALOG_EN	28
+ #define CONTROL_GAINT_EN	29
++#define CONTROL_EPH_EN		45
+ #define CONTROL_XT_EN		50
+ #define CONTROL_INTCAPXT_EN	51
+ #define CONTROL_IRTCACHEDIS	59
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index db4b52aae1fcf1..4c0f876445de1e 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2635,6 +2635,10 @@ static void iommu_init_flags(struct amd_iommu *iommu)
+ 
+ 	/* Set IOTLB invalidation timeout to 1s */
+ 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
++
++	/* Enable Enhanced Peripheral Page Request Handling */
++	if (check_feature(FEATURE_EPHSUP))
++		iommu_feature_enable(iommu, CONTROL_EPH_EN);
+ }
+ 
+ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
+index c2d792db52c3e2..064194399b38bb 100644
+--- a/drivers/iommu/intel/prq.c
++++ b/drivers/iommu/intel/prq.c
+@@ -87,7 +87,9 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
+ 		struct page_req_dsc *req;
+ 
+ 		req = &iommu->prq[head / sizeof(*req)];
+-		if (!req->pasid_present || req->pasid != pasid) {
++		if (req->rid != sid ||
++		    (req->pasid_present && pasid != req->pasid) ||
++		    (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
+ 			head = (head + sizeof(*req)) & PRQ_RING_MASK;
+ 			continue;
+ 		}
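
The tightened intel/prq drain condition is easier to read as a predicate:
a queued request belongs to the (sid, pasid) pair being drained only if
the requester ID matches and the PASID matches, where a request without a
PASID field counts as IOMMU_NO_PASID. A sketch, assuming IOMMU_NO_PASID
is 0 as in current kernels and with struct page_req as a simplified
stand-in for page_req_dsc:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IOMMU_NO_PASID 0u

struct page_req {
	uint16_t rid;
	bool pasid_present;
	uint32_t pasid;
};

static bool req_matches(const struct page_req *req,
			uint16_t sid, uint32_t pasid)
{
	if (req->rid != sid)
		return false;
	if (req->pasid_present)
		return req->pasid == pasid;
	return pasid == IOMMU_NO_PASID;
}

int main(void)
{
	struct page_req r = { .rid = 0x42, .pasid_present = false };

	/* matches only the no-PASID drain, not an arbitrary PASID */
	printf("%d %d\n", req_matches(&r, 0x42, IOMMU_NO_PASID),
	       req_matches(&r, 0x42, 7));
	return 0;
}
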
+diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
+index 4674e618797c15..8b5926c1452edb 100644
+--- a/drivers/iommu/io-pgfault.c
++++ b/drivers/iommu/io-pgfault.c
+@@ -478,6 +478,7 @@ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+ 
+ 		ops->page_response(dev, iopf, &resp);
+ 		list_del_init(&group->pending_node);
++		iopf_free_group(group);
+ 	}
+ 	mutex_unlock(&fault_param->lock);
+ 
+diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
+index 8e76d2913e6beb..4441ffe149ea0d 100644
+--- a/drivers/irqchip/irq-partition-percpu.c
++++ b/drivers/irqchip/irq-partition-percpu.c
+@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+ 	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ 	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+ 
+-	seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
++	seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
+ }
+ 
+ static struct irq_chip partition_irq_chip = {
+diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
+index d925ca24183b50..415f1f91cc3072 100644
+--- a/drivers/media/dvb-frontends/cxd2841er.c
++++ b/drivers/media/dvb-frontends/cxd2841er.c
+@@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
+ 
+ static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
+ {
+-	u64 tmp;
+-
+-	tmp = (u64) ifhz * 16777216;
+-	do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
+-
+-	return (u32) tmp;
++	return div_u64(ifhz * 16777216ull,
++		       (xtal == SONY_XTAL_24000) ? 48000000 : 41000000);
+ }
+ 
+ static u32 cxd2841er_calc_iffreq(u32 ifhz)
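
The cxd2841er cleanup relies on the multiply happening in 64 bits before
the division: 16777216 is 2^24, so a 32-bit product would overflow for
any ifhz above 255. A userspace mirror, where plain division substitutes
for div_u64() (which only matters on 32-bit kernels):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_iffreq_xtal(int xtal_24mhz, uint32_t ifhz)
{
	/* the ull literal forces the multiply into 64 bits */
	return (uint32_t)(ifhz * 16777216ull /
			  (xtal_24mhz ? 48000000 : 41000000));
}

int main(void)
{
	printf("%" PRIu32 "\n", calc_iffreq_xtal(1, 3600000));
	return 0;
}
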
+diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
+index 9f01950a0ca336..fd2d2d5272bfb6 100644
+--- a/drivers/media/i2c/ds90ub913.c
++++ b/drivers/media/i2c/ds90ub913.c
+@@ -8,6 +8,7 @@
+  * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -146,6 +147,19 @@ static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
+ 	return ret;
+ }
+ 
++static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
++			     u8 val)
++{
++	int ret;
++
++	ret = regmap_update_bits(priv->regmap, reg, mask, val);
++	if (ret < 0)
++		dev_err(&priv->client->dev,
++			"Cannot update register 0x%02x %d!\n", reg, ret);
++
++	return ret;
++}
++
+ /*
+  * GPIO chip
+  */
+@@ -733,10 +747,13 @@ static int ub913_hw_init(struct ub913_data *priv)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "i2c master init failed\n");
+ 
+-	ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+-	v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
+-	v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
+-	ub913_write(priv, UB913_REG_GENERAL_CFG, v);
++	ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
++				UB913_REG_GENERAL_CFG_PCLK_RISING,
++				FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
++					   priv->pclk_polarity_rising));
++
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
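
The ds90ub913 conversion to ub913_update_bits() is the standard regmap
read-modify-write confined to a single field, leaving every other bit of
the register untouched. The core identity, with an illustrative bit
position rather than the chip's real register layout:

#include <stdint.h>
#include <stdio.h>

static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
	/* clear the masked field, then OR in the new field value */
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t reg = 0xa5;
	uint8_t pclk_rising = 1u << 0;	/* illustrative bit position */

	reg = update_bits(reg, pclk_rising, pclk_rising);	/* set */
	reg = update_bits(reg, pclk_rising, 0);			/* clear */
	printf("0x%02x\n", reg);	/* 0xa4: only bit 0 changed */
	return 0;
}
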
+diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
+index b27656f8d2b121..db30626e9c9624 100644
+--- a/drivers/media/i2c/ds90ub953.c
++++ b/drivers/media/i2c/ds90ub953.c
+@@ -397,8 +397,13 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
+ 	int ret;
+ 
+ 	/* Set all GPIOs to local input mode */
+-	ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+-	ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
++	ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
++	if (ret)
++		return ret;
++
++	ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
++	if (ret)
++		return ret;
+ 
+ 	gc->label = dev_name(dev);
+ 	gc->parent = dev;
+@@ -958,10 +963,11 @@ static void ub953_calc_clkout_params(struct ub953_data *priv,
+ 	clkout_data->rate = clkout_rate;
+ }
+ 
+-static void ub953_write_clkout_regs(struct ub953_data *priv,
+-				    const struct ub953_clkout_data *clkout_data)
++static int ub953_write_clkout_regs(struct ub953_data *priv,
++				   const struct ub953_clkout_data *clkout_data)
+ {
+ 	u8 clkout_ctrl0, clkout_ctrl1;
++	int ret;
+ 
+ 	if (priv->hw_data->is_ub971)
+ 		clkout_ctrl0 = clkout_data->m;
+@@ -971,8 +977,15 @@ static void ub953_write_clkout_regs(struct ub953_data *priv,
+ 
+ 	clkout_ctrl1 = clkout_data->n;
+ 
+-	ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+-	ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
++	ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
++	if (ret)
++		return ret;
++
++	ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+ 
+ static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
+@@ -1052,9 +1065,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
+ 		clkout_data.rate, rate);
+ 
+-	ub953_write_clkout_regs(priv, &clkout_data);
+-
+-	return 0;
++	return ub953_write_clkout_regs(priv, &clkout_data);
+ }
+ 
+ static const struct clk_ops ub953_clkout_ops = {
+@@ -1079,7 +1090,9 @@ static int ub953_register_clkout(struct ub953_data *priv)
+ 
+ 	/* Initialize clkout to 25MHz by default */
+ 	ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
+-	ub953_write_clkout_regs(priv, &clkout_data);
++	ret = ub953_write_clkout_regs(priv, &clkout_data);
++	if (ret)
++		return ret;
+ 
+ 	priv->clkout_clk_hw.init = &init;
+ 
+@@ -1226,10 +1239,15 @@ static int ub953_hw_init(struct ub953_data *priv)
+ 	if (ret)
+ 		return dev_err_probe(dev, ret, "i2c init failed\n");
+ 
+-	ub953_write(priv, UB953_REG_GENERAL_CFG,
+-		    (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
+-		    ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
+-		    UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
++	v = 0;
++	v |= priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK;
++	v |= (priv->num_data_lanes - 1) <<
++		UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
++	v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
++
++	ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/broadcom/bcm2835-unicam.c b/drivers/media/platform/broadcom/bcm2835-unicam.c
+index 3aed0e493c81f1..4090a8e4774029 100644
+--- a/drivers/media/platform/broadcom/bcm2835-unicam.c
++++ b/drivers/media/platform/broadcom/bcm2835-unicam.c
+@@ -816,11 +816,6 @@ static irqreturn_t unicam_isr(int irq, void *dev)
+ 		}
+ 	}
+ 
+-	if (unicam_reg_read(unicam, UNICAM_ICTL) & UNICAM_FCM) {
+-		/* Switch out of trigger mode if selected */
+-		unicam_reg_write_field(unicam, UNICAM_ICTL, 1, UNICAM_TFC);
+-		unicam_reg_write_field(unicam, UNICAM_ICTL, 0, UNICAM_FCM);
+-	}
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -984,8 +979,7 @@ static void unicam_start_rx(struct unicam_device *unicam,
+ 
+ 	unicam_reg_write_field(unicam, UNICAM_ANA, 0, UNICAM_DDL);
+ 
+-	/* Always start in trigger frame capture mode (UNICAM_FCM set) */
+-	val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_FCM | UNICAM_IBOB;
++	val = UNICAM_FSIE | UNICAM_FEIE | UNICAM_IBOB;
+ 	line_int_freq = max(fmt->height >> 2, 128);
+ 	unicam_set_field(&val, line_int_freq, UNICAM_LCIE_MASK);
+ 	unicam_reg_write(unicam, UNICAM_ICTL, val);
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+index e1dd8adeba469f..438483c62facc2 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+@@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb)
+ 
+ 	mux_args.mux_buf_sz  = mux_buf_sz;
+ 
+-	dvb->streaming = true;
+ 	dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
+ 	if (!dvb->mux)
+ 		return -ENOMEM;
++
++	dvb->streaming = true;
+ 	vidtv_mux_start_thread(dvb->mux);
+ 
+ 	dev_dbg_ratelimited(dev, "Started streaming\n");
+@@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
+ {
+ 	struct device *dev = &dvb->pdev->dev;
+ 
++	if (!dvb->streaming) {
++		dev_warn_ratelimited(dev, "No streaming. Skipping.\n");
++		return 0;
++	}
++
+ 	dvb->streaming = false;
+ 	vidtv_mux_stop_thread(dvb->mux);
+ 	vidtv_mux_destroy(dvb->mux);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 31b4b54657feee..011a14506ea0b7 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2800,6 +2800,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Sonix Technology Co. Ltd. - 292A IPC AR0330 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x0c45,
++	  .idProduct		= 0x6366,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
+ 	/* MT6227 */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+@@ -2828,6 +2837,15 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Kurokesu C1 PRO */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x16d0,
++	  .idProduct		= 0x0ed1,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_MJPEG_NO_EOF) },
+ 	/* Syntek (HP Spartan) */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index d2fe01bcd209e5..eab7b8f5573057 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -20,6 +20,7 @@
+ #include <linux/atomic.h>
+ #include <linux/unaligned.h>
+ 
++#include <media/jpeg.h>
+ #include <media/v4l2-common.h>
+ 
+ #include "uvcvideo.h"
+@@ -1137,6 +1138,7 @@ static void uvc_video_stats_stop(struct uvc_streaming *stream)
+ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		struct uvc_buffer *buf, const u8 *data, int len)
+ {
++	u8 header_len;
+ 	u8 fid;
+ 
+ 	/*
+@@ -1150,6 +1152,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		return -EINVAL;
+ 	}
+ 
++	header_len = data[0];
+ 	fid = data[1] & UVC_STREAM_FID;
+ 
+ 	/*
+@@ -1231,9 +1234,31 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
+ 		return -EAGAIN;
+ 	}
+ 
++	/*
++	 * Some cameras, when running two parallel streams (one MJPEG alongside
++	 * another non-MJPEG stream), are known to lose the EOF packet for a frame.
++	 * We can detect the end of a frame by checking for a new SOI marker, as
++	 * the SOI always lies on the packet boundary between two frames for
++	 * these devices.
++	 */
++	if (stream->dev->quirks & UVC_QUIRK_MJPEG_NO_EOF &&
++	    (stream->cur_format->fcc == V4L2_PIX_FMT_MJPEG ||
++	    stream->cur_format->fcc == V4L2_PIX_FMT_JPEG)) {
++		const u8 *packet = data + header_len;
++
++		if (len >= header_len + 2 &&
++		    packet[0] == 0xff && packet[1] == JPEG_MARKER_SOI &&
++		    buf->bytesused != 0) {
++			buf->state = UVC_BUF_STATE_READY;
++			buf->error = 1;
++			stream->last_fid ^= UVC_STREAM_FID;
++			return -EAGAIN;
++		}
++	}
++
+ 	stream->last_fid = fid;
+ 
+-	return data[0];
++	return header_len;
+ }
+ 
+ static inline enum dma_data_direction uvc_stream_dir(
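
The UVC_QUIRK_MJPEG_NO_EOF detection above boils down to one boundary
test: if a packet's payload opens with a JPEG SOI marker while the
current buffer already holds data, the previous frame must have ended
without its EOF packet. A standalone version of the predicate, assuming
the 0xff/0xd8 SOI encoding that media/jpeg.h's JPEG_MARKER_SOI denotes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define JPEG_MARKER_SOI 0xd8	/* second byte of the ff d8 marker */

static int payload_starts_new_frame(const uint8_t *pkt, size_t len,
				    uint8_t header_len, size_t bytesused)
{
	const uint8_t *payload = pkt + header_len;

	return len >= (size_t)header_len + 2 &&
	       payload[0] == 0xff && payload[1] == JPEG_MARKER_SOI &&
	       bytesused != 0;
}

int main(void)
{
	/* 2-byte UVC header followed by an SOI mid-buffer: new frame */
	const uint8_t pkt[] = { 0x02, 0x01, 0xff, 0xd8, 0xff, 0xe0 };

	printf("%d\n", payload_starts_new_frame(pkt, sizeof(pkt), 2, 4096));
	return 0;
}
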
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 5690cfd61e23a1..7daf2aca29b775 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -76,6 +76,7 @@
+ #define UVC_QUIRK_NO_RESET_RESUME	0x00004000
+ #define UVC_QUIRK_DISABLE_AUTOSUSPEND	0x00008000
+ #define UVC_QUIRK_INVALID_DEVICE_SOF	0x00010000
++#define UVC_QUIRK_MJPEG_NO_EOF		0x00020000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index af445d3f8e2ae7..5ab7a26529a089 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -273,6 +273,7 @@
+ #define MSDC_PAD_TUNE_CMD2_SEL	  BIT(21)   /* RW */
+ 
+ #define PAD_DS_TUNE_DLY_SEL       BIT(0)	  /* RW */
++#define PAD_DS_TUNE_DLY2_SEL      BIT(1)	  /* RW */
+ #define PAD_DS_TUNE_DLY1	  GENMASK(6, 2)   /* RW */
+ #define PAD_DS_TUNE_DLY2	  GENMASK(11, 7)  /* RW */
+ #define PAD_DS_TUNE_DLY3	  GENMASK(16, 12) /* RW */
+@@ -318,6 +319,7 @@
+ 
+ /* EMMC50_PAD_DS_TUNE mask */
+ #define PAD_DS_DLY_SEL		BIT(16)	/* RW */
++#define PAD_DS_DLY2_SEL		BIT(15)	/* RW */
+ #define PAD_DS_DLY1		GENMASK(14, 10)	/* RW */
+ #define PAD_DS_DLY3		GENMASK(4, 0)	/* RW */
+ 
+@@ -2498,13 +2500,23 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ 	struct msdc_host *host = mmc_priv(mmc);
++
+ 	host->hs400_mode = true;
+ 
+-	if (host->top_base)
+-		writel(host->hs400_ds_delay,
+-		       host->top_base + EMMC50_PAD_DS_TUNE);
+-	else
+-		writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
++	if (host->top_base) {
++		if (host->hs400_ds_dly3)
++			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
++				      PAD_DS_DLY3, host->hs400_ds_dly3);
++		if (host->hs400_ds_delay)
++			writel(host->hs400_ds_delay,
++			       host->top_base + EMMC50_PAD_DS_TUNE);
++	} else {
++		if (host->hs400_ds_dly3)
++			sdr_set_field(host->base + PAD_DS_TUNE,
++				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
++		if (host->hs400_ds_delay)
++			writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
++	}
+ 	/* hs400 mode must set it to 0 */
+ 	sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ 	/* to improve read performance, set outstanding to 2 */
+@@ -2524,14 +2536,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
+ 	if (host->top_base) {
+ 		sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ 			     PAD_DS_DLY_SEL);
+-		if (host->hs400_ds_dly3)
+-			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+-				      PAD_DS_DLY3, host->hs400_ds_dly3);
++		sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
++			     PAD_DS_DLY2_SEL);
+ 	} else {
+ 		sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+-		if (host->hs400_ds_dly3)
+-			sdr_set_field(host->base + PAD_DS_TUNE,
+-				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
++		sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
+ 	}
+ 
+ 	host->hs400_tuning = true;
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index b73f673db92bbc..f75c31815ab00d 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -155,7 +155,6 @@ struct sdhci_am654_data {
+ 	u32 tuning_loop;
+ 
+ #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+-#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
+ };
+ 
+ struct window {
+@@ -357,29 +356,6 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
+ 	sdhci_set_clock(host, clock);
+ }
+ 
+-static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+-{
+-	struct sdhci_host *host = mmc_priv(mmc);
+-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+-	struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+-	int ret;
+-
+-	if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
+-	    ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+-		if (!IS_ERR(mmc->supply.vqmmc)) {
+-			ret = mmc_regulator_set_vqmmc(mmc, ios);
+-			if (ret < 0) {
+-				pr_err("%s: Switching to 1.8V signalling voltage failed,\n",
+-				       mmc_hostname(mmc));
+-				return -EIO;
+-			}
+-		}
+-		return 0;
+-	}
+-
+-	return sdhci_start_signal_voltage_switch(mmc, ios);
+-}
+-
+ static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
+ {
+ 	writeb(val, host->ioaddr + reg);
+@@ -868,11 +844,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
+ 	if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
+ 		sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
+ 
+-	/* Suppress v1p8 ena for eMMC and SD with vqmmc supply */
+-	if (!!of_parse_phandle(dev->of_node, "vmmc-supply", 0) ==
+-	    !!of_parse_phandle(dev->of_node, "vqmmc-supply", 0))
+-		sdhci_am654->quirks |= SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA;
+-
+ 	sdhci_get_of_property(pdev);
+ 
+ 	return 0;
+@@ -969,7 +940,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
+ 		goto err_pltfm_free;
+ 	}
+ 
+-	host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
+ 	host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+ 
+ 	pm_runtime_get_noresume(dev);
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index 6cba9717a6d87d..399844809bbeaa 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -385,15 +385,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ 			KBUILD_MODNAME, ret);
+-		goto exit_free_device;
++		goto exit_pm_runtime;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ 		 KBUILD_MODNAME, priv->base, dev->irq);
+ 	return 0;
+ 
+-exit_free_device:
++exit_pm_runtime:
+ 	pm_runtime_disable(priv->device);
++exit_free_device:
+ 	free_c_can_dev(dev);
+ exit:
+ 	dev_err(&pdev->dev, "probe failed\n");
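
The c_can relabeling is the usual probe-unwind discipline: each error
label tears down exactly what succeeded before it, in reverse order, so
pm_runtime_disable() only runs once pm_runtime_enable() has actually
happened. A minimal shape of that discipline (setup_a/setup_b are
placeholders, not c_can functions):

#include <stdio.h>
#include <stdlib.h>

static int setup_a(void) { puts("a up"); return 0; }
static int setup_b(void) { puts("b up"); return -1; }	/* fails */
static void teardown_a(void) { puts("a down"); }

static int probe(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;		/* nothing to undo yet */
	err = setup_b();
	if (err)
		goto undo_a;		/* only a needs unwinding */
	return 0;

undo_a:
	teardown_a();
out:
	return err;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}
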
+diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
+index 64c349fd46007f..f65c1a1e05ccdf 100644
+--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
++++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
+@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+ 			}
+ 			break;
+ 		case CAN_STATE_ERROR_ACTIVE:
+-			cf->can_id |= CAN_ERR_CNT;
+-			cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+-			cf->data[6] = bec.txerr;
+-			cf->data[7] = bec.rxerr;
++			if (skb) {
++				cf->can_id |= CAN_ERR_CNT;
++				cf->data[1] = CAN_ERR_CRTL_ACTIVE;
++				cf->data[6] = bec.txerr;
++				cf->data[7] = bec.rxerr;
++			}
+ 			break;
+ 		default:
+ 			netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
+index df18c85fc07841..d9a937ba126c3c 100644
+--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
++++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
+@@ -622,7 +622,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+ 	netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+ 
+ 	skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+-	if (skb)
++	if (!skb)
+ 		return 0;
+ 
+ 	rkcanfd_get_berr_counter_corrected(priv, &bec);
+diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+index eee20839d96fd4..0d155eb1b9e999 100644
+--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
++++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
+ 			return ret;
+ 	}
+ 
+-	return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
++	if (es58x_dev->udev->serial)
++		ret = devlink_info_serial_number_put(req,
++						     es58x_dev->udev->serial);
++
++	return ret;
+ }
+ 
+ const struct devlink_ops es58x_dl_ops = {
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+index b4fbb99bfad208..a3d6b8f198a86a 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
+@@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev)
+ 	idpf_vport_ctrl_lock(netdev);
+ 	vport = idpf_netdev_to_vport(netdev);
+ 
++	err = idpf_set_real_num_queues(vport);
++	if (err)
++		goto unlock;
++
+ 	err = idpf_vport_open(vport);
+ 
++unlock:
+ 	idpf_vport_ctrl_unlock(netdev);
+ 
+ 	return err;
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 2fa9c36e33c9c8..9be6a6b59c4e14 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3008,8 +3008,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 		return -EINVAL;
+ 
+ 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
+-	if (unlikely(rsc_segments == 1))
+-		return 0;
+ 
+ 	NAPI_GRO_CB(skb)->count = rsc_segments;
+ 	skb_shinfo(skb)->gso_size = rsc_seg_len;
+@@ -3072,6 +3070,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
+ 
+ 	skb->protocol = eth_type_trans(skb, rxq->netdev);
++	skb_record_rx_queue(skb, rxq->idx);
+ 
+ 	if (le16_get_bits(rx_desc->hdrlen_flags,
+ 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
+@@ -3080,8 +3079,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
+ 
+-	skb_record_rx_queue(skb, rxq->idx);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 27872bdea9bd1f..8dd0fb86e3a274 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
+ 		return -ENOMEM;
+ 	}
+ 
++	buffer->type = IGC_TX_BUFFER_TYPE_SKB;
+ 	buffer->skb = skb;
+ 	buffer->protocol = 0;
+ 	buffer->bytecount = skb->len;
+@@ -2707,8 +2708,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
+ }
+ 
+ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+-					    struct xdp_buff *xdp)
++					    struct igc_xdp_buff *ctx)
+ {
++	struct xdp_buff *xdp = &ctx->xdp;
+ 	unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ 	unsigned int metasize = xdp->data - xdp->data_meta;
+ 	struct sk_buff *skb;
+@@ -2727,27 +2729,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
+ 		__skb_pull(skb, metasize);
+ 	}
+ 
++	if (ctx->rx_ts) {
++		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
++		skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
++	}
++
+ 	return skb;
+ }
+ 
+ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
+ 				union igc_adv_rx_desc *desc,
+-				struct xdp_buff *xdp,
+-				ktime_t timestamp)
++				struct igc_xdp_buff *ctx)
+ {
+ 	struct igc_ring *ring = q_vector->rx.ring;
+ 	struct sk_buff *skb;
+ 
+-	skb = igc_construct_skb_zc(ring, xdp);
++	skb = igc_construct_skb_zc(ring, ctx);
+ 	if (!skb) {
+ 		ring->rx_stats.alloc_failed++;
+ 		set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
+ 		return;
+ 	}
+ 
+-	if (timestamp)
+-		skb_hwtstamps(skb)->hwtstamp = timestamp;
+-
+ 	if (igc_cleanup_headers(ring, desc, skb))
+ 		return;
+ 
+@@ -2783,7 +2786,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 		union igc_adv_rx_desc *desc;
+ 		struct igc_rx_buffer *bi;
+ 		struct igc_xdp_buff *ctx;
+-		ktime_t timestamp = 0;
+ 		unsigned int size;
+ 		int res;
+ 
+@@ -2813,6 +2815,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 			 */
+ 			bi->xdp->data_meta += IGC_TS_HDR_LEN;
+ 			size -= IGC_TS_HDR_LEN;
++		} else {
++			ctx->rx_ts = NULL;
+ 		}
+ 
+ 		bi->xdp->data_end = bi->xdp->data + size;
+@@ -2821,7 +2825,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
+ 		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
+ 		switch (res) {
+ 		case IGC_XDP_PASS:
+-			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
++			igc_dispatch_skb_zc(q_vector, desc, ctx);
+ 			fallthrough;
+ 		case IGC_XDP_CONSUMED:
+ 			xsk_buff_free(bi->xdp);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+index 2bed8c86b7cfc5..3f64cdbabfa3c1 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+ 	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+ 	if (err)
+ 		return;
+-	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
++	err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
++	if (err)
++		return;
+ 	for (i = 0; i < len; i++) {
+ 		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+ 		if (!hw_stats[i].cells_bytes)
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 2be2889d0646ba..f4ddacff08469a 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -684,21 +684,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+ {
+ 	struct am65_cpsw_tx_chn *tx_chn = data;
++	enum am65_cpsw_tx_buf_type buf_type;
+ 	struct cppi5_host_desc_t *desc_tx;
++	struct xdp_frame *xdpf;
+ 	struct sk_buff *skb;
+ 	void **swdata;
+ 
+ 	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ 	swdata = cppi5_hdesc_get_swdata(desc_tx);
+-	skb = *(swdata);
+-	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
++	buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
++	if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
++		skb = *(swdata);
++		dev_kfree_skb_any(skb);
++	} else {
++		xdpf = *(swdata);
++		xdp_return_frame(xdpf);
++	}
+ 
+-	dev_kfree_skb_any(skb);
++	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ }
+ 
+ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ 					   struct net_device *ndev,
+-					   unsigned int len)
++					   unsigned int len,
++					   unsigned int headroom)
+ {
+ 	struct sk_buff *skb;
+ 
+@@ -708,7 +717,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	skb_reserve(skb, AM65_CPSW_HEADROOM);
++	skb_reserve(skb, headroom);
+ 	skb->dev = ndev;
+ 
+ 	return skb;
+@@ -1133,9 +1142,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
+ 	struct xdp_frame *xdpf;
+ 	struct bpf_prog *prog;
+ 	struct page *page;
++	int pkt_len;
+ 	u32 act;
+ 	int err;
+ 
++	pkt_len = *len;
+ 	prog = READ_ONCE(port->xdp_prog);
+ 	if (!prog)
+ 		return AM65_CPSW_XDP_PASS;
+@@ -1153,8 +1164,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
+ 		netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ 
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
+-		if (unlikely(!xdpf))
++		if (unlikely(!xdpf)) {
++			ndev->stats.tx_dropped++;
+ 			goto drop;
++		}
+ 
+ 		__netif_tx_lock(netif_txq, cpu);
+ 		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+@@ -1163,14 +1176,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
+ 		if (err)
+ 			goto drop;
+ 
+-		dev_sw_netstats_tx_add(ndev, 1, *len);
++		dev_sw_netstats_rx_add(ndev, pkt_len);
+ 		ret = AM65_CPSW_XDP_CONSUMED;
+ 		goto out;
+ 	case XDP_REDIRECT:
+ 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ 			goto drop;
+ 
+-		dev_sw_netstats_rx_add(ndev, *len);
++		dev_sw_netstats_rx_add(ndev, pkt_len);
+ 		ret = AM65_CPSW_XDP_REDIRECT;
+ 		goto out;
+ 	default:
+@@ -1279,16 +1292,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ 	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+ 
+ 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+-
+ 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ 
+-	skb = am65_cpsw_build_skb(page_addr, ndev,
+-				  AM65_CPSW_MAX_PACKET_SIZE);
+-	if (unlikely(!skb)) {
+-		new_page = page;
+-		goto requeue;
+-	}
+-
+ 	if (port->xdp_prog) {
+ 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
+ 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
+@@ -1298,9 +1303,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ 		if (*xdp_state != AM65_CPSW_XDP_PASS)
+ 			goto allocate;
+ 
+-		/* Compute additional headroom to be reserved */
+-		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
+-		skb_reserve(skb, headroom);
++		headroom = xdp.data - xdp.data_hard_start;
++	} else {
++		headroom = AM65_CPSW_HEADROOM;
++	}
++
++	skb = am65_cpsw_build_skb(page_addr, ndev,
++				  AM65_CPSW_MAX_PACKET_SIZE, headroom);
++	if (unlikely(!skb)) {
++		new_page = page;
++		goto requeue;
+ 	}
+ 
+ 	ndev_priv = netdev_priv(ndev);
+diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
+index dc7cbd6a9798a7..f4019815f47361 100644
+--- a/drivers/net/team/team_core.c
++++ b/drivers/net/team/team_core.c
+@@ -2639,7 +2639,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
+ 				ctx.data.u32_val = nla_get_u32(attr_data);
+ 				break;
+ 			case TEAM_OPTION_TYPE_STRING:
+-				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
++				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
++				    !memchr(nla_data(attr_data), '\0',
++					    nla_len(attr_data))) {
+ 					err = -EINVAL;
+ 					goto team_put;
+ 				}
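
The team_core fix tightens option validation: a string attribute must carry a NUL terminator somewhere inside its payload, or later C string handling could read past the buffer. A standalone sketch of the same check (the attribute layout is simplified; the length limit is modeled, not quoted):

#include <stdio.h>
#include <string.h>

#define STRING_MAX_LEN 32 /* stands in for TEAM_STRING_MAX_LEN */

/* Returns 0 when the payload is an acceptable string option. */
static int validate_string_opt(const void *data, size_t len)
{
	if (len > STRING_MAX_LEN)
		return -1;
	if (!memchr(data, '\0', len)) /* no terminator inside the payload */
		return -1;
	return 0;
}

int main(void)
{
	char good[] = "eth0";                 /* sizeof includes the '\0' */
	char bad[4] = { 'e', 't', 'h', '0' }; /* unterminated */

	printf("good=%d bad=%d\n",
	       validate_string_opt(good, sizeof(good)),
	       validate_string_opt(bad, sizeof(bad)));
	return 0;
}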
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 9ea63059d52d75..cbe0f191a116bc 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2904,8 +2904,11 @@ static int vxlan_init(struct net_device *dev)
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+ 	int err;
+ 
+-	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+-		vxlan_vnigroup_init(vxlan);
++	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
++		err = vxlan_vnigroup_init(vxlan);
++		if (err)
++			return err;
++	}
+ 
+ 	err = gro_cells_init(&vxlan->gro_cells, dev);
+ 	if (err)
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
+index dced2aa9ba1a3e..d953742b67e149 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.c
++++ b/drivers/net/wireless/ath/ath12k/wmi.c
+@@ -4681,6 +4681,22 @@ static struct ath12k_reg_rule
+ 	return reg_rule_ptr;
+ }
+ 
++static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
++					    u32 num_reg_rules)
++{
++	u8 num_invalid_5ghz_rules = 0;
++	u32 count, start_freq;
++
++	for (count = 0; count < num_reg_rules; count++) {
++		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
++
++		if (start_freq >= ATH12K_MIN_6G_FREQ)
++			num_invalid_5ghz_rules++;
++	}
++
++	return num_invalid_5ghz_rules;
++}
++
+ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 						   struct sk_buff *skb,
+ 						   struct ath12k_reg_info *reg_info)
+@@ -4691,6 +4707,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 	u32 num_2g_reg_rules, num_5g_reg_rules;
+ 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
++	u8 num_invalid_5ghz_ext_rules;
+ 	u32 total_reg_rules = 0;
+ 	int ret, i, j;
+ 
+@@ -4784,20 +4801,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 
+ 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+ 
+-	/* FIXME: Currently FW includes 6G reg rule also in 5G rule
+-	 * list for country US.
+-	 * Having same 6G reg rule in 5G and 6G rules list causes
+-	 * intersect check to be true, and same rules will be shown
+-	 * multiple times in iw cmd. So added hack below to avoid
+-	 * parsing 6G rule from 5G reg rule list, and this can be
+-	 * removed later, after FW updates to remove 6G reg rule
+-	 * from 5G rules list.
+-	 */
+-	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
+-		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
+-		num_5g_reg_rules = reg_info->num_5g_reg_rules;
+-	}
+-
+ 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
+ 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
+ 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
+@@ -4900,8 +4903,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 		}
+ 	}
+ 
++	ext_wmi_reg_rule += num_2g_reg_rules;
++
++	/* Firmware might include a 6 GHz reg rule in the 5 GHz rule
++	 * list for some countries, along with a separate 6 GHz rule.
++	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz
++	 * rule lists causes the intersect check to be true, and the
++	 * same rules will be shown multiple times in the iw output.
++	 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
++	 */
++	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
++								       num_5g_reg_rules);
++
++	if (num_invalid_5ghz_ext_rules) {
++		ath12k_dbg(ab, ATH12K_DBG_WMI,
++			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
++			   reg_info->alpha2, reg_info->num_5g_reg_rules,
++			   num_invalid_5ghz_ext_rules);
++
++		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
++		reg_info->num_5g_reg_rules = num_5g_reg_rules;
++	}
++
+ 	if (num_5g_reg_rules) {
+-		ext_wmi_reg_rule += num_2g_reg_rules;
+ 		reg_info->reg_rules_5g_ptr =
+ 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
+ 						      ext_wmi_reg_rule);
+@@ -4913,7 +4937,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ 		}
+ 	}
+ 
+-	ext_wmi_reg_rule += num_5g_reg_rules;
++	/* We have adjusted the number of 5 GHz reg rules above, but the
++	 * pointer must still be advanced past those rules in ext_wmi_reg_rule.
++	 *
++	 * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
++	 */
++	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
+ 
+ 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ 		reg_info->reg_rules_6g_ap_ptr[i] =
+diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
+index 6f55dbdf629dbe..b16615b116ae71 100644
+--- a/drivers/net/wireless/ath/ath12k/wmi.h
++++ b/drivers/net/wireless/ath/ath12k/wmi.h
+@@ -3943,7 +3943,6 @@ struct ath12k_wmi_eht_rate_set_params {
+ #define MAX_REG_RULES 10
+ #define REG_ALPHA2_LEN 2
+ #define MAX_6G_REG_RULES 5
+-#define REG_US_5G_NUM_REG_RULES 4
+ 
+ enum wmi_start_event_param {
+ 	WMI_VDEV_START_RESP_EVENT = 0,
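
The regulatory change above replaces a US-only workaround with generic bookkeeping: count the 5 GHz entries whose start frequency already belongs to the 6 GHz band, trim them from the 5 GHz rule count, but still advance the parse pointer past the full original span. A userspace sketch of that arithmetic (the frequencies and the 6 GHz floor are illustrative values, not the driver's constants):

#include <stdio.h>

#define MIN_6G_FREQ 5925 /* MHz; stands in for ATH12K_MIN_6G_FREQ */

struct rule { unsigned int start_freq; };

static unsigned int count_invalid_5ghz(const struct rule *r, unsigned int n)
{
	unsigned int invalid = 0;

	for (unsigned int i = 0; i < n; i++)
		if (r[i].start_freq >= MIN_6G_FREQ)
			invalid++;
	return invalid;
}

int main(void)
{
	struct rule rules[] = { { 5170 }, { 5490 }, { 5955 }, { 6415 } };
	unsigned int num_5g = 4;
	const struct rule *p = rules;
	unsigned int invalid = count_invalid_5ghz(p, num_5g);

	num_5g -= invalid;	/* parse only the true 5 GHz rules ... */
	p += num_5g + invalid;	/* ... but skip the full span in the stream */
	printf("parsed %u rules, skipped %u, advanced %ld\n",
	       num_5g, invalid, (long)(p - rules));
	return 0;
}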
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index e4395b1f8c11e8..d2caa80e941235 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
+-	BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
+@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
+-	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
+diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
+index 3644997a834255..24d4f3a3ec3d0e 100644
+--- a/drivers/parport/parport_serial.c
++++ b/drivers/parport/parport_serial.c
+@@ -266,10 +266,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
+ 	{ 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
+ 
+ 	/* WCH CARDS */
+-	{ 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p},
+-	{ 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p},
+-	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382_0s1p},
+-	{ 0x1c00, 0x3250, 0x1c00, 0x3250, 0, 0, wch_ch382_2s1p},
++	{ PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_1S1P,
++	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p },
++	{ PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1P,
++	  0x4348, 0x3253, 0, 0, wch_ch353_2s1p },
++	{ PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_0S1P,
++	  0x1c00, 0x3050, 0, 0, wch_ch382_0s1p },
++	{ PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S1P,
++	  0x1c00, 0x3250, 0, 0, wch_ch382_2s1p },
+ 
+ 	/* BrainBoxes PX272/PX306 MIO card */
+ 	{ PCI_VENDOR_ID_INTASHIELD, 0x4100,
+diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
+index be52e3a123abd0..74dfef8ce9ec1b 100644
+--- a/drivers/pci/controller/pcie-mediatek-gen3.c
++++ b/drivers/pci/controller/pcie-mediatek-gen3.c
+@@ -133,10 +133,18 @@ struct mtk_gen3_pcie;
+ #define PCIE_CONF_LINK2_CTL_STS		(PCIE_CFG_OFFSET_ADDR + 0xb0)
+ #define PCIE_CONF_LINK2_LCR2_LINK_SPEED	GENMASK(3, 0)
+ 
++enum mtk_gen3_pcie_flags {
++	SKIP_PCIE_RSTB	= BIT(0), /* Skip PERST# assertion during device
++				   * probing or suspend/resume phase to
++				   * avoid hw bugs/issues.
++				   */
++};
++
+ /**
+  * struct mtk_gen3_pcie_pdata - differentiate between host generations
+  * @power_up: pcie power_up callback
+  * @phy_resets: phy reset lines SoC data.
++ * @flags: pcie device flags.
+  */
+ struct mtk_gen3_pcie_pdata {
+ 	int (*power_up)(struct mtk_gen3_pcie *pcie);
+@@ -144,6 +152,7 @@ struct mtk_gen3_pcie_pdata {
+ 		const char *id[MAX_NUM_PHY_RESETS];
+ 		int num_resets;
+ 	} phy_resets;
++	u32 flags;
+ };
+ 
+ /**
+@@ -438,22 +447,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
+ 	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
+ 	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
+ 
+-	/* Assert all reset signals */
+-	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+-	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
+-	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+-
+ 	/*
+-	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
+-	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
+-	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+-	 * for the power and clock to become stable.
++	 * Airoha EN7581 has a hardware bug when asserting/releasing the
++	 * PCIE_PE_RSTB signal, causing occasional PCIe link-down events.
++	 * To work around the issue, the PCIE_RSTB signals are not
++	 * asserted/released at this stage and the PCIe block is reset
++	 * using en7523_reset_assert() and en7581_pci_enable().
+ 	 */
+-	msleep(100);
+-
+-	/* De-assert reset signals */
+-	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
+-	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
++	if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
++		/* Assert all reset signals */
++		val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
++		val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
++		       PCIE_PE_RSTB;
++		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
++
++		/*
++		 * Described in PCIe CEM specification revision 6.0.
++		 *
++		 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
++		 * for the power and clock to become stable.
++		 */
++		msleep(PCIE_T_PVPERL_MS);
++
++		/* De-assert reset signals */
++		val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
++			 PCIE_PE_RSTB);
++		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
++	}
+ 
+ 	/* Check if the link is up or not */
+ 	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
+@@ -1231,10 +1251,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
+ 		return err;
+ 	}
+ 
+-	/* Pull down the PERST# pin */
+-	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+-	val |= PCIE_PE_RSTB;
+-	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
++	if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
++		/* Assert the PERST# pin */
++		val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
++		val |= PCIE_PE_RSTB;
++		writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
++	}
+ 
+ 	dev_dbg(pcie->dev, "entered L2 states successfully");
+ 
+@@ -1285,6 +1307,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ 		.id[2] = "phy-lane2",
+ 		.num_resets = 3,
+ 	},
++	.flags = SKIP_PCIE_RSTB,
+ };
+ 
+ static const struct of_device_id mtk_pcie_of_match[] = {
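
The MediaTek PCIe change keys its PERST# workaround off a flags word in the per-SoC match data, so future SoCs can opt in with one bit instead of a new code path. A compact model of the pattern (the surrounding functions are invented for the sketch):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define SKIP_PCIE_RSTB	BIT(0)	/* skip PERST# toggling on buggy SoCs */

struct soc_pdata {
	const char *name;
	uint32_t flags;
};

static const struct soc_pdata soc_mt8192 = { "mt8192", 0 };
static const struct soc_pdata soc_en7581 = { "en7581", SKIP_PCIE_RSTB };

static void startup_port(const struct soc_pdata *soc)
{
	if (!(soc->flags & SKIP_PCIE_RSTB))
		printf("%s: assert + deassert PERST#\n", soc->name);
	else
		printf("%s: reset handled by the clock/reset driver\n", soc->name);
}

int main(void)
{
	startup_port(&soc_mt8192);
	startup_port(&soc_en7581);
	return 0;
}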
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index 75c66882900343..111caa42f6b750 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -609,10 +609,17 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ 		    struct pci_epf_bar *epf_bar)
+ {
+-	int ret;
++	const struct pci_epc_features *epc_features;
++	enum pci_barno bar = epf_bar->barno;
+ 	int flags = epf_bar->flags;
++	int ret;
+ 
+-	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
++	epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
++	if (!epc_features)
++		return -EINVAL;
++
++	if (epc_features->bar[bar].type == BAR_FIXED &&
++	    (epc_features->bar[bar].fixed_size != epf_bar->size))
+ 		return -EINVAL;
+ 
+ 	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
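
The endpoint-core hunk adds an early sanity check: if the controller reports a BAR as fixed-size, a set_bar request with any other size cannot be honored and is rejected up front. A minimal model of that check (the feature table here is a stand-in for the real pci_epc_features):

#include <stdio.h>
#include <stdint.h>

enum bar_type { BAR_PROGRAMMABLE, BAR_FIXED };

struct bar_features {
	enum bar_type type;
	uint64_t fixed_size; /* meaningful only for BAR_FIXED */
};

static int set_bar(const struct bar_features *f, int barno, uint64_t size)
{
	if (f[barno].type == BAR_FIXED && f[barno].fixed_size != size)
		return -22; /* -EINVAL: the hardware window is not resizable */
	printf("BAR%d mapped, %llu bytes\n", barno, (unsigned long long)size);
	return 0;
}

int main(void)
{
	const struct bar_features feats[2] = {
		{ BAR_PROGRAMMABLE, 0 },
		{ BAR_FIXED, 1 << 20 }, /* BAR1 hardwired to 1 MiB */
	};

	set_bar(feats, 0, 4096);
	printf("mismatch: %d\n", set_bar(feats, 1, 4096)); /* rejected */
	return 0;
}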
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 76f4df75b08a14..0a1f668999cef9 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5521,7 +5521,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
+  * AMD Matisse USB 3.0 Host Controller 0x149c
+  * Intel 82579LM Gigabit Ethernet Controller 0x1502
+  * Intel 82579V Gigabit Ethernet Controller 0x1503
+- *
++ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
+  */
+ static void quirk_no_flr(struct pci_dev *dev)
+ {
+@@ -5533,6 +5533,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
+ 
+ /* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
+ static void quirk_no_flr_snet(struct pci_dev *dev)
+@@ -5984,6 +5985,17 @@ SWITCHTEC_QUIRK(0x5552);  /* PAXA 52XG5 */
+ SWITCHTEC_QUIRK(0x5536);  /* PAXA 36XG5 */
+ SWITCHTEC_QUIRK(0x5528);  /* PAXA 28XG5 */
+ 
++#define SWITCHTEC_PCI100X_QUIRK(vid) \
++	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
++		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
++SWITCHTEC_PCI100X_QUIRK(0x1001);  /* PCI1001XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1002);  /* PCI1002XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1003);  /* PCI1003XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1004);  /* PCI1004XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1005);  /* PCI1005XG4 */
++SWITCHTEC_PCI100X_QUIRK(0x1006);  /* PCI1006XG4 */
++
++
+ /*
+  * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
+  * These IDs are used to forward responses to the originator on the other
+@@ -6253,6 +6265,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+ #endif
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index c7e1089ffdafcb..b14dfab04d846c 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
+ 		.driver_data = gen, \
+ 	}
+ 
++#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
++	{ \
++		.vendor     = PCI_VENDOR_ID_EFAR, \
++		.device     = device_id, \
++		.subvendor  = PCI_ANY_ID, \
++		.subdevice  = PCI_ANY_ID, \
++		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
++		.class_mask = 0xFFFFFFFF, \
++		.driver_data = gen, \
++	}, \
++	{ \
++		.vendor     = PCI_VENDOR_ID_EFAR, \
++		.device     = device_id, \
++		.subvendor  = PCI_ANY_ID, \
++		.subdevice  = PCI_ANY_ID, \
++		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
++		.class_mask = 0xFFFFFFFF, \
++		.driver_data = gen, \
++	}
++
+ static const struct pci_device_id switchtec_pci_tbl[] = {
+ 	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  /* PFX 24xG3 */
+ 	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  /* PFX 32xG3 */
+@@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
+ 	SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5),  /* PAXA 52XG5 */
+ 	SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5),  /* PAXA 36XG5 */
+ 	SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5),  /* PAXA 28XG5 */
++	SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4),  /* PCI1001 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4),  /* PCI1002 12XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4),  /* PCI1003 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4),  /* PCI1004 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4),  /* PCI1005 16XG4 */
++	SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4),  /* PCI1006 16XG4 */
+ 	{0}
+ };
+ MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
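
The PCI100x parts can appear under either of two PCI class codes, so the new macro deliberately expands to a pair of id-table entries that differ only in class. A small sketch of the same trick with a simplified table type (vendor/device values follow the patch; the struct is not the kernel's pci_device_id):

#include <stdio.h>
#include <stdint.h>

struct dev_id { uint16_t vendor, device; uint32_t class; };

#define CLASS_MEMORY_OTHER 0x0580
#define CLASS_BRIDGE_OTHER 0x0680

/* One macro emits two table entries differing only in class code. */
#define PCI100X_DEVICE(dev) \
	{ 0x1055, (dev), CLASS_MEMORY_OTHER }, \
	{ 0x1055, (dev), CLASS_BRIDGE_OTHER }

static const struct dev_id tbl[] = {
	PCI100X_DEVICE(0x1001),
	PCI100X_DEVICE(0x1002),
	{ 0 }
};

int main(void)
{
	for (const struct dev_id *p = tbl; p->vendor; p++)
		printf("%04x:%04x class %04x\n",
		       p->vendor, p->device, (unsigned int)p->class);
	return 0;
}

This is also why the quirks.c side grows its own SWITCHTEC_PCI100X_QUIRK macro keyed on PCI_CLASS_BRIDGE_OTHER.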
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 0b13d7f17b3256..42547f64453e85 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
+ 		seq_puts(s, items[i].display);
+ 		/* Print unit if available */
+ 		if (items[i].has_arg) {
+-			seq_printf(s, " (0x%x",
+-				   pinconf_to_config_argument(config));
++			u32 val = pinconf_to_config_argument(config);
++
+ 			if (items[i].format)
+-				seq_printf(s, " %s)", items[i].format);
++				seq_printf(s, " (%u %s)", val, items[i].format);
+ 			else
+-				seq_puts(s, ")");
++				seq_printf(s, " (0x%x)", val);
+ 		}
+ 	}
+ }
+diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
+index 0d6c2027d4c18a..d73004b4a45e70 100644
+--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
++++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
+@@ -42,7 +42,7 @@
+ #define CY8C95X0_PORTSEL	0x18
+ /* Port settings, write PORTSEL first */
+ #define CY8C95X0_INTMASK	0x19
+-#define CY8C95X0_PWMSEL		0x1A
++#define CY8C95X0_SELPWM		0x1A
+ #define CY8C95X0_INVERT		0x1B
+ #define CY8C95X0_DIRECTION	0x1C
+ /* Drive mode register change state on writing '1' */
+@@ -328,14 +328,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
+ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+ {
+ 	/*
+-	 * Only 12 registers are present per port (see Table 6 in the
+-	 * datasheet).
++	 * Only 12 registers are present per port (see Table 6 in the datasheet).
+ 	 */
+-	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
+-		return true;
++	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
++		return false;
+ 
+ 	switch (reg) {
+ 	case 0x24 ... 0x27:
++	case 0x31 ... 0x3f:
+ 		return false;
+ 	default:
+ 		return true;
+@@ -344,8 +344,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
+ 
+ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+ {
+-	if (reg >= CY8C95X0_VIRTUAL)
+-		return true;
++	/*
++	 * Only 12 registers are present per port (see Table 6 in the datasheet).
++	 */
++	if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
++		return false;
+ 
+ 	switch (reg) {
+ 	case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+@@ -353,6 +356,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
+ 	case CY8C95X0_DEVID:
+ 		return false;
+ 	case 0x24 ... 0x27:
++	case 0x31 ... 0x3f:
+ 		return false;
+ 	default:
+ 		return true;
+@@ -365,8 +369,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
+ 	case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ 	case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ 	case CY8C95X0_INTMASK:
++	case CY8C95X0_SELPWM:
+ 	case CY8C95X0_INVERT:
+-	case CY8C95X0_PWMSEL:
+ 	case CY8C95X0_DIRECTION:
+ 	case CY8C95X0_DRV_PU:
+ 	case CY8C95X0_DRV_PD:
+@@ -395,7 +399,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
+ {
+ 	switch (reg) {
+ 	case CY8C95X0_INTMASK:
+-	case CY8C95X0_PWMSEL:
++	case CY8C95X0_SELPWM:
+ 	case CY8C95X0_INVERT:
+ 	case CY8C95X0_DIRECTION:
+ 	case CY8C95X0_DRV_PU:
+@@ -466,7 +470,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
+ 	.max_register = 0,		/* Updated at runtime */
+ 	.num_reg_defaults_raw = 0,	/* Updated at runtime */
+ 	.use_single_read = true,	/* Workaround for regcache bug */
++#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
++	.disable_locking = false,
++#else
+ 	.disable_locking = true,
++#endif
+ };
+ 
+ static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
+@@ -789,7 +797,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
+ 		reg = CY8C95X0_DIRECTION;
+ 		break;
+ 	case PIN_CONFIG_MODE_PWM:
+-		reg = CY8C95X0_PWMSEL;
++		reg = CY8C95X0_SELPWM;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT:
+ 		reg = CY8C95X0_OUTPUT;
+@@ -868,7 +876,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
+ 		reg = CY8C95X0_DRV_PP_FAST;
+ 		break;
+ 	case PIN_CONFIG_MODE_PWM:
+-		reg = CY8C95X0_PWMSEL;
++		reg = CY8C95X0_SELPWM;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT_ENABLE:
+ 		return cy8c95x0_pinmux_direction(chip, off, !arg);
+@@ -1153,7 +1161,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
+ 	bitmap_zero(mask, MAX_LINE);
+ 	__set_bit(pin, mask);
+ 
+-	if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
++	if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
+ 		seq_puts(s, "not available");
+ 		return;
+ 	}
+@@ -1198,7 +1206,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
+ 	u8 port = cypress_get_port(chip, off);
+ 	u8 bit = cypress_get_pin_mask(chip, off);
+ 
+-	return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
++	return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
+ }
+ 
+ static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
+@@ -1347,7 +1355,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
+ 
+ 	ret = devm_request_threaded_irq(chip->dev, irq,
+ 					NULL, cy8c95x0_irq_handler,
+-					IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
++					IRQF_ONESHOT | IRQF_SHARED,
+ 					dev_name(chip->dev), chip);
+ 	if (ret) {
+ 		dev_err(chip->dev, "failed to request irq %d\n", irq);
+@@ -1438,15 +1446,15 @@ static int cy8c95x0_probe(struct i2c_client *client)
+ 	switch (chip->tpin) {
+ 	case 20:
+ 		strscpy(chip->name, cy8c95x0_id[0].name);
+-		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
++		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE - 1;
+ 		break;
+ 	case 40:
+ 		strscpy(chip->name, cy8c95x0_id[1].name);
+-		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
++		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE - 1;
+ 		break;
+ 	case 60:
+ 		strscpy(chip->name, cy8c95x0_id[2].name);
+-		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
++		regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE - 1;
+ 		break;
+ 	default:
+ 		return -ENODEV;
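
Two details in the cy8c95x0 hunks are worth noting: the readable/writeable predicates must reject the unimplemented slots at the end of each muxed per-port window, and regmap's range_max is inclusive, hence the new `- 1` on each bound. A sketch of the windowing math (base, stride, and per-port register count are assumed values for illustration):

#include <stdio.h>
#include <stdbool.h>

#define VIRTUAL_BASE	0x40	/* start of the muxed register window */
#define MUXED_STRIDE	16	/* slots reserved per port */
#define REGS_PER_PORT	12	/* only 12 of each stride really exist */

/* Models the readable/writeable predicate: reject the pad slots. */
static bool reg_accessible(unsigned int reg, unsigned int nports)
{
	unsigned int max = VIRTUAL_BASE + nports * MUXED_STRIDE - 1; /* inclusive */

	if (reg < VIRTUAL_BASE || reg > max)
		return false;
	return (reg - VIRTUAL_BASE) % MUXED_STRIDE < REGS_PER_PORT;
}

int main(void)
{
	printf("%d\n", reg_accessible(0x40, 3));		/* 1: first register */
	printf("%d\n", reg_accessible(0x40 + 12, 3));		/* 0: pad slot */
	printf("%d\n", reg_accessible(0x40 + 3 * 16 - 1, 3));	/* 0: pad slot */
	printf("%d\n", reg_accessible(0x40 + 3 * 16, 3));	/* 0: past range_max */
	return 0;
}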
+diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c
+index 0a2cfc8ad3c540..9b8bd626a39731 100644
+--- a/drivers/ptp/ptp_vmclock.c
++++ b/drivers/ptp/ptp_vmclock.c
+@@ -414,6 +414,7 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
+ }
+ 
+ static const struct file_operations vmclock_miscdev_fops = {
++	.owner = THIS_MODULE,
+ 	.mmap = vmclock_miscdev_mmap,
+ 	.read = vmclock_miscdev_read,
+ };
+@@ -524,6 +525,8 @@ static int vmclock_probe(struct platform_device *pdev)
+ 		goto out;
+ 	}
+ 
++	dev_set_drvdata(dev, st);
++
+ 	if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC ||
+ 	    le32_to_cpu(st->clk->size) > resource_size(&st->res) ||
+ 	    le16_to_cpu(st->clk->version) != 1) {
+@@ -547,6 +550,8 @@ static int vmclock_probe(struct platform_device *pdev)
+ 		goto out;
+ 	}
+ 
++	st->miscdev.minor = MISC_DYNAMIC_MINOR;
++
+ 	/*
+ 	 * If the structure is big enough, it can be mapped to userspace.
+ 	 * Theoretically a guest OS even using larger pages could still
+@@ -554,7 +559,6 @@ static int vmclock_probe(struct platform_device *pdev)
+ 	 * cross that bridge if/when we come to it.
+ 	 */
+ 	if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) {
+-		st->miscdev.minor = MISC_DYNAMIC_MINOR;
+ 		st->miscdev.fops = &vmclock_miscdev_fops;
+ 		st->miscdev.name = st->name;
+ 
+@@ -587,8 +591,6 @@ static int vmclock_probe(struct platform_device *pdev)
+ 		 (st->miscdev.minor && st->ptp_clock) ? ", " : "",
+ 		 st->ptp_clock ? "PTP" : "");
+ 
+-	dev_set_drvdata(dev, st);
+-
+  out:
+ 	return ret;
+ }
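
Both vmclock fixes are ordering fixes: driver data and the misc minor must be set before the device becomes visible, because a file operation or teardown path may run as soon as registration succeeds. A userspace model of the hazard (registration is simulated by a direct callback):

#include <stdio.h>

struct state { const char *name; };

static struct state *drvdata; /* models dev_get_drvdata() */

static void miscdev_open(void)
{
	/* Would dereference NULL if registration preceded the drvdata store. */
	printf("open sees: %s\n", drvdata ? drvdata->name : "(NULL!)");
}

static void register_device(void)
{
	miscdev_open(); /* userspace may race in immediately */
}

int main(void)
{
	struct state st = { "vmclock0" };

	drvdata = &st;      /* set drvdata first ... */
	register_device();  /* ... then make the device visible */
	return 0;
}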
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 13d9c3e349682c..8524018e899148 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5643,43 +5643,36 @@ regulator_register(struct device *dev,
+ 		goto clean;
+ 	}
+ 
+-	if (config->init_data) {
+-		/*
+-		 * Providing of_match means the framework is expected to parse
+-		 * DT to get the init_data. This would conflict with provided
+-		 * init_data, if set. Warn if it happens.
+-		 */
+-		if (regulator_desc->of_match)
+-			dev_warn(dev, "Using provided init data - OF match ignored\n");
++	/*
++	 * DT may override the config->init_data provided if the platform
++	 * needs to do so. If so, config->init_data is completely ignored.
++	 */
++	init_data = regulator_of_get_init_data(dev, regulator_desc, config,
++					       &rdev->dev.of_node);
+ 
++	/*
++	 * Sometimes not all resources are probed already, so we need to take
++	 * that into account. This happens most of the time if the ena_gpiod
++	 * comes from a GPIO extender or something else.
++	 */
++	if (PTR_ERR(init_data) == -EPROBE_DEFER) {
++		ret = -EPROBE_DEFER;
++		goto clean;
++	}
++
++	/*
++	 * We need to keep track of any GPIO descriptor coming from the
++	 * device tree until we have handed it over to the core. If the
++	 * config that was passed in to this function DOES NOT contain
++	 * a descriptor, and the config after this call DOES contain
++	 * a descriptor, we definitely got one from parsing the device
++	 * tree.
++	 */
++	if (!cfg->ena_gpiod && config->ena_gpiod)
++		dangling_of_gpiod = true;
++	if (!init_data) {
+ 		init_data = config->init_data;
+ 		rdev->dev.of_node = of_node_get(config->of_node);
+-
+-	} else {
+-		init_data = regulator_of_get_init_data(dev, regulator_desc,
+-						       config,
+-						       &rdev->dev.of_node);
+-
+-		/*
+-		 * Sometimes not all resources are probed already so we need to
+-		 * take that into account. This happens most the time if the
+-		 * ena_gpiod comes from a gpio extender or something else.
+-		 */
+-		if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+-			ret = -EPROBE_DEFER;
+-			goto clean;
+-		}
+-
+-		/*
+-		 * We need to keep track of any GPIO descriptor coming from the
+-		 * device tree until we have handled it over to the core. If the
+-		 * config that was passed in to this function DOES NOT contain a
+-		 * descriptor, and the config after this call DOES contain a
+-		 * descriptor, we definitely got one from parsing the device
+-		 * tree.
+-		 */
+-		if (!cfg->ena_gpiod && config->ena_gpiod)
+-			dangling_of_gpiod = true;
+ 	}
+ 
+ 	ww_mutex_init(&rdev->mutex, &regulator_ww_class);
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 1560db00a01248..56823b6a2facc4 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -142,6 +142,7 @@ struct qcom_llcc_config {
+ 	bool skip_llcc_cfg;
+ 	bool no_edac;
+ 	bool irq_configured;
++	bool no_broadcast_register;
+ };
+ 
+ struct qcom_sct_config {
+@@ -154,6 +155,38 @@ enum llcc_reg_offset {
+ 	LLCC_COMMON_STATUS0,
+ };
+ 
++static const struct llcc_slice_config ipq5424_data[] =  {
++	{
++		.usecase_id = LLCC_CPUSS,
++		.slice_id = 1,
++		.max_cap = 768,
++		.priority = 1,
++		.bonus_ways = 0xFFFF,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++		.write_scid_cacheable_en = true,
++		.stale_en = true,
++		.stale_cap_en = true,
++		.alloc_oneway_en = true,
++		.ovcap_en = true,
++		.ovcap_prio = true,
++		.vict_prio = true,
++	},
++	{
++		.usecase_id = LLCC_VIDSC0,
++		.slice_id = 2,
++		.max_cap = 256,
++		.priority = 2,
++		.fixed_size = true,
++		.bonus_ways = 0xF000,
++		.retain_on_pc = true,
++		.activate_on_init = true,
++		.write_scid_cacheable_en = true,
++		.stale_en = true,
++		.stale_cap_en = true,
++	},
++};
++
+ static const struct llcc_slice_config sa8775p_data[] =  {
+ 	{
+ 		.usecase_id = LLCC_CPUSS,
+@@ -3186,6 +3219,16 @@ static const struct qcom_llcc_config qdu1000_cfg[] = {
+ 	},
+ };
+ 
++static const struct qcom_llcc_config ipq5424_cfg[] = {
++	{
++		.sct_data       = ipq5424_data,
++		.size           = ARRAY_SIZE(ipq5424_data),
++		.reg_offset     = llcc_v2_1_reg_offset,
++		.edac_reg_offset = &llcc_v2_1_edac_reg_offset,
++		.no_broadcast_register = true,
++	},
++};
++
+ static const struct qcom_llcc_config sa8775p_cfg[] = {
+ 	{
+ 		.sct_data	= sa8775p_data,
+@@ -3361,6 +3404,11 @@ static const struct qcom_sct_config qdu1000_cfgs = {
+ 	.num_config	= ARRAY_SIZE(qdu1000_cfg),
+ };
+ 
++static const struct qcom_sct_config ipq5424_cfgs = {
++	.llcc_config	= ipq5424_cfg,
++	.num_config	= ARRAY_SIZE(ipq5424_cfg),
++};
++
+ static const struct qcom_sct_config sa8775p_cfgs = {
+ 	.llcc_config	= sa8775p_cfg,
+ 	.num_config	= ARRAY_SIZE(sa8775p_cfg),
+@@ -3958,8 +4006,12 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 
+ 	drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
+ 	if (IS_ERR(drv_data->bcast_regmap)) {
+-		ret = PTR_ERR(drv_data->bcast_regmap);
+-		goto err;
++		if (cfg->no_broadcast_register) {
++			drv_data->bcast_regmap = regmap;
++		} else {
++			ret = PTR_ERR(drv_data->bcast_regmap);
++			goto err;
++		}
+ 	}
+ 
+ 	/* Extract version of the IP */
+@@ -4030,6 +4082,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ }
+ 
+ static const struct of_device_id qcom_llcc_of_match[] = {
++	{ .compatible = "qcom,ipq5424-llcc", .data = &ipq5424_cfgs},
+ 	{ .compatible = "qcom,qcs615-llcc", .data = &qcs615_cfgs},
+ 	{ .compatible = "qcom,qcs8300-llcc", .data = &qcs8300_cfgs},
+ 	{ .compatible = "qcom,qdu1000-llcc", .data = &qdu1000_cfgs},
+diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
+index 4783ab1adb8d95..a3e88ced328a91 100644
+--- a/drivers/soc/qcom/smp2p.c
++++ b/drivers/soc/qcom/smp2p.c
+@@ -365,7 +365,7 @@ static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
+ {
+ 	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ 
+-	seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
++	seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
+ }
+ 
+ static struct irq_chip smp2p_irq_chip = {
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
+index eb14e5ff5a0aa8..e24ab5f7d2bf10 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
+@@ -647,15 +647,20 @@ static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
+ };
+ 
+ static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
+-	{ .start = 0x01c, .end = 0x0c8 },
+-	{ .start = 0x12c, .end = 0x184 },
++	{ .start = 0x01c, .end = 0x064 },
++	{ .start = 0x084, .end = 0x0a0 },
++	{ .start = 0x0a4, .end = 0x0c8 },
++	{ .start = 0x12c, .end = 0x164 },
++	{ .start = 0x16c, .end = 0x184 },
+ 	{ .start = 0x190, .end = 0x198 },
+ 	{ .start = 0x1a0, .end = 0x204 },
+-	{ .start = 0x21c, .end = 0x250 },
+-	{ .start = 0x25c, .end = 0x2f0 },
++	{ .start = 0x21c, .end = 0x2f0 },
+ 	{ .start = 0x310, .end = 0x3d8 },
+-	{ .start = 0x400, .end = 0x4f0 },
+-	{ .start = 0x4f8, .end = 0x7e8 },
++	{ .start = 0x400, .end = 0x420 },
++	{ .start = 0x444, .end = 0x490 },
++	{ .start = 0x4bc, .end = 0x4f0 },
++	{ .start = 0x4f8, .end = 0x54c },
++	{ .start = 0x57c, .end = 0x7e8 },
+ 	{ .start = 0x8d0, .end = 0x8d8 },
+ 	{ .start = 0xacc, .end = 0xf00 }
+ };
+diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
+index adac645732fedf..56ef114effc973 100644
+--- a/drivers/spi/spi-sn-f-ospi.c
++++ b/drivers/spi/spi-sn-f-ospi.c
+@@ -116,6 +116,9 @@ struct f_ospi {
+ 
+ static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
+ {
++	if (!op->dummy.nbytes)
++		return 0;
++
+ 	return (op->dummy.nbytes * 8) / op->dummy.buswidth;
+ }
+ 
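
The f_ospi fix guards a division: when an operation carries no dummy bytes, the dummy buswidth may legitimately be zero, and the unguarded expression divides by zero. A tiny standalone version of the guarded computation:

#include <stdio.h>

struct dummy { unsigned int nbytes, buswidth; };

static unsigned int dummy_cycles(const struct dummy *d)
{
	if (!d->nbytes)			/* buswidth may be 0 here */
		return 0;
	return d->nbytes * 8 / d->buswidth;
}

int main(void)
{
	struct dummy none = { 0, 0 }, some = { 2, 4 };

	printf("%u %u\n", dummy_cycles(&none), dummy_cycles(&some)); /* 0 4 */
	return 0;
}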
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index e5310c65cf52b3..10a706fe4b247d 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -374,6 +374,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
+ 
+ #ifdef CONFIG_SERIAL_8250_DMA
+ extern int serial8250_tx_dma(struct uart_8250_port *);
++extern void serial8250_tx_dma_flush(struct uart_8250_port *);
+ extern int serial8250_rx_dma(struct uart_8250_port *);
+ extern void serial8250_rx_dma_flush(struct uart_8250_port *);
+ extern int serial8250_request_dma(struct uart_8250_port *);
+@@ -406,6 +407,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
+ {
+ 	return -1;
+ }
++static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
+ static inline int serial8250_rx_dma(struct uart_8250_port *p)
+ {
+ 	return -1;
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index d215c494ee24c1..f245a84f4a508d 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -149,6 +149,22 @@ int serial8250_tx_dma(struct uart_8250_port *p)
+ 	return ret;
+ }
+ 
++void serial8250_tx_dma_flush(struct uart_8250_port *p)
++{
++	struct uart_8250_dma *dma = p->dma;
++
++	if (!dma->tx_running)
++		return;
++
++	/*
++	 * kfifo_reset() has been called by the serial core, so avoid
++	 * advancing and underflowing in __dma_tx_complete().
++	 */
++	dma->tx_size = 0;
++
++	dmaengine_terminate_async(dma->txchan);
++}
++
+ int serial8250_rx_dma(struct uart_8250_port *p)
+ {
+ 	struct uart_8250_dma		*dma = p->dma;
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 3c3f7c926afb87..df4d0d832e5421 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -64,23 +64,17 @@
+ #define PCIE_DEVICE_ID_NEO_2_OX_IBM	0x00F6
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA	0xc001
+ #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
+-#define PCI_VENDOR_ID_WCH		0x4348
+-#define PCI_DEVICE_ID_WCH_CH352_2S	0x3253
+-#define PCI_DEVICE_ID_WCH_CH353_4S	0x3453
+-#define PCI_DEVICE_ID_WCH_CH353_2S1PF	0x5046
+-#define PCI_DEVICE_ID_WCH_CH353_1S1P	0x5053
+-#define PCI_DEVICE_ID_WCH_CH353_2S1P	0x7053
+-#define PCI_DEVICE_ID_WCH_CH355_4S	0x7173
++
++#define PCI_DEVICE_ID_WCHCN_CH352_2S	0x3253
++#define PCI_DEVICE_ID_WCHCN_CH355_4S	0x7173
++
+ #define PCI_VENDOR_ID_AGESTAR		0x5372
+ #define PCI_DEVICE_ID_AGESTAR_9375	0x6872
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
+ 
+-#define PCIE_VENDOR_ID_WCH		0x1c00
+-#define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
+-#define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
+-#define PCIE_DEVICE_ID_WCH_CH384_8S	0x3853
+-#define PCIE_DEVICE_ID_WCH_CH382_2S	0x3253
++#define PCI_DEVICE_ID_WCHIC_CH384_4S	0x3470
++#define PCI_DEVICE_ID_WCHIC_CH384_8S	0x3853
+ 
+ #define PCI_DEVICE_ID_MOXA_CP102E	0x1024
+ #define PCI_DEVICE_ID_MOXA_CP102EL	0x1025
+@@ -2817,80 +2811,80 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
+ 	},
+ 	/* WCH CH353 1S1P card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_1S1P,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_1S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 2S1P card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_2S1P,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_2S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 4S card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_4S,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_4S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH353 2S1PF card (16550 clone) */
+ 	{
+-		.vendor         = PCI_VENDOR_ID_WCH,
+-		.device         = PCI_DEVICE_ID_WCH_CH353_2S1PF,
++		.vendor         = PCI_VENDOR_ID_WCHCN,
++		.device         = PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH352 2S card (16550 clone) */
+ 	{
+-		.vendor		= PCI_VENDOR_ID_WCH,
+-		.device		= PCI_DEVICE_ID_WCH_CH352_2S,
++		.vendor		= PCI_VENDOR_ID_WCHCN,
++		.device		= PCI_DEVICE_ID_WCHCN_CH352_2S,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_wch_ch353_setup,
+ 	},
+ 	/* WCH CH355 4S card (16550 clone) */
+ 	{
+-		.vendor		= PCI_VENDOR_ID_WCH,
+-		.device		= PCI_DEVICE_ID_WCH_CH355_4S,
++		.vendor		= PCI_VENDOR_ID_WCHCN,
++		.device		= PCI_DEVICE_ID_WCHCN_CH355_4S,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_wch_ch355_setup,
+ 	},
+ 	/* WCH CH382 2S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH382_2S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH382_2S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH382 2S1P card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH382_2S1P,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH382_2S1P,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH384 4S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH384_4S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH384_4S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.setup          = pci_wch_ch38x_setup,
+ 	},
+ 	/* WCH CH384 8S card (16850 clone) */
+ 	{
+-		.vendor         = PCIE_VENDOR_ID_WCH,
+-		.device         = PCIE_DEVICE_ID_WCH_CH384_8S,
++		.vendor         = PCI_VENDOR_ID_WCHIC,
++		.device         = PCI_DEVICE_ID_WCHIC_CH384_8S,
+ 		.subvendor      = PCI_ANY_ID,
+ 		.subdevice      = PCI_ANY_ID,
+ 		.init           = pci_wch_ch38x_init,
+@@ -3967,11 +3961,11 @@ static const struct pci_device_id blacklist[] = {
+ 
+ 	/* multi-io cards handled by parport_serial */
+ 	/* WCH CH353 2S1P */
+-	{ PCI_DEVICE(0x4348, 0x7053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHCN, 0x7053), REPORT_CONFIG(PARPORT_SERIAL), },
+ 	/* WCH CH353 1S1P */
+-	{ PCI_DEVICE(0x4348, 0x5053), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHCN, 0x5053), REPORT_CONFIG(PARPORT_SERIAL), },
+ 	/* WCH CH382 2S1P */
+-	{ PCI_DEVICE(0x1c00, 0x3250), 0, 0, REPORT_CONFIG(PARPORT_SERIAL), },
++	{ PCI_VDEVICE(WCHIC, 0x3250), REPORT_CONFIG(PARPORT_SERIAL), },
+ 
+ 	/* Intel platforms with MID UART */
+ 	{ PCI_VDEVICE(INTEL, 0x081b), REPORT_8250_CONFIG(MID), },
+@@ -6044,27 +6038,27 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 	 * WCH CH353 series devices: The 2S1P is handled by parport_serial
+ 	 * so not listed here.
+ 	 */
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_4S,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_4_115200 },
+ 
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH353_2S1PF,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH353_2S1PF,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_2_115200 },
+ 
+-	{	PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
++	{	PCI_VENDOR_ID_WCHCN, PCI_DEVICE_ID_WCHCN_CH355_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_4_115200 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH382_2S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch382_2 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_4 },
+ 
+-	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S,
++	{	PCI_VENDOR_ID_WCHIC, PCI_DEVICE_ID_WCHIC_CH384_8S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_8 },
+ 	/*
+diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
+index 838f181f929bf0..e9c51d4e447dd2 100644
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -78,6 +78,12 @@
+ #define UART_TX_BYTE_FIFO			0x00
+ #define UART_FIFO_CTL				0x02
+ 
++#define UART_MODEM_CTL_REG			0x04
++#define UART_MODEM_CTL_RTS_SET			BIT(1)
++
++#define UART_LINE_STAT_REG			0x05
++#define UART_LINE_XMIT_CHECK_MASK		GENMASK(6, 5)
++
+ #define UART_ACTV_REG				0x11
+ #define UART_BLOCK_SET_ACTIVE			BIT(0)
+ 
+@@ -94,6 +100,7 @@
+ #define UART_BIT_SAMPLE_CNT_16			16
+ #define BAUD_CLOCK_DIV_INT_MSK			GENMASK(31, 8)
+ #define ADCL_CFG_RTS_DELAY_MASK			GENMASK(11, 8)
++#define FRAC_DIV_TX_END_POINT_MASK		GENMASK(23, 20)
+ 
+ #define UART_WAKE_REG				0x8C
+ #define UART_WAKE_MASK_REG			0x90
+@@ -134,6 +141,11 @@
+ #define UART_BST_STAT_LSR_FRAME_ERR		0x8000000
+ #define UART_BST_STAT_LSR_THRE			0x20000000
+ 
++#define GET_MODEM_CTL_RTS_STATUS(reg)		((reg) & UART_MODEM_CTL_RTS_SET)
++#define GET_RTS_PIN_STATUS(val)			(((val) & TIOCM_RTS) >> 1)
++#define RTS_TOGGLE_STATUS_MASK(val, reg)	(GET_MODEM_CTL_RTS_STATUS(reg) \
++						 != GET_RTS_PIN_STATUS(val))
++
+ struct pci1xxxx_8250 {
+ 	unsigned int nr;
+ 	u8 dev_rev;
+@@ -254,6 +266,47 @@ static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
+ 	       port->membase + UART_BAUD_CLK_DIVISOR_REG);
+ }
+ 
++static void pci1xxxx_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++	u32 fract_div_cfg_reg;
++	u32 line_stat_reg;
++	u32 modem_ctl_reg;
++	u32 adcl_cfg_reg;
++
++	adcl_cfg_reg = readl(port->membase + ADCL_CFG_REG);
++
++	/* HW is responsible in ADCL_EN case */
++	if ((adcl_cfg_reg & (ADCL_CFG_EN | ADCL_CFG_PIN_SEL)))
++		return;
++
++	modem_ctl_reg = readl(port->membase + UART_MODEM_CTL_REG);
++
++	serial8250_do_set_mctrl(port, mctrl);
++
++	if (RTS_TOGGLE_STATUS_MASK(mctrl, modem_ctl_reg)) {
++		line_stat_reg = readl(port->membase + UART_LINE_STAT_REG);
++		if (line_stat_reg & UART_LINE_XMIT_CHECK_MASK) {
++			fract_div_cfg_reg = readl(port->membase +
++						  FRAC_DIV_CFG_REG);
++
++			writel((fract_div_cfg_reg &
++			       ~(FRAC_DIV_TX_END_POINT_MASK)),
++			       port->membase + FRAC_DIV_CFG_REG);
++
++			/* Enable ADC and set the nRTS pin */
++			writel((adcl_cfg_reg | (ADCL_CFG_EN |
++			       ADCL_CFG_PIN_SEL)),
++			       port->membase + ADCL_CFG_REG);
++
++			/* Revert to the original settings */
++			writel(adcl_cfg_reg, port->membase + ADCL_CFG_REG);
++
++			writel(fract_div_cfg_reg, port->membase +
++			       FRAC_DIV_CFG_REG);
++		}
++	}
++}
++
+ static int pci1xxxx_rs485_config(struct uart_port *port,
+ 				 struct ktermios *termios,
+ 				 struct serial_rs485 *rs485)
+@@ -631,9 +684,14 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
+ 	port->port.rs485_config = pci1xxxx_rs485_config;
+ 	port->port.rs485_supported = pci1xxxx_rs485_supported;
+ 
+-	/* From C0 rev Burst operation is supported */
++	/*
++	 * C0 and later revisions support Burst operation.
++	 * The RTS workaround in set_mctrl is applicable only to B0.
++	 */
+ 	if (rev >= 0xC0)
+ 		port->port.handle_irq = pci1xxxx_handle_irq;
++	else if (rev == 0xB0)
++		port->port.set_mctrl = pci1xxxx_set_mctrl;
+ 
+ 	ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
+ 	if (ret < 0)
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index b8babbdec8f3f6..27572e5f1ff119 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2544,6 +2544,14 @@ static void serial8250_shutdown(struct uart_port *port)
+ 		serial8250_do_shutdown(port);
+ }
+ 
++static void serial8250_flush_buffer(struct uart_port *port)
++{
++	struct uart_8250_port *up = up_to_u8250p(port);
++
++	if (up->dma)
++		serial8250_tx_dma_flush(up);
++}
++
+ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
+ 					      unsigned int baud,
+ 					      unsigned int *frac)
+@@ -3227,6 +3235,7 @@ static const struct uart_ops serial8250_pops = {
+ 	.break_ctl	= serial8250_break_ctl,
+ 	.startup	= serial8250_startup,
+ 	.shutdown	= serial8250_shutdown,
++	.flush_buffer	= serial8250_flush_buffer,
+ 	.set_termios	= serial8250_set_termios,
+ 	.set_ldisc	= serial8250_set_ldisc,
+ 	.pm		= serial8250_pm,
+diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
+index d35f1d24156c22..85285c56fabff4 100644
+--- a/drivers/tty/serial/serial_port.c
++++ b/drivers/tty/serial/serial_port.c
+@@ -173,6 +173,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
+  * The caller is responsible to initialize the following fields of the @port
+  *   ->dev (must be valid)
+  *   ->flags
++ *   ->iobase
+  *   ->mapbase
+  *   ->mapsize
+  *   ->regshift (if @use_defaults is false)
+@@ -214,7 +215,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+ 	/* Read the registers I/O access type (default: MMIO 8-bit) */
+ 	ret = device_property_read_u32(dev, "reg-io-width", &value);
+ 	if (ret) {
+-		port->iotype = UPIO_MEM;
++		port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
+ 	} else {
+ 		switch (value) {
+ 		case 1:
+@@ -227,11 +228,11 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+ 			port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
+ 			break;
+ 		default:
++			port->iotype = UPIO_UNKNOWN;
+ 			if (!use_defaults) {
+ 				dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ 				return -EINVAL;
+ 			}
+-			port->iotype = UPIO_UNKNOWN;
+ 			break;
+ 		}
+ 	}
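
With this serial_port change, the default I/O access type depends on whether the caller supplied a legacy I/O-port base: absent a reg-io-width property, a port with ->iobase set is treated as port I/O instead of MMIO. A compact model of the decision:

#include <stdio.h>

enum iotype { UPIO_PORT, UPIO_MEM };

struct port { unsigned long iobase; enum iotype iotype; };

/* has_prop models whether DT/ACPI provided a reg-io-width property. */
static void read_properties(struct port *p, int has_prop)
{
	if (!has_prop)
		p->iotype = p->iobase ? UPIO_PORT : UPIO_MEM;
	else
		p->iotype = UPIO_MEM; /* width handling elided in this sketch */
}

int main(void)
{
	struct port legacy = { .iobase = 0x3f8 }, mmio = { 0 };

	read_properties(&legacy, 0);
	read_properties(&mmio, 0);
	printf("legacy=%d mmio=%d\n", legacy.iotype, mmio.iotype); /* 0 1 */
	return 0;
}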
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 58023f735c195f..8d4ad0a3f2cf02 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -216,6 +216,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
+ 		return;
+ 
+ 	bsg_remove_queue(hba->bsg_queue);
++	hba->bsg_queue = NULL;
+ 
+ 	device_del(bsg_dev);
+ 	put_device(bsg_dev);
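
Clearing hba->bsg_queue after tearing the queue down is the usual defense against double teardown: any later caller that tests the pointer sees NULL and skips the already-freed object. A sketch of the idiom:

#include <stdio.h>
#include <stdlib.h>

struct hba { void *bsg_queue; };

static void bsg_remove(struct hba *h)
{
	if (!h->bsg_queue)	/* a second call becomes a harmless no-op */
		return;
	free(h->bsg_queue);
	h->bsg_queue = NULL;	/* the fix: no dangling pointer left behind */
}

int main(void)
{
	struct hba h = { .bsg_queue = malloc(16) };

	bsg_remove(&h);
	bsg_remove(&h);		/* would double-free without the NULLing */
	printf("ok\n");
	return 0;
}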
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index d4a628169a51a3..56b32d245c2ee6 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -258,10 +258,15 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ 	return UFS_PM_LVL_0;
+ }
+ 
++static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
++{
++	return hba->outstanding_tasks || hba->active_uic_cmd ||
++	       hba->uic_async_done;
++}
++
+ static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+ {
+-	return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
+-		hba->active_uic_cmd || hba->uic_async_done);
++	return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ }
+ 
+ static const struct ufs_dev_quirk ufs_fixups[] = {
+@@ -1811,19 +1816,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+ static void ufshcd_ungate_work(struct work_struct *work)
+ {
+ 	int ret;
+-	unsigned long flags;
+ 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ 			clk_gating.ungate_work);
+ 
+ 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+-	if (hba->clk_gating.state == CLKS_ON) {
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+-		return;
++	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
++		if (hba->clk_gating.state == CLKS_ON)
++			return;
+ 	}
+ 
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	ufshcd_hba_vreg_set_hpm(hba);
+ 	ufshcd_setup_clocks(hba, true);
+ 
+@@ -1858,7 +1860,7 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 	if (!ufshcd_is_clkgating_allowed(hba) ||
+ 	    !hba->clk_gating.is_initialized)
+ 		return;
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 	hba->clk_gating.active_reqs++;
+ 
+ start:
+@@ -1874,11 +1876,11 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 		 */
+ 		if (ufshcd_can_hibern8_during_gating(hba) &&
+ 		    ufshcd_is_link_hibern8(hba)) {
+-			spin_unlock_irqrestore(hba->host->host_lock, flags);
++			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ 			flush_result = flush_work(&hba->clk_gating.ungate_work);
+ 			if (hba->clk_gating.is_suspended && !flush_result)
+ 				return;
+-			spin_lock_irqsave(hba->host->host_lock, flags);
++			spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 			goto start;
+ 		}
+ 		break;
+@@ -1907,17 +1909,17 @@ void ufshcd_hold(struct ufs_hba *hba)
+ 		 */
+ 		fallthrough;
+ 	case REQ_CLKS_ON:
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
++		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ 		flush_work(&hba->clk_gating.ungate_work);
+ 		/* Make sure state is CLKS_ON before returning */
+-		spin_lock_irqsave(hba->host->host_lock, flags);
++		spin_lock_irqsave(&hba->clk_gating.lock, flags);
+ 		goto start;
+ 	default:
+ 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+ 				__func__, hba->clk_gating.state);
+ 		break;
+ 	}
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_hold);
+ 
+@@ -1925,28 +1927,32 @@ static void ufshcd_gate_work(struct work_struct *work)
+ {
+ 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ 			clk_gating.gate_work.work);
+-	unsigned long flags;
+ 	int ret;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
+-	/*
+-	 * In case you are here to cancel this work the gating state
+-	 * would be marked as REQ_CLKS_ON. In this case save time by
+-	 * skipping the gating work and exit after changing the clock
+-	 * state to CLKS_ON.
+-	 */
+-	if (hba->clk_gating.is_suspended ||
+-		(hba->clk_gating.state != REQ_CLKS_OFF)) {
+-		hba->clk_gating.state = CLKS_ON;
+-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+-					hba->clk_gating.state);
+-		goto rel_lock;
+-	}
++	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
++		/*
++		 * In case you are here to cancel this work, the gating state
++		 * would be marked as REQ_CLKS_ON. In that case, save time by
++		 * skipping the gating work and exiting after changing the
++		 * clock state to CLKS_ON.
++		 */
++		if (hba->clk_gating.is_suspended ||
++		    hba->clk_gating.state != REQ_CLKS_OFF) {
++			hba->clk_gating.state = CLKS_ON;
++			trace_ufshcd_clk_gating(dev_name(hba->dev),
++						hba->clk_gating.state);
++			return;
++		}
+ 
+-	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+-		goto rel_lock;
++		if (hba->clk_gating.active_reqs)
++			return;
++	}
+ 
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
++		if (ufshcd_is_ufs_dev_busy(hba) ||
++		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
++			return;
++	}
+ 
+ 	/* put the link into hibern8 mode before turning off clocks */
+ 	if (ufshcd_can_hibern8_during_gating(hba)) {
+@@ -1957,7 +1963,7 @@ static void ufshcd_gate_work(struct work_struct *work)
+ 					__func__, ret);
+ 			trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 						hba->clk_gating.state);
+-			goto out;
++			return;
+ 		}
+ 		ufshcd_set_link_hibern8(hba);
+ 	}
+@@ -1977,33 +1983,34 @@ static void ufshcd_gate_work(struct work_struct *work)
+ 	 * prevent from doing cancel work multiple times when there are
+ 	 * new requests arriving before the current cancel work is done.
+ 	 */
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
+ 		hba->clk_gating.state = CLKS_OFF;
+ 		trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 					hba->clk_gating.state);
+ 	}
+-rel_lock:
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+-out:
+-	return;
+ }
+ 
+-/* host lock must be held before calling this variant */
+ static void __ufshcd_release(struct ufs_hba *hba)
+ {
++	lockdep_assert_held(&hba->clk_gating.lock);
++
+ 	if (!ufshcd_is_clkgating_allowed(hba))
+ 		return;
+ 
+ 	hba->clk_gating.active_reqs--;
+ 
+ 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+-	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
+-	    hba->active_uic_cmd || hba->uic_async_done ||
++	    !hba->clk_gating.is_initialized ||
+ 	    hba->clk_gating.state == CLKS_OFF)
+ 		return;
+ 
++	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
++		if (ufshcd_has_pending_tasks(hba) ||
++		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
++			return;
++	}
++
+ 	hba->clk_gating.state = REQ_CLKS_OFF;
+ 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
+@@ -2013,11 +2020,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
+ 
+ void ufshcd_release(struct ufs_hba *hba)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	__ufshcd_release(hba);
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_release);
+ 
+@@ -2032,11 +2036,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+ void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+ {
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+ 	hba->clk_gating.delay_ms = value;
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+ 
+@@ -2064,7 +2066,6 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 		struct device_attribute *attr, const char *buf, size_t count)
+ {
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+-	unsigned long flags;
+ 	u32 value;
+ 
+ 	if (kstrtou32(buf, 0, &value))
+@@ -2072,9 +2073,10 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 
+ 	value = !!value;
+ 
+-	spin_lock_irqsave(hba->host->host_lock, flags);
++	guard(spinlock_irqsave)(&hba->clk_gating.lock);
++
+ 	if (value == hba->clk_gating.is_enabled)
+-		goto out;
++		return count;
+ 
+ 	if (value)
+ 		__ufshcd_release(hba);
+@@ -2082,8 +2084,7 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ 		hba->clk_gating.active_reqs++;
+ 
+ 	hba->clk_gating.is_enabled = value;
+-out:
+-	spin_unlock_irqrestore(hba->host->host_lock, flags);
++
+ 	return count;
+ }
+ 
+@@ -8259,7 +8260,9 @@ static void ufshcd_rtc_work(struct work_struct *work)
+ 	hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
+ 
+ 	 /* Update RTC only when there are no requests in progress and UFSHCI is operational */
+-	if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
++	if (!ufshcd_is_ufs_dev_busy(hba) &&
++	    hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
++	    !hba->clk_gating.active_reqs)
+ 		ufshcd_update_rtc(hba);
+ 
+ 	if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
+@@ -9155,7 +9158,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+ 	int ret = 0;
+ 	struct ufs_clk_info *clki;
+ 	struct list_head *head = &hba->clk_list_head;
+-	unsigned long flags;
+ 	ktime_t start = ktime_get();
+ 	bool clk_state_changed = false;
+ 
+@@ -9205,12 +9207,11 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+ 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+ 				clk_disable_unprepare(clki->clk);
+ 		}
+-	} else if (!ret && on) {
+-		spin_lock_irqsave(hba->host->host_lock, flags);
+-		hba->clk_gating.state = CLKS_ON;
++	} else if (!ret && on && hba->clk_gating.is_initialized) {
++		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
++			hba->clk_gating.state = CLKS_ON;
+ 		trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 					hba->clk_gating.state);
+-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 	}
+ 
+ 	if (clk_state_changed)
+@@ -10487,6 +10488,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	hba->irq = irq;
+ 	hba->vps = &ufs_hba_vps;
+ 
++	/*
++	 * Initialize clk_gating.lock early since it is being used in
++	 * ufshcd_setup_clocks()
++	 */
++	spin_lock_init(&hba->clk_gating.lock);
++
+ 	err = ufshcd_hba_init(hba);
+ 	if (err)
+ 		goto out_error;
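
The ufshcd conversion above leans on the kernel's scope-based cleanup
helpers from <linux/cleanup.h>: guard(spinlock_irqsave)(&lock) acquires
the lock and releases it automatically when the enclosing scope ends,
and scoped_guard() confines that to an explicit block, which is why the
rel_lock/out labels can be dropped. As a rough userspace sketch of the
underlying idea (LOCK_GUARD and guard_release are illustrative names,
not the kernel API), the same effect can be had with the compiler's
cleanup attribute:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

typedef pthread_mutex_t *guard_t;

static void guard_release(guard_t *g)
{
	pthread_mutex_unlock(*g);
}

/* Take the lock; the cleanup handler drops it when the scope ends. */
#define LOCK_GUARD(name, mtx) \
	guard_t name __attribute__((cleanup(guard_release))) = \
		(pthread_mutex_lock(mtx), (mtx))

static void update(int v)
{
	LOCK_GUARD(g, &lock);
	if (v < 0)
		return;		/* early return still unlocks */
	state = v;
}				/* normal exit unlocks here */

int main(void)
{
	update(42);
	update(-1);
	printf("state = %d\n", state);
	return 0;
}

Early returns, as in the reworked __ufshcd_release(), no longer need
explicit unlock calls or goto labels with this pattern.
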
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 6b37d1c47fce13..c2ecfa3c83496f 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
+ static void acm_ctrl_irq(struct urb *urb)
+ {
+ 	struct acm *acm = urb->context;
+-	struct usb_cdc_notification *dr = urb->transfer_buffer;
++	struct usb_cdc_notification *dr;
+ 	unsigned int current_size = urb->actual_length;
+ 	unsigned int expected_size, copy_size, alloc_size;
+ 	int retval;
+@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
+ 
+ 	usb_mark_last_busy(acm->dev);
+ 
+-	if (acm->nb_index)
++	if (acm->nb_index == 0) {
++		/*
++		 * The first chunk of a message must contain at least the
++		 * notification header with the length field, otherwise we
++		 * can't get an expected_size.
++		 */
++		if (current_size < sizeof(struct usb_cdc_notification)) {
++			dev_dbg(&acm->control->dev, "urb too short\n");
++			goto exit;
++		}
++		dr = urb->transfer_buffer;
++	} else {
+ 		dr = (struct usb_cdc_notification *)acm->notification_buffer;
+-
++	}
+ 	/* size = notification-header + (optional) data */
+ 	expected_size = sizeof(struct usb_cdc_notification) +
+ 					le16_to_cpu(dr->wLength);
+ 
+-	if (current_size < expected_size) {
++	if (acm->nb_index != 0 || current_size < expected_size) {
+ 		/* notification is transmitted fragmented, reassemble */
+ 		if (acm->nb_size < expected_size) {
+ 			u8 *new_buffer;
+@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x023c),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x023c),	/* Renesas R-Car H3 USB Download mode */
++	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
++	},
++	{ USB_DEVICE(0x045b, 0x0247),	/* Renesas R-Car D3 USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x0248),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x0248),	/* Renesas R-Car M3-N USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+-	{ USB_DEVICE(0x045b, 0x024D),	/* Renesas USB Download mode */
++	{ USB_DEVICE(0x045b, 0x024D),	/* Renesas R-Car E3 USB Download mode */
+ 	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
+ 	},
+ 	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
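
The reassembly fix above boils down to one rule: never dereference the
wLength field until a full notification header has arrived. A minimal
standalone sketch of that bounds check, with a simplified stand-in for
struct usb_cdc_notification and a little-endian host assumed:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct cdc_notification {
	uint8_t  bmRequestType;
	uint8_t  bNotificationType;
	uint16_t wValue;
	uint16_t wIndex;
	uint16_t wLength;	/* little-endian on the wire */
} __attribute__((packed));

/* Return the expected total size, or 0 if the chunk is too short. */
static size_t expected_size(const uint8_t *buf, size_t len)
{
	struct cdc_notification hdr;

	if (len < sizeof(hdr))
		return 0;	/* can't trust wLength yet */
	memcpy(&hdr, buf, sizeof(hdr));
	return sizeof(hdr) + hdr.wLength;
}

int main(void)
{
	uint8_t short_urb[4] = { 0 };
	uint8_t full_urb[8] = { 0xa1, 0x20, 0, 0, 0, 0, 2, 0 };

	printf("%zu %zu\n",
	       expected_size(short_urb, sizeof(short_urb)),
	       expected_size(full_urb, sizeof(full_urb)));	/* 0 10 */
	return 0;
}
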
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 21ac9b464696f5..906daf423cb02b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1847,6 +1847,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	desc = intf->cur_altsetting;
+ 	hdev = interface_to_usbdev(intf);
+ 
++	/*
++	 * The USB 2.0 spec prohibits hubs from having more than one
++	 * configuration or interface, and we rely on this prohibition.
++	 * Refuse to accept a device that violates it.
++	 */
++	if (hdev->descriptor.bNumConfigurations > 1 ||
++			hdev->actconfig->desc.bNumInterfaces > 1) {
++		dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * Set default autosuspend delay as 0 to speedup bus suspend,
+ 	 * based on the below considerations:
+@@ -4698,7 +4709,6 @@ void usb_ep0_reinit(struct usb_device *udev)
+ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+ 
+ #define usb_sndaddr0pipe()	(PIPE_CONTROL << 30)
+-#define usb_rcvaddr0pipe()	((PIPE_CONTROL << 30) | USB_DIR_IN)
+ 
+ static int hub_set_address(struct usb_device *udev, int devnum)
+ {
+@@ -4804,7 +4814,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
+ 	for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
+ 		/* Start with invalid values in case the transfer fails */
+ 		buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
+-		rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
++		rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 				USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+ 				USB_DT_DEVICE << 8, 0,
+ 				buf, size,
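
The new hub_probe() check enforces the USB 2.0 rule that a hub exposes
exactly one configuration with exactly one interface. A minimal sketch
of the same validation against simplified descriptor structs (the field
names follow the USB spec; the structs are illustrative stand-ins):

#include <stdio.h>

struct device_descriptor { unsigned char bNumConfigurations; };
struct config_descriptor { unsigned char bNumInterfaces; };

static int hub_descriptors_ok(const struct device_descriptor *dev,
			      const struct config_descriptor *cfg)
{
	/* USB 2.0: hubs have exactly one config and one interface. */
	return dev->bNumConfigurations == 1 && cfg->bNumInterfaces == 1;
}

int main(void)
{
	struct device_descriptor dev = { .bNumConfigurations = 2 };
	struct config_descriptor cfg = { .bNumInterfaces = 1 };

	printf("valid hub: %d\n", hub_descriptors_ok(&dev, &cfg));
	return 0;
}
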
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 13171454f9591a..027479179f09e9 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -432,6 +432,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
+ 			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ 
++	/* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
++	{ USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Action Semiconductor flash disk */
+ 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+@@ -522,6 +525,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Blackmagic Design UltraStudio SDI */
+ 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* Teclast disk */
++	{ USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Hauppauge HVR-950q */
+ 	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index e7bf9cc635be6f..bd4c788f03bc14 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4615,6 +4615,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+ 	spin_lock_irqsave(&hsotg->lock, flags);
+ 
+ 	hsotg->driver = NULL;
++	hsotg->gadget.dev.of_node = NULL;
+ 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ 	hsotg->enabled = 0;
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 31a654c6f15be9..329bc164241a48 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2630,10 +2630,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ {
+ 	u32			reg;
+ 	u32			timeout = 2000;
++	u32			saved_config = 0;
+ 
+ 	if (pm_runtime_suspended(dwc->dev))
+ 		return 0;
+ 
++	/*
++	 * When operating in USB 2.0 speeds (HS/FS), ensure that
++	 * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
++	 * or stopping the controller. This resolves timeout issues that occur
++	 * during frequent role switches between host and device modes.
++	 *
++	 * Save and clear these settings, then restore them after completing the
++	 * controller start or stop sequence.
++	 *
++	 * This solution was discovered through experimentation as it is not
++	 * mentioned in the dwc3 programming guide. It has been tested on
++	 * Exynos platforms.
++	 */
++	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++	if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
++		saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
++		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++	}
++
++	if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
++		saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
++		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
++	}
++
++	if (saved_config)
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	if (is_on) {
+ 		if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
+@@ -2661,6 +2689,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ 		reg &= DWC3_DSTS_DEVCTRLHLT;
+ 	} while (--timeout && !(!is_on ^ !reg));
+ 
++	if (saved_config) {
++		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++		reg |= saved_config;
++		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++	}
++
+ 	if (!timeout)
+ 		return -ETIMEDOUT;
+ 
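
The dwc3_gadget_run_stop() change is a save-clear-restore pattern:
record which of the two PHY suspend bits were set, clear them for the
duration of the start/stop sequence, then OR the saved bits back in. A
standalone sketch of the pattern against a fake 32-bit register
(reg_read/reg_write are stand-ins for dwc3_readl/dwc3_writel):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define CFG_SUSPHY	BIT(6)
#define CFG_ENBLSLPM	BIT(8)

static uint32_t fake_reg = CFG_SUSPHY | CFG_ENBLSLPM | 0x3;

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

int main(void)
{
	uint32_t reg, saved = 0;

	/* Save and clear the suspend bits before the sequence. */
	reg = reg_read();
	if (reg & CFG_SUSPHY) {
		saved |= CFG_SUSPHY;
		reg &= ~CFG_SUSPHY;
	}
	if (reg & CFG_ENBLSLPM) {
		saved |= CFG_ENBLSLPM;
		reg &= ~CFG_ENBLSLPM;
	}
	if (saved)
		reg_write(reg);

	printf("during: %#x\n", (unsigned)reg_read());	/* bits cleared */

	/* ... the run/stop handshake would happen here ... */

	/* Restore whatever was set beforehand. */
	if (saved)
		reg_write(reg_read() | saved);

	printf("after:  %#x\n", (unsigned)reg_read());	/* bits restored */
	return 0;
}

Writing the register only when saved is non-zero avoids a needless
MMIO write on configurations where neither bit was set.
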
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 837fcdfa3840ff..47260d65066a89 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -907,6 +907,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	status = -ENODEV;
+ 
++	/*
++	 * Reset wMaxPacketSize to the maximum FS bulk packet size before the
++	 * endpoints are claimed. This ensures that wMaxPacketSize does not
++	 * exceed the limit on bind retries, where the dwc3 TX/RX FIFOs were
++	 * configured with a 512-byte maxpacket size for HS-only IN/OUT endpoints.
++	 */
++	bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
++	bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
++
+ 	/* allocate instance-specific endpoints */
+ 	midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
+ 	if (!midi->in_ep)
+@@ -1000,11 +1009,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
+ 	}
+ 
+ 	/* configure the endpoint descriptors ... */
+-	ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+-	ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
++	ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
++	ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
+ 
+-	ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+-	ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
++	ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
++	ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
+ 
+ 	/* ... and add them to the list */
+ 	endpoint_descriptor_index = i;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index a6f46364be65f0..4b3d5075621aa0 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1543,8 +1543,8 @@ void usb_del_gadget(struct usb_gadget *gadget)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ 	sysfs_remove_link(&udc->dev.kobj, "gadget");
+-	flush_work(&gadget->work);
+ 	device_del(&gadget->dev);
++	flush_work(&gadget->work);
+ 	ida_free(&gadget_id_numbers, gadget->id_number);
+ 	cancel_work_sync(&udc->vbus_work);
+ 	device_unregister(&udc->dev);
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index fce5c41d9f298b..89b304cf6d032f 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -310,7 +310,7 @@ struct renesas_usb3_request {
+ 	struct list_head	queue;
+ };
+ 
+-#define USB3_EP_NAME_SIZE	8
++#define USB3_EP_NAME_SIZE	16
+ struct renesas_usb3_ep {
+ 	struct usb_ep ep;
+ 	struct renesas_usb3 *usb3;
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 1f9c1b1435d862..0404489c2f6a9c 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
+ 	 * booting from USB disk or using a usb keyboard
+ 	 */
+ 	hcc_params = readl(base + EHCI_HCC_PARAMS);
++
++	/* LS7A EHCI controller doesn't have extended capabilities, the
++	 * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
++	 * register should be 0x0 but it reads as 0xa0.  So clear it to
++	 * avoid error messages on boot.
++	 */
++	if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
++		hcc_params &= ~(0xffL << 8);
++
+ 	offset = (hcc_params >> 8) & 0xff;
+ 	while (offset && --count) {
+ 		pci_read_config_dword(pdev, offset, &cap);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2d1e205c14c609..ad0ff356f6fa0f 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -653,8 +653,8 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ }
+ EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci");
+ 
+-static const struct pci_device_id pci_ids_reject[] = {
+-	/* handled by xhci-pci-renesas */
++/* handled by xhci-pci-renesas if enabled */
++static const struct pci_device_id pci_ids_renesas[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ 	{ /* end: all zeroes */ }
+@@ -662,7 +662,8 @@ static const struct pci_device_id pci_ids_reject[] = {
+ 
+ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+-	if (pci_match_id(pci_ids_reject, dev))
++	if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
++			pci_match_id(pci_ids_renesas, dev))
+ 		return -ENODEV;
+ 
+ 	return xhci_pci_common_probe(dev, id);
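
The probe change above makes the hand-off conditional at compile time:
Renesas devices are refused only when the companion driver can actually
bind them. A hedged sketch of the same idiom with a plain ID table
(match_id and CONFIG_COMPANION_DRIVER are simplified stand-ins for
pci_match_id() and the real Kconfig symbol):

#include <stdio.h>

/* Flip to 0 to emulate the companion driver being compiled out. */
#define CONFIG_COMPANION_DRIVER 1

struct dev_id { unsigned short vendor, device; };

static const struct dev_id companion_ids[] = {
	{ 0x1912, 0x0014 },
	{ 0x1912, 0x0015 },
	{ 0, 0 }		/* terminator */
};

static int match_id(const struct dev_id *tbl,
		    unsigned short v, unsigned short d)
{
	for (; tbl->vendor; tbl++)
		if (tbl->vendor == v && tbl->device == d)
			return 1;
	return 0;
}

static int probe(unsigned short vendor, unsigned short device)
{
	/* Defer to the companion driver only when it is built. */
	if (CONFIG_COMPANION_DRIVER &&
	    match_id(companion_ids, vendor, device))
		return -1;	/* like -ENODEV: let the other driver bind */
	return 0;		/* bind here */
}

int main(void)
{
	printf("%d %d\n", probe(0x1912, 0x0014), probe(0x8086, 0x1234));
	return 0;
}
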
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index c58a12c147f451..30482d4cf82678 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -387,8 +387,11 @@ usb_role_switch_register(struct device *parent,
+ 	dev_set_name(&sw->dev, "%s-role-switch",
+ 		     desc->name ? desc->name : dev_name(parent));
+ 
++	sw->registered = true;
++
+ 	ret = device_register(&sw->dev);
+ 	if (ret) {
++		sw->registered = false;
+ 		put_device(&sw->dev);
+ 		return ERR_PTR(ret);
+ 	}
+@@ -399,8 +402,6 @@ usb_role_switch_register(struct device *parent,
+ 			dev_warn(&sw->dev, "failed to add component\n");
+ 	}
+ 
+-	sw->registered = true;
+-
+ 	/* TODO: Symlinks for the host port and the device controller. */
+ 
+ 	return sw;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1e2ae0c6c41c79..58bd54e8c483a2 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
+ /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+ #define LUAT_PRODUCT_AIR720U			0x4e00
+ 
+-/* MeiG Smart Technology products */
+-#define MEIGSMART_VENDOR_ID			0x2dee
+-/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
+-#define MEIGSMART_PRODUCT_SRM825L		0x4d22
+-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+-#define MEIGSMART_PRODUCT_SLM320		0x4d41
+-/* MeiG Smart SLM770A based on ASR1803 */
+-#define MEIGSMART_PRODUCT_SLM770A		0x4d57
+-
+ /* Device flags */
+ 
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -1367,15 +1358,15 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),	/* Telit LN920 (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),	/* Telit FN990 (rmnet) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),	/* Telit FN990A (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),	/* Telit FN990 (MBIM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),	/* Telit FN990A (MBIM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),	/* Telit FN990 (RNDIS) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),	/* Telit FN990A (RNDIS) */
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),	/* Telit FN990 (ECM) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),	/* Telit FN990A (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+-	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990 (PCIe) */
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990A (PCIe) */
+ 	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990 (rmnet) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+@@ -1403,6 +1394,22 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | NCTRL(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff),	/* Telit FE910C04 (rmnet) */
+ 	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) },	/* Telit FN990B (rmnet) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
++	  .driver_info = NCTRL(5) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) },	/* Telit FN990B (MBIM) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) },	/* Telit FN990B (RNDIS) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
++	  .driver_info = NCTRL(6) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) },	/* Telit FN990B (ECM) */
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
++	{ USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
++	  .driver_info = NCTRL(6) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -2347,6 +2354,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },			/* Fibocom FM650-CN (NCM mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },			/* Fibocom FM650-CN (RNDIS mode) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },			/* Fibocom FM650-CN (MBIM mode) */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) },		/* MeiG Smart SLM320 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) },		/* MeiG Smart SLM770A */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) },		/* MeiG Smart SRM815 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) },	/* MeiG Smart SLM828 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) },	/* MeiG Smart SLM828 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) },	/* MeiG Smart SRM815 and SRM825L */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) },	/* MeiG Smart SRM825L */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) },	/* MeiG Smart SRM825L */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
+@@ -2403,12 +2418,6 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff),			/* TCL IK512 MBIM */
+ 	  .driver_info = NCTRL(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff),			/* TCL IK512 ECM */
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index a22c1644d0f792..061c04efffa765 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5541,8 +5541,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
+ 						       port->pps_data.active, 0);
+ 		tcpm_set_charge(port, false);
+-		tcpm_set_state(port, hard_reset_state(port),
+-			       port->timings.ps_src_off_time);
++		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
+ 		break;
+ 	case PR_SWAP_SNK_SRC_SOURCE_ON:
+ 		tcpm_enable_auto_vbus_discharge(port, true);
+diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
+index a467085038f0c5..778bfd0655de08 100644
+--- a/drivers/vfio/pci/nvgrace-gpu/main.c
++++ b/drivers/vfio/pci/nvgrace-gpu/main.c
+@@ -17,12 +17,14 @@
+ #define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX
+ #define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX
+ 
+-/* Memory size expected as non cached and reserved by the VM driver */
+-#define RESMEM_SIZE SZ_1G
+-
+ /* A hardwired and constant ABI value between the GPU FW and VFIO driver. */
+ #define MEMBLK_SIZE SZ_512M
+ 
++#define DVSEC_BITMAP_OFFSET 0xA
++#define MIG_SUPPORTED_WITH_CACHED_RESMEM BIT(0)
++
++#define GPU_CAP_DVSEC_REGISTER 3
++
+ /*
+  * The state of the two device memory region - resmem and usemem - is
+  * saved as struct mem_region.
+@@ -46,6 +48,7 @@ struct nvgrace_gpu_pci_core_device {
+ 	struct mem_region resmem;
+ 	/* Lock to control device memory kernel mapping */
+ 	struct mutex remap_lock;
++	bool has_mig_hw_bug;
+ };
+ 
+ static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
+@@ -66,7 +69,7 @@ nvgrace_gpu_memregion(int index,
+ 	if (index == USEMEM_REGION_INDEX)
+ 		return &nvdev->usemem;
+ 
+-	if (index == RESMEM_REGION_INDEX)
++	if (nvdev->resmem.memlength && index == RESMEM_REGION_INDEX)
+ 		return &nvdev->resmem;
+ 
+ 	return NULL;
+@@ -751,40 +754,67 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
+ 			      u64 memphys, u64 memlength)
+ {
+ 	int ret = 0;
++	u64 resmem_size = 0;
+ 
+ 	/*
+-	 * The VM GPU device driver needs a non-cacheable region to support
+-	 * the MIG feature. Since the device memory is mapped as NORMAL cached,
+-	 * carve out a region from the end with a different NORMAL_NC
+-	 * property (called as reserved memory and represented as resmem). This
+-	 * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while
+-	 * exposing the rest (termed as usable memory and represented using usemem)
+-	 * as cacheable 64b BAR (region 4 and 5).
++	 * On Grace Hopper systems, the VM GPU device driver needs a non-cacheable
++	 * region to support the MIG feature owing to a hardware bug. Since the
++	 * device memory is mapped as NORMAL cached, carve out a region from the end
++	 * with a different NORMAL_NC property (called as reserved memory and
++	 * represented as resmem). This region then is exposed as a 64b BAR
++	 * (region 2 and 3) to the VM, while exposing the rest (termed as usable
++	 * memory and represented using usemem) as cacheable 64b BAR (region 4 and 5).
+ 	 *
+ 	 *               devmem (memlength)
+ 	 * |-------------------------------------------------|
+ 	 * |                                           |
+ 	 * usemem.memphys                              resmem.memphys
++	 *
++	 * This hardware bug is fixed on the Grace Blackwell platforms and the
++	 * presence of the bug can be determined through nvdev->has_mig_hw_bug.
++	 * Thus on systems with the hardware fix, there is no need to partition
++	 * the GPU device memory and the entire memory is usable and mapped as
++	 * NORMAL cached (i.e. resmem size is 0).
+ 	 */
++	if (nvdev->has_mig_hw_bug)
++		resmem_size = SZ_1G;
++
+ 	nvdev->usemem.memphys = memphys;
+ 
+ 	/*
+ 	 * The device memory exposed to the VM is added to the kernel by the
+-	 * VM driver module in chunks of memory block size. Only the usable
+-	 * memory (usemem) is added to the kernel for usage by the VM
+-	 * workloads. Make the usable memory size memblock aligned.
++	 * VM driver module in chunks of memory block size. Note that only the
++	 * usable memory (usemem) is added to the kernel for usage by the VM
++	 * workloads.
+ 	 */
+-	if (check_sub_overflow(memlength, RESMEM_SIZE,
++	if (check_sub_overflow(memlength, resmem_size,
+ 			       &nvdev->usemem.memlength)) {
+ 		ret = -EOVERFLOW;
+ 		goto done;
+ 	}
+ 
+ 	/*
+-	 * The USEMEM part of the device memory has to be MEMBLK_SIZE
+-	 * aligned. This is a hardwired ABI value between the GPU FW and
+-	 * VFIO driver. The VM device driver is also aware of it and make
+-	 * use of the value for its calculation to determine USEMEM size.
++	 * The usemem region is exposed as a 64b BAR composed of regions 4 and 5.
++	 * Calculate and save the BAR size for the region.
++	 */
++	nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
++
++	/*
++	 * If the hardware has the fix for MIG, there is no requirement
++	 * for splitting the device memory to create RESMEM. The entire
++	 * device memory is usable and will be USEMEM. Return here for
++	 * such case.
++	 */
++	if (!nvdev->has_mig_hw_bug)
++		goto done;
++
++	/*
++	 * When the device memory is split to workaround the MIG bug on
++	 * Grace Hopper, the USEMEM part of the device memory has to be
++	 * MEMBLK_SIZE aligned. This is a hardwired ABI value between the
++	 * GPU FW and VFIO driver. The VM device driver is also aware of it
++	 * and makes use of the value for its calculation to determine USEMEM
++	 * size. Note that the device memory may not be 512M aligned.
+ 	 */
+ 	nvdev->usemem.memlength = round_down(nvdev->usemem.memlength,
+ 					     MEMBLK_SIZE);
+@@ -803,15 +833,34 @@ nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev,
+ 	}
+ 
+ 	/*
+-	 * The memory regions are exposed as BARs. Calculate and save
+-	 * the BAR size for them.
++	 * The resmem region is exposed as a 64b BAR composed of regions 2 and 3
++	 * for Grace Hopper. Calculate and save the BAR size for the region.
+ 	 */
+-	nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength);
+ 	nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength);
+ done:
+ 	return ret;
+ }
+ 
++static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
++{
++	int pcie_dvsec;
++	u16 dvsec_ctrl16;
++
++	pcie_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_NVIDIA,
++					       GPU_CAP_DVSEC_REGISTER);
++
++	if (pcie_dvsec) {
++		pci_read_config_word(pdev,
++				     pcie_dvsec + DVSEC_BITMAP_OFFSET,
++				     &dvsec_ctrl16);
++
++		if (dvsec_ctrl16 & MIG_SUPPORTED_WITH_CACHED_RESMEM)
++			return false;
++	}
++
++	return true;
++}
++
+ static int nvgrace_gpu_probe(struct pci_dev *pdev,
+ 			     const struct pci_device_id *id)
+ {
+@@ -832,6 +881,8 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
+ 	dev_set_drvdata(&pdev->dev, &nvdev->core_device);
+ 
+ 	if (ops == &nvgrace_gpu_pci_ops) {
++		nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);
++
+ 		/*
+ 		 * Device memory properties are identified in the host ACPI
+ 		 * table. Set the nvgrace_gpu_pci_core_device structure.
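
The split logic above reduces to a few lines of arithmetic once the bug
flag is known: subtract the carve-out with overflow checking, round
usemem down to the 512M memory-block size, and size each BAR as the
next power of two. A hedged standalone sketch, with the compiler's
__builtin_sub_overflow() standing in for check_sub_overflow() and a
hypothetical 96 GiB device memory:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define SZ_512M	(512ull << 20)
#define SZ_1G	(1ull << 30)

static uint64_t round_down_to(uint64_t v, uint64_t align)
{
	return v & ~(align - 1);	/* align must be a power of two */
}

static uint64_t roundup_pow_of_two(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t memlength = 96ull << 30;	/* hypothetical size */
	bool has_mig_hw_bug = true;
	uint64_t resmem_size = has_mig_hw_bug ? SZ_1G : 0;
	uint64_t usemem;

	if (__builtin_sub_overflow(memlength, resmem_size, &usemem))
		return 1;	/* driver returns -EOVERFLOW */

	/* usemem must be aligned to the 512M memory block ABI size. */
	usemem = round_down_to(usemem, SZ_512M);
	resmem_size = memlength - usemem;

	printf("usemem %llu MiB (BAR %llu MiB), resmem %llu MiB\n",
	       (unsigned long long)(usemem >> 20),
	       (unsigned long long)(roundup_pow_of_two(usemem) >> 20),
	       (unsigned long long)(resmem_size >> 20));
	return 0;
}
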
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index 66b72c2892841d..a0595c745732a3 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -16,6 +16,7 @@
+ #include <linux/io.h>
+ #include <linux/vfio.h>
+ #include <linux/vgaarb.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
+ 
+ #include "vfio_pci_priv.h"
+ 
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index d63c2d266d0735..3bf1043cd7957c 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -393,11 +393,6 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
+-	if (off >= reg->size)
+-		return -EINVAL;
+-
+-	count = min_t(size_t, count, reg->size - off);
+-
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+@@ -482,11 +477,6 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
+ 
+ 	count = min_t(size_t, count, reg->size - off);
+ 
+-	if (off >= reg->size)
+-		return -EINVAL;
+-
+-	count = min_t(size_t, count, reg->size - off);
+-
+ 	if (!reg->ioaddr) {
+ 		reg->ioaddr =
+ 			ioremap(reg->addr, reg->size);
+diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
+index f85817635a8c2c..0da23c57e4757e 100644
+--- a/drivers/video/fbdev/omap/lcd_dma.c
++++ b/drivers/video/fbdev/omap/lcd_dma.c
+@@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void)
+ 
+ 	spin_lock_init(&lcd_dma.lock);
+ 
+-	r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
+-			"LCD DMA", NULL);
++	r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler,
++				 IRQF_ONESHOT, "LCD DMA", NULL);
+ 	if (r != 0)
+ 		pr_err("unable to request IRQ for LCD DMA (error %d)\n", r);
+ 
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index a337edcf8faf71..26c62e0d34e98b 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
+ 	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
+ }
+ 
++static inline bool range_requires_alignment(phys_addr_t p, size_t size)
++{
++	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
++	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
++
++	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
++}
++
+ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
+ {
+ 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ 	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
+-	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ 
+ 	next_bfn = pfn_to_bfn(xen_pfn);
+ 
+-	/* If buffer is physically aligned, ensure DMA alignment. */
+-	if (IS_ALIGNED(p, algn) &&
+-	    !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
+-		return 1;
+-
+ 	for (i = 1; i < nr_pages; i++)
+ 		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
+ 			return 1;
+@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ 
+ 	*dma_handle = xen_phys_to_dma(dev, phys);
+ 	if (*dma_handle + size - 1 > dma_mask ||
+-	    range_straddles_page_boundary(phys, size)) {
++	    range_straddles_page_boundary(phys, size) ||
++	    range_requires_alignment(phys, size)) {
+ 		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+ 				dma_handle) != 0)
+ 			goto out_free_pages;
+@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+ 	size = ALIGN(size, XEN_PAGE_SIZE);
+ 
+ 	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+-	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
++	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
++			 range_requires_alignment(phys, size)))
+ 	    	return;
+ 
+ 	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
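
The new range_requires_alignment() helper answers a single question: is
the buffer physically aligned to its allocation order while its bus
address is not? If so, the region must be remade contiguous before DMA.
A hedged sketch of the check, with a toy pfn_to_bfn() in place of the
real Xen P2M lookup and the page-size distinction (PAGE_SHIFT vs
XEN_PAGE_SHIFT) collapsed for brevity:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12

static int get_order(size_t size)
{
	int order = 0;

	while ((1ull << (order + PAGE_SHIFT)) < size)
		order++;
	return order;
}

/* Toy physical-to-bus frame mapping; deliberately shifts frames. */
static uint64_t pfn_to_bfn(uint64_t pfn)
{
	return pfn + 3;
}

static bool range_requires_alignment(uint64_t phys, size_t size)
{
	uint64_t algn = 1ull << (get_order(size) + PAGE_SHIFT);
	uint64_t bus = pfn_to_bfn(phys >> PAGE_SHIFT) << PAGE_SHIFT;

	/* Physically aligned but bus-misaligned: needs fixing up. */
	return !(phys & (algn - 1)) && (bus & (algn - 1));
}

int main(void)
{
	printf("%d\n", range_requires_alignment(0x40000, 0x4000));
	return 0;
}
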
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index b923d0cec61c73..d14ecbe24d7754 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -901,12 +901,11 @@ void clear_folio_extent_mapped(struct folio *folio)
+ 	folio_detach_private(folio);
+ }
+ 
+-static struct extent_map *__get_extent_map(struct inode *inode,
+-					   struct folio *folio, u64 start,
+-					   u64 len, struct extent_map **em_cached)
++static struct extent_map *get_extent_map(struct btrfs_inode *inode,
++					 struct folio *folio, u64 start,
++					 u64 len, struct extent_map **em_cached)
+ {
+ 	struct extent_map *em;
+-	struct extent_state *cached_state = NULL;
+ 
+ 	ASSERT(em_cached);
+ 
+@@ -922,14 +921,12 @@ static struct extent_map *__get_extent_map(struct inode *inode,
+ 		*em_cached = NULL;
+ 	}
+ 
+-	btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state);
+-	em = btrfs_get_extent(BTRFS_I(inode), folio, start, len);
++	em = btrfs_get_extent(inode, folio, start, len);
+ 	if (!IS_ERR(em)) {
+ 		BUG_ON(*em_cached);
+ 		refcount_inc(&em->refs);
+ 		*em_cached = em;
+ 	}
+-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state);
+ 
+ 	return em;
+ }
+@@ -985,8 +982,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 			end_folio_read(folio, true, cur, iosize);
+ 			break;
+ 		}
+-		em = __get_extent_map(inode, folio, cur, end - cur + 1,
+-				      em_cached);
++		em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
+ 		if (IS_ERR(em)) {
+ 			end_folio_read(folio, false, cur, end + 1 - cur);
+ 			return PTR_ERR(em);
+@@ -1087,11 +1083,18 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
+ 
+ int btrfs_read_folio(struct file *file, struct folio *folio)
+ {
++	struct btrfs_inode *inode = folio_to_inode(folio);
++	const u64 start = folio_pos(folio);
++	const u64 end = start + folio_size(folio) - 1;
++	struct extent_state *cached_state = NULL;
+ 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+ 	struct extent_map *em_cached = NULL;
+ 	int ret;
+ 
++	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
+ 	ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
++	unlock_extent(&inode->io_tree, start, end, &cached_state);
++
+ 	free_extent_map(em_cached);
+ 
+ 	/*
+@@ -2332,12 +2335,20 @@ void btrfs_readahead(struct readahead_control *rac)
+ {
+ 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
+ 	struct folio *folio;
++	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
++	const u64 start = readahead_pos(rac);
++	const u64 end = start + readahead_length(rac) - 1;
++	struct extent_state *cached_state = NULL;
+ 	struct extent_map *em_cached = NULL;
+ 	u64 prev_em_start = (u64)-1;
+ 
++	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
++
+ 	while ((folio = readahead_folio(rac)) != NULL)
+ 		btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
+ 
++	unlock_extent(&inode->io_tree, start, end, &cached_state);
++
+ 	if (em_cached)
+ 		free_extent_map(em_cached);
+ 	submit_one_bio(&bio_ctrl);
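
The btrfs change above is lock hoisting: instead of taking and dropping
the ordered-range lock inside get_extent_map() for every extent map,
btrfs_read_folio() and btrfs_readahead() now take it once around the
whole operation. A generic sketch of the pattern with a pthread mutex
(process_one/process_batch are illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_done;

/* Caller must hold range_lock; no per-item lock traffic here. */
static void process_one(int i)
{
	work_done += i;
}

static void process_batch(int n)
{
	pthread_mutex_lock(&range_lock);	/* once for the range */
	for (int i = 0; i < n; i++)
		process_one(i);
	pthread_mutex_unlock(&range_lock);
}

int main(void)
{
	process_batch(8);
	printf("work_done = %d\n", work_done);
	return 0;
}

Besides saving lock round-trips, holding the lock across the loop keeps
the whole range stable for the duration of the readahead pass.
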
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 4d7c7a296d2d1f..6542ee00bf3979 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1076,7 +1076,6 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
+ 	loff_t pos = iocb->ki_pos;
+ 	int ret;
+ 	loff_t oldsize;
+-	loff_t start_pos;
+ 
+ 	/*
+ 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
+@@ -1103,9 +1102,8 @@ int btrfs_write_check(struct kiocb *iocb, size_t count)
+ 		inode_inc_iversion(inode);
+ 	}
+ 
+-	start_pos = round_down(pos, fs_info->sectorsize);
+ 	oldsize = i_size_read(inode);
+-	if (start_pos > oldsize) {
++	if (pos > oldsize) {
+ 		/* Expand hole size to cover write data, preventing empty gap */
+ 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
+ 
+diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
+index bf378ecd5d9fdd..7b59a40d40c061 100644
+--- a/fs/nfs/sysfs.c
++++ b/fs/nfs/sysfs.c
+@@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server,
+ 	char name[RPC_CLIENT_NAME_SIZE];
+ 	int ret;
+ 
+-	strcpy(name, clnt->cl_program->name);
+-	strcat(name, uniq ? uniq : "");
+-	strcat(name, "_client");
++	strscpy(name, clnt->cl_program->name, sizeof(name));
++	strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1);
++	strncat(name, "_client", sizeof(name) - strlen(name) - 1);
+ 
+ 	ret = sysfs_create_link_nowarn(&server->kobj,
+ 						&clnt->cl_sysfs->kobject, name);
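
The sysfs fix swaps unbounded strcpy()/strcat() for strscpy() and
length-limited strncat(), so the composed link name can never overflow
the fixed buffer. A userspace sketch of the same bounded composition
(strscpy() is kernel-only, so a truncating snprintf() stands in; the
16-byte NAME_SIZE is an illustrative stand-in for RPC_CLIENT_NAME_SIZE):

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 16

static void build_name(char *name, size_t size,
		       const char *prog, const char *uniq)
{
	/* Truncating copy, then bounded appends; never overflows. */
	snprintf(name, size, "%s", prog);
	strncat(name, uniq ? uniq : "", size - strlen(name) - 1);
	strncat(name, "_client", size - strlen(name) - 1);
}

int main(void)
{
	char name[NAME_SIZE];

	build_name(name, sizeof(name), "nfs", "4");
	printf("%s\n", name);	/* nfs4_client */

	build_name(name, sizeof(name), "averyverylongprogram", NULL);
	printf("%s\n", name);	/* truncated but NUL-terminated */
	return 0;
}
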
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index a1cdba42c4fad4..78f4b5573b909f 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -445,11 +445,20 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
+ 						struct nfsd_file, nf_gc);
+ 		struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id);
+ 		struct nfsd_fcache_disposal *l = nn->fcache_disposal;
++		struct svc_serv *serv;
+ 
+ 		spin_lock(&l->lock);
+ 		list_move_tail(&nf->nf_gc, &l->freeme);
+ 		spin_unlock(&l->lock);
+-		svc_wake_up(nn->nfsd_serv);
++
++		/*
++		 * The filecache laundrette is shut down after the
++		 * nn->nfsd_serv pointer is cleared, but before the
++		 * svc_serv is freed.
++		 */
++		serv = nn->nfsd_serv;
++		if (serv)
++			svc_wake_up(serv);
+ 	}
+ }
+ 
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index 4e3be7201b1c43..5fb202acb0fd00 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -84,6 +84,8 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst *rqstp)
+ fail:
+ 	posix_acl_release(resp->acl_access);
+ 	posix_acl_release(resp->acl_default);
++	resp->acl_access = NULL;
++	resp->acl_default = NULL;
+ 	goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 5e34e98db969db..7b5433bd301974 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -76,6 +76,8 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst *rqstp)
+ fail:
+ 	posix_acl_release(resp->acl_access);
+ 	posix_acl_release(resp->acl_default);
++	resp->acl_access = NULL;
++	resp->acl_default = NULL;
+ 	goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index c083e539e898ba..b7b70ab962f880 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1547,8 +1547,11 @@ nfsd4_run_cb_work(struct work_struct *work)
+ 		nfsd4_process_cb_update(cb);
+ 
+ 	clnt = clp->cl_cb_client;
+-	if (!clnt) {
+-		/* Callback channel broken, or client killed; give up: */
++	if (!clnt || clp->cl_state == NFSD4_COURTESY) {
++		/*
++		 * Callback channel broken, client killed or
++		 * nfs4_client in courtesy state; give up.
++		 */
+ 		nfsd41_destroy_cb(cb);
+ 		return;
+ 	}
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 8d789b017fa9b6..af94e3737470d8 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -787,7 +787,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 		if (err)
+ 			goto out;
+ 
+-		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
++		attr = mi_find_attr(ni, mi, NULL, type, name, name_len,
++				    &le->id);
+ 		if (!attr) {
+ 			err = -EINVAL;
+ 			goto bad_inode;
+@@ -1181,7 +1182,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ 			goto out;
+ 		}
+ 
+-		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
++		attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id);
+ 		if (!attr) {
+ 			err = -EINVAL;
+ 			goto out;
+@@ -1406,7 +1407,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	 */
+ 	if (!attr->non_res) {
+ 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
+-			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
++			_ntfs_bad_inode(&ni->vfs_inode);
+ 			return -EINVAL;
+ 		}
+ 		addr = resident_data(attr);
+@@ -1796,7 +1797,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ 				goto out;
+ 			}
+ 
+-			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
++			attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0,
+ 					    &le->id);
+ 			if (!attr) {
+ 				err = -EINVAL;
+@@ -2041,8 +2042,8 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+ 				}
+ 
+ 				/* Look for required attribute. */
+-				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
+-						    0, &le->id);
++				attr = mi_find_attr(ni, mi, NULL, ATTR_DATA,
++						    NULL, 0, &le->id);
+ 				if (!attr) {
+ 					err = -EINVAL;
+ 					goto out;
+@@ -2587,7 +2588,7 @@ int attr_force_nonresident(struct ntfs_inode *ni)
+ 
+ 	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
+ 	if (!attr) {
+-		ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		return -ENOENT;
+ 	}
+ 
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index fc6a8aa29e3afe..b6da80c69ca634 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -512,7 +512,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
+ 		ctx->pos = pos;
+ 	} else if (err < 0) {
+ 		if (err == -EINVAL)
+-			ntfs_inode_err(dir, "directory corrupted");
++			_ntfs_bad_inode(dir);
+ 		ctx->pos = eod;
+ 	}
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 8b39d0ce5f2890..f66186dbeda9db 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -75,7 +75,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
+ {
+ 	const struct ATTRIB *attr;
+ 
+-	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
++	attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ 	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
+ 		      NULL;
+ }
+@@ -89,7 +89,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
+ {
+ 	const struct ATTRIB *attr;
+ 
+-	attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
++	attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
+ 
+ 	return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
+ 		      NULL;
+@@ -148,8 +148,10 @@ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
+ 		goto out;
+ 
+ 	err = mi_get(ni->mi.sbi, rno, &r);
+-	if (err)
++	if (err) {
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		return err;
++	}
+ 
+ 	ni_add_mi(ni, r);
+ 
+@@ -201,7 +203,8 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 			*mi = &ni->mi;
+ 
+ 		/* Look for required attribute in primary record. */
+-		return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
++		return mi_find_attr(ni, &ni->mi, attr, type, name, name_len,
++				    NULL);
+ 	}
+ 
+ 	/* First look for list entry of required type. */
+@@ -217,7 +220,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 		return NULL;
+ 
+ 	/* Look for required attribute. */
+-	attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
++	attr = mi_find_attr(ni, m, NULL, type, name, name_len, &le->id);
+ 
+ 	if (!attr)
+ 		goto out;
+@@ -238,8 +241,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 	return attr;
+ 
+ out:
+-	ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
+-	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++	_ntfs_bad_inode(&ni->vfs_inode);
+ 	return NULL;
+ }
+ 
+@@ -259,7 +261,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 		if (mi)
+ 			*mi = &ni->mi;
+ 		/* Enum attributes in primary record. */
+-		return mi_enum_attr(&ni->mi, attr);
++		return mi_enum_attr(ni, &ni->mi, attr);
+ 	}
+ 
+ 	/* Get next list entry. */
+@@ -275,7 +277,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
+ 		*mi = mi2;
+ 
+ 	/* Find attribute in loaded record. */
+-	return rec_find_attr_le(mi2, le2);
++	return rec_find_attr_le(ni, mi2, le2);
+ }
+ 
+ /*
+@@ -293,7 +295,8 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	if (!ni->attr_list.size) {
+ 		if (pmi)
+ 			*pmi = &ni->mi;
+-		return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
++		return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
++				    NULL);
+ 	}
+ 
+ 	le = al_find_ex(ni, NULL, type, name, name_len, NULL);
+@@ -319,7 +322,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	if (pmi)
+ 		*pmi = mi;
+ 
+-	attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
++	attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id);
+ 	if (!attr)
+ 		return NULL;
+ 
+@@ -330,6 +333,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	    vcn <= le64_to_cpu(attr->nres.evcn))
+ 		return attr;
+ 
++	_ntfs_bad_inode(&ni->vfs_inode);
+ 	return NULL;
+ }
+ 
+@@ -398,7 +402,8 @@ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	int diff;
+ 
+ 	if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
+-		attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
++		attr = mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
++				    id);
+ 		if (!attr)
+ 			return -ENOENT;
+ 
+@@ -437,7 +442,7 @@ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 
+ 		al_remove_le(ni, le);
+ 
+-		attr = mi_find_attr(mi, NULL, type, name, name_len, id);
++		attr = mi_find_attr(ni, mi, NULL, type, name, name_len, id);
+ 		if (!attr)
+ 			return -ENOENT;
+ 
+@@ -485,7 +490,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ 		name = le->name;
+ 	}
+ 
+-	attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
++	attr = mi_insert_attr(ni, mi, type, name, name_len, asize, name_off);
+ 	if (!attr) {
+ 		if (le_added)
+ 			al_remove_le(ni, le);
+@@ -673,7 +678,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 	if (err)
+ 		return err;
+ 
+-	attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
++	attr_list = mi_find_attr(ni, &ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
+ 	if (!attr_list)
+ 		return 0;
+ 
+@@ -695,7 +700,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 		if (!mi)
+ 			return 0;
+ 
+-		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
++		attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
+ 				    le->name_len, &le->id);
+ 		if (!attr)
+ 			return 0;
+@@ -731,7 +736,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 			goto out;
+ 		}
+ 
+-		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
++		attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
+ 				    le->name_len, &le->id);
+ 		if (!attr) {
+ 			/* Should never happen, 'cause already checked. */
+@@ -740,7 +745,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 		asize = le32_to_cpu(attr->size);
+ 
+ 		/* Insert into primary record. */
+-		attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
++		attr_ins = mi_insert_attr(ni, &ni->mi, le->type, le_name(le),
+ 					  le->name_len, asize,
+ 					  le16_to_cpu(attr->name_off));
+ 		if (!attr_ins) {
+@@ -768,7 +773,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ 		if (!mi)
+ 			continue;
+ 
+-		attr = mi_find_attr(mi, NULL, le->type, le_name(le),
++		attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le),
+ 				    le->name_len, &le->id);
+ 		if (!attr)
+ 			continue;
+@@ -831,7 +836,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 	free_b = 0;
+ 	attr = NULL;
+ 
+-	for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
++	for (; (attr = mi_enum_attr(ni, &ni->mi, attr)); le = Add2Ptr(le, sz)) {
+ 		sz = le_size(attr->name_len);
+ 		le->type = attr->type;
+ 		le->size = cpu_to_le16(sz);
+@@ -886,7 +891,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 		u32 asize = le32_to_cpu(b->size);
+ 		u16 name_off = le16_to_cpu(b->name_off);
+ 
+-		attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
++		attr = mi_insert_attr(ni, mi, b->type, Add2Ptr(b, name_off),
+ 				      b->name_len, asize, name_off);
+ 		if (!attr)
+ 			goto out;
+@@ -909,7 +914,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ 			goto out;
+ 	}
+ 
+-	attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
++	attr = mi_insert_attr(ni, &ni->mi, ATTR_LIST, NULL, 0,
+ 			      lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
+ 	if (!attr)
+ 		goto out;
+@@ -993,13 +998,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
+ 		mi = rb_entry(node, struct mft_inode, node);
+ 
+ 		if (is_mft_data &&
+-		    (mi_enum_attr(mi, NULL) ||
++		    (mi_enum_attr(ni, mi, NULL) ||
+ 		     vbo <= ((u64)mi->rno << sbi->record_bits))) {
+ 			/* We can't accept this record 'cause MFT's bootstrapping. */
+ 			continue;
+ 		}
+ 		if (is_mft &&
+-		    mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
++		    mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
+ 			/*
+ 			 * This child record already has a ATTR_DATA.
+ 			 * So it can't accept any other records.
+@@ -1008,7 +1013,7 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
+ 		}
+ 
+ 		if ((type != ATTR_NAME || name_len) &&
+-		    mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
++		    mi_find_attr(ni, mi, NULL, type, name, name_len, NULL)) {
+ 			/* Only indexed attributes can share same record. */
+ 			continue;
+ 		}
+@@ -1157,7 +1162,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	/* Estimate the result of moving all possible attributes away. */
+ 	attr = NULL;
+ 
+-	while ((attr = mi_enum_attr(&ni->mi, attr))) {
++	while ((attr = mi_enum_attr(ni, &ni->mi, attr))) {
+ 		if (attr->type == ATTR_STD)
+ 			continue;
+ 		if (attr->type == ATTR_LIST)
+@@ -1175,7 +1180,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
+ 	attr = NULL;
+ 
+ 	for (;;) {
+-		attr = mi_enum_attr(&ni->mi, attr);
++		attr = mi_enum_attr(ni, &ni->mi, attr);
+ 		if (!attr) {
+ 			/* We should never be here 'cause we have already checked this case. */
+ 			err = -EINVAL;
+@@ -1259,7 +1264,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
+ 	for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
+ 		mi = rb_entry(node, struct mft_inode, node);
+ 
+-		attr = mi_enum_attr(mi, NULL);
++		attr = mi_enum_attr(ni, mi, NULL);
+ 
+ 		if (!attr) {
+ 			mft_min = mi->rno;
+@@ -1280,7 +1285,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
+ 		ni_remove_mi(ni, mi_new);
+ 	}
+ 
+-	attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
++	attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
+ 	if (!attr) {
+ 		err = -EINVAL;
+ 		goto out;
+@@ -1397,7 +1402,7 @@ int ni_expand_list(struct ntfs_inode *ni)
+ 			continue;
+ 
+ 		/* Find attribute in primary record. */
+-		attr = rec_find_attr_le(&ni->mi, le);
++		attr = rec_find_attr_le(ni, &ni->mi, le);
+ 		if (!attr) {
+ 			err = -EINVAL;
+ 			goto out;
+@@ -1604,8 +1609,8 @@ int ni_delete_all(struct ntfs_inode *ni)
+ 		roff = le16_to_cpu(attr->nres.run_off);
+ 
+ 		if (roff > asize) {
+-			_ntfs_bad_inode(&ni->vfs_inode);
+-			return -EINVAL;
++			/* ni_enum_attr_ex checks this case. */
++			continue;
+ 		}
+ 
+ 		/* run==1 means unpack and deallocate. */
+@@ -3343,7 +3348,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
+ 		if (!mi->dirty)
+ 			continue;
+ 
+-		is_empty = !mi_enum_attr(mi, NULL);
++		is_empty = !mi_enum_attr(ni, mi, NULL);
+ 
+ 		if (is_empty)
+ 			clear_rec_inuse(mi->mrec);
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 03471bc9371cd1..938d351ebac721 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -908,7 +908,11 @@ void ntfs_bad_inode(struct inode *inode, const char *hint)
+ 
+ 	ntfs_inode_err(inode, "%s", hint);
+ 	make_bad_inode(inode);
+-	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++	/* Avoid recursion if bad inode is $Volume. */
++	if (inode->i_ino != MFT_REC_VOL &&
++	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
++		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++	}
+ }
+ 
+ /*
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 9089c58a005ce1..7eb9fae22f8da6 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1094,8 +1094,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 
+ ok:
+ 	if (!index_buf_check(ib, bytes, &vbn)) {
+-		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+-		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1117,8 +1116,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
+ 
+ out:
+ 	if (err == -E_NTFS_CORRUPT) {
+-		ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+-		ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++		_ntfs_bad_inode(&ni->vfs_inode);
+ 		err = -EINVAL;
+ 	}
+ 
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index be04d2845bb7bc..a1e11228dafd02 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -410,6 +410,9 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ 	if (!std5)
+ 		goto out;
+ 
++	if (is_bad_inode(inode))
++		goto out;
++
+ 	if (!is_match && name) {
+ 		err = -ENOENT;
+ 		goto out;
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index cd8e8374bb5a0a..382820464dee73 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -745,23 +745,24 @@ int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
+ void mi_put(struct mft_inode *mi);
+ int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
+ int mi_read(struct mft_inode *mi, bool is_mft);
+-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
+-// TODO: id?
+-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
+-			    enum ATTR_TYPE type, const __le16 *name,
+-			    u8 name_len, const __le16 *id);
+-static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
++struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			    struct ATTRIB *attr);
++struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			    struct ATTRIB *attr, enum ATTR_TYPE type,
++			    const __le16 *name, u8 name_len, const __le16 *id);
++static inline struct ATTRIB *rec_find_attr_le(struct ntfs_inode *ni,
++					      struct mft_inode *rec,
+ 					      struct ATTR_LIST_ENTRY *le)
+ {
+-	return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
++	return mi_find_attr(ni, rec, NULL, le->type, le_name(le), le->name_len,
+ 			    &le->id);
+ }
+ int mi_write(struct mft_inode *mi, int wait);
+ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
+ 		  __le16 flags, bool is_mft);
+-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
+-			      const __le16 *name, u8 name_len, u32 asize,
+-			      u16 name_off);
++struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			      enum ATTR_TYPE type, const __le16 *name,
++			      u8 name_len, u32 asize, u16 name_off);
+ 
+ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ 		    struct ATTRIB *attr);
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 61d53d39f3b9f7..714c7ecedca830 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -31,7 +31,7 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
+  *
+  * Return: Unused attribute id that is less than mrec->next_attr_id.
+  */
+-static __le16 mi_new_attt_id(struct mft_inode *mi)
++static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi)
+ {
+ 	u16 free_id, max_id, t16;
+ 	struct MFT_REC *rec = mi->mrec;
+@@ -52,7 +52,7 @@ static __le16 mi_new_attt_id(struct mft_inode *mi)
+ 	attr = NULL;
+ 
+ 	for (;;) {
+-		attr = mi_enum_attr(mi, attr);
++		attr = mi_enum_attr(ni, mi, attr);
+ 		if (!attr) {
+ 			rec->next_attr_id = cpu_to_le16(max_id + 1);
+ 			mi->dirty = true;
+@@ -195,7 +195,8 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+  * NOTE: mi->mrec - memory of size sbi->record_size
+  * here we sure that mi->mrec->total == sbi->record_size (see mi_read)
+  */
+-struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
++struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			    struct ATTRIB *attr)
+ {
+ 	const struct MFT_REC *rec = mi->mrec;
+ 	u32 used = le32_to_cpu(rec->used);
+@@ -209,11 +210,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 		off = le16_to_cpu(rec->attr_off);
+ 
+ 		if (used > total)
+-			return NULL;
++			goto out;
+ 
+ 		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
+ 		    !IS_ALIGNED(off, 8)) {
+-			return NULL;
++			goto out;
+ 		}
+ 
+ 		/* Skip non-resident records. */
+@@ -243,7 +244,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 	 */
+ 	if (off + 8 > used) {
+ 		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
+-		return NULL;
++		goto out;
+ 	}
+ 
+ 	if (attr->type == ATTR_END) {
+@@ -254,112 +255,116 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ 	/* 0x100 is last known attribute for now. */
+ 	t32 = le32_to_cpu(attr->type);
+ 	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
+-		return NULL;
++		goto out;
+ 
+ 	/* attributes in record must be ordered by type */
+ 	if (t32 < prev_type)
+-		return NULL;
++		goto out;
+ 
+ 	asize = le32_to_cpu(attr->size);
+ 
+ 	if (!IS_ALIGNED(asize, 8))
+-		return NULL;
++		goto out;
+ 
+ 	/* Check overflow and boundary. */
+ 	if (off + asize < off || off + asize > used)
+-		return NULL;
++		goto out;
+ 
+ 	/* Can we use the field attr->non_res. */
+ 	if (off + 9 > used)
+-		return NULL;
++		goto out;
+ 
+ 	/* Check size of attribute. */
+ 	if (!attr->non_res) {
+ 		/* Check resident fields. */
+ 		if (asize < SIZEOF_RESIDENT)
+-			return NULL;
++			goto out;
+ 
+ 		t16 = le16_to_cpu(attr->res.data_off);
+ 		if (t16 > asize)
+-			return NULL;
++			goto out;
+ 
+ 		if (le32_to_cpu(attr->res.data_size) > asize - t16)
+-			return NULL;
++			goto out;
+ 
+ 		t32 = sizeof(short) * attr->name_len;
+ 		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+-			return NULL;
++			goto out;
+ 
+ 		return attr;
+ 	}
+ 
+ 	/* Check nonresident fields. */
+ 	if (attr->non_res != 1)
+-		return NULL;
++		goto out;
+ 
+ 	/* Can we use memory including attr->nres.valid_size? */
+ 	if (asize < SIZEOF_NONRESIDENT)
+-		return NULL;
++		goto out;
+ 
+ 	t16 = le16_to_cpu(attr->nres.run_off);
+ 	if (t16 > asize)
+-		return NULL;
++		goto out;
+ 
+ 	t32 = sizeof(short) * attr->name_len;
+ 	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+-		return NULL;
++		goto out;
+ 
+ 	/* Check start/end vcn. */
+ 	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
+-		return NULL;
++		goto out;
+ 
+ 	data_size = le64_to_cpu(attr->nres.data_size);
+ 	if (le64_to_cpu(attr->nres.valid_size) > data_size)
+-		return NULL;
++		goto out;
+ 
+ 	alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ 	if (data_size > alloc_size)
+-		return NULL;
++		goto out;
+ 
+ 	t32 = mi->sbi->cluster_mask;
+ 	if (alloc_size & t32)
+-		return NULL;
++		goto out;
+ 
+ 	if (!attr->nres.svcn && is_attr_ext(attr)) {
+ 		/* First segment of sparse/compressed attribute */
+ 		/* Can we use memory including attr->nres.total_size? */
+ 		if (asize < SIZEOF_NONRESIDENT_EX)
+-			return NULL;
++			goto out;
+ 
+ 		tot_size = le64_to_cpu(attr->nres.total_size);
+ 		if (tot_size & t32)
+-			return NULL;
++			goto out;
+ 
+ 		if (tot_size > alloc_size)
+-			return NULL;
++			goto out;
+ 	} else {
+ 		if (attr->nres.c_unit)
+-			return NULL;
++			goto out;
+ 
+ 		if (alloc_size > mi->sbi->volume.size)
+-			return NULL;
++			goto out;
+ 	}
+ 
+ 	return attr;
++
++out:
++	_ntfs_bad_inode(&ni->vfs_inode);
++	return NULL;
+ }
+ 
+ /*
+  * mi_find_attr - Find the attribute by type and name and id.
+  */
+-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
+-			    enum ATTR_TYPE type, const __le16 *name,
+-			    u8 name_len, const __le16 *id)
++struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			    struct ATTRIB *attr, enum ATTR_TYPE type,
++			    const __le16 *name, u8 name_len, const __le16 *id)
+ {
+ 	u32 type_in = le32_to_cpu(type);
+ 	u32 atype;
+ 
+ next_attr:
+-	attr = mi_enum_attr(mi, attr);
++	attr = mi_enum_attr(ni, mi, attr);
+ 	if (!attr)
+ 		return NULL;
+ 
+@@ -467,9 +472,9 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
+  *
+  * Return: Not fully constructed attribute or NULL if not possible to create.
+  */
+-struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
+-			      const __le16 *name, u8 name_len, u32 asize,
+-			      u16 name_off)
++struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
++			      enum ATTR_TYPE type, const __le16 *name,
++			      u8 name_len, u32 asize, u16 name_off)
+ {
+ 	size_t tail;
+ 	struct ATTRIB *attr;
+@@ -488,7 +493,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
+ 	 * at which we should insert it.
+ 	 */
+ 	attr = NULL;
+-	while ((attr = mi_enum_attr(mi, attr))) {
++	while ((attr = mi_enum_attr(ni, mi, attr))) {
+ 		int diff = compare_attr(attr, type, name, name_len, upcase);
+ 
+ 		if (diff < 0)
+@@ -508,7 +513,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
+ 		tail = used - PtrOffset(rec, attr);
+ 	}
+ 
+-	id = mi_new_attt_id(mi);
++	id = mi_new_attt_id(ni, mi);
+ 
+ 	memmove(Add2Ptr(attr, asize), attr, tail);
+ 	memset(attr, 0, asize);
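The record.c changes above convert every validation failure in mi_enum_attr() from a bare return NULL into a goto, so a corrupt MFT record now has exactly one exit path that calls _ntfs_bad_inode() before returning. A minimal userspace model of that single-exit validation pattern (struct and function names below are illustrative, not the kernel's):

/* Model only: funnel all validation failures through one label so the
 * side effect (marking the owner bad) lives in a single place. */
#include <stdbool.h>
#include <stdio.h>

struct record { unsigned used, total; bool bad; };

static const char *first_attr(struct record *rec)
{
	if (rec->used > rec->total)
		goto out;
	if (rec->used == 0)
		goto out;
	return "attr";		/* record parsed successfully */
out:
	rec->bad = true;	/* the one place corruption is flagged */
	return NULL;
}

int main(void)
{
	struct record r = { .used = 8, .total = 4 };

	if (!first_attr(&r))
		printf("record rejected, bad=%d\n", r.bad);
	return 0;
}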
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 1b508f5433846e..fa41db08848802 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -393,9 +393,9 @@ static ssize_t orangefs_debug_write(struct file *file,
+ 	 * Thwart users who try to jam a ridiculous number
+ 	 * of bytes into the debug file...
+ 	 */
+-	if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) {
++	if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ 		silly = count;
+-		count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1;
++		count = ORANGEFS_MAX_DEBUG_STRING_LEN;
+ 	}
+ 
+ 	buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
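The orangefs clamp above fixes an off-by-one: the destination buffer is kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN) bytes, so capping count at LEN + 1 permitted a one-byte overflow. A standalone sketch of the corrected clamp (MAX_LEN and the names are made up for illustration):

#include <stdio.h>
#include <string.h>

#define MAX_LEN 16	/* stands in for ORANGEFS_MAX_DEBUG_STRING_LEN */

static size_t clamp_count(size_t count)
{
	/* the buffer is MAX_LEN bytes, so MAX_LEN is the hard cap;
	 * the old "MAX_LEN + 1" cap wrote one byte past the end */
	return count > MAX_LEN ? MAX_LEN : count;
}

int main(void)
{
	char buf[MAX_LEN] = { 0 };
	const char src[24] = "way too many bytes.....";
	size_t n = clamp_count(sizeof(src));

	memcpy(buf, src, n);	/* n == 16, stays in bounds */
	printf("copied %zu bytes\n", n);
	return 0;
}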
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index cf53503e001e14..1e6085f2f78ee6 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1483,7 +1483,6 @@ struct cifs_io_parms {
+ struct cifs_io_request {
+ 	struct netfs_io_request		rreq;
+ 	struct cifsFileInfo		*cfile;
+-	struct TCP_Server_Info		*server;
+ 	pid_t				pid;
+ };
+ 
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 3b2d33291a7e64..a34de8ed5ed180 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -147,7 +147,7 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
+ 	struct netfs_io_request *rreq = subreq->rreq;
+ 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+-	struct TCP_Server_Info *server = req->server;
++	struct TCP_Server_Info *server;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ 	size_t size;
+ 	int rc = 0;
+@@ -156,6 +156,8 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
+ 		rdata->xid = get_xid();
+ 		rdata->have_xid = true;
+ 	}
++
++	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ 	rdata->server = server;
+ 
+ 	if (cifs_sb->ctx->rsize == 0)
+@@ -198,7 +200,7 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
+ 	struct netfs_io_request *rreq = subreq->rreq;
+ 	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ 	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+-	struct TCP_Server_Info *server = req->server;
++	struct TCP_Server_Info *server = rdata->server;
+ 	int rc = 0;
+ 
+ 	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
+@@ -265,7 +267,6 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
+ 		open_file = file->private_data;
+ 		rreq->netfs_priv = file->private_data;
+ 		req->cfile = cifsFileInfo_get(open_file);
+-		req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ 			req->pid = req->cfile->pid;
+ 	} else if (rreq->origin != NETFS_WRITEBACK) {
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index a6f8b098c56f14..3bd9f482f0c3e6 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -359,6 +359,7 @@
+ # define DP_DSC_BITS_PER_PIXEL_1_4          0x2
+ # define DP_DSC_BITS_PER_PIXEL_1_2          0x3
+ # define DP_DSC_BITS_PER_PIXEL_1_1          0x4
++# define DP_DSC_BITS_PER_PIXEL_MASK         0x7
+ 
+ #define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED                1
+diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
+index 0fc0999d2420aa..f8236a8536f7eb 100644
+--- a/include/kunit/platform_device.h
++++ b/include/kunit/platform_device.h
+@@ -2,6 +2,7 @@
+ #ifndef _KUNIT_PLATFORM_DRIVER_H
+ #define _KUNIT_PLATFORM_DRIVER_H
+ 
++struct completion;
+ struct kunit;
+ struct platform_device;
+ struct platform_driver;
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index c596e0e4cb751a..7b19b83349cf85 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -872,12 +872,22 @@ static inline bool blk_mq_add_to_batch(struct request *req,
+ 				       void (*complete)(struct io_comp_batch *))
+ {
+ 	/*
+-	 * blk_mq_end_request_batch() can't end request allocated from
+-	 * sched tags
++	 * Check various conditions that exclude batch processing:
++	 * 1) No batch container
++	 * 2) Has scheduler data attached
++	 * 3) Not a passthrough request and end_io set
++	 * 4) Not a passthrough request and a negative ioerror
+ 	 */
+-	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
+-			(req->end_io && !blk_rq_is_passthrough(req)))
++	if (!iob)
+ 		return false;
++	if (req->rq_flags & RQF_SCHED_TAGS)
++		return false;
++	if (!blk_rq_is_passthrough(req)) {
++		if (req->end_io)
++			return false;
++		if (ioerror < 0)
++			return false;
++	}
+ 
+ 	if (!iob->complete)
+ 		iob->complete = complete;
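Beyond readability, the blk-mq rewrite above changes one condition: for non-passthrough requests the error test is now ioerror < 0 rather than any non-zero value. A userspace model of the same early-return structure (all names here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct req { bool sched_tags, passthrough, has_end_io; };

/* one exclusion per early return; the ioerror test is scoped to
 * non-passthrough requests and correctly checks for negative values */
static bool can_batch(const struct req *r, int ioerror, bool have_batch)
{
	if (!have_batch)
		return false;
	if (r->sched_tags)
		return false;
	if (!r->passthrough) {
		if (r->has_end_io)
			return false;
		if (ioerror < 0)
			return false;
	}
	return true;
}

int main(void)
{
	struct req r = { .passthrough = false };

	printf("%d %d\n", can_batch(&r, 0, true),
	       can_batch(&r, -5, true));	/* prints 1 0 */
	return 0;
}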
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 1b20d2d8ef7cce..17960a1e858dbe 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -71,9 +71,6 @@ enum {
+ 
+ 	/* Cgroup is frozen. */
+ 	CGRP_FROZEN,
+-
+-	/* Control group has to be killed. */
+-	CGRP_KILL,
+ };
+ 
+ /* cgroup_root->flags */
+@@ -461,6 +458,9 @@ struct cgroup {
+ 
+ 	int nr_threaded_children;	/* # of live threaded child cgroups */
+ 
++	/* sequence number for cgroup.kill, serialized by css_set_lock. */
++	unsigned int kill_seq;
++
+ 	struct kernfs_node *kn;		/* cgroup kernfs entry */
+ 	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
+ 	struct cgroup_file events_file;	/* handle for "cgroup.events" */
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 240c632c5b957c..7af999a131cb23 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -214,6 +214,19 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 	__v;								\
+ })
+ 
++#ifdef __CHECKER__
++#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
++#else /* __CHECKER__ */
++#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
++#endif /* __CHECKER__ */
++
++/* &a[0] degrades to a pointer: a different type from an array */
++#define __must_be_array(a)	__BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array")
++
++/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
++#define __must_be_cstr(p) \
++	__BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
++
+ #endif /* __KERNEL__ */
+ 
+ /**
+@@ -254,19 +267,6 @@ static inline void *offset_to_ptr(const int *off)
+ 
+ #define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
+ 
+-#ifdef __CHECKER__
+-#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
+-#else /* __CHECKER__ */
+-#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+-#endif /* __CHECKER__ */
+-
+-/* &a[0] degrades to a pointer: a different type from an array */
+-#define __must_be_array(a)	__BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array")
+-
+-/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+-#define __must_be_cstr(p) \
+-	__BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
+-
+ /*
+  * This returns a constant expression while determining if an argument is
+  * a constant expression, most importantly without evaluating the argument.
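The compiler.h hunk only moves __BUILD_BUG_ON_ZERO_MSG(), __must_be_array() and __must_be_cstr() into the __KERNEL__ section; their behavior is unchanged. For reference, the array check can be reproduced in userspace with the same GCC/Clang builtins (this is a sketch, not the kernel header; __must_be_cstr() additionally relies on a kernel-internal __annotated() helper not shown here):

#include <stdio.h>

#define same_type(a, b) __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
#define BUILD_BUG_ON_ZERO_MSG(e, msg) \
	((int)sizeof(struct { _Static_assert(!(e), msg); }))
#define must_be_array(a) \
	BUILD_BUG_ON_ZERO_MSG(same_type((a), &(a)[0]), "must be array")
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]) + must_be_array(a))

int main(void)
{
	int x[4];
	/* int *p = x; ARRAY_SIZE(p);  -- rejected at compile time */
	printf("%zu\n", ARRAY_SIZE(x));	/* prints 4 */
	return 0;
}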
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index e5815867aba971..8bcd629ee250d5 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -128,6 +128,7 @@ typedef	struct {
+ #define EFI_MEMORY_RO		((u64)0x0000000000020000ULL)	/* read-only */
+ #define EFI_MEMORY_SP		((u64)0x0000000000040000ULL)	/* soft reserved */
+ #define EFI_MEMORY_CPU_CRYPTO	((u64)0x0000000000080000ULL)	/* supports encryption */
++#define EFI_MEMORY_HOT_PLUGGABLE	BIT_ULL(20)	/* supports unplugging at runtime */
+ #define EFI_MEMORY_RUNTIME	((u64)0x8000000000000000ULL)	/* range requires runtime mapping */
+ #define EFI_MEMORY_DESCRIPTOR_VERSION	1
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 3928e91bb5905b..8268be0723eee9 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2635,6 +2635,12 @@ struct net *dev_net(const struct net_device *dev)
+ 	return read_pnet(&dev->nd_net);
+ }
+ 
++static inline
++struct net *dev_net_rcu(const struct net_device *dev)
++{
++	return read_pnet_rcu(&dev->nd_net);
++}
++
+ static inline
+ void dev_net_set(struct net_device *dev, struct net *net)
+ {
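dev_net_rcu() differs from dev_net() only in using rcu_dereference() via read_pnet_rcu() (made const-correct further below), so callers must hold the RCU read lock across every use of the returned struct net. A kernel-style sketch of the intended calling convention, mirroring the ip4_dst_hoplimit() hunk later in this patch (not buildable outside a kernel tree; the function name is hypothetical):

/* Sketch: read a per-netns sysctl without holding a device reference. */
static int example_default_ttl(const struct dst_entry *dst)
{
	const struct net *net;
	int ttl;

	rcu_read_lock();
	net = dev_net_rcu(dst->dev);	/* rcu_dereference() underneath */
	ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
	rcu_read_unlock();		/* net must not be used past here */

	return ttl;
}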
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index d2402bf4aea2d1..de5deb1a0118fc 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2593,6 +2593,11 @@
+ 
+ #define PCI_VENDOR_ID_REDHAT		0x1b36
+ 
++#define PCI_VENDOR_ID_WCHIC		0x1c00
++#define PCI_DEVICE_ID_WCHIC_CH382_0S1P	0x3050
++#define PCI_DEVICE_ID_WCHIC_CH382_2S1P	0x3250
++#define PCI_DEVICE_ID_WCHIC_CH382_2S	0x3253
++
+ #define PCI_VENDOR_ID_SILICOM_DENMARK	0x1c2c
+ 
+ #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS	0x1c36
+@@ -2647,6 +2652,12 @@
+ #define PCI_VENDOR_ID_AKS		0x416c
+ #define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
+ 
++#define PCI_VENDOR_ID_WCHCN		0x4348
++#define PCI_DEVICE_ID_WCHCN_CH353_4S	0x3453
++#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF	0x5046
++#define PCI_DEVICE_ID_WCHCN_CH353_1S1P	0x5053
++#define PCI_DEVICE_ID_WCHCN_CH353_2S1P	0x7053
++
+ #define PCI_VENDOR_ID_ACCESSIO		0x494f
+ #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM	0x22c0
+ 
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 0f2aeb37bbb047..ca1db4b92c3244 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -43,6 +43,7 @@ struct kernel_clone_args {
+ 	void *fn_arg;
+ 	struct cgroup *cgrp;
+ 	struct css_set *cset;
++	unsigned int kill_seq;
+ };
+ 
+ /*
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 0f303cc602520e..08647c99d79c9a 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -440,6 +440,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
+ 		dst->expires = expires;
+ }
+ 
++static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
++					    struct sk_buff *skb)
++{
++	if (likely(dst))
++		return LL_RESERVED_SPACE(dst->dev);
++
++	return skb->mac_len;
++}
++
+ INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
+ 					 struct sk_buff *));
+ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 0e548c1f2a0ecd..23ecb10945b0f1 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -471,9 +471,12 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ 						    bool forwarding)
+ {
+ 	const struct rtable *rt = dst_rtable(dst);
+-	struct net *net = dev_net(dst->dev);
+-	unsigned int mtu;
++	unsigned int mtu, res;
++	struct net *net;
++
++	rcu_read_lock();
+ 
++	net = dev_net_rcu(dst->dev);
+ 	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
+ 	    ip_mtu_locked(dst) ||
+ 	    !forwarding) {
+@@ -497,7 +500,11 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ out:
+ 	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
+ 
+-	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
++	res = mtu - lwtunnel_headroom(dst->lwtstate, mtu);
++
++	rcu_read_unlock();
++
++	return res;
+ }
+ 
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
+index 2d6141f28b5309..f7fe796e8429a5 100644
+--- a/include/net/l3mdev.h
++++ b/include/net/l3mdev.h
+@@ -198,10 +198,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+ 	if (netif_is_l3_slave(dev)) {
+ 		struct net_device *master;
+ 
++		rcu_read_lock();
+ 		master = netdev_master_upper_dev_get_rcu(dev);
+ 		if (master && master->l3mdev_ops->l3mdev_l3_out)
+ 			skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ 								skb, proto);
++		rcu_read_unlock();
+ 	}
+ 
+ 	return skb;
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 5a2a0df8ad91b6..44be742cf4d604 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -396,7 +396,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
+ #endif
+ }
+ 
+-static inline struct net *read_pnet_rcu(possible_net_t *pnet)
++static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
+ {
+ #ifdef CONFIG_NET_NS
+ 	return rcu_dereference(pnet->net);
+diff --git a/include/net/route.h b/include/net/route.h
+index 84cb1e04f5cd9c..64949854d35dc3 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -368,10 +368,15 @@ static inline int inet_iif(const struct sk_buff *skb)
+ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+ {
+ 	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+-	struct net *net = dev_net(dst->dev);
+ 
+-	if (hoplimit == 0)
++	if (hoplimit == 0) {
++		const struct net *net;
++
++		rcu_read_lock();
++		net = dev_net_rcu(dst->dev);
+ 		hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
++		rcu_read_unlock();
++	}
+ 	return hoplimit;
+ }
+ 
+diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
+index 4a8a4a63e99ca8..f62689ca861a46 100644
+--- a/include/uapi/drm/xe_drm.h
++++ b/include/uapi/drm/xe_drm.h
+@@ -1486,6 +1486,8 @@ struct drm_xe_oa_unit {
+ 	__u64 capabilities;
+ #define DRM_XE_OA_CAPS_BASE		(1 << 0)
+ #define DRM_XE_OA_CAPS_SYNCS		(1 << 1)
++#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE	(1 << 2)
++#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS	(1 << 3)
+ 
+ 	/** @oa_timestamp_freq: OA timestamp freq */
+ 	__u64 oa_timestamp_freq;
+@@ -1651,6 +1653,20 @@ enum drm_xe_oa_property_id {
+ 	 * to the VM bind case.
+ 	 */
+ 	DRM_XE_OA_PROPERTY_SYNCS,
++
++	/**
++	 * @DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE: Size of OA buffer to be
++	 * allocated by the driver in bytes. Supported sizes are powers of
++	 * 2 from 128 KiB to 128 MiB. When not specified, a 16 MiB OA
++	 * buffer is allocated by default.
++	 */
++	DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE,
++
++	/**
++	 * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait
++	 * for before unblocking poll or read
++	 */
++	DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS,
+ };
+ 
+ /**
+diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
+index 349718c271ebf1..46a2633d33aaa4 100644
+--- a/include/uapi/linux/thermal.h
++++ b/include/uapi/linux/thermal.h
+@@ -30,7 +30,6 @@ enum thermal_genl_attr {
+ 	THERMAL_GENL_ATTR_TZ,
+ 	THERMAL_GENL_ATTR_TZ_ID,
+ 	THERMAL_GENL_ATTR_TZ_TEMP,
+-	THERMAL_GENL_ATTR_TZ_PREV_TEMP,
+ 	THERMAL_GENL_ATTR_TZ_TRIP,
+ 	THERMAL_GENL_ATTR_TZ_TRIP_ID,
+ 	THERMAL_GENL_ATTR_TZ_TRIP_TYPE,
+@@ -54,6 +53,7 @@ enum thermal_genl_attr {
+ 	THERMAL_GENL_ATTR_THRESHOLD,
+ 	THERMAL_GENL_ATTR_THRESHOLD_TEMP,
+ 	THERMAL_GENL_ATTR_THRESHOLD_DIRECTION,
++	THERMAL_GENL_ATTR_TZ_PREV_TEMP,
+ 	__THERMAL_GENL_ATTR_MAX,
+ };
+ #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
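The thermal change is ABI repair rather than a functional one: THERMAL_GENL_ATTR_TZ_PREV_TEMP had been inserted mid-enum, which renumbered every attribute after it; moving it to the end restores the original numbering for existing userspace. A toy example of why insertion position matters for uapi enums:

#include <stdio.h>

/* original uapi numbering */
enum attr_v1  { A_TZ, A_TZ_ID, A_TZ_TEMP, A_TZ_TRIP };

/* inserting mid-enum silently renumbers everything after it */
enum attr_bad { B_TZ, B_TZ_ID, B_TZ_TEMP, B_TZ_PREV_TEMP, B_TZ_TRIP };

/* appending keeps all existing values stable */
enum attr_ok  { C_TZ, C_TZ_ID, C_TZ_TEMP, C_TZ_TRIP, C_TZ_PREV_TEMP };

int main(void)
{
	printf("TRIP: v1=%d bad=%d ok=%d\n",
	       A_TZ_TRIP, B_TZ_TRIP, C_TZ_TRIP);	/* v1=3 bad=4 ok=3 */
	return 0;
}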
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 82b2d2b25c23b7..ee2adc4de05e0a 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -403,6 +403,9 @@ enum clk_gating_state {
+  * delay_ms
+  * @ungate_work: worker to turn on clocks that will be used in case of
+  * interrupt context
++ * @clk_gating_workq: workqueue for clock gating work.
++ * @lock: serialize access to some struct ufs_clk_gating members. An outer lock
++ * relative to the host lock
+  * @state: the current clocks state
+  * @delay_ms: gating delay in ms
+  * @is_suspended: clk gating is suspended when set to 1 which can be used
+@@ -413,11 +416,14 @@ enum clk_gating_state {
+  * @is_initialized: Indicates whether clock gating is initialized or not
+  * @active_reqs: number of requests that are pending and should be waited for
+  * completion before gating clocks.
+- * @clk_gating_workq: workqueue for clock gating work.
+  */
+ struct ufs_clk_gating {
+ 	struct delayed_work gate_work;
+ 	struct work_struct ungate_work;
++	struct workqueue_struct *clk_gating_workq;
++
++	spinlock_t lock;
++
+ 	enum clk_gating_state state;
+ 	unsigned long delay_ms;
+ 	bool is_suspended;
+@@ -426,7 +432,6 @@ struct ufs_clk_gating {
+ 	bool is_enabled;
+ 	bool is_initialized;
+ 	int active_reqs;
+-	struct workqueue_struct *clk_gating_workq;
+ };
+ 
+ /**
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index eec5eb7de8430e..e1895952066eeb 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -420,6 +420,12 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
+ 	}
+ }
+ 
++static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
++{
++	xa_erase(&ctx->io_bl_xa, bl->bgid);
++	io_put_bl(ctx, bl);
++}
++
+ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+ 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
+@@ -717,12 +723,13 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ 		/* if mapped buffer ring OR classic exists, don't allow */
+ 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
+ 			return -EEXIST;
+-	} else {
+-		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+-		if (!bl)
+-			return -ENOMEM;
++		io_destroy_bl(ctx, bl);
+ 	}
+ 
++	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
++	if (!bl)
++		return -ENOMEM;
++
+ 	if (!(reg.flags & IOU_PBUF_RING_MMAP))
+ 		ret = io_pin_pbuf_ring(&reg, bl);
+ 	else
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 25cae9f5575be2..8c44a5198414ea 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -74,9 +74,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+ 			continue;
+ 
+ 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+-			/* ->sqe isn't available if no async data */
+-			if (!req_has_async_data(req))
+-				cmd->sqe = NULL;
+ 			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
+ 						   IO_URING_F_COMPLETE_DEFER);
+ 			ret = true;
+@@ -198,14 +195,15 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
+ 	if (unlikely(!cache))
+ 		return -ENOMEM;
+ 
+-	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
+-		/* defer memcpy until we need it */
+-		ioucmd->sqe = sqe;
+-		return 0;
+-	}
+-
+-	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
+-	ioucmd->sqe = req->async_data;
++	/*
++	 * Unconditionally cache the SQE for now - this is only needed for
++	 * requests that go async, but prep handlers must ensure that any
++	 * SQE data is stable beyond prep. Since uring_cmd is special in
++	 * that it doesn't read in per-op data, play it safe and copy
++	 * unconditionally. This can later get relaxed.
++	 */
++	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
++	ioucmd->sqe = cache->sqes;
+ 	return 0;
+ }
+ 
+@@ -268,16 +266,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+ 	}
+ 
+ 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
+-	if (ret == -EAGAIN) {
+-		struct io_uring_cmd_data *cache = req->async_data;
+-
+-		if (ioucmd->sqe != (void *) cache)
+-			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
+-		return -EAGAIN;
+-	} else if (ret == -EIOCBQUEUED) {
+-		return -EIOCBQUEUED;
+-	}
+-
++	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
++		return ret;
+ 	if (ret < 0)
+ 		req_set_fail(req);
+ 	io_req_uring_cleanup(req, issue_flags);
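The uring_cmd change drops the deferred-copy path: the SQE is now always copied into request-owned cache memory at prep time, so nothing later needs to guess whether ioucmd->sqe still points at submitter-owned memory. A userspace model of the copy-at-prep idea (struct and function names are illustrative, not io_uring's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sqe { char data[64]; };
struct req { struct sqe cached; const struct sqe *sqe; };

static void prep(struct req *r, const struct sqe *user_sqe)
{
	/* unconditionally copy: after prep, r->sqe points at memory the
	 * request owns, so the submitter may reuse or free user_sqe */
	memcpy(&r->cached, user_sqe, sizeof(r->cached));
	r->sqe = &r->cached;
}

int main(void)
{
	struct sqe *user = malloc(sizeof(*user));
	struct req r;

	strcpy(user->data, "opcode+payload");
	prep(&r, user);
	free(user);			/* safe: request kept its own copy */
	printf("%s\n", r.sqe->data);
	return 0;
}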
+diff --git a/io_uring/waitid.c b/io_uring/waitid.c
+index daef5dd644f049..eddd2dffc88b67 100644
+--- a/io_uring/waitid.c
++++ b/io_uring/waitid.c
+@@ -118,7 +118,6 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
+ static void io_waitid_complete(struct io_kiocb *req, int ret)
+ {
+ 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+-	struct io_tw_state ts = {};
+ 
+ 	/* anyone completing better be holding a reference */
+ 	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
+@@ -131,7 +130,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
+ 	if (ret < 0)
+ 		req_set_fail(req);
+ 	io_req_set_res(req, ret, 0);
+-	io_req_task_complete(req, &ts);
+ }
+ 
+ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+@@ -153,6 +151,7 @@ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+ 	list_del_init(&iwa->wo.child_wait.entry);
+ 	spin_unlock_irq(&iw->head->lock);
+ 	io_waitid_complete(req, -ECANCELED);
++	io_req_queue_tw_complete(req, -ECANCELED);
+ 	return true;
+ }
+ 
+@@ -258,6 +257,7 @@ static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
+ 	}
+ 
+ 	io_waitid_complete(req, ret);
++	io_req_task_complete(req, ts);
+ }
+ 
+ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index d9061bd55436b5..afc665b7b1fe56 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4013,7 +4013,7 @@ static void __cgroup_kill(struct cgroup *cgrp)
+ 	lockdep_assert_held(&cgroup_mutex);
+ 
+ 	spin_lock_irq(&css_set_lock);
+-	set_bit(CGRP_KILL, &cgrp->flags);
++	cgrp->kill_seq++;
+ 	spin_unlock_irq(&css_set_lock);
+ 
+ 	css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it);
+@@ -4029,10 +4029,6 @@ static void __cgroup_kill(struct cgroup *cgrp)
+ 		send_sig(SIGKILL, task, 0);
+ 	}
+ 	css_task_iter_end(&it);
+-
+-	spin_lock_irq(&css_set_lock);
+-	clear_bit(CGRP_KILL, &cgrp->flags);
+-	spin_unlock_irq(&css_set_lock);
+ }
+ 
+ static void cgroup_kill(struct cgroup *cgrp)
+@@ -6488,6 +6484,10 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
+ 	spin_lock_irq(&css_set_lock);
+ 	cset = task_css_set(current);
+ 	get_css_set(cset);
++	if (kargs->cgrp)
++		kargs->kill_seq = kargs->cgrp->kill_seq;
++	else
++		kargs->kill_seq = cset->dfl_cgrp->kill_seq;
+ 	spin_unlock_irq(&css_set_lock);
+ 
+ 	if (!(kargs->flags & CLONE_INTO_CGROUP)) {
+@@ -6668,6 +6668,7 @@ void cgroup_post_fork(struct task_struct *child,
+ 		      struct kernel_clone_args *kargs)
+ 	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
+ {
++	unsigned int cgrp_kill_seq = 0;
+ 	unsigned long cgrp_flags = 0;
+ 	bool kill = false;
+ 	struct cgroup_subsys *ss;
+@@ -6681,10 +6682,13 @@ void cgroup_post_fork(struct task_struct *child,
+ 
+ 	/* init tasks are special, only link regular threads */
+ 	if (likely(child->pid)) {
+-		if (kargs->cgrp)
++		if (kargs->cgrp) {
+ 			cgrp_flags = kargs->cgrp->flags;
+-		else
++			cgrp_kill_seq = kargs->cgrp->kill_seq;
++		} else {
+ 			cgrp_flags = cset->dfl_cgrp->flags;
++			cgrp_kill_seq = cset->dfl_cgrp->kill_seq;
++		}
+ 
+ 		WARN_ON_ONCE(!list_empty(&child->cg_list));
+ 		cset->nr_tasks++;
+@@ -6719,7 +6723,7 @@ void cgroup_post_fork(struct task_struct *child,
+ 		 * child down right after we finished preparing it for
+ 		 * userspace.
+ 		 */
+-		kill = test_bit(CGRP_KILL, &cgrp_flags);
++		kill = kargs->kill_seq != cgrp_kill_seq;
+ 	}
+ 
+ 	spin_unlock_irq(&css_set_lock);
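The CGRP_KILL flag was racy because it is set and then cleared again: a fork window that opens and closes entirely around the clear observes nothing. The kill_seq counter closes that window: cgroup_css_set_fork() snapshots the sequence under css_set_lock and cgroup_post_fork() compares, so any cgroup.kill in between has bumped it. A minimal model of the scheme (locking elided, names made up):

#include <stdio.h>

struct cgroup_model { unsigned int kill_seq; };

static unsigned int fork_begin(struct cgroup_model *cg)
{
	return cg->kill_seq;		/* snapshot, taken under the lock */
}

static int fork_end(struct cgroup_model *cg, unsigned int snap)
{
	return cg->kill_seq != snap;	/* a kill happened in the window */
}

int main(void)
{
	struct cgroup_model cg = { 0 };
	unsigned int snap = fork_begin(&cg);

	cg.kill_seq++;			/* cgroup.kill raced with the fork */
	printf("kill child: %d\n", fork_end(&cg, snap));	/* prints 1 */
	return 0;
}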
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index 5877974ece92c6..aac91466279f17 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -590,7 +590,6 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
+ 
+ 		cputime->sum_exec_runtime += user;
+ 		cputime->sum_exec_runtime += sys;
+-		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+ 
+ #ifdef CONFIG_SCHED_CORE
+ 		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
+index db68a964e34e26..c4a3ccf6a8ace4 100644
+--- a/kernel/sched/autogroup.c
++++ b/kernel/sched/autogroup.c
+@@ -150,7 +150,7 @@ void sched_autogroup_exit_task(struct task_struct *p)
+ 	 * see this thread after that: we can no longer use signal->autogroup.
+ 	 * See the PF_EXITING check in task_wants_autogroup().
+ 	 */
+-	sched_move_task(p);
++	sched_move_task(p, true);
+ }
+ 
+ static void
+@@ -182,7 +182,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+ 	 * sched_autogroup_exit_task().
+ 	 */
+ 	for_each_thread(p, t)
+-		sched_move_task(t);
++		sched_move_task(t, true);
+ 
+ 	unlock_task_sighand(p, &flags);
+ 	autogroup_kref_put(prev);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ffceb5ff4c5c37..aeba4e92010205 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -9044,7 +9044,7 @@ static void sched_change_group(struct task_struct *tsk, struct task_group *group
+  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
+  * its new group.
+  */
+-void sched_move_task(struct task_struct *tsk)
++void sched_move_task(struct task_struct *tsk, bool for_autogroup)
+ {
+ 	int queued, running, queue_flags =
+ 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+@@ -9073,7 +9073,8 @@ void sched_move_task(struct task_struct *tsk)
+ 		put_prev_task(rq, tsk);
+ 
+ 	sched_change_group(tsk, group);
+-	scx_move_task(tsk);
++	if (!for_autogroup)
++		scx_cgroup_move_task(tsk);
+ 
+ 	if (queued)
+ 		enqueue_task(rq, tsk, queue_flags);
+@@ -9174,7 +9175,7 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+ 	struct cgroup_subsys_state *css;
+ 
+ 	cgroup_taskset_for_each(task, css, tset)
+-		sched_move_task(task);
++		sched_move_task(task, false);
+ 
+ 	scx_cgroup_finish_attach();
+ }
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 76030e54a3f596..c1dec2453af432 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -2313,12 +2313,35 @@ static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
+  *
+  * - The BPF scheduler is bypassed while the rq is offline and we can always say
+  *   no to the BPF scheduler initiated migrations while offline.
++ *
++ * The caller must ensure that @p and @rq are on different CPUs.
+  */
+ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ 				      bool trigger_error)
+ {
+ 	int cpu = cpu_of(rq);
+ 
++	SCHED_WARN_ON(task_cpu(p) == cpu);
++
++	/*
++	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
++	 * the pinned CPU in migrate_disable_switch() while @p is being switched
++	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
++	 * updated and thus another CPU may see @p on a DSQ in between, leading to
++	 * @p passing the below task_allowed_on_cpu() check while migration is
++	 * disabled.
++	 *
++	 * Test the migration disabled state first as the race window is narrow
++	 * and the BPF scheduler failing to check migration disabled state can
++	 * easily be masked if task_allowed_on_cpu() is done first.
++	 */
++	if (unlikely(is_migration_disabled(p))) {
++		if (trigger_error)
++			scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
++				      p->comm, p->pid, task_cpu(p), cpu);
++		return false;
++	}
++
+ 	/*
+ 	 * We don't require the BPF scheduler to avoid dispatching to offline
+ 	 * CPUs mostly for convenience but also because CPUs can go offline
+@@ -2327,14 +2350,11 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
+ 	 */
+ 	if (!task_allowed_on_cpu(p, cpu)) {
+ 		if (trigger_error)
+-			scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
+-				      cpu_of(rq), p->comm, p->pid);
++			scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
++				      cpu, p->comm, p->pid);
+ 		return false;
+ 	}
+ 
+-	if (unlikely(is_migration_disabled(p)))
+-		return false;
+-
+ 	if (!scx_rq_online(rq))
+ 		return false;
+ 
+@@ -2437,7 +2457,8 @@ static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
+ 
+ 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+-		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
++		if (src_rq != dst_rq &&
++		    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+ 			dst_dsq = find_global_dsq(p);
+ 			dst_rq = src_rq;
+ 		}
+@@ -2575,6 +2596,9 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ {
+ 	struct rq *src_rq = task_rq(p);
+ 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
++#ifdef CONFIG_SMP
++	struct rq *locked_rq = rq;
++#endif
+ 
+ 	/*
+ 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
+@@ -2588,7 +2612,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	}
+ 
+ #ifdef CONFIG_SMP
+-	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
++	if (src_rq != dst_rq &&
++	    unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
+ 		dispatch_enqueue(find_global_dsq(p), p,
+ 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
+ 		return;
+@@ -2611,8 +2636,9 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+ 
+ 	/* switch to @src_rq lock */
+-	if (rq != src_rq) {
+-		raw_spin_rq_unlock(rq);
++	if (locked_rq != src_rq) {
++		raw_spin_rq_unlock(locked_rq);
++		locked_rq = src_rq;
+ 		raw_spin_rq_lock(src_rq);
+ 	}
+ 
+@@ -2630,6 +2656,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 		} else {
+ 			move_remote_task_to_local_dsq(p, enq_flags,
+ 						      src_rq, dst_rq);
++			/* task has been moved to dst_rq, which is now locked */
++			locked_rq = dst_rq;
+ 		}
+ 
+ 		/* if the destination CPU is idle, wake it up */
+@@ -2638,8 +2666,8 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
+ 	}
+ 
+ 	/* switch back to @rq lock */
+-	if (rq != dst_rq) {
+-		raw_spin_rq_unlock(dst_rq);
++	if (locked_rq != rq) {
++		raw_spin_rq_unlock(locked_rq);
+ 		raw_spin_rq_lock(rq);
+ 	}
+ #else	/* CONFIG_SMP */
+@@ -3849,7 +3877,7 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
+ 		curr->scx.slice = 0;
+ 		touch_core_sched(rq, curr);
+ 	} else if (SCX_HAS_OP(tick)) {
+-		SCX_CALL_OP(SCX_KF_REST, tick, curr);
++		SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
+ 	}
+ 
+ 	if (!curr->scx.slice)
+@@ -3996,7 +4024,7 @@ static void scx_ops_disable_task(struct task_struct *p)
+ 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
+ 
+ 	if (SCX_HAS_OP(disable))
+-		SCX_CALL_OP(SCX_KF_REST, disable, p);
++		SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
+ 	scx_set_task_state(p, SCX_TASK_READY);
+ }
+ 
+@@ -4025,7 +4053,7 @@ static void scx_ops_exit_task(struct task_struct *p)
+ 	}
+ 
+ 	if (SCX_HAS_OP(exit_task))
+-		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
++		SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
+ 	scx_set_task_state(p, SCX_TASK_NONE);
+ }
+ 
+@@ -4321,24 +4349,11 @@ int scx_cgroup_can_attach(struct cgroup_taskset *tset)
+ 	return ops_sanitize_err("cgroup_prep_move", ret);
+ }
+ 
+-void scx_move_task(struct task_struct *p)
++void scx_cgroup_move_task(struct task_struct *p)
+ {
+ 	if (!scx_cgroup_enabled)
+ 		return;
+ 
+-	/*
+-	 * We're called from sched_move_task() which handles both cgroup and
+-	 * autogroup moves. Ignore the latter.
+-	 *
+-	 * Also ignore exiting tasks, because in the exit path tasks transition
+-	 * from the autogroup to the root group, so task_group_is_autogroup()
+-	 * alone isn't able to catch exiting autogroup tasks. This is safe for
+-	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
+-	 * tasks.
+-	 */
+-	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
+-		return;
+-
+ 	/*
+ 	 * @p must have ops.cgroup_prep_move() called on it and thus
+ 	 * cgrp_moving_from set.
+diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
+index 4d022d17ac7dd6..1079b56b0f7aea 100644
+--- a/kernel/sched/ext.h
++++ b/kernel/sched/ext.h
+@@ -73,7 +73,7 @@ static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
+ int scx_tg_online(struct task_group *tg);
+ void scx_tg_offline(struct task_group *tg);
+ int scx_cgroup_can_attach(struct cgroup_taskset *tset);
+-void scx_move_task(struct task_struct *p);
++void scx_cgroup_move_task(struct task_struct *p);
+ void scx_cgroup_finish_attach(void);
+ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
+ void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
+@@ -82,7 +82,7 @@ void scx_group_set_idle(struct task_group *tg, bool idle);
+ static inline int scx_tg_online(struct task_group *tg) { return 0; }
+ static inline void scx_tg_offline(struct task_group *tg) {}
+ static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
+-static inline void scx_move_task(struct task_struct *p) {}
++static inline void scx_cgroup_move_task(struct task_struct *p) {}
+ static inline void scx_cgroup_finish_attach(void) {}
+ static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
+ static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c5d67a43fe524b..66744d60904d57 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -572,7 +572,7 @@ extern void sched_online_group(struct task_group *tg,
+ extern void sched_destroy_group(struct task_group *tg);
+ extern void sched_release_group(struct task_group *tg);
+ 
+-extern void sched_move_task(struct task_struct *tsk);
++extern void sched_move_task(struct task_struct *tsk, bool for_autogroup);
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 7304d7cf47f2d7..2a7802ec480cc5 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -373,16 +373,18 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 	cpumask_clear(&cpus_ahead);
+ 	cpumask_clear(&cpus_behind);
+ 	cpus_read_lock();
+-	preempt_disable();
++	migrate_disable();
+ 	clocksource_verify_choose_cpus();
+ 	if (cpumask_empty(&cpus_chosen)) {
+-		preempt_enable();
++		migrate_enable();
+ 		cpus_read_unlock();
+ 		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
+ 		return;
+ 	}
+ 	testcpu = smp_processor_id();
+-	pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++	pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
++		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
++	preempt_disable();
+ 	for_each_cpu(cpu, &cpus_chosen) {
+ 		if (cpu == testcpu)
+ 			continue;
+@@ -402,6 +404,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ 			cs_nsec_min = cs_nsec;
+ 	}
+ 	preempt_enable();
++	migrate_enable();
+ 	cpus_read_unlock();
+ 	if (!cpumask_empty(&cpus_ahead))
+ 		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
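The clocksource change splits what used to be one preempt_disable() region: migrate_disable() keeps the task pinned to its CPU (so smp_processor_id() stays meaningful) while leaving it preemptible, which lets the CPU-choosing step call functions that may not run in atomic context; preemption is then disabled only around the measurement loop itself. A kernel-style sketch of the nesting (the two helpers are hypothetical stubs; not buildable standalone):

static void choose_cpus_may_sleep(void) { }	/* hypothetical stand-in */
static void measure_cpus(int cpu) { (void)cpu; }	/* hypothetical stand-in */

static void verify_sketch(void)
{
	int testcpu;

	migrate_disable();		/* CPU stable, still preemptible */
	choose_cpus_may_sleep();
	testcpu = smp_processor_id();	/* valid: we cannot migrate */

	preempt_disable();		/* atomic section for timing only */
	measure_cpus(testcpu);
	preempt_enable();

	migrate_enable();
}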
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6b888699f916a1..2bbc96568a2b97 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1672,7 +1672,8 @@ static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
+  * must be the same.
+  */
+ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+-			  struct trace_buffer *buffer, int nr_pages)
++			  struct trace_buffer *buffer, int nr_pages,
++			  unsigned long *subbuf_mask)
+ {
+ 	int subbuf_size = PAGE_SIZE;
+ 	struct buffer_data_page *subbuf;
+@@ -1680,6 +1681,9 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 	unsigned long buffers_end;
+ 	int i;
+ 
++	if (!subbuf_mask)
++		return false;
++
+ 	/* Check the meta magic and meta struct size */
+ 	if (meta->magic != RING_BUFFER_META_MAGIC ||
+ 	    meta->struct_size != sizeof(*meta)) {
+@@ -1712,6 +1716,8 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 
+ 	subbuf = rb_subbufs_from_meta(meta);
+ 
++	bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
++
+ 	/* Do the meta buffers and the subbufs themselves have correct data? */
+ 	for (i = 0; i < meta->nr_subbufs; i++) {
+ 		if (meta->buffers[i] < 0 ||
+@@ -1725,6 +1731,12 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
+ 			return false;
+ 		}
+ 
++		if (test_bit(meta->buffers[i], subbuf_mask)) {
++			pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
++			return false;
++		}
++
++		set_bit(meta->buffers[i], subbuf_mask);
+ 		subbuf = (void *)subbuf + subbuf_size;
+ 	}
+ 
+@@ -1838,6 +1850,11 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+ 				cpu_buffer->cpu);
+ 			goto invalid;
+ 		}
++
++		/* If the buffer has content, update pages_touched */
++		if (ret)
++			local_inc(&cpu_buffer->pages_touched);
++
+ 		entries += ret;
+ 		entry_bytes += local_read(&head_page->page->commit);
+ 		local_set(&cpu_buffer->head_page->entries, ret);
+@@ -1889,17 +1906,22 @@ static void rb_meta_init_text_addr(struct ring_buffer_meta *meta)
+ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+ {
+ 	struct ring_buffer_meta *meta;
++	unsigned long *subbuf_mask;
+ 	unsigned long delta;
+ 	void *subbuf;
+ 	int cpu;
+ 	int i;
+ 
++	/* Create a mask to test the subbuf array */
++	subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
++	/* If subbuf_mask fails to allocate, then rb_meta_valid() will return false */
++
+ 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ 		void *next_meta;
+ 
+ 		meta = rb_range_meta(buffer, nr_pages, cpu);
+ 
+-		if (rb_meta_valid(meta, cpu, buffer, nr_pages)) {
++		if (rb_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
+ 			/* Make the mappings match the current address */
+ 			subbuf = rb_subbufs_from_meta(meta);
+ 			delta = (unsigned long)subbuf - meta->first_buffer;
+@@ -1943,6 +1965,7 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
+ 			subbuf += meta->subbuf_size;
+ 		}
+ 	}
++	bitmap_free(subbuf_mask);
+ }
+ 
+ static void *rbm_start(struct seq_file *m, loff_t *pos)
+@@ -7157,6 +7180,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
+ 		kfree(cpu_buffer->subbuf_ids);
+ 		cpu_buffer->subbuf_ids = NULL;
+ 		rb_free_meta_page(cpu_buffer);
++		atomic_dec(&cpu_buffer->resize_disabled);
+ 	}
+ 
+ unlock:
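Besides range-checking each entry, rb_meta_valid() now also rejects boot-mapped metadata whose buffers[] array names the same sub-buffer twice, using a scratch bitmap allocated once per init. The duplicate check boils down to this (standalone model; a fixed-size bool array stands in for bitmap_alloc()):

#include <stdbool.h>
#include <stdio.h>

static bool indices_unique(const int *buffers, int n, int max)
{
	bool seen[64] = { false };	/* stands in for the bitmap */

	if (max > 64)
		return false;
	for (int i = 0; i < n; i++) {
		if (buffers[i] < 0 || buffers[i] >= max)
			return false;	/* out of range */
		if (seen[buffers[i]])
			return false;	/* duplicate: reject the meta */
		seen[buffers[i]] = true;
	}
	return true;
}

int main(void)
{
	int good[] = { 2, 0, 1 }, bad[] = { 2, 0, 2 };

	printf("%d %d\n", indices_unique(good, 3, 3),
	       indices_unique(bad, 3, 3));	/* prints 1 0 */
	return 0;
}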
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b6e40e8791fa76..d2267b4406cd8a 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8341,6 +8341,10 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	struct trace_iterator *iter = &info->iter;
+ 	int ret = 0;
+ 
++	/* Currently the boot mapped buffer is not supported for mmap */
++	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
++		return -ENODEV;
++
+ 	ret = get_snapshot_map(iter->tr);
+ 	if (ret)
+ 		return ret;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9362484a653c4a..218f8c13880862 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3516,12 +3516,6 @@ static int rescuer_thread(void *__rescuer)
+ 			}
+ 		}
+ 
+-		/*
+-		 * Put the reference grabbed by send_mayday().  @pool won't
+-		 * go away while we're still attached to it.
+-		 */
+-		put_pwq(pwq);
+-
+ 		/*
+ 		 * Leave this pool. Notify regular workers; otherwise, we end up
+ 		 * with 0 concurrency and stalling the execution.
+@@ -3532,6 +3526,12 @@ static int rescuer_thread(void *__rescuer)
+ 
+ 		worker_detach_from_pool(rescuer);
+ 
++		/*
++		 * Put the reference grabbed by send_mayday().  @pool might
++		 * go away any time after it.
++		 */
++		put_pwq_unlocked(pwq);
++
+ 		raw_spin_lock_irq(&wq_mayday_lock);
+ 	}
+ 
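The rescuer fix is about ordering: the reference taken by send_mayday() is what keeps the pwq (and with it the pool) alive, so it may only be dropped after worker_detach_from_pool(), the last use, and with put_pwq_unlocked() since no pool lock is held at that point. A tiny model of the rule that a pinning reference must outlive the final use (names invented):

#include <stdio.h>
#include <stdlib.h>

struct pool { int refs; };

static void pool_put(struct pool *p)
{
	if (--p->refs == 0) {
		printf("pool freed\n");
		free(p);
	}
}

static void detach_from(struct pool *p)
{
	printf("detached, refs=%d\n", p->refs);
}

int main(void)
{
	struct pool *p = malloc(sizeof(*p));

	p->refs = 1;		/* the "send_mayday()" reference */
	detach_from(p);		/* last use of the pool... */
	pool_put(p);		/* ...so the put must come after it */
	return 0;
}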
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index aa6c714892ec9d..9f3b8b682adb29 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -685,6 +685,15 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
++		if (ax25->ax25_dev) {
++			if (dev == ax25->ax25_dev->dev) {
++				rcu_read_unlock();
++				break;
++			}
++			netdev_put(ax25->ax25_dev->dev, &ax25->dev_tracker);
++			ax25_dev_put(ax25->ax25_dev);
++		}
++
+ 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ 		if (!ax25->ax25_dev) {
+ 			rcu_read_unlock();
+@@ -692,6 +701,8 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 		ax25_fillin_cb(ax25, ax25->ax25_dev);
++		netdev_hold(dev, &ax25->dev_tracker, GFP_ATOMIC);
++		ax25_dev_hold(ax25->ax25_dev);
+ 		rcu_read_unlock();
+ 		break;
+ 
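The ax25 hunk adds the missing reference management when SO_BINDTODEVICE rebinds a socket: if the socket already tracks a device, the old references are dropped (unless the device is unchanged), and matching holds are taken on the new one. A userspace model of the rebind rule (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };

static void dev_hold(struct dev *d) { d->refs++; }
static void dev_put(struct dev *d)
{
	if (--d->refs == 0)
		free(d);
}

static void rebind(struct dev **slot, struct dev *new)
{
	if (*slot == new)
		return;		/* same device: nothing to do */
	if (*slot)
		dev_put(*slot);	/* release the old binding */
	dev_hold(new);		/* pin the new one */
	*slot = new;
}

int main(void)
{
	struct dev *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
	struct dev *slot = NULL;

	dev_hold(a); dev_hold(b);	/* caller references */
	rebind(&slot, a);
	rebind(&slot, b);
	printf("a->refs=%d b->refs=%d\n", a->refs, b->refs);	/* 1 2 */
	return 0;
}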
+diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
+index ac11f1f08db0f9..d35479c465e2c4 100644
+--- a/net/batman-adv/bat_v.c
++++ b/net/batman-adv/bat_v.c
+@@ -113,8 +113,6 @@ static void
+ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
+ {
+ 	ewma_throughput_init(&hardif_neigh->bat_v.throughput);
+-	INIT_WORK(&hardif_neigh->bat_v.metric_work,
+-		  batadv_v_elp_throughput_metric_update);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index 1d704574e6bf54..b065578b4436ee 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -18,6 +18,7 @@
+ #include <linux/if_ether.h>
+ #include <linux/jiffies.h>
+ #include <linux/kref.h>
++#include <linux/list.h>
+ #include <linux/minmax.h>
+ #include <linux/netdevice.h>
+ #include <linux/nl80211.h>
+@@ -26,6 +27,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/skbuff.h>
++#include <linux/slab.h>
+ #include <linux/stddef.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+@@ -41,6 +43,18 @@
+ #include "routing.h"
+ #include "send.h"
+ 
++/**
++ * struct batadv_v_metric_queue_entry - list of hardif neighbors which require
++ *  a metric update
++ */
++struct batadv_v_metric_queue_entry {
++	/** @hardif_neigh: hardif neighbor scheduled for metric update */
++	struct batadv_hardif_neigh_node *hardif_neigh;
++
++	/** @list: list node for metric_queue */
++	struct list_head list;
++};
++
+ /**
+  * batadv_v_elp_start_timer() - restart timer for ELP periodic work
+  * @hard_iface: the interface for which the timer has to be reset
+@@ -59,25 +73,36 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
+ /**
+  * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
+  * @neigh: the neighbour for which the throughput has to be obtained
++ * @pthroughput: calculated throughput towards the given neighbour in multiples
++ *  of 100kbps (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
+  *
+- * Return: The throughput towards the given neighbour in multiples of 100kpbs
+- *         (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
++ * Return: true when the value behind @pthroughput was set
+  */
+-static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
++static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh,
++					u32 *pthroughput)
+ {
+ 	struct batadv_hard_iface *hard_iface = neigh->if_incoming;
++	struct net_device *soft_iface = hard_iface->soft_iface;
+ 	struct ethtool_link_ksettings link_settings;
+ 	struct net_device *real_netdev;
+ 	struct station_info sinfo;
+ 	u32 throughput;
+ 	int ret;
+ 
++	/* don't query throughput when no longer associated with any
++	 * batman-adv interface
++	 */
++	if (!soft_iface)
++		return false;
++
+ 	/* if the user specified a customised value for this interface, then
+ 	 * return it directly
+ 	 */
+ 	throughput =  atomic_read(&hard_iface->bat_v.throughput_override);
+-	if (throughput != 0)
+-		return throughput;
++	if (throughput != 0) {
++		*pthroughput = throughput;
++		return true;
++	}
+ 
+ 	/* if this is a wireless device, then ask its throughput through
+ 	 * cfg80211 API
+@@ -104,27 +129,39 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 			 * possible to delete this neighbor. For now set
+ 			 * the throughput metric to 0.
+ 			 */
+-			return 0;
++			*pthroughput = 0;
++			return true;
+ 		}
+ 		if (ret)
+ 			goto default_throughput;
+ 
+-		if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
+-			return sinfo.expected_throughput / 100;
++		if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
++			*pthroughput = sinfo.expected_throughput / 100;
++			return true;
++		}
+ 
+ 		/* try to estimate the expected throughput based on reported tx
+ 		 * rates
+ 		 */
+-		if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
+-			return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
++		if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
++			*pthroughput = cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
++			return true;
++		}
+ 
+ 		goto default_throughput;
+ 	}
+ 
++	/* only use rtnl_trylock because the elp worker will be cancelled while
++	 * the rtnl_lock is held. The cancel_delayed_work_sync() would otherwise
++	 * wait forever if the elp work item has already started and is itself
++	 * trying to take the rtnl_lock
++	 */
++	if (!rtnl_trylock())
++		return false;
++
+ 	/* if not a wifi interface, check if this device provides data via
+ 	 * ethtool (e.g. an Ethernet adapter)
+ 	 */
+-	rtnl_lock();
+ 	ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
+ 	rtnl_unlock();
+ 	if (ret == 0) {
+@@ -135,13 +172,15 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 			hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+ 
+ 		throughput = link_settings.base.speed;
+-		if (throughput && throughput != SPEED_UNKNOWN)
+-			return throughput * 10;
++		if (throughput && throughput != SPEED_UNKNOWN) {
++			*pthroughput = throughput * 10;
++			return true;
++		}
+ 	}
+ 
+ default_throughput:
+ 	if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
+-		batadv_info(hard_iface->soft_iface,
++		batadv_info(soft_iface,
+ 			    "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
+ 			    hard_iface->net_dev->name,
+ 			    BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
+@@ -150,31 +189,26 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+ 	}
+ 
+ 	/* if none of the above cases apply, return the base_throughput */
+-	return BATADV_THROUGHPUT_DEFAULT_VALUE;
++	*pthroughput = BATADV_THROUGHPUT_DEFAULT_VALUE;
++	return true;
+ }
+ 
+ /**
+  * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+  *  metric of a single hop neighbour
+- * @work: the work queue item
++ * @neigh: the neighbour to probe
+  */
+-void batadv_v_elp_throughput_metric_update(struct work_struct *work)
++static void
++batadv_v_elp_throughput_metric_update(struct batadv_hardif_neigh_node *neigh)
+ {
+-	struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
+-	struct batadv_hardif_neigh_node *neigh;
+-
+-	neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
+-				   metric_work);
+-	neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
+-			     bat_v);
++	u32 throughput;
++	bool valid;
+ 
+-	ewma_throughput_add(&neigh->bat_v.throughput,
+-			    batadv_v_elp_get_throughput(neigh));
++	valid = batadv_v_elp_get_throughput(neigh, &throughput);
++	if (!valid)
++		return;
+ 
+-	/* decrement refcounter to balance increment performed before scheduling
+-	 * this task
+-	 */
+-	batadv_hardif_neigh_put(neigh);
++	ewma_throughput_add(&neigh->bat_v.throughput, throughput);
+ }
+ 
+ /**
+@@ -248,14 +282,16 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
+  */
+ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ {
++	struct batadv_v_metric_queue_entry *metric_entry;
++	struct batadv_v_metric_queue_entry *metric_safe;
+ 	struct batadv_hardif_neigh_node *hardif_neigh;
+ 	struct batadv_hard_iface *hard_iface;
+ 	struct batadv_hard_iface_bat_v *bat_v;
+ 	struct batadv_elp_packet *elp_packet;
++	struct list_head metric_queue;
+ 	struct batadv_priv *bat_priv;
+ 	struct sk_buff *skb;
+ 	u32 elp_interval;
+-	bool ret;
+ 
+ 	bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
+ 	hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
+@@ -291,6 +327,8 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ 
+ 	atomic_inc(&hard_iface->bat_v.elp_seqno);
+ 
++	INIT_LIST_HEAD(&metric_queue);
++
+ 	/* The throughput metric is updated on each sent packet. This way, if a
+ 	 * node is dead and no longer sends packets, batman-adv is still able to
+ 	 * react timely to its death.
+@@ -315,16 +353,28 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
+ 
+ 		/* Reading the estimated throughput from cfg80211 is a task that
+ 		 * may sleep and that is not allowed in an rcu protected
+-		 * context. Therefore schedule a task for that.
++		 * context. Therefore add it to metric_queue and process it
++		 * outside rcu protected context.
+ 		 */
+-		ret = queue_work(batadv_event_workqueue,
+-				 &hardif_neigh->bat_v.metric_work);
+-
+-		if (!ret)
++		metric_entry = kzalloc(sizeof(*metric_entry), GFP_ATOMIC);
++		if (!metric_entry) {
+ 			batadv_hardif_neigh_put(hardif_neigh);
++			continue;
++		}
++
++		metric_entry->hardif_neigh = hardif_neigh;
++		list_add(&metric_entry->list, &metric_queue);
+ 	}
+ 	rcu_read_unlock();
+ 
++	list_for_each_entry_safe(metric_entry, metric_safe, &metric_queue, list) {
++		batadv_v_elp_throughput_metric_update(metric_entry->hardif_neigh);
++
++		batadv_hardif_neigh_put(metric_entry->hardif_neigh);
++		list_del(&metric_entry->list);
++		kfree(metric_entry);
++	}
++
+ restart_timer:
+ 	batadv_v_elp_start_timer(hard_iface);
+ out:
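The batman-adv rework replaces one work item per neighbour with a local queue: under rcu_read_lock() the worker may not sleep, so it only takes a reference and records each neighbour; the sleeping throughput query then runs over the collected list after the RCU section. A kernel-style sketch of that defer-out-of-RCU pattern (struct and helper names are stand-ins; not buildable standalone):

struct queue_entry {
	struct neigh_node *neigh;	/* hypothetical refcounted object */
	struct list_head list;
};

static void periodic_sketch(struct list_head *neigh_list)
{
	struct queue_entry *entry, *tmp;
	struct neigh_node *n;
	LIST_HEAD(queue);

	rcu_read_lock();
	list_for_each_entry_rcu(n, neigh_list, list) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* atomic: under RCU */
		if (!entry)
			continue;
		neigh_hold(n);			/* pin past the RCU section */
		entry->neigh = n;
		list_add(&entry->list, &queue);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &queue, list) {
		update_metric_may_sleep(entry->neigh);	/* sleeping is fine now */
		neigh_put(entry->neigh);
		list_del(&entry->list);
		kfree(entry);
	}
}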
+diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
+index 9e2740195fa2d4..c9cb0a30710045 100644
+--- a/net/batman-adv/bat_v_elp.h
++++ b/net/batman-adv/bat_v_elp.h
+@@ -10,7 +10,6 @@
+ #include "main.h"
+ 
+ #include <linux/skbuff.h>
+-#include <linux/workqueue.h>
+ 
+ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
+ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
+@@ -19,6 +18,5 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
+ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
+ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ 			     struct batadv_hard_iface *if_incoming);
+-void batadv_v_elp_throughput_metric_update(struct work_struct *work);
+ 
+ #endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 760d51fdbdf602..7d5de4cbb814fb 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3959,23 +3959,21 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ 	struct batadv_tvlv_tt_change *tt_change;
+ 	struct batadv_tvlv_tt_data *tt_data;
+ 	u16 num_entries, num_vlan;
+-	size_t flex_size;
++	size_t tt_data_sz;
+ 
+ 	if (tvlv_value_len < sizeof(*tt_data))
+ 		return;
+ 
+ 	tt_data = tvlv_value;
+-	tvlv_value_len -= sizeof(*tt_data);
+-
+ 	num_vlan = ntohs(tt_data->num_vlan);
+ 
+-	flex_size = flex_array_size(tt_data, vlan_data, num_vlan);
+-	if (tvlv_value_len < flex_size)
++	tt_data_sz = struct_size(tt_data, vlan_data, num_vlan);
++	if (tvlv_value_len < tt_data_sz)
+ 		return;
+ 
+ 	tt_change = (struct batadv_tvlv_tt_change *)((void *)tt_data
+-						     + flex_size);
+-	tvlv_value_len -= flex_size;
++						     + tt_data_sz);
++	tvlv_value_len -= tt_data_sz;
+ 
+ 	num_entries = batadv_tt_entries(tvlv_value_len);
+ 
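The translation-table fix swaps flex_array_size() (the flexible array alone) for struct_size() (header plus array), so the length check and the tt_change pointer arithmetic both account for the tt_data header; previously the pointer advanced past the VLAN array but not the header. The difference between the two size calculations, in plain userspace C with made-up field names:

#include <stddef.h>
#include <stdio.h>

struct vlan { int vid; };
struct tt_data {
	unsigned short flags;
	unsigned short num_vlan;
	struct vlan vlan_data[];	/* flexible array member */
};

int main(void)
{
	unsigned short num_vlan = 3;
	/* flex_array_size(): the array alone */
	size_t flex = sizeof(struct vlan) * num_vlan;
	/* struct_size(): header plus array, where tt_change really starts */
	size_t whole = sizeof(struct tt_data) + flex;

	printf("array=%zu header+array=%zu\n", flex, whole);	/* 12 and 16 */
	return 0;
}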
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 04f6398b3a40e8..85a50096f5b24d 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -596,9 +596,6 @@ struct batadv_hardif_neigh_node_bat_v {
+ 	 *  neighbor
+ 	 */
+ 	unsigned long last_unicast_tx;
+-
+-	/** @metric_work: work queue callback item for metric update */
+-	struct work_struct metric_work;
+ };
+ 
+ /**
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 305dd72c844c70..17226b2341d03d 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -1132,7 +1132,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
+ 
+ 	todo_size = size;
+ 
+-	while (todo_size) {
++	do {
+ 		struct j1939_sk_buff_cb *skcb;
+ 
+ 		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
+@@ -1177,7 +1177,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
+ 
+ 		todo_size -= segment_size;
+ 		session->total_queued_size += segment_size;
+-	}
++	} while (todo_size);
+ 
+ 	switch (ret) {
+ 	case 0: /* OK */
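
In j1939_sk_send_loop() the while loop becomes do/while so that a zero-byte send still queues exactly one empty segment instead of skipping the loop body entirely; the transport.c hunk below makes that zero-length skb findable at offset 0. A runnable toy version of the loop shape (1785 is J1939_MAX_TP_PACKET_SIZE):

#include <stddef.h>
#include <stdio.h>

/* Toy model: with "while (todo)", size == 0 queues nothing and no
 * session is ever created; "do { } while (todo)" always emits at
 * least one (possibly empty) segment.
 */
static size_t count_segments(size_t size, size_t max_seg)
{
	size_t todo = size, segments = 0;

	do {
		size_t seg = todo < max_seg ? todo : max_seg;

		todo -= seg;
		segments++;	/* the real code queues an skb here */
	} while (todo);

	return segments;
}

int main(void)
{
	printf("%zu\n", count_segments(0, 1785));	/* 1, not 0 */
	printf("%zu\n", count_segments(4000, 1785));	/* 3 */
	return 0;
}
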
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 95f7a7e65a73fa..9b72d118d756dd 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+ 	skb_queue_walk(&session->skb_queue, do_skb) {
+ 		do_skcb = j1939_skb_to_cb(do_skb);
+ 
+-		if (offset_start >= do_skcb->offset &&
+-		    offset_start < (do_skcb->offset + do_skb->len)) {
++		if ((offset_start >= do_skcb->offset &&
++		     offset_start < (do_skcb->offset + do_skb->len)) ||
++		     (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) {
+ 			skb = do_skb;
+ 		}
+ 	}
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 34185d138c95ab..ff1cebd71f7b4f 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -37,8 +37,8 @@ static const struct fib_kuid_range fib_kuid_range_unset = {
+ 
+ bool fib_rule_matchall(const struct fib_rule *rule)
+ {
+-	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
+-	    rule->flags)
++	if (READ_ONCE(rule->iifindex) || READ_ONCE(rule->oifindex) ||
++	    rule->mark || rule->tun_id || rule->flags)
+ 		return false;
+ 	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
+ 		return false;
+@@ -261,12 +261,14 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
+ 			  struct flowi *fl, int flags,
+ 			  struct fib_lookup_arg *arg)
+ {
+-	int ret = 0;
++	int iifindex, oifindex, ret = 0;
+ 
+-	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
++	iifindex = READ_ONCE(rule->iifindex);
++	if (iifindex && (iifindex != fl->flowi_iif))
+ 		goto out;
+ 
+-	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
++	oifindex = READ_ONCE(rule->oifindex);
++	if (oifindex && (oifindex != fl->flowi_oif))
+ 		goto out;
+ 
+ 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
+@@ -1039,14 +1041,14 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
+ 	if (rule->iifname[0]) {
+ 		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+ 			goto nla_put_failure;
+-		if (rule->iifindex == -1)
++		if (READ_ONCE(rule->iifindex) == -1)
+ 			frh->flags |= FIB_RULE_IIF_DETACHED;
+ 	}
+ 
+ 	if (rule->oifname[0]) {
+ 		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+ 			goto nla_put_failure;
+-		if (rule->oifindex == -1)
++		if (READ_ONCE(rule->oifindex) == -1)
+ 			frh->flags |= FIB_RULE_OIF_DETACHED;
+ 	}
+ 
+@@ -1218,10 +1220,10 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
+ 	list_for_each_entry(rule, rules, list) {
+ 		if (rule->iifindex == -1 &&
+ 		    strcmp(dev->name, rule->iifname) == 0)
+-			rule->iifindex = dev->ifindex;
++			WRITE_ONCE(rule->iifindex, dev->ifindex);
+ 		if (rule->oifindex == -1 &&
+ 		    strcmp(dev->name, rule->oifname) == 0)
+-			rule->oifindex = dev->ifindex;
++			WRITE_ONCE(rule->oifindex, dev->ifindex);
+ 	}
+ }
+ 
+@@ -1231,9 +1233,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
+ 
+ 	list_for_each_entry(rule, rules, list) {
+ 		if (rule->iifindex == dev->ifindex)
+-			rule->iifindex = -1;
++			WRITE_ONCE(rule->iifindex, -1);
+ 		if (rule->oifindex == dev->ifindex)
+-			rule->oifindex = -1;
++			WRITE_ONCE(rule->oifindex, -1);
+ 	}
+ }
+ 
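
The fib_rules.c annotations pair WRITE_ONCE() in attach_rules()/detach_rules() with READ_ONCE() in the lockless match and dump paths, so each check performs a single untorn load even while a device register/unregister flips the index concurrently. The general shape:

#include <linux/compiler.h>
#include <linux/types.h>

struct rule {
	int iifindex;	/* flipped by attach/detach, read locklessly */
};

/* Reader: load the index once into a local so the zero check and the
 * comparison see the same value even if a writer races.
 */
static bool rule_matches_iif(const struct rule *rule, int flowi_iif)
{
	int iif = READ_ONCE(rule->iifindex);

	return !iif || iif == flowi_iif;
}

/* Writer: every store that lockless readers can observe is annotated. */
static void rule_detach(struct rule *rule)
{
	WRITE_ONCE(rule->iifindex, -1);
}
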
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 0e638a37aa0961..5db41bf2ed93e0 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1108,10 +1108,12 @@ bool __skb_flow_dissect(const struct net *net,
+ 					      FLOW_DISSECTOR_KEY_BASIC,
+ 					      target_container);
+ 
++	rcu_read_lock();
++
+ 	if (skb) {
+ 		if (!net) {
+ 			if (skb->dev)
+-				net = dev_net(skb->dev);
++				net = dev_net_rcu(skb->dev);
+ 			else if (skb->sk)
+ 				net = sock_net(skb->sk);
+ 		}
+@@ -1122,7 +1124,6 @@ bool __skb_flow_dissect(const struct net *net,
+ 		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ 		struct bpf_prog_array *run_array;
+ 
+-		rcu_read_lock();
+ 		run_array = rcu_dereference(init_net.bpf.run_array[type]);
+ 		if (!run_array)
+ 			run_array = rcu_dereference(net->bpf.run_array[type]);
+@@ -1150,17 +1151,17 @@ bool __skb_flow_dissect(const struct net *net,
+ 			prog = READ_ONCE(run_array->items[0].prog);
+ 			result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
+ 						  hlen, flags);
+-			if (result == BPF_FLOW_DISSECTOR_CONTINUE)
+-				goto dissect_continue;
+-			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+-						 target_container);
+-			rcu_read_unlock();
+-			return result == BPF_OK;
++			if (result != BPF_FLOW_DISSECTOR_CONTINUE) {
++				__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
++							 target_container);
++				rcu_read_unlock();
++				return result == BPF_OK;
++			}
+ 		}
+-dissect_continue:
+-		rcu_read_unlock();
+ 	}
+ 
++	rcu_read_unlock();
++
+ 	if (dissector_uses_key(flow_dissector,
+ 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ 		struct ethhdr *eth = eth_hdr(skb);
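
This hunk, like most of the net/ changes below, converts dev_net() calls on paths that hold neither RTNL nor a device reference to dev_net_rcu(), which is only valid inside an explicit RCU read-side section. The conversion pattern in isolation (the sysctl read mirrors the icmp_echo() hunk below):

#include <linux/netdevice.h>
#include <net/net_namespace.h>

static int echo_ignored(const struct net_device *dev)
{
	const struct net *net;
	int ignored;

	rcu_read_lock();
	net = dev_net_rcu(dev);		/* netns pointer valid only here */
	ignored = READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all);
	rcu_read_unlock();

	return ignored;
}
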
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 89656d180bc60c..bd0251bd74a1f8 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -3447,10 +3447,12 @@ static const struct seq_operations neigh_stat_seq_ops = {
+ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 			   u32 pid)
+ {
+-	struct net *net = dev_net(n->dev);
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
++	struct net *net;
+ 
++	rcu_read_lock();
++	net = dev_net_rcu(n->dev);
+ 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
+ 	if (skb == NULL)
+ 		goto errout;
+@@ -3463,9 +3465,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 		goto errout;
+ 	}
+ 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+-	return;
++	goto out;
+ errout:
+ 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
++out:
++	rcu_read_unlock();
+ }
+ 
+ void neigh_app_ns(struct neighbour *n)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index d9f959c619d959..b7cc30fd80e8a6 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3423,6 +3423,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		err = -ENODEV;
+ 
+ 	rtnl_nets_unlock(&rtnl_nets);
++	rtnl_nets_destroy(&rtnl_nets);
+ errout:
+ 	return err;
+ }
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index cb9a7ed8abd3ab..f23a1ec6694cb2 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -659,10 +659,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+  */
+ void arp_xmit(struct sk_buff *skb)
+ {
++	rcu_read_lock();
+ 	/* Send it off, maybe filter it using firewalling first.  */
+ 	NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
+-		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
++		dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev,
+ 		arp_xmit_finish);
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(arp_xmit);
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index c8b3cf5fba4c02..55b8151759bc9f 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1371,10 +1371,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
+ 	__be32 addr = 0;
+ 	unsigned char localnet_scope = RT_SCOPE_HOST;
+ 	struct in_device *in_dev;
+-	struct net *net = dev_net(dev);
++	struct net *net;
+ 	int master_idx;
+ 
+ 	rcu_read_lock();
++	net = dev_net_rcu(dev);
+ 	in_dev = __in_dev_get_rcu(dev);
+ 	if (!in_dev)
+ 		goto no_in_dev;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 094084b61bff8a..5482edb5aade2b 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -399,10 +399,10 @@ static void icmp_push_reply(struct sock *sk,
+ 
+ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
+ {
+-	struct ipcm_cookie ipc;
+ 	struct rtable *rt = skb_rtable(skb);
+-	struct net *net = dev_net(rt->dst.dev);
++	struct net *net = dev_net_rcu(rt->dst.dev);
+ 	bool apply_ratelimit = false;
++	struct ipcm_cookie ipc;
+ 	struct flowi4 fl4;
+ 	struct sock *sk;
+ 	struct inet_sock *inet;
+@@ -608,12 +608,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 	struct sock *sk;
+ 
+ 	if (!rt)
+-		goto out;
++		return;
++
++	rcu_read_lock();
+ 
+ 	if (rt->dst.dev)
+-		net = dev_net(rt->dst.dev);
++		net = dev_net_rcu(rt->dst.dev);
+ 	else if (skb_in->dev)
+-		net = dev_net(skb_in->dev);
++		net = dev_net_rcu(skb_in->dev);
+ 	else
+ 		goto out;
+ 
+@@ -785,7 +787,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 	icmp_xmit_unlock(sk);
+ out_bh_enable:
+ 	local_bh_enable();
+-out:;
++out:
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(__icmp_send);
+ 
+@@ -834,7 +837,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+ 	 * avoid additional coding at protocol handlers.
+ 	 */
+ 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
+-		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
++		__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
+ 		return;
+ 	}
+ 
+@@ -868,7 +871,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
+ 	struct net *net;
+ 	u32 info = 0;
+ 
+-	net = dev_net(skb_dst(skb)->dev);
++	net = dev_net_rcu(skb_dst(skb)->dev);
+ 
+ 	/*
+ 	 *	Incomplete header ?
+@@ -979,7 +982,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
+ static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
+ {
+ 	if (skb->len < sizeof(struct iphdr)) {
+-		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
++		__ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS);
+ 		return SKB_DROP_REASON_PKT_TOO_SMALL;
+ 	}
+ 
+@@ -1011,7 +1014,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
+ 	struct icmp_bxm icmp_param;
+ 	struct net *net;
+ 
+-	net = dev_net(skb_dst(skb)->dev);
++	net = dev_net_rcu(skb_dst(skb)->dev);
+ 	/* should there be an ICMP stat for ignored echos? */
+ 	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
+ 		return SKB_NOT_DROPPED_YET;
+@@ -1040,9 +1043,9 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
+ 
+ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+ {
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+ 	struct icmp_ext_echo_iio *iio, _iio;
+-	struct net *net = dev_net(skb->dev);
+ 	struct inet6_dev *in6_dev;
+ 	struct in_device *in_dev;
+ 	struct net_device *dev;
+@@ -1181,7 +1184,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
+ 	return SKB_NOT_DROPPED_YET;
+ 
+ out_err:
+-	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
++	__ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+ 	return SKB_DROP_REASON_PKT_TOO_SMALL;
+ }
+ 
+@@ -1198,7 +1201,7 @@ int icmp_rcv(struct sk_buff *skb)
+ {
+ 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 	struct rtable *rt = skb_rtable(skb);
+-	struct net *net = dev_net(rt->dst.dev);
++	struct net *net = dev_net_rcu(rt->dst.dev);
+ 	struct icmphdr *icmph;
+ 
+ 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+@@ -1371,9 +1374,9 @@ int icmp_err(struct sk_buff *skb, u32 info)
+ 	struct iphdr *iph = (struct iphdr *)skb->data;
+ 	int offset = iph->ihl<<2;
+ 	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	int type = icmp_hdr(skb)->type;
+ 	int code = icmp_hdr(skb)->code;
+-	struct net *net = dev_net(skb->dev);
+ 
+ 	/*
+ 	 * Use ping_err to handle all icmp errors except those
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 3a1467f2d553f3..cf84704af25c38 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -390,7 +390,13 @@ static inline int ip_rt_proc_init(void)
+ 
+ static inline bool rt_is_expired(const struct rtable *rth)
+ {
+-	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
++	bool res;
++
++	rcu_read_lock();
++	res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev));
++	rcu_read_unlock();
++
++	return res;
+ }
+ 
+ void rt_cache_flush(struct net *net)
+@@ -1002,9 +1008,9 @@ out:	kfree_skb_reason(skb, reason);
+ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ {
+ 	struct dst_entry *dst = &rt->dst;
+-	struct net *net = dev_net(dst->dev);
+ 	struct fib_result res;
+ 	bool lock = false;
++	struct net *net;
+ 	u32 old_mtu;
+ 
+ 	if (ip_mtu_locked(dst))
+@@ -1014,6 +1020,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 	if (old_mtu < mtu)
+ 		return;
+ 
++	rcu_read_lock();
++	net = dev_net_rcu(dst->dev);
+ 	if (mtu < net->ipv4.ip_rt_min_pmtu) {
+ 		lock = true;
+ 		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
+@@ -1021,9 +1029,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 
+ 	if (rt->rt_pmtu == mtu && !lock &&
+ 	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
+-		return;
++		goto out;
+ 
+-	rcu_read_lock();
+ 	if (fib_lookup(net, fl4, &res, 0) == 0) {
+ 		struct fib_nh_common *nhc;
+ 
+@@ -1037,14 +1044,14 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 				update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+ 						      jiffies + net->ipv4.ip_rt_mtu_expires);
+ 			}
+-			rcu_read_unlock();
+-			return;
++			goto out;
+ 		}
+ #endif /* CONFIG_IP_ROUTE_MULTIPATH */
+ 		nhc = FIB_RES_NHC(res);
+ 		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+ 				      jiffies + net->ipv4.ip_rt_mtu_expires);
+ 	}
++out:
+ 	rcu_read_unlock();
+ }
+ 
+@@ -1307,10 +1314,15 @@ static void set_class_tag(struct rtable *rt, u32 tag)
+ 
+ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
+ {
+-	struct net *net = dev_net(dst->dev);
+ 	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
+-	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
+-				    net->ipv4.ip_rt_min_advmss);
++	unsigned int advmss;
++	struct net *net;
++
++	rcu_read_lock();
++	net = dev_net_rcu(dst->dev);
++	advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
++				   net->ipv4.ip_rt_min_advmss);
++	rcu_read_unlock();
+ 
+ 	return min(advmss, IPV4_MAX_PMTU - header_size);
+ }
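
Once rcu_read_lock() is taken near the top of __ip_rt_update_pmtu() and friends, every early return inside the section has to become a goto to a single unlock label; the route.c hunks above, and the icmp6_send() rework below, are largely that mechanical conversion. A minimal sketch with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct obj;
bool precondition(struct obj *obj);	/* hypothetical */
void do_update(struct obj *obj);	/* hypothetical */

static void update_state(struct obj *obj)
{
	rcu_read_lock();

	if (!precondition(obj))
		goto out;	/* was "return" before the lock moved up */

	do_update(obj);
out:
	rcu_read_unlock();
}
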
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index a6984a29fdb9dd..4d14ab7f7e99f1 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -76,7 +76,7 @@ static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ {
+ 	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
+ 	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 
+ 	if (type == ICMPV6_PKT_TOOBIG)
+ 		ip6_update_pmtu(skb, net, info, skb->dev->ifindex, 0, sock_net_uid(net, NULL));
+@@ -473,7 +473,10 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 
+ 	if (!skb->dev)
+ 		return;
+-	net = dev_net(skb->dev);
++
++	rcu_read_lock();
++
++	net = dev_net_rcu(skb->dev);
+ 	mark = IP6_REPLY_MARK(net, skb->mark);
+ 	/*
+ 	 *	Make sure we respect the rules
+@@ -496,7 +499,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		    !(type == ICMPV6_PARAMPROB &&
+ 		      code == ICMPV6_UNK_OPTION &&
+ 		      (opt_unrec(skb, info))))
+-			return;
++			goto out;
+ 
+ 		saddr = NULL;
+ 	}
+@@ -526,7 +529,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
+ 		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
+ 				    &hdr->saddr, &hdr->daddr);
+-		return;
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -535,7 +538,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if (is_ineligible(skb)) {
+ 		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
+ 				    &hdr->saddr, &hdr->daddr);
+-		return;
++		goto out;
+ 	}
+ 
+ 	/* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */
+@@ -582,7 +585,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	np = inet6_sk(sk);
+ 
+ 	if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit))
+-		goto out;
++		goto out_unlock;
+ 
+ 	tmp_hdr.icmp6_type = type;
+ 	tmp_hdr.icmp6_code = code;
+@@ -600,7 +603,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 
+ 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
+ 	if (IS_ERR(dst))
+-		goto out;
++		goto out_unlock;
+ 
+ 	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+ 
+@@ -616,7 +619,6 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		goto out_dst_release;
+ 	}
+ 
+-	rcu_read_lock();
+ 	idev = __in6_dev_get(skb->dev);
+ 
+ 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
+@@ -630,13 +632,15 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ 					   len + sizeof(struct icmp6hdr));
+ 	}
+-	rcu_read_unlock();
++
+ out_dst_release:
+ 	dst_release(dst);
+-out:
++out_unlock:
+ 	icmpv6_xmit_unlock(sk);
+ out_bh_enable:
+ 	local_bh_enable();
++out:
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(icmp6_send);
+ 
+@@ -679,8 +683,8 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ 	skb_pull(skb2, nhs);
+ 	skb_reset_network_header(skb2);
+ 
+-	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0,
+-			skb, 0);
++	rt = rt6_lookup(dev_net_rcu(skb->dev), &ipv6_hdr(skb2)->saddr,
++			NULL, 0, skb, 0);
+ 
+ 	if (rt && rt->dst.dev)
+ 		skb2->dev = rt->dst.dev;
+@@ -717,7 +721,7 @@ EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);
+ 
+ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
+ {
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct sock *sk;
+ 	struct inet6_dev *idev;
+ 	struct ipv6_pinfo *np;
+@@ -832,7 +836,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ 				   u8 code, __be32 info)
+ {
+ 	struct inet6_skb_parm *opt = IP6CB(skb);
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	const struct inet6_protocol *ipprot;
+ 	enum skb_drop_reason reason;
+ 	int inner_offset;
+@@ -889,7 +893,7 @@ enum skb_drop_reason icmpv6_notify(struct sk_buff *skb, u8 type,
+ static int icmpv6_rcv(struct sk_buff *skb)
+ {
+ 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
+-	struct net *net = dev_net(skb->dev);
++	struct net *net = dev_net_rcu(skb->dev);
+ 	struct net_device *dev = icmp6_dev(skb);
+ 	struct inet6_dev *idev = __in6_dev_get(dev);
+ 	const struct in6_addr *saddr, *daddr;
+@@ -921,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 		skb_set_network_header(skb, nh);
+ 	}
+ 
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INMSGS);
+ 
+ 	saddr = &ipv6_hdr(skb)->saddr;
+ 	daddr = &ipv6_hdr(skb)->daddr;
+@@ -939,7 +943,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 
+ 	type = hdr->icmp6_type;
+ 
+-	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
++	ICMP6MSGIN_INC_STATS(dev_net_rcu(dev), idev, type);
+ 
+ 	switch (type) {
+ 	case ICMPV6_ECHO_REQUEST:
+@@ -1034,9 +1038,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
+ 
+ csum_error:
+ 	reason = SKB_DROP_REASON_ICMP_CSUM;
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_CSUMERRORS);
+ discard_it:
+-	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
++	__ICMP6_INC_STATS(dev_net_rcu(dev), idev, ICMP6_MIB_INERRORS);
+ drop_no_count:
+ 	kfree_skb_reason(skb, reason);
+ 	return 0;
+diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
+index 9d8422e350f8d5..2c383c12a43159 100644
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -253,14 +253,15 @@ static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
+ }
+ 
+ static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
+-			   struct ioam6_lwt_encap *tuninfo)
++			   struct ioam6_lwt_encap *tuninfo,
++			   struct dst_entry *cache_dst)
+ {
+ 	struct ipv6hdr *oldhdr, *hdr;
+ 	int hdrlen, err;
+ 
+ 	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -291,7 +292,8 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 			  struct ioam6_lwt_encap *tuninfo,
+ 			  bool has_tunsrc,
+ 			  struct in6_addr *tunsrc,
+-			  struct in6_addr *tundst)
++			  struct in6_addr *tundst,
++			  struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct ipv6hdr *hdr, *inner_hdr;
+@@ -300,7 +302,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
+ 	len = sizeof(*hdr) + hdrlen;
+ 
+-	err = skb_cow_head(skb, len + skb->mac_len);
++	err = skb_cow_head(skb, len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -334,7 +336,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
+ 
+ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	struct dst_entry *dst = skb_dst(skb);
++	struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
+ 	struct in6_addr orig_daddr;
+ 	struct ioam6_lwt *ilwt;
+ 	int err = -EINVAL;
+@@ -352,6 +354,10 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 	orig_daddr = ipv6_hdr(skb)->daddr;
+ 
++	local_bh_disable();
++	cache_dst = dst_cache_get(&ilwt->cache);
++	local_bh_enable();
++
+ 	switch (ilwt->mode) {
+ 	case IOAM6_IPTUNNEL_MODE_INLINE:
+ do_inline:
+@@ -359,7 +365,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
+ 			goto out;
+ 
+-		err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
++		err = ioam6_do_inline(net, skb, &ilwt->tuninfo, cache_dst);
+ 		if (unlikely(err))
+ 			goto drop;
+ 
+@@ -369,7 +375,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		/* Encapsulation (ip6ip6) */
+ 		err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
+ 				     ilwt->has_tunsrc, &ilwt->tunsrc,
+-				     &ilwt->tundst);
++				     &ilwt->tundst, cache_dst);
+ 		if (unlikely(err))
+ 			goto drop;
+ 
+@@ -387,46 +393,45 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		goto drop;
+ 	}
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
++	if (unlikely(!cache_dst)) {
++		struct ipv6hdr *hdr = ipv6_hdr(skb);
++		struct flowi6 fl6;
+ 
+-	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
+-		local_bh_disable();
+-		dst = dst_cache_get(&ilwt->cache);
+-		local_bh_enable();
+-
+-		if (unlikely(!dst)) {
+-			struct ipv6hdr *hdr = ipv6_hdr(skb);
+-			struct flowi6 fl6;
+-
+-			memset(&fl6, 0, sizeof(fl6));
+-			fl6.daddr = hdr->daddr;
+-			fl6.saddr = hdr->saddr;
+-			fl6.flowlabel = ip6_flowinfo(hdr);
+-			fl6.flowi6_mark = skb->mark;
+-			fl6.flowi6_proto = hdr->nexthdr;
+-
+-			dst = ip6_route_output(net, NULL, &fl6);
+-			if (dst->error) {
+-				err = dst->error;
+-				dst_release(dst);
+-				goto drop;
+-			}
++		memset(&fl6, 0, sizeof(fl6));
++		fl6.daddr = hdr->daddr;
++		fl6.saddr = hdr->saddr;
++		fl6.flowlabel = ip6_flowinfo(hdr);
++		fl6.flowi6_mark = skb->mark;
++		fl6.flowi6_proto = hdr->nexthdr;
++
++		cache_dst = ip6_route_output(net, NULL, &fl6);
++		if (cache_dst->error) {
++			err = cache_dst->error;
++			goto drop;
++		}
+ 
++		/* cache only if we don't create a dst reference loop */
++		if (dst->lwtstate != cache_dst->lwtstate) {
+ 			local_bh_disable();
+-			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
++			dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
+ 			local_bh_enable();
+ 		}
+ 
+-		skb_dst_drop(skb);
+-		skb_dst_set(skb, dst);
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev));
++		if (unlikely(err))
++			goto drop;
++	}
+ 
++	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
++		skb_dst_drop(skb);
++		skb_dst_set(skb, cache_dst);
+ 		return dst_output(net, sk, skb);
+ 	}
+ out:
++	dst_release(cache_dst);
+ 	return dst->lwtstate->orig_output(net, sk, skb);
+ drop:
++	dst_release(cache_dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
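
The rewritten ioam6_output() fetches the cached dst before building headers, so skb_cow_head() can size the headroom from the real output device via dst_dev_overhead(), and it only populates the cache when the looked-up route's lwtstate differs from the current one: caching a route that points back at the same lwtunnel state would create a dst -> lwtstate -> dst_cache -> dst reference loop that never drops to zero. The rpl and seg6 hunks below apply the same recipe. A trimmed sketch of the route-and-cache step (error handling condensed, function name hypothetical):

#include <net/dst.h>
#include <net/dst_cache.h>
#include <net/ip6_route.h>

static int lwt_route_skb(struct net *net, struct sk_buff *skb,
			 struct dst_cache *cache, struct flowi6 *fl6)
{
	struct dst_entry *dst = skb_dst(skb), *cache_dst;

	local_bh_disable();
	cache_dst = dst_cache_get(cache);
	local_bh_enable();

	if (!cache_dst) {
		cache_dst = ip6_route_output(net, NULL, fl6);
		if (cache_dst->error) {
			int err = cache_dst->error;

			dst_release(cache_dst);
			return err;
		}

		/* cache only if we don't create a dst reference loop */
		if (dst->lwtstate != cache_dst->lwtstate) {
			local_bh_disable();
			dst_cache_set_ip6(cache, cache_dst, &fl6->saddr);
			local_bh_enable();
		}
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, cache_dst);
	return 0;
}
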
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index b244dbf61d5f39..b7b62e5a562e5d 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1730,21 +1730,19 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 	struct net_device *dev = idev->dev;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	struct net *net = dev_net(dev);
+ 	const struct in6_addr *saddr;
+ 	struct in6_addr addr_buf;
+ 	struct mld2_report *pmr;
+ 	struct sk_buff *skb;
+ 	unsigned int size;
+ 	struct sock *sk;
+-	int err;
++	struct net *net;
+ 
+-	sk = net->ipv6.igmp_sk;
+ 	/* we assume size > sizeof(ra) here
+ 	 * Also try to not allocate high-order pages for big MTU
+ 	 */
+ 	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
+-	skb = sock_alloc_send_skb(sk, size, 1, &err);
++	skb = alloc_skb(size, GFP_KERNEL);
+ 	if (!skb)
+ 		return NULL;
+ 
+@@ -1752,6 +1750,12 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 	skb_reserve(skb, hlen);
+ 	skb_tailroom_reserve(skb, mtu, tlen);
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(dev);
++	sk = net->ipv6.igmp_sk;
++	skb_set_owner_w(skb, sk);
++
+ 	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
+ 		/* <draft-ietf-magma-mld-source-05.txt>:
+ 		 * use unspecified address as the source address
+@@ -1763,6 +1767,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 
+ 	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
+ 
++	rcu_read_unlock();
++
+ 	skb_put_data(skb, ra, sizeof(ra));
+ 
+ 	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
+@@ -2122,21 +2128,21 @@ static void mld_send_cr(struct inet6_dev *idev)
+ 
+ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ {
+-	struct net *net = dev_net(dev);
+-	struct sock *sk = net->ipv6.igmp_sk;
++	const struct in6_addr *snd_addr, *saddr;
++	int err, len, payload_len, full_len;
++	struct in6_addr addr_buf;
+ 	struct inet6_dev *idev;
+ 	struct sk_buff *skb;
+ 	struct mld_msg *hdr;
+-	const struct in6_addr *snd_addr, *saddr;
+-	struct in6_addr addr_buf;
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	int err, len, payload_len, full_len;
+ 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
+ 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
+ 		     IPV6_TLV_PADN, 0 };
+-	struct flowi6 fl6;
+ 	struct dst_entry *dst;
++	struct flowi6 fl6;
++	struct net *net;
++	struct sock *sk;
+ 
+ 	if (type == ICMPV6_MGM_REDUCTION)
+ 		snd_addr = &in6addr_linklocal_allrouters;
+@@ -2147,19 +2153,21 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ 	payload_len = len + sizeof(ra);
+ 	full_len = sizeof(struct ipv6hdr) + payload_len;
+ 
+-	rcu_read_lock();
+-	IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTREQUESTS);
+-	rcu_read_unlock();
++	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
+ 
+-	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
++	rcu_read_lock();
+ 
++	net = dev_net_rcu(dev);
++	idev = __in6_dev_get(dev);
++	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+ 	if (!skb) {
+-		rcu_read_lock();
+-		IP6_INC_STATS(net, __in6_dev_get(dev),
+-			      IPSTATS_MIB_OUTDISCARDS);
++		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 		rcu_read_unlock();
+ 		return;
+ 	}
++	sk = net->ipv6.igmp_sk;
++	skb_set_owner_w(skb, sk);
++
+ 	skb->priority = TC_PRIO_CONTROL;
+ 	skb_reserve(skb, hlen);
+ 
+@@ -2184,9 +2192,6 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
+ 					 IPPROTO_ICMPV6,
+ 					 csum_partial(hdr, len, 0));
+ 
+-	rcu_read_lock();
+-	idev = __in6_dev_get(skb->dev);
+-
+ 	icmpv6_flow_init(sk, &fl6, type,
+ 			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ 			 skb->dev->ifindex);
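
mld_newpack() and igmp6_send() now allocate with plain alloc_skb() and attach the owner afterwards via skb_set_owner_w(), because resolving the per-netns socket needs dev_net_rcu() and sock_alloc_send_skb() cannot be called from inside the RCU section. The allocate-then-own pattern, assuming process context as in mld_newpack():

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Allocate first (may sleep), then charge the skb to the per-netns
 * socket inside the RCU section where dev_net_rcu() is legal.
 */
static struct sk_buff *mld_alloc_owned(struct net_device *dev,
				       unsigned int size)
{
	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);

	if (!skb)
		return NULL;

	rcu_read_lock();
	skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.igmp_sk);
	rcu_read_unlock();

	return skb;
}
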
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index d044c67019de6d..8699d1a188dc4a 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -418,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+ {
+ 	int hlen = LL_RESERVED_SPACE(dev);
+ 	int tlen = dev->needed_tailroom;
+-	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
+ 	struct sk_buff *skb;
+ 
+ 	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
+-	if (!skb) {
+-		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+-			  __func__);
++	if (!skb)
+ 		return NULL;
+-	}
+ 
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->dev = dev;
+@@ -437,7 +433,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
+ 	/* Manually assign socket ownership as we avoid calling
+ 	 * sock_alloc_send_pskb() to bypass wmem buffer limits
+ 	 */
+-	skb_set_owner_w(skb, sk);
++	rcu_read_lock();
++	skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk);
++	rcu_read_unlock();
+ 
+ 	return skb;
+ }
+@@ -473,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb,
+ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 		    const struct in6_addr *saddr)
+ {
++	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+ 	struct dst_entry *dst = skb_dst(skb);
+-	struct net *net = dev_net(skb->dev);
+-	struct sock *sk = net->ipv6.ndisc_sk;
+ 	struct inet6_dev *idev;
++	struct net *net;
++	struct sock *sk;
+ 	int err;
+-	struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+ 	u8 type;
+ 
+ 	type = icmp6h->icmp6_type;
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(skb->dev);
++	sk = net->ipv6.ndisc_sk;
+ 	if (!dst) {
+ 		struct flowi6 fl6;
+ 		int oif = skb->dev->ifindex;
+@@ -490,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
+ 		dst = icmp6_dst_alloc(skb->dev, &fl6);
+ 		if (IS_ERR(dst)) {
++			rcu_read_unlock();
+ 			kfree_skb(skb);
+ 			return;
+ 		}
+@@ -504,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
+ 
+ 	ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
+ 
+-	rcu_read_lock();
+ 	idev = __in6_dev_get(dst->dev);
+ 	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+ 
+@@ -1694,7 +1696,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 	bool ret;
+ 
+ 	if (netif_is_l3_master(skb->dev)) {
+-		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
++		dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif);
+ 		if (!dev)
+ 			return;
+ 	}
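
ndisc_send_redirect() moves from __dev_get_by_index(), which is only safe under RTNL, to dev_get_by_index_rcu(), matching the RCU context the redirect path actually runs in. In isolation:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* RTNL-free lookup: the device is valid only while the RCU section is
 * held, so copy out what you need before unlocking.
 */
static int mtu_by_ifindex(struct net *net, int ifindex)
{
	struct net_device *dev;
	int mtu = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		mtu = READ_ONCE(dev->mtu);
	rcu_read_unlock();

	return mtu;
}
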
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 67ff16c047180b..997e2e4f441d2b 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3196,13 +3196,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
+ {
+ 	struct net_device *dev = dst->dev;
+ 	unsigned int mtu = dst_mtu(dst);
+-	struct net *net = dev_net(dev);
++	struct net *net;
+ 
+ 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+ 
++	rcu_read_lock();
++
++	net = dev_net_rcu(dev);
+ 	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
+ 		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
+ 
++	rcu_read_unlock();
++
+ 	/*
+ 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index db3c19a42e1ca7..0ac4283acdf20c 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -125,7 +125,8 @@ static void rpl_destroy_state(struct lwtunnel_state *lwt)
+ }
+ 
+ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+-			     const struct ipv6_rpl_sr_hdr *srh)
++			     const struct ipv6_rpl_sr_hdr *srh,
++			     struct dst_entry *cache_dst)
+ {
+ 	struct ipv6_rpl_sr_hdr *isrh, *csrh;
+ 	const struct ipv6hdr *oldhdr;
+@@ -153,7 +154,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 
+ 	hdrlen = ((csrh->hdrlen + 1) << 3);
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err)) {
+ 		kfree(buf);
+ 		return err;
+@@ -186,7 +187,8 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ 	return 0;
+ }
+ 
+-static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
++static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt,
++		      struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct rpl_iptunnel_encap *tinfo;
+@@ -196,7 +198,7 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
+ 
+ 	tinfo = rpl_encap_lwtunnel(dst->lwtstate);
+ 
+-	return rpl_do_srh_inline(skb, rlwt, tinfo->srh);
++	return rpl_do_srh_inline(skb, rlwt, tinfo->srh, cache_dst);
+ }
+ 
+ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+@@ -208,14 +210,14 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	err = rpl_do_srh(skb, rlwt);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+ 	local_bh_enable();
+ 
++	err = rpl_do_srh(skb, rlwt, dst);
++	if (unlikely(err))
++		goto drop;
++
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -230,25 +232,28 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 		dst = ip6_route_output(net, NULL, &fl6);
+ 		if (dst->error) {
+ 			err = dst->error;
+-			dst_release(dst);
+ 			goto drop;
+ 		}
+ 
+-		local_bh_disable();
+-		dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+-		local_bh_enable();
++		/* cache only if we don't create a dst reference loop */
++		if (orig_dst->lwtstate != dst->lwtstate) {
++			local_bh_disable();
++			dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	}
+ 
+ 	skb_dst_drop(skb);
+ 	skb_dst_set(skb, dst);
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	return dst_output(net, sk, skb);
+ 
+ drop:
++	dst_release(dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
+@@ -262,29 +267,33 @@ static int rpl_input(struct sk_buff *skb)
+ 
+ 	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+-	err = rpl_do_srh(skb, rlwt);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
++	local_bh_enable();
++
++	err = rpl_do_srh(skb, rlwt, dst);
++	if (unlikely(err)) {
++		dst_release(dst);
++		goto drop;
++	}
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
++			local_bh_enable();
+ 		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
+-	local_bh_enable();
+-
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+ 
+ 	return dst_input(skb);
+ 
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 098632adc9b5af..33833b2064c072 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -124,8 +124,8 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
+ 	return flowlabel;
+ }
+ 
+-/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
+-int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
++static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
++			       int proto, struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net *net = dev_net(dst->dev);
+@@ -137,7 +137,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 	tot_len = hdrlen + sizeof(*hdr);
+ 
+-	err = skb_cow_head(skb, tot_len + skb->mac_len);
++	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -197,11 +197,18 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 
+ 	return 0;
+ }
++
++/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
++int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
++{
++	return __seg6_do_srh_encap(skb, osrh, proto, NULL);
++}
+ EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
+ 
+ /* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
+ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+-				 struct ipv6_sr_hdr *osrh, int proto)
++				 struct ipv6_sr_hdr *osrh, int proto,
++				 struct dst_entry *cache_dst)
+ {
+ 	__u8 first_seg = osrh->first_segment;
+ 	struct dst_entry *dst = skb_dst(skb);
+@@ -230,7 +237,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+ 
+ 	tot_len = red_hdrlen + sizeof(struct ipv6hdr);
+ 
+-	err = skb_cow_head(skb, tot_len + skb->mac_len);
++	err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -317,8 +324,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-/* insert an SRH within an IPv6 packet, just after the IPv6 header */
+-int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
++static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
++				struct dst_entry *cache_dst)
+ {
+ 	struct ipv6hdr *hdr, *oldhdr;
+ 	struct ipv6_sr_hdr *isrh;
+@@ -326,7 +333,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+ 
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 
+-	err = skb_cow_head(skb, hdrlen + skb->mac_len);
++	err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb));
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -369,9 +376,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
+ 
+-static int seg6_do_srh(struct sk_buff *skb)
++static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct seg6_iptunnel_encap *tinfo;
+@@ -384,7 +390,7 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		if (skb->protocol != htons(ETH_P_IPV6))
+ 			return -EINVAL;
+ 
+-		err = seg6_do_srh_inline(skb, tinfo->srh);
++		err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst);
+ 		if (err)
+ 			return err;
+ 		break;
+@@ -402,9 +408,11 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 			return -EINVAL;
+ 
+ 		if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
+-			err = seg6_do_srh_encap(skb, tinfo->srh, proto);
++			err = __seg6_do_srh_encap(skb, tinfo->srh,
++						  proto, cache_dst);
+ 		else
+-			err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
++			err = seg6_do_srh_encap_red(skb, tinfo->srh,
++						    proto, cache_dst);
+ 
+ 		if (err)
+ 			return err;
+@@ -425,11 +433,13 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 		skb_push(skb, skb->mac_len);
+ 
+ 		if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
+-			err = seg6_do_srh_encap(skb, tinfo->srh,
+-						IPPROTO_ETHERNET);
++			err = __seg6_do_srh_encap(skb, tinfo->srh,
++						  IPPROTO_ETHERNET,
++						  cache_dst);
+ 		else
+ 			err = seg6_do_srh_encap_red(skb, tinfo->srh,
+-						    IPPROTO_ETHERNET);
++						    IPPROTO_ETHERNET,
++						    cache_dst);
+ 
+ 		if (err)
+ 			return err;
+@@ -444,6 +454,13 @@ static int seg6_do_srh(struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++/* insert an SRH within an IPv6 packet, just after the IPv6 header */
++int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
++{
++	return __seg6_do_srh_inline(skb, osrh, NULL);
++}
++EXPORT_SYMBOL_GPL(seg6_do_srh_inline);
++
+ static int seg6_input_finish(struct net *net, struct sock *sk,
+ 			     struct sk_buff *skb)
+ {
+@@ -458,31 +475,35 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	err = seg6_do_srh(skb);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
++	local_bh_enable();
++
++	err = seg6_do_srh(skb, dst);
++	if (unlikely(err)) {
++		dst_release(dst);
++		goto drop;
++	}
+ 
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+ 		if (!dst->error) {
++			local_bh_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
++			local_bh_enable();
+ 		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	} else {
+ 		skb_dst_drop(skb);
+ 		skb_dst_set(skb, dst);
+ 	}
+-	local_bh_enable();
+-
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+ 
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+@@ -528,16 +549,16 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	err = seg6_do_srh(skb);
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+ 	local_bh_enable();
+ 
++	err = seg6_do_srh(skb, dst);
++	if (unlikely(err))
++		goto drop;
++
+ 	if (unlikely(!dst)) {
+ 		struct ipv6hdr *hdr = ipv6_hdr(skb);
+ 		struct flowi6 fl6;
+@@ -552,28 +573,31 @@ static int seg6_output_core(struct net *net, struct sock *sk,
+ 		dst = ip6_route_output(net, NULL, &fl6);
+ 		if (dst->error) {
+ 			err = dst->error;
+-			dst_release(dst);
+ 			goto drop;
+ 		}
+ 
+-		local_bh_disable();
+-		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+-		local_bh_enable();
++		/* cache only if we don't create a dst reference loop */
++		if (orig_dst->lwtstate != dst->lwtstate) {
++			local_bh_disable();
++			dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
++			local_bh_enable();
++		}
++
++		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++		if (unlikely(err))
++			goto drop;
+ 	}
+ 
+ 	skb_dst_drop(skb);
+ 	skb_dst_set(skb, dst);
+ 
+-	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+-	if (unlikely(err))
+-		goto drop;
+-
+ 	if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+ 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+ 			       NULL, skb_dst(skb)->dev, dst_output);
+ 
+ 	return dst_output(net, sk, skb);
+ drop:
++	dst_release(dst);
+ 	kfree_skb(skb);
+ 	return err;
+ }
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 225f6048867f4a..5d548eda742dfc 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2101,6 +2101,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ {
+ 	struct ovs_header *ovs_header;
+ 	struct ovs_vport_stats vport_stats;
++	struct net *net_vport;
+ 	int err;
+ 
+ 	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
+@@ -2117,12 +2118,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ 	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
+ 		goto nla_put_failure;
+ 
+-	if (!net_eq(net, dev_net(vport->dev))) {
+-		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
++	rcu_read_lock();
++	net_vport = dev_net_rcu(vport->dev);
++	if (!net_eq(net, net_vport)) {
++		int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC);
+ 
+ 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
+-			goto nla_put_failure;
++			goto nla_put_failure_unlock;
+ 	}
++	rcu_read_unlock();
+ 
+ 	ovs_vport_get_stats(vport, &vport_stats);
+ 	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
+@@ -2143,6 +2147,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ 	genlmsg_end(skb, ovs_header);
+ 	return 0;
+ 
++nla_put_failure_unlock:
++	rcu_read_unlock();
+ nla_put_failure:
+ 	err = -EMSGSIZE;
+ error:
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 141697e7a833bd..53a081d49d28ac 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -337,7 +337,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
+ 
+ void vsock_remove_sock(struct vsock_sock *vsk)
+ {
+-	vsock_remove_bound(vsk);
++	/* Transport reassignment must not remove the binding. */
++	if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
++		vsock_remove_bound(vsk);
++
+ 	vsock_remove_connected(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_sock);
+@@ -821,6 +824,13 @@ static void __vsock_release(struct sock *sk, int level)
+ 	 */
+ 	lock_sock_nested(sk, level);
+ 
++	/* Indicate to vsock_remove_sock() that the socket is being released and
++	 * can be removed from the bound_table. Unlike transport reassignment
++	 * case, where the socket must remain bound despite vsock_remove_sock()
++	 * being called from the transport release() callback.
++	 */
++	sock_set_flag(sk, SOCK_DEAD);
++
+ 	if (vsk->transport)
+ 		vsk->transport->release(vsk);
+ 	else if (sock_type_connectible(sk->sk_type))
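
The vsock fix keys bound-table removal off SOCK_DEAD: __vsock_release() sets the flag before invoking the transport's release() callback, so vsock_remove_sock() unbinds only on a genuine release and leaves the binding intact during transport reassignment, where release() is also called. The flag protocol in miniature, with hypothetical table helpers:

#include <net/sock.h>

void remove_bound(struct sock *sk);	/* hypothetical table helpers */
void remove_connected(struct sock *sk);

/* Reached both from a transport's release() callback (socket dying)
 * and from transport reassignment (socket must stay bound); only the
 * dying path has set SOCK_DEAD beforehand.
 */
static void example_remove_sock(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		remove_bound(sk);

	remove_connected(sk);
}

static void example_release(struct sock *sk)
{
	sock_set_flag(sk, SOCK_DEAD);	/* before transport->release() */
	example_remove_sock(sk);	/* the transport ends up here */
}
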
+diff --git a/rust/Makefile b/rust/Makefile
+index a40a3936126d60..43cd7f845a9a37 100644
+--- a/rust/Makefile
++++ b/rust/Makefile
+@@ -238,6 +238,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ 	-fzero-call-used-regs=% -fno-stack-clash-protection \
+ 	-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ 	-fstrict-flex-arrays=% -fmin-function-alignment=% \
++	-fzero-init-padding-bits=% \
+ 	--param=% --param asan-%
+ 
+ # Derived from `scripts/Makefile.clang`.
+diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
+index cb4415a1225825..571e27efe54489 100644
+--- a/rust/kernel/rbtree.rs
++++ b/rust/kernel/rbtree.rs
+@@ -1149,7 +1149,7 @@ pub struct VacantEntry<'a, K, V> {
+ /// # Invariants
+ /// - `parent` may be null if the new node becomes the root.
+ /// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+-///     null, it is a pointer to the root of the [`RBTree`].
++///   null, it is a pointer to the root of the [`RBTree`].
+ struct RawVacantEntry<'a, K, V> {
+     rbtree: *mut RBTree<K, V>,
+     /// The node that will become the parent of the new node if we insert one.
+diff --git a/samples/hid/Makefile b/samples/hid/Makefile
+index 8ea59e9631a334..db5a077c77fc8b 100644
+--- a/samples/hid/Makefile
++++ b/samples/hid/Makefile
+@@ -40,16 +40,17 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic
+ endif
+ endif
+ 
+-TPROGS_CFLAGS += -Wall -O2
+-TPROGS_CFLAGS += -Wmissing-prototypes
+-TPROGS_CFLAGS += -Wstrict-prototypes
++COMMON_CFLAGS += -Wall -O2
++COMMON_CFLAGS += -Wmissing-prototypes
++COMMON_CFLAGS += -Wstrict-prototypes
+ 
++TPROGS_CFLAGS += $(COMMON_CFLAGS)
+ TPROGS_CFLAGS += -I$(objtree)/usr/include
+ TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
+ TPROGS_CFLAGS += -I$(srctree)/tools/include
+ 
+ ifdef SYSROOT
+-TPROGS_CFLAGS += --sysroot=$(SYSROOT)
++COMMON_CFLAGS += --sysroot=$(SYSROOT)
+ TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
+ endif
+ 
+@@ -112,7 +113,7 @@ clean:
+ 
+ $(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
+ # Fix up variables inherited from Kbuild that tools/ build system won't like
+-	$(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
++	$(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(COMMON_CFLAGS)" \
+ 		LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(HID_SAMPLES_PATH)/../../ \
+ 		O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \
+ 		$@ install_headers
+@@ -163,7 +164,7 @@ $(obj)/hid_surface_dial.o: $(obj)/hid_surface_dial.skel.h
+ 
+ VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux))				\
+ 		     $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux))	\
+-		     $(abspath ./vmlinux)
++		     $(abspath $(objtree)/vmlinux)
+ VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ 
+ $(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
+diff --git a/scripts/Makefile.defconf b/scripts/Makefile.defconf
+index 226ea3df3b4b4c..a44307f08e9d68 100644
+--- a/scripts/Makefile.defconf
++++ b/scripts/Makefile.defconf
+@@ -1,6 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Configuration helpers
+ 
++cmd_merge_fragments = \
++	$(srctree)/scripts/kconfig/merge_config.sh \
++	$4 -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$2 \
++	$(foreach config,$3,$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++
+ # Creates 'merged defconfigs'
+ # ---------------------------------------------------------------------------
+ # Usage:
+@@ -8,9 +13,7 @@
+ #
+ # Input config fragments without '.config' suffix
+ define merge_into_defconfig
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+-		-m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+-		$(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++	$(call cmd,merge_fragments,$1,$2)
+ 	+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ endef
+ 
+@@ -22,8 +25,6 @@ endef
+ #
+ # Input config fragments without '.config' suffix
+ define merge_into_defconfig_override
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+-		-Q -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+-		$(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
++	$(call cmd,merge_fragments,$1,$2,-Q)
+ 	+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ endef
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 04faf15ed316a9..dc081cf46d211c 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -31,6 +31,11 @@ KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
+ ifdef CONFIG_CC_IS_CLANG
+ # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
+ KBUILD_CFLAGS += -Wno-gnu
++
++# Clang checks for overflow/truncation with '%p', while GCC does not:
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
++KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
++KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
+ else
+ 
+ # gcc inanely warns about local variables called 'main'
+@@ -77,6 +82,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # Warn if there is an enum types mismatch
+ KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
+ 
++# Explicitly clear padding bits during variable initialization
++KBUILD_CFLAGS += $(call cc-option,-fzero-init-padding-bits=all)
++
+ KBUILD_CFLAGS += -Wextra
+ KBUILD_CFLAGS += -Wunused
+ 
+@@ -102,11 +110,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+ ifdef CONFIG_CC_IS_GCC
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+-else
+-# Clang checks for overflow/truncation with '%p', while GCC does not:
+-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
+-KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow-non-kprintf)
+-KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation-non-kprintf)
+ endif
+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+ 
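
Makefile.extrawarn enables -fzero-init-padding-bits=all where the compiler supports it (a new GCC option, hence the cc-option guard and the matching bindgen skip added to rust/Makefile above), so brace initializers also clear padding bytes instead of leaving them indeterminate; the two Clang-only kprintf warning suppressions simply move into the CONFIG_CC_IS_CLANG branch. The kind of struct the flag is aimed at, as a small C illustration:

#include <string.h>

/* With plain "= { 0 }" the members are zero but the three padding
 * bytes after 'flag' are indeterminate, which can leak stack contents
 * if the struct is later copied to user space. The new flag makes the
 * initializer clear the padding too; memset() is the portable way.
 */
struct reply {
	char flag;
	/* 3 bytes of padding on most ABIs */
	int value;
};

void fill_reply(struct reply *r)
{
	memset(r, 0, sizeof(*r));	/* clears padding on any compiler */
	r->value = 42;
}
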
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index a0a0be38cbdc14..fb50bd4f4103f2 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -105,9 +105,11 @@ configfiles = $(wildcard $(srctree)/kernel/configs/$(1) $(srctree)/arch/$(SRCARC
+ all-config-fragments = $(call configfiles,*.config)
+ config-fragments = $(call configfiles,$@)
+ 
++cmd_merge_fragments = $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
++
+ %.config: $(obj)/conf
+ 	$(if $(config-fragments),, $(error $@ fragment does not exist on this architecture))
+-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
++	$(call cmd,merge_fragments)
+ 	$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
+ 
+ PHONY += tinyconfig
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 9caa4407c1ca3d..6446cda0f85727 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -1132,7 +1132,22 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+-	{	/* Vexia Edu Atla 10 tablet */
++	{
++		/* Vexia Edu Atla 10 tablet 5V version */
++		.matches = {
++			/* Having all 3 of these not set is somewhat unique */
++			DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
++			DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_JD_NOT_INV |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
++	{	/* Vexia Edu Atla 10 tablet 9V version */
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+diff --git a/sound/soc/renesas/Kconfig b/sound/soc/renesas/Kconfig
+index 426632996a0a31..cb01fb36355f02 100644
+--- a/sound/soc/renesas/Kconfig
++++ b/sound/soc/renesas/Kconfig
+@@ -67,7 +67,7 @@ config SND_SH7760_AC97
+ 
+ config SND_SIU_MIGOR
+ 	tristate "SIU sound support on Migo-R"
+-	depends on SH_MIGOR && I2C
++	depends on SH_MIGOR && I2C && DMADEVICES
+ 	select SND_SOC_SH4_SIU
+ 	select SND_SOC_WM8978
+ 	help
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 76060da755b5c5..e7ec29dfdff22a 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -218,6 +218,7 @@ static bool is_rust_noreturn(const struct symbol *func)
+ 	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check")			||
+ 	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner")			||
+ 	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference")	||
++	       strstr(func->name, "_4core9panicking13assert_failed")					||
+ 	       strstr(func->name, "_4core9panicking11panic_const24panic_const_")			||
+ 	       (strstr(func->name, "_4core5slice5index24slice_") &&
+ 		str_ends_with(func->name, "_fail"));
+diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
+index 625f5b046776ce..9fa64b053ba916 100644
+--- a/tools/sched_ext/include/scx/common.bpf.h
++++ b/tools/sched_ext/include/scx/common.bpf.h
+@@ -251,8 +251,16 @@ void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
+ #define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
+ #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
+ 
+-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
++int bpf_list_push_front_impl(struct bpf_list_head *head,
++				    struct bpf_list_node *node,
++				    void *meta, __u64 off) __ksym;
++#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
++
++int bpf_list_push_back_impl(struct bpf_list_head *head,
++				   struct bpf_list_node *node,
++				   void *meta, __u64 off) __ksym;
++#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
++
+ struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
+ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
+ struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
+diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh
+index 6fb66a687f1737..bbc29ed9c60a91 100755
+--- a/tools/testing/selftests/gpio/gpio-sim.sh
++++ b/tools/testing/selftests/gpio/gpio-sim.sh
+@@ -46,12 +46,6 @@ remove_chip() {
+ 	rmdir $CONFIGFS_DIR/$CHIP || fail "Unable to remove the chip"
+ }
+ 
+-configfs_cleanup() {
+-	for CHIP in `ls $CONFIGFS_DIR/`; do
+-		remove_chip $CHIP
+-	done
+-}
+-
+ create_chip() {
+ 	local CHIP=$1
+ 
+@@ -105,6 +99,13 @@ disable_chip() {
+ 	echo 0 > $CONFIGFS_DIR/$CHIP/live || fail "Unable to disable the chip"
+ }
+ 
++configfs_cleanup() {
++	for CHIP in `ls $CONFIGFS_DIR/`; do
++		disable_chip $CHIP
++		remove_chip $CHIP
++	done
++}
++
+ configfs_chip_name() {
+ 	local CHIP=$1
+ 	local BANK=$2
+@@ -181,6 +182,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -n `cat $CONFIGFS_DIR/chip/bank/chip_name` || fail "chip_name doesn't work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "1.2. chip_name returns 'none' if the chip is still pending"
+@@ -195,6 +197,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -n `cat $CONFIGFS_DIR/chip/dev_name` || fail "dev_name doesn't work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2. Creating and configuring simulated chips"
+@@ -204,6 +207,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test "`get_chip_num_lines chip bank`" = "1" || fail "default number of lines is not 1"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.2. Number of lines can be specified"
+@@ -212,6 +216,7 @@ create_bank chip bank
+ set_num_lines chip bank 16
+ enable_chip chip
+ test "`get_chip_num_lines chip bank`" = "16" || fail "number of lines is not 16"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.3. Label can be set"
+@@ -220,6 +225,7 @@ create_bank chip bank
+ set_label chip bank foobar
+ enable_chip chip
+ test "`get_chip_label chip bank`" = "foobar" || fail "label is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.4. Label can be left empty"
+@@ -227,6 +233,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ test -z "`cat $CONFIGFS_DIR/chip/bank/label`" || fail "label is not empty"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.5. Line names can be configured"
+@@ -238,6 +245,7 @@ set_line_name chip bank 2 bar
+ enable_chip chip
+ test "`get_line_name chip bank 0`" = "foo" || fail "line name is incorrect"
+ test "`get_line_name chip bank 2`" = "bar" || fail "line name is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.6. Line config can remain unused if offset is greater than number of lines"
+@@ -248,6 +256,7 @@ set_line_name chip bank 5 foobar
+ enable_chip chip
+ test "`get_line_name chip bank 0`" = "" || fail "line name is incorrect"
+ test "`get_line_name chip bank 1`" = "" || fail "line name is incorrect"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.7. Line configfs directory names are sanitized"
+@@ -267,6 +276,7 @@ for CHIP in $CHIPS; do
+ 	enable_chip $CHIP
+ done
+ for CHIP in $CHIPS; do
++	disable_chip $CHIP
+ 	remove_chip $CHIP
+ done
+ 
+@@ -278,6 +288,7 @@ echo foobar > $CONFIGFS_DIR/chip/bank/label 2> /dev/null && \
+ 	fail "Setting label of a live chip should fail"
+ echo 8 > $CONFIGFS_DIR/chip/bank/num_lines 2> /dev/null && \
+ 	fail "Setting number of lines of a live chip should fail"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.10. Can't create line items when chip is live"
+@@ -285,6 +296,7 @@ create_chip chip
+ create_bank chip bank
+ enable_chip chip
+ mkdir $CONFIGFS_DIR/chip/bank/line0 2> /dev/null && fail "Creating line item should fail"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "2.11. Probe errors are propagated to user-space"
+@@ -316,6 +328,7 @@ mkdir -p $CONFIGFS_DIR/chip/bank/line4/hog
+ enable_chip chip
+ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 4 2> /dev/null && \
+ 	fail "Setting the value of a hogged line shouldn't succeed"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3. Controlling simulated chips"
+@@ -331,6 +344,7 @@ test "$?" = "1" || fail "pull set incorrectly"
+ sysfs_set_pull chip bank 0 pull-down
+ $BASE_DIR/gpio-mockup-cdev /dev/`configfs_chip_name chip bank` 1
+ test "$?" = "0" || fail "pull set incorrectly"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.2. Pull can be read from sysfs"
+@@ -344,6 +358,7 @@ SYSFS_PATH=/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull
+ test `cat $SYSFS_PATH` = "pull-down" || fail "reading the pull failed"
+ sysfs_set_pull chip bank 0 pull-up
+ test `cat $SYSFS_PATH` = "pull-up" || fail "reading the pull failed"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.3. Incorrect input in sysfs is rejected"
+@@ -355,6 +370,7 @@ DEVNAME=`configfs_dev_name chip`
+ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull"
+ echo foobar > $SYSFS_PATH 2> /dev/null && fail "invalid input not detected"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "3.4. Can't write to value"
+@@ -365,6 +381,7 @@ DEVNAME=`configfs_dev_name chip`
+ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
+ echo 1 > $SYSFS_PATH 2> /dev/null && fail "writing to 'value' succeeded unexpectedly"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "4. Simulated GPIO chips are functional"
+@@ -382,6 +399,7 @@ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 0 &
+ sleep 0.1 # FIXME Any better way?
+ test `cat $SYSFS_PATH` = "1" || fail "incorrect value read from sysfs"
+ kill $!
++disable_chip chip
+ remove_chip chip
+ 
+ echo "4.2. Bias settings work correctly"
+@@ -394,6 +412,7 @@ CHIPNAME=`configfs_chip_name chip bank`
+ SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
+ $BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
+ test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
++disable_chip chip
+ remove_chip chip
+ 
+ echo "GPIO $MODULE test PASS"
+diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
+index 90b33b0c4391bf..446d650f0c948a 100644
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -1152,6 +1152,14 @@ static int stop_tracing;
+ static struct trace_instance *hist_inst = NULL;
+ static void stop_hist(int sig)
+ {
++	if (stop_tracing) {
++		/*
++		 * Stop requested twice in a row; abort event processing and
++		 * exit immediately
++		 */
++		tracefs_iterate_stop(hist_inst->inst);
++		return;
++	}
+ 	stop_tracing = 1;
+ 	if (hist_inst)
+ 		trace_instance_stop(hist_inst);
+diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
+index 139eb58336c36f..f387597d3ac225 100644
+--- a/tools/tracing/rtla/src/timerlat_top.c
++++ b/tools/tracing/rtla/src/timerlat_top.c
+@@ -906,6 +906,14 @@ static int stop_tracing;
+ static struct trace_instance *top_inst = NULL;
+ static void stop_top(int sig)
+ {
++	if (stop_tracing) {
++		/*
++		 * Stop requested twice in a row; abort event processing and
++		 * exit immediately
++		 */
++		tracefs_iterate_stop(top_inst->inst);
++		return;
++	}
+ 	stop_tracing = 1;
+ 	if (top_inst)
+ 		trace_instance_stop(top_inst);


^ permalink raw reply related	[flat|nested] 12+ messages in thread
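
The timerlat_hist/timerlat_top hunks at the end of the patch above both add the same double-interrupt pattern: the first SIGINT requests a graceful stop, and a second SIGINT received while that stop is still pending abandons event processing instead of waiting for it to drain. A minimal standalone sketch of that pattern, using _exit() as a stand-in for the tracefs_iterate_stop()-and-return path (names and flow simplified; this is not the rtla source):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static volatile sig_atomic_t stop_requested;

/*
 * First SIGINT: ask the main loop to wind down and drain pending events.
 * Second SIGINT while the stop is pending: abandon processing entirely
 * (rtla instead calls tracefs_iterate_stop() and returns).
 */
static void on_sigint(int sig)
{
	(void)sig;
	if (stop_requested)
		_exit(EXIT_FAILURE);
	stop_requested = 1;
}

int main(void)
{
	signal(SIGINT, on_sigint);

	while (!stop_requested)
		pause();	/* stand-in for the event-processing loop */

	puts("draining remaining events before exit");
	return 0;
}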

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-23 19:55 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-23 19:55 UTC (permalink / raw
  To: gentoo-commits

commit:     c296975a0b99af5c25ecb10e3de4fb573ff5ddc7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 23 19:53:57 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 23 19:53:57 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c296975a

Add two KVM patches to fix regression

Bug: https://bugzilla.kernel.org/show_bug.cgi?id=219787
Bug: https://bugs.gentoo.org/950113

KVM: x86: Snapshot the host's DEBUGCTL in common x86
KVM: SVM: Manually zero/restore DEBUGCTL if LBR virtualization is disabled
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                       |  8 ++
 1750_KVM-x86-Snapshot-hosts-DEBUGCTL.patch        | 95 +++++++++++++++++++++++
 1751_KVM-SVM-Manually-zero-restore-DEBUGCTL.patch | 69 ++++++++++++++++
 3 files changed, 172 insertions(+)

diff --git a/0000_README b/0000_README
index 60c36739..6a580881 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,14 @@ Patch:  1740_x86-insn-decoder-test-allow-longer-symbol-names.patch
 From:   https://gitlab.com/cki-project/kernel-ark/-/commit/8d4a52c3921d278f27241fc0c6949d8fdc13a7f5
 Desc:   x86/insn_decoder_test: allow longer symbol-names
 
+Patch:  1750_KVM-x86-Snapshot-hosts-DEBUGCTL.patch
+From:   https://bugzilla.kernel.org/show_bug.cgi?id=219787
+Desc:   KVM: x86: Snapshot the host's DEBUGCTL in common x86
+
+Patch:  1751_KVM-SVM-Manually-zero-restore-DEBUGCTL.patch
+From:   https://bugzilla.kernel.org/show_bug.cgi?id=219787
+Desc:   KVM: SVM: Manually zero/restore DEBUGCTL if LBR virtualization is disabled
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1750_KVM-x86-Snapshot-hosts-DEBUGCTL.patch b/1750_KVM-x86-Snapshot-hosts-DEBUGCTL.patch
new file mode 100644
index 00000000..0265460c
--- /dev/null
+++ b/1750_KVM-x86-Snapshot-hosts-DEBUGCTL.patch
@@ -0,0 +1,95 @@
+From d8595d6256fd46ece44b3433954e8545a0d199b8 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 21 Feb 2025 07:45:22 -0800
+Subject: [PATCH 1/2] KVM: x86: Snapshot the host's DEBUGCTL in common x86
+
+Move KVM's snapshot of DEBUGCTL to kvm_vcpu_arch and take the snapshot in
+common x86, so that SVM can also use the snapshot.
+
+Opportunistically change the field to a u64.  While bits 63:32 are reserved
+on AMD, not mentioned at all in Intel's SDM, and managed as an "unsigned
+long" by the kernel, DEBUGCTL is an MSR and therefore a 64-bit value.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/vmx/vmx.c          | 8 ++------
+ arch/x86/kvm/vmx/vmx.h          | 2 --
+ arch/x86/kvm/x86.c              | 1 +
+ 4 files changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 0b7af5902ff7..32ae3aa50c7e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -780,6 +780,7 @@ struct kvm_vcpu_arch {
+ 	u32 pkru;
+ 	u32 hflags;
+ 	u64 efer;
++	u64 host_debugctl;
+ 	u64 apic_base;
+ 	struct kvm_lapic *apic;    /* kernel irqchip context */
+ 	bool load_eoi_exitmap_pending;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 6c56d5235f0f..3b92f893b239 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+  */
+ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+-	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+ 	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+ 		shrink_ple_window(vcpu);
+ 
+ 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
+ 
+ 	vmx_vcpu_pi_load(vcpu, cpu);
+-
+-	vmx->host_debugctlmsr = get_debugctlmsr();
+ }
+ 
+ void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+@@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
+ 	}
+ 
+ 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+-	if (vmx->host_debugctlmsr)
+-		update_debugctlmsr(vmx->host_debugctlmsr);
++	if (vcpu->arch.host_debugctl)
++		update_debugctlmsr(vcpu->arch.host_debugctl);
+ 
+ #ifndef CONFIG_X86_64
+ 	/*
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 8b111ce1087c..951e44dc9d0e 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -340,8 +340,6 @@ struct vcpu_vmx {
+ 	/* apic deadline value in host tsc */
+ 	u64 hv_deadline_tsc;
+ 
+-	unsigned long host_debugctlmsr;
+-
+ 	/*
+ 	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
+ 	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 02159c967d29..5c6fd0edc41f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4968,6 +4968,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+ 	/* Save host pkru register if supported */
+ 	vcpu->arch.host_pkru = read_pkru();
++	vcpu->arch.host_debugctl = get_debugctlmsr();
+ 
+ 	/* Apply any externally detected TSC adjustments (due to suspend) */
+ 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
+
+base-commit: 0ad2507d5d93f39619fc42372c347d6006b64319
+-- 
+2.48.1.658.g4767266eb4-goog
+

diff --git a/1751_KVM-SVM-Manually-zero-restore-DEBUGCTL.patch b/1751_KVM-SVM-Manually-zero-restore-DEBUGCTL.patch
new file mode 100644
index 00000000..e3ce9fe4
--- /dev/null
+++ b/1751_KVM-SVM-Manually-zero-restore-DEBUGCTL.patch
@@ -0,0 +1,69 @@
+From d02de0dfc6fd10f7bc4f7067fb9765c24948c737 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 21 Feb 2025 08:16:36 -0800
+Subject: [PATCH 2/2] KVM: SVM: Manually zero/restore DEBUGCTL if LBR
+ virtualization is disabled
+
+Manually zero DEBUGCTL prior to VMRUN if the host's value is non-zero and
+LBR virtualization is disabled, as hardware only context switches DEBUGCTL
+if LBR virtualization is fully enabled.  Running the guest with the host's
+value has likely been mildly problematic for quite some time, e.g. it will
+result in undesirable behavior if the host is running with BTF=1.
+
+But the bug became fatal with the introduction of Bus Lock Trap ("Detect"
+in kernel parlance) support for AMD (commit 408eb7417a92
+("x86/bus_lock: Add support for AMD")), as a bus lock in the guest will
+trigger an unexpected #DB.
+
+Note, KVM could suppress the bus lock #DB, i.e. simply resume the guest
+without injecting a #DB, but that wouldn't address things like BTF.  And
+it appears that AMD CPUs incorrectly clear DR6_BUS_LOCK (it's active low)
+when delivering a #DB that is NOT a bus lock trap, and BUS_LOCK_DETECT is
+enabled in DEBUGCTL.
+
+Reported-by: rangemachine@gmail.com
+Reported-by: whanos@sergal.fun
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219787
+Closes: https://lore.kernel.org/all/bug-219787-28872@https.bugzilla.kernel.org%2F
+Cc: Ravi Bangoria <ravi.bangoria@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+---
+ arch/x86/kvm/svm/svm.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index a713c803a3a3..a50ca1f17e31 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4253,6 +4253,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	clgi();
+ 	kvm_load_guest_xsave_state(vcpu);
+ 
++	/*
++	 * Hardware only context switches DEBUGCTL if LBR virtualization is
+	 * enabled.  Manually zero DEBUGCTL if necessary (and restore it after
+	 * VM-Exit), as running with the host's DEBUGCTL can negatively affect
++	 * guest state and can even be fatal, e.g. due to bus lock detect.
++	 */
++	if (vcpu->arch.host_debugctl &&
++	    !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK))
++		update_debugctlmsr(0);
++
+ 	kvm_wait_lapic_expire(vcpu);
+ 
+ 	/*
+@@ -4280,6 +4290,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
+ 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
+ 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ 
++	if (vcpu->arch.host_debugctl &&
++	    !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK))
++		update_debugctlmsr(vcpu->arch.host_debugctl);
++
+ 	kvm_load_host_xsave_state(vcpu);
+ 	stgi();
+ 
+-- 
+2.48.1.658.g4767266eb4-goog
+


^ permalink raw reply related	[flat|nested] 12+ messages in thread
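
Read together, the two patches in this commit form a snapshot-and-restore scheme: common x86 code caches the host's DEBUGCTL once per vcpu load, and SVM zeroes/restores it around guest entry only when LBR virtualization leaves the MSR un-switched by hardware. A condensed userspace sketch of that control flow, with stub helpers standing in for get_debugctlmsr()/update_debugctlmsr() and the real KVM entry path (all names below are illustrative, not KVM's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub MSR accessors; in KVM these are get_debugctlmsr()/update_debugctlmsr(). */
static uint64_t host_msr = 0x2;	/* pretend the host runs with BTF=1 */

static uint64_t read_host_debugctl(void)
{
	return host_msr;
}

static void write_debugctl(uint64_t val)
{
	printf("DEBUGCTL <- %#llx\n", (unsigned long long)val);
}

struct vcpu {
	uint64_t host_debugctl;	/* snapshot lives in common state (kvm_vcpu_arch) */
	bool lbrv_enabled;	/* vendor knowledge: will hardware switch DEBUGCTL? */
};

/* Common load path: snapshot once so both VMX and SVM can consume it. */
static void vcpu_load(struct vcpu *v)
{
	v->host_debugctl = read_host_debugctl();
}

/* Vendor run path: hardware only context-switches DEBUGCTL when LBR
 * virtualization is enabled, so zero it manually before entering the
 * guest and restore it after the exit otherwise. */
static void vcpu_run(struct vcpu *v)
{
	bool manual = v->host_debugctl && !v->lbrv_enabled;

	if (manual)
		write_debugctl(0);
	/* ... VMRUN / guest executes / #VMEXIT ... */
	if (manual)
		write_debugctl(v->host_debugctl);
}

int main(void)
{
	struct vcpu v = { .lbrv_enabled = false };

	vcpu_load(&v);
	vcpu_run(&v);	/* prints DEBUGCTL <- 0, then DEBUGCTL <- 0x2 */
	return 0;
}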

* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-02-27 13:20 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-02-27 13:20 UTC (permalink / raw
  To: gentoo-commits

commit:     d8f3f9f3f0b8bb0d73ecda7a445dc34dd7752c4d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 27 13:20:00 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 27 13:20:00 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d8f3f9f3

Linux patch 6.13.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1004_linux-6.13.5.patch | 8352 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8356 insertions(+)

diff --git a/0000_README b/0000_README
index 6a580881..51a3feed 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.13.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.13.4
 
+Patch:  1004_linux-6.13.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.5
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1004_linux-6.13.5.patch b/1004_linux-6.13.5.patch
new file mode 100644
index 00000000..a9ea6c23
--- /dev/null
+++ b/1004_linux-6.13.5.patch
@@ -0,0 +1,8352 @@
+diff --git a/Makefile b/Makefile
+index c436a6e64971d7..56d5c11b6f1ec6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+index e4517f47d519cc..eb9470a00e549f 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+@@ -226,7 +226,6 @@ &uart0 {
+ };
+ 
+ &uart5 {
+-	pinctrl-0 = <&uart5_xfer>;
+ 	rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+index ae050cc6cd050f..e80412abec081f 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+@@ -396,6 +396,12 @@ &u2phy_host {
+ 	status = "okay";
+ };
+ 
++&uart5 {
++	/delete-property/ dmas;
++	/delete-property/ dma-names;
++	pinctrl-0 = <&uart5_xfer>;
++};
++
+ /* Mule UCAN */
+ &usb_host0_ehci {
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+index 67c246ad8b8c0d..ec2ce894da1fc1 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -17,8 +17,7 @@ / {
+ 
+ &gmac2io {
+ 	phy-handle = <&yt8531c>;
+-	tx_delay = <0x19>;
+-	rx_delay = <0x05>;
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ 
+ 	mdio {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts
+index 324a8e951f7e49..846b931e16d212 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts
+@@ -15,6 +15,7 @@ / {
+ 
+ &gmac2io {
+ 	phy-handle = <&rtl8211e>;
++	phy-mode = "rgmii";
+ 	tx_delay = <0x24>;
+ 	rx_delay = <0x18>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
+index 82021ffb0a49c2..381b88a912382c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi
+@@ -109,7 +109,6 @@ &gmac2io {
+ 	assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
+ 	assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
+ 	clock_in_out = "input";
+-	phy-mode = "rgmii";
+ 	phy-supply = <&vcc_io>;
+ 	pinctrl-0 = <&rgmiim1_pins>;
+ 	pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 988e6ca32fac94..a9ea4b0daa04c6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -22,11 +22,11 @@ pp900_ap: regulator-pp900-ap {
+ 	};
+ 
+ 	/* EC turns on w/ pp900_usb_en */
+-	pp900_usb: pp900-ap {
++	pp900_usb: regulator-pp900-ap {
+ 	};
+ 
+ 	/* EC turns on w/ pp900_pcie_en */
+-	pp900_pcie: pp900-ap {
++	pp900_pcie: regulator-pp900-ap {
+ 	};
+ 
+ 	pp3000: regulator-pp3000 {
+@@ -126,7 +126,7 @@ pp1800_pcie: regulator-pp1800-pcie {
+ 	};
+ 
+ 	/* Always on; plain and simple */
+-	pp3000_ap: pp3000_emmc: pp3000 {
++	pp3000_ap: pp3000_emmc: regulator-pp3000 {
+ 	};
+ 
+ 	pp1500_ap_io: regulator-pp1500-ap-io {
+@@ -160,7 +160,7 @@ pp3300_disp: regulator-pp3300-disp {
+ 	};
+ 
+ 	/* EC turns on w/ pp3300_usb_en_l */
+-	pp3300_usb: pp3300 {
++	pp3300_usb: regulator-pp3300 {
+ 	};
+ 
+ 	/* gpio is shared with pp1800_pcie and pinctrl is set there */
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+index 19b23b43896583..5e068377a0a28e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+@@ -92,7 +92,7 @@ pp900_s3: regulator-pp900-s3 {
+ 	};
+ 
+ 	/* EC turns on pp1800_s3_en */
+-	pp1800_s3: pp1800 {
++	pp1800_s3: regulator-pp1800 {
+ 	};
+ 
+ 	/* pp3300 children, sorted by name */
+@@ -109,11 +109,11 @@ pp2800_cam: regulator-pp2800-avdd {
+ 	};
+ 
+ 	/* EC turns on pp3300_s0_en */
+-	pp3300_s0: pp3300 {
++	pp3300_s0: regulator-pp3300 {
+ 	};
+ 
+ 	/* EC turns on pp3300_s3_en */
+-	pp3300_s3: pp3300 {
++	pp3300_s3: regulator-pp3300 {
+ 	};
+ 
+ 	/*
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+index 6d9e60b01225e5..7eca1da78cffab 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+@@ -189,39 +189,39 @@ ppvar_gpu: ppvar-gpu {
+ 	};
+ 
+ 	/* EC turns on w/ pp900_ddrpll_en */
+-	pp900_ddrpll: pp900-ap {
++	pp900_ddrpll: regulator-pp900-ap {
+ 	};
+ 
+ 	/* EC turns on w/ pp900_pll_en */
+-	pp900_pll: pp900-ap {
++	pp900_pll: regulator-pp900-ap {
+ 	};
+ 
+ 	/* EC turns on w/ pp900_pmu_en */
+-	pp900_pmu: pp900-ap {
++	pp900_pmu: regulator-pp900-ap {
+ 	};
+ 
+ 	/* EC turns on w/ pp1800_s0_en_l */
+-	pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
++	pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 {
+ 	};
+ 
+ 	/* EC turns on w/ pp1800_avdd_en_l */
+-	pp1800_avdd: pp1800 {
++	pp1800_avdd: regulator-pp1800 {
+ 	};
+ 
+ 	/* EC turns on w/ pp1800_lid_en_l */
+-	pp1800_lid: pp1800_mic: pp1800 {
++	pp1800_lid: pp1800_mic: regulator-pp1800 {
+ 	};
+ 
+ 	/* EC turns on w/ lpddr_pwr_en */
+-	pp1800_lpddr: pp1800 {
++	pp1800_lpddr: regulator-pp1800 {
+ 	};
+ 
+ 	/* EC turns on w/ pp1800_pmu_en_l */
+-	pp1800_pmu: pp1800 {
++	pp1800_pmu: regulator-pp1800 {
+ 	};
+ 
+ 	/* EC turns on w/ pp1800_usb_en_l */
+-	pp1800_usb: pp1800 {
++	pp1800_usb: regulator-pp1800 {
+ 	};
+ 
+ 	pp3000_sd_slot: regulator-pp3000-sd-slot {
+@@ -259,11 +259,11 @@ ppvar_sd_card_io: ppvar-sd-card-io {
+ 	};
+ 
+ 	/* EC turns on w/ pp3300_trackpad_en_l */
+-	pp3300_trackpad: pp3300-trackpad {
++	pp3300_trackpad: regulator-pp3300-trackpad {
+ 	};
+ 
+ 	/* EC turns on w/ usb_a_en */
+-	pp5000_usb_a_vbus: pp5000 {
++	pp5000_usb_a_vbus: regulator-pp5000 {
+ 	};
+ 
+ 	ap_rtc_clk: ap-rtc-clk {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index a337f3fb8377e4..8e73c681268bbe 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -549,10 +549,10 @@ usb_host2_xhci: usb@fcd00000 {
+ 	mmu600_pcie: iommu@fc900000 {
+ 		compatible = "arm,smmu-v3";
+ 		reg = <0x0 0xfc900000 0x0 0x200000>;
+-		interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
++		interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
+ 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ 		#iommu-cells = <1>;
+ 		status = "disabled";
+@@ -561,10 +561,10 @@ mmu600_pcie: iommu@fc900000 {
+ 	mmu600_php: iommu@fcb00000 {
+ 		compatible = "arm,smmu-v3";
+ 		reg = <0x0 0xfcb00000 0x0 0x200000>;
+-		interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
+-			     <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
++		interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
++			     <GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
+ 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ 		#iommu-cells = <1>;
+ 		status = "disabled";
+@@ -2667,9 +2667,9 @@ tsadc: tsadc@fec00000 {
+ 		rockchip,hw-tshut-temp = <120000>;
+ 		rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
+ 		rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
+-		pinctrl-0 = <&tsadc_gpio_func>;
+-		pinctrl-1 = <&tsadc_shut>;
+-		pinctrl-names = "gpio", "otpout";
++		pinctrl-0 = <&tsadc_shut_org>;
++		pinctrl-1 = <&tsadc_gpio_func>;
++		pinctrl-names = "default", "sleep";
+ 		#thermal-sensor-cells = <1>;
+ 		status = "disabled";
+ 	};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+index 92f0ed83c99022..bc6b43a771537b 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+@@ -113,7 +113,7 @@ vcc3v3_lcd: regulator-vcc3v3-lcd {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vcc3v3_lcd";
+ 		enable-active-high;
+-		gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
++		gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&lcdpwr_en>;
+ 		vin-supply = <&vcc3v3_sys>;
+@@ -241,7 +241,7 @@ &pcie3x4 {
+ &pinctrl {
+ 	lcd {
+ 		lcdpwr_en: lcdpwr-en {
+-			rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
++			rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
+ 		};
+ 
+ 		bl_en: bl-en {
+diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+index c3efacab4b9412..aa90a048f319a3 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -77,9 +77,17 @@
+ /*
+  * With 4K page size the real_pte machinery is all nops.
+  */
+-#define __real_pte(e, p, o)		((real_pte_t){(e)})
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
++{
++	return (real_pte_t){pte};
++}
++
+ #define __rpte_to_pte(r)	((r).pte)
+-#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++	return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
++}
+ 
+ #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+ 	do {							         \
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index af97fbb3c257ef..f84e0337cc0296 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu)
+ 	unsigned long addr;
+ 	int err;
+ 
+-	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
++	area = get_vm_area(PAGE_SIZE, 0);
+ 	if (!area) {
+ 		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+ 			cpu);
+@@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep
+ 
+ 	orig_mm = start_using_temp_mm(patching_mm);
+ 
++	kasan_disable_current();
+ 	err = __patch_instructions(patch_addr, code, len, repeat_instr);
++	kasan_enable_current();
+ 
+ 	/* context synchronisation performed by __patch_instructions */
+ 	stop_using_temp_mm(patching_mm, orig_mm);
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 6087d38c723512..ea56a6492c81bd 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -75,7 +75,7 @@ static int cmma_test_essa(void)
+ 		: [reg1] "=&d" (reg1),
+ 		  [reg2] "=&a" (reg2),
+ 		  [rc] "+&d" (rc),
+-		  [tmp] "=&d" (tmp),
++		  [tmp] "+&d" (tmp),
+ 		  "+Q" (get_lowcore()->program_new_psw),
+ 		  "=Q" (old)
+ 		: [psw_old] "a" (&old),
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index b1855a46b2adf6..2bba1d934efb0d 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = {
+ 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
+ 	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
+ 
++	INTEL_EVENT_CONSTRAINT(0x20, 0xf),
++
++	INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
++	INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
+ 	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
+ 
+ 	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
+ 	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
+-	/*
+-	 * Generally event codes < 0x90 are restricted to counters 0-3.
+-	 * The 0x2E and 0x3C are exception, which has no restriction.
+-	 */
+-	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
+ 
+-	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
+-	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
+ 	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
+ 	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
+ 	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
++	INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
+ 	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
+-	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
+ 
+ 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
+-	/*
+-	 * Generally event codes >= 0x90 are likely to have no restrictions.
+-	 * The exception are defined as above.
+-	 */
+-	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
++
++	INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
+ 
+ 	EVENT_CONSTRAINT_END
+ };
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index cb0eca73478995..04b83d5af4c4ba 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1199,7 +1199,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
+ 
+-	INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff),
++	INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc),
+ 	INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index dfbbac92242a84..04d02c746ec0fd 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+ }
+ EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+ 
++static bool qca_filename_has_extension(const char *filename)
++{
++	const char *suffix = strrchr(filename, '.');
++
++	/* File extensions require a dot, but not as the first or last character */
++	if (!suffix || suffix == filename || *(suffix + 1) == '\0')
++		return false;
++
++	/* Avoid matching directories with names that look like files with extensions */
++	return !strchr(suffix, '/');
++}
++
++static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
++{
++	char fwname[64];
++	const char *suffix;
++
++	/* nvm file name has an extension, replace with .bin */
++	if (qca_filename_has_extension(filename)) {
++		suffix = strrchr(filename, '.');
++		strscpy(fwname, filename, suffix - filename + 1);
++		snprintf(fwname + (suffix - filename),
++		       sizeof(fwname) - (suffix - filename), ".bin");
++		/* If nvm file is already the default one, return false to skip the retry. */
++		if (strcmp(fwname, filename) == 0)
++			return false;
++
++		snprintf(filename, max_size, "%s", fwname);
++		return true;
++	}
++	return false;
++}
++
+ static int qca_tlv_check_data(struct hci_dev *hdev,
+ 			       struct qca_fw_config *config,
+ 			       u8 *fw_data, size_t fw_size,
+@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
+ 					   config->fwname, ret);
+ 				return ret;
+ 			}
++		}
++		/* If the board-specific file is missing, try loading the default
++		 * one, unless that was attempted already.
++		 */
++		else if (config->type == TLV_TYPE_NVM &&
++			 qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
++			bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
++			ret = request_firmware(&fw, config->fwname, &hdev->dev);
++			if (ret) {
++				bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
++					   config->fwname, ret);
++				return ret;
++			}
+ 		} else {
+ 			bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ 				   config->fwname, ret);
+@@ -700,34 +746,38 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
+ 	return 0;
+ }
+ 
+-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
++static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
++		const char *stem, enum qca_btsoc_type soc_type,
+ 		struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
+ {
+ 	const char *variant;
++	const char *prefix;
+ 
+-	/* hsp gf chip */
+-	if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+-		variant = "g";
+-	else
+-		variant = "";
++	/* Set the default values for variant and prefix */
++	variant = "";
++	prefix = "b";
+ 
+-	if (bid == 0x0)
+-		snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
+-	else
+-		snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
+-}
++	if (soc_type == QCA_QCA2066)
++		prefix = "";
+ 
+-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
+-					    const char *stem, u8 rom_ver, u16 bid)
+-{
+-	if (bid == 0x0)
+-		snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
+-	else if (bid & 0xff00)
+-		snprintf(cfg->fwname, sizeof(cfg->fwname),
+-			 "qca/%snv%02x.b%x", stem, rom_ver, bid);
+-	else
+-		snprintf(cfg->fwname, sizeof(cfg->fwname),
+-			 "qca/%snv%02x.b%02x", stem, rom_ver, bid);
++	if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
++		/* If the chip is manufactured by GlobalFoundries */
++		if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
++			variant = "g";
++	}
++
++	if (rom_ver != 0) {
++		if (bid == 0x0 || bid == 0xffff)
++			snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
++		else
++			snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
++						variant, prefix, bid);
++	} else {
++		if (bid == 0x0 || bid == 0xffff)
++			snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
++		else
++			snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
++	}
+ }
+ 
+ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+@@ -816,8 +866,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 	/* Download NVM configuration */
+ 	config.type = TLV_TYPE_NVM;
+ 	if (firmware_name) {
+-		snprintf(config.fwname, sizeof(config.fwname),
+-			 "qca/%s", firmware_name);
++		/* The firmware name has an extension, use it directly */
++		if (qca_filename_has_extension(firmware_name)) {
++			snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
++		} else {
++			qca_read_fw_board_id(hdev, &boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++				 firmware_name, soc_type, ver, 0, boardid);
++		}
+ 	} else {
+ 		switch (soc_type) {
+ 		case QCA_WCN3990:
+@@ -836,8 +892,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 				 "qca/apnv%02x.bin", rom_ver);
+ 			break;
+ 		case QCA_QCA2066:
+-			qca_generate_hsp_nvm_name(config.fwname,
+-				sizeof(config.fwname), ver, rom_ver, boardid);
++			qca_get_nvm_name_by_board(config.fwname,
++				sizeof(config.fwname), "hpnv", soc_type, ver,
++				rom_ver, boardid);
+ 			break;
+ 		case QCA_QCA6390:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+@@ -848,13 +905,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ 				 "qca/msnv%02x.bin", rom_ver);
+ 			break;
+ 		case QCA_WCN6855:
+-			snprintf(config.fwname, sizeof(config.fwname),
+-				 "qca/hpnv%02x.bin", rom_ver);
++			qca_read_fw_board_id(hdev, &boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++						  "hpnv", soc_type, ver, rom_ver, boardid);
+ 			break;
+ 		case QCA_WCN7850:
+-			qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
++			qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
++				 "hmtnv", soc_type, ver, rom_ver, boardid);
+ 			break;
+-
+ 		default:
+ 			snprintf(config.fwname, sizeof(config.fwname),
+ 				 "qca/nvm_%08x.bin", soc_ver);
+diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
+index a3fe98cd383820..82815428f8f925 100644
+--- a/drivers/clocksource/jcore-pit.c
++++ b/drivers/clocksource/jcore-pit.c
+@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
+ 	pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+ 
+ 	clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
++	enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
++
++	return 0;
++}
++
++static int jcore_pit_local_teardown(unsigned cpu)
++{
++	struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
++
++	pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
++
++	disable_percpu_irq(pit->ced.irq);
+ 
+ 	return 0;
+ }
+@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
+ 		return -ENOMEM;
+ 	}
+ 
++	irq_set_percpu_devid(pit_irq);
+ 	err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
+ 				 "jcore_pit", jcore_pit_percpu);
+ 	if (err) {
+@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
+ 
+ 	cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ 			  "clockevents/jcore:starting",
+-			  jcore_pit_local_init, NULL);
++			  jcore_pit_local_init, jcore_pit_local_teardown);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index 04c42c83a2bad5..f3da9385ca0d88 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
+ 	 * Configure interrupt enable registers such that Tag, Data RAM related
+ 	 * interrupts are propagated to interrupt controller for servicing
+ 	 */
+-	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
+ 				 TRP0_INTERRUPT_ENABLE,
+ 				 TRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
++	ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
+ 				 DRP0_INTERRUPT_ENABLE,
+ 				 DRP0_INTERRUPT_ENABLE);
+ 	if (ret)
+diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+index a86ab9b35953f7..2641faa329cdd0 100644
+--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
++++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
+ 	if (num > max_num)
+ 		return -EINVAL;
+ 
+-	ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
+-				      0, &t);
++	ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
++				      sizeof(*in) + num * sizeof(__le32), 0, &t);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
+index 907cd149c40a8b..c964f4924359fc 100644
+--- a/drivers/firmware/imx/Kconfig
++++ b/drivers/firmware/imx/Kconfig
+@@ -25,6 +25,7 @@ config IMX_SCU
+ 
+ config IMX_SCMI_MISC_DRV
+ 	tristate "IMX SCMI MISC Protocol driver"
++	depends on ARCH_MXC || COMPILE_TEST
+ 	default y if ARCH_MXC
+ 	help
+ 	  The System Controller Management Interface firmware (SCMI FW) is
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index c4f34a347cb6ea..c36a9dbccd4dd5 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -36,6 +36,7 @@ struct vf610_gpio_port {
+ 	struct clk *clk_port;
+ 	struct clk *clk_gpio;
+ 	int irq;
++	spinlock_t lock; /* protect gpio direction registers */
+ };
+ 
+ #define GPIO_PDOR		0x00
+@@ -124,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+ 	u32 val;
+ 
+ 	if (port->sdata->have_paddr) {
++		guard(spinlock_irqsave)(&port->lock);
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val &= ~mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+@@ -142,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio
+ 	vf610_gpio_set(chip, gpio, value);
+ 
+ 	if (port->sdata->have_paddr) {
++		guard(spinlock_irqsave)(&port->lock);
+ 		val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ 		val |= mask;
+ 		vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+@@ -297,6 +300,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	port->sdata = device_get_match_data(dev);
++	spin_lock_init(&port->lock);
+ 
+ 	dual_base = port->sdata->have_dual_base;
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index ca2f58a2cd45e7..19878bc75e94ca 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3129,6 +3129,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
+ static int gpio_chip_get_multiple(struct gpio_chip *gc,
+ 				  unsigned long *mask, unsigned long *bits)
+ {
++	lockdep_assert_held(&gc->gpiodev->srcu);
++
+ 	if (gc->get_multiple)
+ 		return gc->get_multiple(gc, mask, bits);
+ 	if (gc->get) {
+@@ -3159,6 +3161,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ 				  struct gpio_array *array_info,
+ 				  unsigned long *value_bitmap)
+ {
++	struct gpio_chip *gc;
+ 	int ret, i = 0;
+ 
+ 	/*
+@@ -3170,10 +3173,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ 	    array_size <= array_info->size &&
+ 	    (void *)array_info == desc_array + array_info->size) {
+ 		if (!can_sleep)
+-			WARN_ON(array_info->chip->can_sleep);
++			WARN_ON(array_info->gdev->can_sleep);
++
++		guard(srcu)(&array_info->gdev->srcu);
++		gc = srcu_dereference(array_info->gdev->chip,
++				      &array_info->gdev->srcu);
++		if (!gc)
++			return -ENODEV;
+ 
+-		ret = gpio_chip_get_multiple(array_info->chip,
+-					     array_info->get_mask,
++		ret = gpio_chip_get_multiple(gc, array_info->get_mask,
+ 					     value_bitmap);
+ 		if (ret)
+ 			return ret;
+@@ -3454,6 +3462,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
+ static void gpio_chip_set_multiple(struct gpio_chip *gc,
+ 				   unsigned long *mask, unsigned long *bits)
+ {
++	lockdep_assert_held(&gc->gpiodev->srcu);
++
+ 	if (gc->set_multiple) {
+ 		gc->set_multiple(gc, mask, bits);
+ 	} else {
+@@ -3471,6 +3481,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ 				  struct gpio_array *array_info,
+ 				  unsigned long *value_bitmap)
+ {
++	struct gpio_chip *gc;
+ 	int i = 0;
+ 
+ 	/*
+@@ -3482,14 +3493,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ 	    array_size <= array_info->size &&
+ 	    (void *)array_info == desc_array + array_info->size) {
+ 		if (!can_sleep)
+-			WARN_ON(array_info->chip->can_sleep);
++			WARN_ON(array_info->gdev->can_sleep);
++
++		guard(srcu)(&array_info->gdev->srcu);
++		gc = srcu_dereference(array_info->gdev->chip,
++				      &array_info->gdev->srcu);
++		if (!gc)
++			return -ENODEV;
+ 
+ 		if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
+ 			bitmap_xor(value_bitmap, value_bitmap,
+ 				   array_info->invert_mask, array_size);
+ 
+-		gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
+-				       value_bitmap);
++		gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
+ 
+ 		i = find_first_zero_bit(array_info->set_mask, array_size);
+ 		if (i == array_size)
+@@ -4751,9 +4767,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ {
+ 	struct gpio_desc *desc;
+ 	struct gpio_descs *descs;
++	struct gpio_device *gdev;
+ 	struct gpio_array *array_info = NULL;
+-	struct gpio_chip *gc;
+ 	int count, bitmap_size;
++	unsigned long dflags;
+ 	size_t descs_size;
+ 
+ 	count = gpiod_count(dev, con_id);
+@@ -4774,7 +4791,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 
+ 		descs->desc[descs->ndescs] = desc;
+ 
+-		gc = gpiod_to_chip(desc);
++		gdev = gpiod_to_gpio_device(desc);
+ 		/*
+ 		 * If pin hardware number of array member 0 is also 0, select
+ 		 * its chip as a candidate for fast bitmap processing path.
+@@ -4782,8 +4799,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 		if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
+ 			struct gpio_descs *array;
+ 
+-			bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
+-						    gc->ngpio : count);
++			bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
++						    gdev->ngpio : count);
+ 
+ 			array = krealloc(descs, descs_size +
+ 					 struct_size(array_info, invert_mask, 3 * bitmap_size),
+@@ -4803,7 +4820,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 
+ 			array_info->desc = descs->desc;
+ 			array_info->size = count;
+-			array_info->chip = gc;
++			array_info->gdev = gdev;
+ 			bitmap_set(array_info->get_mask, descs->ndescs,
+ 				   count - descs->ndescs);
+ 			bitmap_set(array_info->set_mask, descs->ndescs,
+@@ -4816,7 +4833,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 			continue;
+ 
+ 		/* Unmark array members which don't belong to the 'fast' chip */
+-		if (array_info->chip != gc) {
++		if (array_info->gdev != gdev) {
+ 			__clear_bit(descs->ndescs, array_info->get_mask);
+ 			__clear_bit(descs->ndescs, array_info->set_mask);
+ 		}
+@@ -4839,9 +4856,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 					    array_info->set_mask);
+ 			}
+ 		} else {
++			dflags = READ_ONCE(desc->flags);
+ 			/* Exclude open drain or open source from fast output */
+-			if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
+-			    gpiochip_line_is_open_source(gc, descs->ndescs))
++			if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
++			    test_bit(FLAG_OPEN_SOURCE, &dflags))
+ 				__clear_bit(descs->ndescs,
+ 					    array_info->set_mask);
+ 			/* Identify 'fast' pins which require invertion */
+@@ -4853,7 +4871,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ 	if (array_info)
+ 		dev_dbg(dev,
+ 			"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
+-			array_info->chip->label, array_info->size,
++			array_info->gdev->label, array_info->size,
+ 			*array_info->get_mask, *array_info->set_mask,
+ 			*array_info->invert_mask);
+ 	return descs;
+diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
+index 83690f72f7e5cb..147156ec502b29 100644
+--- a/drivers/gpio/gpiolib.h
++++ b/drivers/gpio/gpiolib.h
+@@ -114,7 +114,7 @@ extern const char *const gpio_suffixes[];
+  *
+  * @desc:		Array of pointers to the GPIO descriptors
+  * @size:		Number of elements in desc
+- * @chip:		Parent GPIO chip
++ * @gdev:		Parent GPIO device
+  * @get_mask:		Get mask used in fastpath
+  * @set_mask:		Set mask used in fastpath
+  * @invert_mask:	Invert mask used in fastpath
+@@ -126,7 +126,7 @@ extern const char *const gpio_suffixes[];
+ struct gpio_array {
+ 	struct gpio_desc	**desc;
+ 	unsigned int		size;
+-	struct gpio_chip	*chip;
++	struct gpio_device	*gdev;
+ 	unsigned long		*get_mask;
+ 	unsigned long		*set_mask;
+ 	unsigned long		invert_mask[];
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index b55be8889e2ca6..5f140d4541a83c 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -359,6 +359,7 @@ config DRM_TTM_HELPER
+ 	tristate
+ 	depends on DRM
+ 	select DRM_TTM
++	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ 	select FB_CORE if DRM_FBDEV_EMULATION
+ 	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ 	help
+@@ -367,6 +368,7 @@ config DRM_TTM_HELPER
+ config DRM_GEM_DMA_HELPER
+ 	tristate
+ 	depends on DRM
++	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ 	select FB_CORE if DRM_FBDEV_EMULATION
+ 	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ 	help
+@@ -375,6 +377,7 @@ config DRM_GEM_DMA_HELPER
+ config DRM_GEM_SHMEM_HELPER
+ 	tristate
+ 	depends on DRM && MMU
++	select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ 	select FB_CORE if DRM_FBDEV_EMULATION
+ 	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
+ 	help
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index e63efe5c5b75a2..91a874bb0e2415 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -120,9 +120,10 @@
+  * - 3.58.0 - Add GFX12 DCC support
+  * - 3.59.0 - Cleared VRAM
+  * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
++ * - 3.61.0 - Contains fix for RV/PCO compute queues
+  */
+ #define KMS_DRIVER_MAJOR	3
+-#define KMS_DRIVER_MINOR	60
++#define KMS_DRIVER_MINOR	61
+ #define KMS_DRIVER_PATCHLEVEL	0
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0b6f09f2cc9bd0..d28258bb6d2985 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -7439,6 +7439,34 @@ static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
+ }
+ 
++static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
++
++	/* Raven and PCO APUs seem to have stability issues
++	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
++	 * submission and allow again afterwards.
++	 */
++	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
++		gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_UNGATE);
++}
++
++static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
++{
++	struct amdgpu_device *adev = ring->adev;
++
++	/* Raven and PCO APUs seem to have stability issues
++	 * with compute and gfxoff and gfx pg.  Disable gfx pg during
++	 * submission and allow again afterwards.
++	 */
++	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
++		gfx_v9_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
++
++	amdgpu_gfx_enforce_isolation_ring_end_use(ring);
++}
++
+ static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+ 	.name = "gfx_v9_0",
+ 	.early_init = gfx_v9_0_early_init,
+@@ -7615,8 +7643,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
+ 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
+ 	.reset = gfx_v9_0_reset_kcq,
+ 	.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
+-	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+-	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
++	.begin_use = gfx_v9_0_ring_begin_use_compute,
++	.end_use = gfx_v9_0_ring_end_use_compute,
+ };
+ 
+ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+index 02f7ba8c93cd45..7062f12b5b7511 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+@@ -4117,7 +4117,8 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
+ 	0x0000ffff, 0x8bfe7e7e,
+ 	0x8bea6a6a, 0xb97af804,
+ 	0xbe804ec2, 0xbf94fffe,
+-	0xbe804a6c, 0xbfb10000,
++	0xbe804a6c, 0xbe804ec2,
++	0xbf94fffe, 0xbfb10000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0xbf9f0000,
+ 	0xbf9f0000, 0x00000000,
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+index 44772eec9ef4df..96fbb16ceb216d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+@@ -34,41 +34,24 @@
+  *   cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
+  *   sp3 gfx11.sp3 -hex gfx11.hex
+  *
+- * gfx12:
+- *   cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
+- *   sp3 gfx12.sp3 -hex gfx12.hex
+  */
+ 
+ #define CHIP_NAVI10 26
+ #define CHIP_SIENNA_CICHLID 30
+ #define CHIP_PLUM_BONITO 36
+-#define CHIP_GFX12 37
+ 
+ #define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
+ #define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
+ #define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
+ #define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
++#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
+ #define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+ #define SINGLE_STEP_MISSED_WORKAROUND 1	//workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ #define S_COHERENCE glc:1
+ #define V_COHERENCE slc:1 glc:1
+ #define S_WAITCNT_0 s_waitcnt 0
+-#else
+-#define S_COHERENCE scope:SCOPE_SYS
+-#define V_COHERENCE scope:SCOPE_SYS
+-#define S_WAITCNT_0 s_wait_idle
+-
+-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
+-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
+-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
+-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
+-#define HW_REG_MODE HW_REG_WAVE_MODE
+-#endif
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ var SQ_WAVE_STATUS_SPI_PRIO_MASK		= 0x00000006
+ var SQ_WAVE_STATUS_HALT_MASK			= 0x2000
+ var SQ_WAVE_STATUS_ECC_ERR_MASK			= 0x20000
+@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK			= SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
+ var S_STATUS_HALT_MASK				= SQ_WAVE_STATUS_HALT_MASK
+ var S_SAVE_PC_HI_TRAP_ID_MASK			= 0x00FF0000
+ var S_SAVE_PC_HI_HT_MASK			= 0x01000000
+-#else
+-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK	= 0x4
+-var SQ_WAVE_STATE_PRIV_SCC_SHIFT		= 9
+-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK		= 0xC00
+-var SQ_WAVE_STATE_PRIV_HALT_MASK		= 0x4000
+-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK		= 0x8000
+-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT		= 15
+-var SQ_WAVE_STATUS_WAVE64_SHIFT			= 29
+-var SQ_WAVE_STATUS_WAVE64_SIZE			= 1
+-var SQ_WAVE_LDS_ALLOC_GRANULARITY		= 9
+-var S_STATUS_HWREG				= HW_REG_WAVE_STATE_PRIV
+-var S_STATUS_ALWAYS_CLEAR_MASK			= SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+-var S_STATUS_HALT_MASK				= SQ_WAVE_STATE_PRIV_HALT_MASK
+-var S_SAVE_PC_HI_TRAP_ID_MASK			= 0xF0000000
+-#endif
+ 
+ var SQ_WAVE_STATUS_NO_VGPRS_SHIFT		= 24
+ var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT		= 12
+@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 8
+ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 12
+ #endif
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ var SQ_WAVE_TRAPSTS_SAVECTX_MASK		= 0x400
+ var SQ_WAVE_TRAPSTS_EXCP_MASK			= 0x1FF
+ var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT		= 10
+@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE		= 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
+ var S_TRAPSTS_HWREG				= HW_REG_TRAPSTS
+ var S_TRAPSTS_SAVE_CONTEXT_MASK			= SQ_WAVE_TRAPSTS_SAVECTX_MASK
+ var S_TRAPSTS_SAVE_CONTEXT_SHIFT		= SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
+-#else
+-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK	= 0xF
+-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK	= 0x10
+-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT	= 5
+-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK	= 0x20
+-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	= 0x40
+-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT	= 6
+-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK	= 0x80
+-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT	= 7
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	= 0x100
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT	= 8
+-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK	= 0x200
+-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK	= 0x800
+-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK		= 0x80
+-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK	= 0x200
+-
+-var S_TRAPSTS_HWREG				= HW_REG_WAVE_EXCP_FLAG_PRIV
+-var S_TRAPSTS_SAVE_CONTEXT_MASK			= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+-var S_TRAPSTS_SAVE_CONTEXT_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK		= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK		|\
+-						  SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+-var S_TRAPSTS_RESTORE_PART_1_SIZE		= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+-var S_TRAPSTS_RESTORE_PART_2_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+-var S_TRAPSTS_RESTORE_PART_2_SIZE		= SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+-var S_TRAPSTS_RESTORE_PART_3_SHIFT		= SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+-var S_TRAPSTS_RESTORE_PART_3_SIZE		= 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
+-var BARRIER_STATE_SIGNAL_OFFSET			= 16
+-var BARRIER_STATE_VALID_OFFSET			= 0
+-#endif
+ 
+ // bits [31:24] unused by SPI debug data
+ var TTMP11_SAVE_REPLAY_W64H_SHIFT		= 31
+@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
+ 
+ L_HALTED:
+ 	// Host trap may occur while wave is halted.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+-#else
+-	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+-#endif
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+ 
+ L_CHECK_SAVE:
+@@ -336,7 +266,6 @@ L_NOT_HALTED:
+ 	// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ 	// Maskable exceptions only cause the wave to enter the trap handler if
+ 	// their respective bit in mode.excp_en is set.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
+ 	s_cbranch_scc0	L_CHECK_TRAP_ID
+ 
+@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
+ 	s_lshl_b32	ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
+ 	s_and_b32	ttmp2, ttmp2, ttmp3
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+-#else
+-	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+-	s_and_b32	ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+-	s_cbranch_scc0	L_NOT_ADDR_WATCH
+-	s_or_b32	ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+-
+-L_NOT_ADDR_WATCH:
+-	s_getreg_b32	ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+-	s_and_b32	ttmp2, ttmp3, ttmp2
+-	s_cbranch_scc1	L_FETCH_2ND_TRAP
+-#endif
+ 
+ L_CHECK_TRAP_ID:
+ 	// Check trap_id != 0
+@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
+ #if SINGLE_STEP_MISSED_WORKAROUND
+ 	// Prioritize single step exception over context save.
+ 	// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_getreg_b32	ttmp2, hwreg(HW_REG_MODE)
+ 	s_and_b32	ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+-#else
+-	// WAVE_TRAP_CTRL is already in ttmp3.
+-	s_and_b32	ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+-#endif
+ 	s_cbranch_scc1	L_FETCH_2ND_TRAP
+ #endif
+ 
+@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
+ 	s_cbranch_scc1	L_TRAP_CASE
+ 
+ 	// Host trap will not cause trap re-entry.
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
+-#else
+-	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+-	s_and_b32	ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+-#endif
+ 	s_cbranch_scc1	L_EXIT_TRAP
+ 	s_or_b32	s_save_status, s_save_status, S_STATUS_HALT_MASK
+ 
+@@ -457,16 +365,7 @@ L_EXIT_TRAP:
+ 	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
+ 	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_setreg_b32	hwreg(S_STATUS_HWREG), s_save_status
+-#else
+-	// STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+-	// Only restore fields which the trap handler changes.
+-	s_lshr_b32	s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+-	s_setreg_b32	hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+-		SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
+-#endif
+-
+ 	s_rfe_b64	[ttmp0, ttmp1]
+ 
+ L_SAVE:
+@@ -478,14 +377,6 @@ L_SAVE:
+ 	s_endpgm
+ L_HAVE_VGPRS:
+ #endif
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+-	s_bitcmp1_b32	s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+-	s_cbranch_scc0	L_HAVE_VGPRS
+-	s_endpgm
+-L_HAVE_VGPRS:
+-#endif
+-
+ 	s_and_b32	s_save_pc_hi, s_save_pc_hi, 0x0000ffff			//pc[47:32]
+ 	s_mov_b32	s_save_tmp, 0
+ 	s_setreg_b32	hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp	//clear saveCtx bit
+@@ -671,19 +562,6 @@ L_SAVE_HWREG:
+ 	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
+ #endif
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Ensure no further changes to barrier or LDS state.
+-	// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+-	s_barrier_signal	-2
+-	s_barrier_wait	-2
+-
+-	// Re-read final state of BARRIER_COMPLETE field for save.
+-	s_getreg_b32	s_save_tmp, hwreg(S_STATUS_HWREG)
+-	s_and_b32	s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+-	s_andn2_b32	s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+-	s_or_b32	s_save_status, s_save_status, s_save_tmp
+-#endif
+-
+ 	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ 	write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ 	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+@@ -707,21 +585,6 @@ L_SAVE_HWREG:
+ 	s_getreg_b32	s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
+ 	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+-	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
+-	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+-	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
+-
+-	s_get_barrier_state s_save_tmp, -1
+-	s_wait_kmcnt (0)
+-	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
+-#endif
+-
+ #if NO_SQC_STORE
+ 	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ 	s_mov_b32       exec_lo, 0xFFFF
+@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
+ 	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//lds_size is zero?
+ 	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_DONE
+ 
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_barrier								//LDS is used? wait for other waves in the same TG
+-#endif
+ 	s_and_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ 	s_cbranch_scc0	L_SAVE_LDS_DONE
+ 
+@@ -1081,11 +942,6 @@ L_RESTORE:
+ 	s_mov_b32	s_restore_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes)
+ 	s_mov_b32	s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Save s_restore_spi_init_hi for later use.
+-	s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+-#endif
+-
+ 	//determine it is wave32 or wave64
+ 	get_wave_size2(s_restore_size)
+ 
+@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
+ 	// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ 	// Clear DEBUG_EN before and restore MODE after the barrier.
+ 	s_setreg_imm32_b32	hwreg(HW_REG_MODE), 0
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_barrier								//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
+-#endif
+ 
+ 	/* restore HW registers */
+ L_RESTORE_HWREG:
+@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
+ 
+ 	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Restore s_restore_spi_init_hi before the saved value gets clobbered.
+-	s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+-#endif
+-
+ 	read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ 	read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ 	read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
+ 
+ 	s_setreg_b32	hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+-
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-	s_setreg_b32	hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+-
+-	// Only the first wave needs to restore the workgroup barrier.
+-	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-
+-	// Skip over WAVE_STATUS, since there is no state to restore from it
+-	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 4
+-
+-	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+-	S_WAITCNT_0
+-
+-	s_bitcmp1_b32	s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-
+-	// extract the saved signal count from s_restore_tmp
+-	s_lshr_b32	s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+-
+-	// We need to call s_barrier_signal repeatedly to restore the signal
+-	// count of the work group barrier.  The member count is already
+-	// initialized with the number of waves in the work group.
+-L_BARRIER_RESTORE_LOOP:
+-	s_and_b32	s_restore_tmp, s_restore_tmp, s_restore_tmp
+-	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
+-	s_barrier_signal	-1
+-	s_add_i32	s_restore_tmp, s_restore_tmp, -1
+-	s_branch	L_BARRIER_RESTORE_LOOP
+-
+-L_SKIP_BARRIER_RESTORE:
+-#endif
+-
+ 	s_mov_b32	m0, s_restore_m0
+ 	s_mov_b32	exec_lo, s_restore_exec_lo
+ 	s_mov_b32	exec_hi, s_restore_exec_hi
+@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
+ 
+ 	s_setreg_b32	hwreg(S_STATUS_HWREG), s_restore_status			// SCC is included, which is changed by previous salu
+ 
+-#if ASIC_FAMILY >= CHIP_GFX12
+-	// Make barrier and LDS state visible to all waves in the group.
+-	// STATE_PRIV.BARRIER_COMPLETE may change after this point.
+-	s_barrier_signal	-2
+-	s_barrier_wait	-2
+-#endif
+-
+ 	s_rfe_b64	s_restore_pc_lo						//Return to the main shader program and resume execution
+ 
+ L_END_PGM:
+@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
+ end
+ 
+ function get_wave_size2(s_reg)
+-#if ASIC_FAMILY < CHIP_GFX12
+ 	s_getreg_b32	s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+-#else
+-	s_getreg_b32	s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+-#endif
+ 	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
+ end
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+new file mode 100644
+index 00000000000000..7b9d36e5fa4372
+--- /dev/null
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+@@ -0,0 +1,1130 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* To compile this assembly code:
++ *
++ * gfx12:
++ *   cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
++ *   sp3 gfx12.sp3 -hex gfx12.hex
++ */
++
++#define CHIP_GFX12 37
++
++#define SINGLE_STEP_MISSED_WORKAROUND 1	//workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
++
++var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK	= 0x4
++var SQ_WAVE_STATE_PRIV_SCC_SHIFT		= 9
++var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK		= 0xC00
++var SQ_WAVE_STATE_PRIV_HALT_MASK		= 0x4000
++var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK		= 0x8000
++var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT		= 15
++var SQ_WAVE_STATUS_WAVE64_SHIFT			= 29
++var SQ_WAVE_STATUS_WAVE64_SIZE			= 1
++var SQ_WAVE_STATUS_NO_VGPRS_SHIFT		= 24
++var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK	= SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
++var S_SAVE_PC_HI_TRAP_ID_MASK			= 0xF0000000
++
++var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT		= 12
++var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE		= 9
++var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE		= 8
++var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 12
++var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT	= 24
++var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE	= 4
++var SQ_WAVE_LDS_ALLOC_GRANULARITY		= 9
++
++var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK	= 0xF
++var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK	= 0x10
++var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT	= 5
++var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK	= 0x20
++var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	= 0x40
++var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT	= 6
++var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK	= 0x80
++var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT	= 7
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	= 0x100
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT	= 8
++var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK	= 0x200
++var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK	= 0x800
++var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK		= 0x80
++var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK	= 0x200
++
++var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK	|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK	|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK		|\
++						  SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE	= SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT	= SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE	= SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT	= SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
++var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE	= 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
++var BARRIER_STATE_SIGNAL_OFFSET			= 16
++var BARRIER_STATE_VALID_OFFSET			= 0
++
++var TTMP11_DEBUG_TRAP_ENABLED_SHIFT		= 23
++var TTMP11_DEBUG_TRAP_ENABLED_MASK		= 0x800000
++
++// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
++// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
++var S_SAVE_BUF_RSRC_WORD1_STRIDE		= 0x00040000
++var S_SAVE_BUF_RSRC_WORD3_MISC			= 0x10807FAC
++var S_SAVE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
++var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT		= 26
++
++var S_SAVE_PC_HI_FIRST_WAVE_MASK		= 0x80000000
++var S_SAVE_PC_HI_FIRST_WAVE_SHIFT		= 31
++
++var s_sgpr_save_num				= 108
++
++var s_save_spi_init_lo				= exec_lo
++var s_save_spi_init_hi				= exec_hi
++var s_save_pc_lo				= ttmp0
++var s_save_pc_hi				= ttmp1
++var s_save_exec_lo				= ttmp2
++var s_save_exec_hi				= ttmp3
++var s_save_state_priv				= ttmp12
++var s_save_excp_flag_priv			= ttmp15
++var s_save_xnack_mask				= s_save_excp_flag_priv
++var s_wave_size					= ttmp7
++var s_save_buf_rsrc0				= ttmp8
++var s_save_buf_rsrc1				= ttmp9
++var s_save_buf_rsrc2				= ttmp10
++var s_save_buf_rsrc3				= ttmp11
++var s_save_mem_offset				= ttmp4
++var s_save_alloc_size				= s_save_excp_flag_priv
++var s_save_tmp					= ttmp14
++var s_save_m0					= ttmp5
++var s_save_ttmps_lo				= s_save_tmp
++var s_save_ttmps_hi				= s_save_excp_flag_priv
++
++var S_RESTORE_BUF_RSRC_WORD1_STRIDE		= S_SAVE_BUF_RSRC_WORD1_STRIDE
++var S_RESTORE_BUF_RSRC_WORD3_MISC		= S_SAVE_BUF_RSRC_WORD3_MISC
++
++var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
++var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT		= 26
++var S_WAVE_SIZE					= 25
++
++var s_restore_spi_init_lo			= exec_lo
++var s_restore_spi_init_hi			= exec_hi
++var s_restore_mem_offset			= ttmp12
++var s_restore_alloc_size			= ttmp3
++var s_restore_tmp				= ttmp2
++var s_restore_mem_offset_save			= s_restore_tmp
++var s_restore_m0				= s_restore_alloc_size
++var s_restore_mode				= ttmp7
++var s_restore_flat_scratch			= s_restore_tmp
++var s_restore_pc_lo				= ttmp0
++var s_restore_pc_hi				= ttmp1
++var s_restore_exec_lo				= ttmp4
++var s_restore_exec_hi				= ttmp5
++var s_restore_state_priv			= ttmp14
++var s_restore_excp_flag_priv			= ttmp15
++var s_restore_xnack_mask			= ttmp13
++var s_restore_buf_rsrc0				= ttmp8
++var s_restore_buf_rsrc1				= ttmp9
++var s_restore_buf_rsrc2				= ttmp10
++var s_restore_buf_rsrc3				= ttmp11
++var s_restore_size				= ttmp6
++var s_restore_ttmps_lo				= s_restore_tmp
++var s_restore_ttmps_hi				= s_restore_alloc_size
++var s_restore_spi_init_hi_save			= s_restore_exec_hi
++
++shader main
++	asic(DEFAULT)
++	type(CS)
++	wave_size(32)
++
++	s_branch	L_SKIP_RESTORE						//NOT restore. might be a regular trap or save
++
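++	// Restore entry point: waves launched for context restore enter at offset 4,
++	// i.e. at L_JUMP_TO_RESTORE (the first s_branch is 4 bytes).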
++L_JUMP_TO_RESTORE:
++	s_branch	L_RESTORE
++
++L_SKIP_RESTORE:
++	s_getreg_b32	s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV)	//save STATUS since we will change SCC
++
++	// Clear SYS_PRIO: do not save with elevated priority.
++	// Clear POISON_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
++	s_andn2_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
++
++	s_getreg_b32	s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++
++	s_and_b32       ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
++	s_cbranch_scc0	L_NOT_HALTED
++
++L_HALTED:
++	// Host trap may occur while wave is halted.
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++L_CHECK_SAVE:
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
++	s_cbranch_scc1	L_SAVE
++
++	// Wave is halted but neither host trap nor SAVECTX is raised.
++	// Caused by instruction fetch memory violation.
++	// Spin wait until context saved to prevent interrupt storm.
++	s_sleep		0x10
++	s_getreg_b32	s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	s_branch	L_CHECK_SAVE
++
++L_NOT_HALTED:
++	// Let second-level handle non-SAVECTX exception or trap.
++	// Any concurrent SAVECTX will be handled upon re-entry once halted.
++
++	// Check non-maskable exceptions. memory_violation, illegal_instruction,
++	// host_trap, wave_start, wave_end and trap_after_inst exceptions always
++	// cause the wave to enter the trap handler.
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++	// Check for maskable exceptions in EXCP_FLAG_USER.
++	// Maskable exceptions only cause the wave to enter the trap handler if
++	// their respective bit in TRAP_CTRL is set.
++	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
++	s_and_b32	ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
++	s_cbranch_scc0	L_NOT_ADDR_WATCH
++	s_or_b32	ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
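++	// Fold the address-watch hit into the user exception flags so the
++	// TRAP_CTRL mask test below also covers it.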
++
++L_NOT_ADDR_WATCH:
++	s_getreg_b32	ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
++	s_and_b32	ttmp2, ttmp3, ttmp2
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++L_CHECK_TRAP_ID:
++	// Check trap_id != 0
++	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++
++#if SINGLE_STEP_MISSED_WORKAROUND
++	// Prioritize single step exception over context save.
++	// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
++	// WAVE_TRAP_CTRL is already in ttmp3.
++	s_and_b32	ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
++	s_cbranch_scc1	L_FETCH_2ND_TRAP
++#endif
++
++	s_and_b32	ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
++	s_cbranch_scc1	L_SAVE
++
++L_FETCH_2ND_TRAP:
++	// Read second-level TBA/TMA from first-level TMA and jump if available.
++	// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
++	// ttmp12 holds SQ_WAVE_STATE_PRIV
++	s_sendmsg_rtn_b64       [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
++	s_wait_idle
++	s_lshl_b64	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
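++	// The returned TMA is kept in 256-byte units; shift left by 8 to rebuild the byte address.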
++
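++	// Bit 15 of ttmp15 is address bit 47; sign-extend it into the upper bits if set.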
++	s_bitcmp1_b32	ttmp15, 0xF
++	s_cbranch_scc0	L_NO_SIGN_EXTEND_TMA
++	s_or_b32	ttmp15, ttmp15, 0xFFFF0000
++L_NO_SIGN_EXTEND_TMA:
++
++	s_load_dword    ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS		// debug trap enabled flag
++	s_wait_idle
++	s_lshl_b32      ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
++	s_andn2_b32     ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
++	s_or_b32        ttmp11, ttmp11, ttmp2
++
++	s_load_dwordx2	[ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS	// second-level TBA
++	s_wait_idle
++	s_load_dwordx2	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS	// second-level TMA
++	s_wait_idle
++
++	s_and_b64	[ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
++	s_cbranch_scc0	L_NO_NEXT_TRAP						// second-level trap handler has not been set
++	s_setpc_b64	[ttmp2, ttmp3]						// jump to second-level trap handler
++
++L_NO_NEXT_TRAP:
++	// If not caused by trap then halt wave to prevent re-entry.
++	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
++	s_cbranch_scc1	L_TRAP_CASE
++
++	// Host trap will not cause trap re-entry.
++	s_getreg_b32	ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	s_and_b32	ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
++	s_cbranch_scc1	L_EXIT_TRAP
++	s_or_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
++
++	// If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
++	// Rewind the PC to prevent this from occurring.
++	s_sub_u32	ttmp0, ttmp0, 0x8
++	s_subb_u32	ttmp1, ttmp1, 0x0
++
++	s_branch	L_EXIT_TRAP
++
++L_TRAP_CASE:
++	// Advance past trap instruction to prevent re-entry.
++	s_add_u32	ttmp0, ttmp0, 0x4
++	s_addc_u32	ttmp1, ttmp1, 0x0
++
++L_EXIT_TRAP:
++	s_and_b32	ttmp1, ttmp1, 0xFFFF
++
++	// Restore SQ_WAVE_STATUS.
++	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
++	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
++
++	// STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
++	// Only restore fields which the trap handler changes.
++	s_lshr_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
++		SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
++
++	s_rfe_b64	[ttmp0, ttmp1]
++
++L_SAVE:
++	// If VGPRs have been deallocated then terminate the wavefront.
++	// It has no remaining program to run and cannot save without VGPRs.
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
++	s_bitcmp1_b32	s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
++	s_cbranch_scc0	L_HAVE_VGPRS
++	s_endpgm
++L_HAVE_VGPRS:
++
++	s_and_b32	s_save_pc_hi, s_save_pc_hi, 0x0000ffff			//pc[47:32]
++	s_mov_b32	s_save_tmp, 0
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp	//clear saveCtx bit
++
++	/* inform SPI of readiness and wait for SPI's go signal */
++	s_mov_b32	s_save_exec_lo, exec_lo					//save EXEC and use EXEC for the go signal from SPI
++	s_mov_b32	s_save_exec_hi, exec_hi
++	s_mov_b64	exec, 0x0						//clear EXEC to get ready to receive
++
++	s_sendmsg_rtn_b64       [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
++	s_wait_idle
++
++	// Save first_wave flag so we can clear high bits of save address.
++	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
++	s_lshl_b32	s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
++	s_or_b32	s_save_pc_hi, s_save_pc_hi, s_save_tmp
++
++	// Trap temporaries must be saved via VGPR but all VGPRs are in use.
++	// There is no ttmp space to hold the resource constant for VGPR save.
++	// Save v0 by itself since it requires only two SGPRs.
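++	// addtid stores write one dword per lane at base + lane_id*4, so v0 lands contiguously in the save area.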
++	s_mov_b32	s_save_ttmps_lo, exec_lo
++	s_and_b32	s_save_ttmps_hi, exec_hi, 0xFFFF
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
++	v_mov_b32	v0, 0x0
++	s_mov_b32	exec_lo, s_save_ttmps_lo
++	s_mov_b32	exec_hi, s_save_ttmps_hi
++
++	// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
++	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
++	get_wave_size2(s_save_ttmps_hi)
++	get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
++	get_svgpr_size_bytes(s_save_ttmps_hi)
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
++	s_and_b32	s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
++	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
++	s_addc_u32	s_save_ttmps_hi, s_save_ttmps_hi, 0x0
++
++	v_writelane_b32	v0, ttmp4, 0x4
++	v_writelane_b32	v0, ttmp5, 0x5
++	v_writelane_b32	v0, ttmp6, 0x6
++	v_writelane_b32	v0, ttmp7, 0x7
++	v_writelane_b32	v0, ttmp8, 0x8
++	v_writelane_b32	v0, ttmp9, 0x9
++	v_writelane_b32	v0, ttmp10, 0xA
++	v_writelane_b32	v0, ttmp11, 0xB
++	v_writelane_b32	v0, ttmp13, 0xD
++	v_writelane_b32	v0, exec_lo, 0xE
++	v_writelane_b32	v0, exec_hi, 0xF
++
++	s_mov_b32	exec_lo, 0x3FFF
++	s_mov_b32	exec_hi, 0x0
++	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
++	v_readlane_b32	ttmp14, v0, 0xE
++	v_readlane_b32	ttmp15, v0, 0xF
++	s_mov_b32	exec_lo, ttmp14
++	s_mov_b32	exec_hi, ttmp15
++
++	/* setup Resource Constants */
++	s_mov_b32	s_save_buf_rsrc0, s_save_spi_init_lo			//base_addr_lo
++	s_and_b32	s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF	//base_addr_hi
++	s_or_b32	s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
++	s_mov_b32	s_save_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
++	s_mov_b32	s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
++
++	s_mov_b32	s_save_m0, m0
++
++	/* global mem offset */
++	s_mov_b32	s_save_mem_offset, 0x0
++	get_wave_size2(s_wave_size)
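++	// s_wave_size now carries the wave64 flag at bit S_WAVE_SIZE (25).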
++
++	/* save first 4 VGPRs, needed for SGPR save */
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_4VGPR_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_4VGPR_WAVE32
++L_ENABLE_SAVE_4VGPR_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++	s_branch	L_SAVE_4VGPR_WAVE64
++L_SAVE_4VGPR_WAVE32:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
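++	// In wave32 each store covers one VGPR across the wave: 32 lanes x 4 bytes = 128 bytes,
++	// matching the offset:128 steps below.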
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
++	s_branch	L_SAVE_HWREG
++
++L_SAVE_4VGPR_WAVE64:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
++
++	/* save HW registers */
++
++L_SAVE_HWREG:
++	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	v_mov_b32	v0, 0x0							//Offset[31:0] from buffer resource
++	v_mov_b32	v1, 0x0							//Offset[63:32] from buffer resource
++	v_mov_b32	v2, 0x0							//Set of SGPRs for TCP store
++	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
++
++	// Ensure no further changes to barrier or LDS state.
++	// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++
++	// Re-read final state of BARRIER_COMPLETE field for save.
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
++	s_and_b32	s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
++	s_andn2_b32	s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
++	s_or_b32	s_save_state_priv, s_save_state_priv, s_save_tmp
++
++	write_hwreg_to_v2(s_save_m0)
++	write_hwreg_to_v2(s_save_pc_lo)
++	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
++	write_hwreg_to_v2(s_save_tmp)
++	write_hwreg_to_v2(s_save_exec_lo)
++	write_hwreg_to_v2(s_save_exec_hi)
++	write_hwreg_to_v2(s_save_state_priv)
++
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
++	write_hwreg_to_v2(s_save_tmp)
++
++	write_hwreg_to_v2(s_save_xnack_mask)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_MODE)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
++	write_hwreg_to_v2(s_save_m0)
++
++	s_getreg_b32	s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
++	write_hwreg_to_v2(s_save_tmp)
++
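++	// Capture the workgroup barrier state (valid bit and signal count) so the
++	// first restored wave can rebuild it.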
++	s_get_barrier_state s_save_tmp, -1
++	s_wait_kmcnt (0)
++	write_hwreg_to_v2(s_save_tmp)
++
++	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
++	s_mov_b32       exec_lo, 0xFFFF
++	s_mov_b32	exec_hi, 0x0
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	// Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
++	s_mov_b32       exec_lo, 0xFFFFFFFF
++
++	/* save SGPRs */
++	// Save SGPRs before the LDS save so that s0 to s4 can be used during the LDS save...
++
++	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_mov_b32	ttmp13, 0x0						//next VGPR lane to copy SGPR into
++
++	s_mov_b32	m0, 0x0							//SGPR initial index value =0
++	s_nop		0x0							//Manually inserted wait states
++L_SAVE_SGPR_LOOP:
++	// SGPR is allocated in 16 SGPR granularity
++	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
++	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
++	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
++	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
++	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
++	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
++	s_movrels_b64	s12, s12						//s12 = s[12+m0], s13 = s[13+m0]
++	s_movrels_b64	s14, s14						//s14 = s[14+m0], s15 = s[15+m0]
++
++	write_16sgpr_to_v2(s0)
++
++	s_cmp_eq_u32	ttmp13, 0x20						//have 32 VGPR lanes filled?
++	s_cbranch_scc0	L_SAVE_SGPR_SKIP_TCP_STORE
++
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 0x80
++	s_mov_b32	ttmp13, 0x0
++	v_mov_b32	v2, 0x0
++L_SAVE_SGPR_SKIP_TCP_STORE:
++
++	s_add_u32	m0, m0, 16						//next sgpr index
++	s_cmp_lt_u32	m0, 96							//scc = (m0 < first 96 SGPR) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_SGPR_LOOP					//first 96 SGPR save is complete?
++
++	//save the remaining 12 SGPRs
++	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
++	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
++	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
++	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
++	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
++	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
++	write_12sgpr_to_v2(s0)
++
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	/* save LDS */
++
++L_SAVE_LDS:
++	// Change EXEC to all threads...
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_LDS_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_LDS_NORMAL
++L_ENABLE_SAVE_LDS_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_SAVE_LDS_NORMAL:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
++	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//lds_size is zero?
++	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_LDS_DONE
++
++	s_and_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_SAVE_LDS_DONE
++
++	// first wave does the LDS save;
++
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
++	s_mov_b32	s_save_buf_rsrc2, s_save_alloc_size			//NUM_RECORDS in bytes
++
++	// LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
++	//
++	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
++	get_svgpr_size_bytes(s_save_tmp)
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	//compute each lane's byte address (lane_id * 4, covering 0 to 63*4) in vgpr v0
++	v_mbcnt_lo_u32_b32	v0, -1, 0
++	v_mbcnt_hi_u32_b32	v0, -1, v0
++	v_mul_u32_u24	v0, 4, v0
++
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_mov_b32	m0, 0x0
++	s_cbranch_scc1	L_SAVE_LDS_W64
++
++L_SAVE_LDS_W32:
++	s_mov_b32	s3, 128
++	s_nop		0
++	s_nop		0
++	s_nop		0
++L_SAVE_LDS_LOOP_W32:
++	ds_read_b32	v1, v0
++	s_wait_idle
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	s_add_u32	m0, m0, s3						//every buffer_store_dword does 128 bytes
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
++	v_add_nc_u32	v0, v0, 128						//mem offset increased by 128 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_LDS_LOOP_W32					//LDS save is complete?
++
++	s_branch	L_SAVE_LDS_DONE
++
++L_SAVE_LDS_W64:
++	s_mov_b32	s3, 256
++	s_nop		0
++	s_nop		0
++	s_nop		0
++L_SAVE_LDS_LOOP_W64:
++	ds_read_b32	v1, v0
++	s_wait_idle
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++
++	s_add_u32	m0, m0, s3						//every buffer_store_dword does 256 bytes
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
++	v_add_nc_u32	v0, v0, 256						//mem offset increased by 256 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_LDS_LOOP_W64					//LDS save is complete?
++
++L_SAVE_LDS_DONE:
++	/* save VGPRs - save the rest of the VGPRs */
++L_SAVE_VGPR:
++	// VGPR SR memory offset: 0
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_SAVE_VGPR_EXEC_HI
++	s_mov_b32	s_save_mem_offset, (0+128*4)				// for the remaining VGPRs
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_SAVE_VGPR_NORMAL
++L_ENABLE_SAVE_VGPR_EXEC_HI:
++	s_mov_b32	s_save_mem_offset, (0+256*4)				// for the remaining VGPRs
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_SAVE_VGPR_NORMAL:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_save_alloc_size, s_save_alloc_size, 1
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 2			//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
++	//determine whether it is wave32 or wave64
++	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_SAVE_VGPR_WAVE64
++
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR Allocated in 4-GPR granularity
++
++	// VGPR store using dw burst
++	s_mov_b32	m0, 0x4							//VGPR initial index value =4
++	s_cmp_lt_u32	m0, s_save_alloc_size
++	s_cbranch_scc0	L_SAVE_VGPR_END
++
++L_SAVE_VGPR_W32_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	v_movrels_b32	v1, v1							//v1 = v[1+m0]
++	v_movrels_b32	v2, v2							//v2 = v[2+m0]
++	v_movrels_b32	v3, v3							//v3 = v[3+m0]
++
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
++
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128*4		//every buffer_store_dword does 128 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_VGPR_W32_LOOP					//VGPR save is complete?
++
++	s_branch	L_SAVE_VGPR_END
++
++L_SAVE_VGPR_WAVE64:
++	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR store using dw burst
++	s_mov_b32	m0, 0x4							//VGPR initial index value =4
++	s_cmp_lt_u32	m0, s_save_alloc_size
++	s_cbranch_scc0	L_SAVE_SHARED_VGPR
++
++L_SAVE_VGPR_W64_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	v_movrels_b32	v1, v1							//v1 = v[1+m0]
++	v_movrels_b32	v2, v2							//v2 = v[2+m0]
++	v_movrels_b32	v3, v3							//v3 = v[3+m0]
++
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
++	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
++
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 256*4		//every buffer_store_dword does 256 bytes
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_VGPR_W64_LOOP					//VGPR save is complete?
++
++L_SAVE_SHARED_VGPR:
++	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
++	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
++	s_cbranch_scc0	L_SAVE_VGPR_END						//no shared_vgpr used? jump to L_SAVE_VGPR_END
++	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 3			//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
++	//m0 now holds the normal vgpr count; add the shared_vgpr count to get the total count.
++	//shared_vgpr save starts at index m0
++	s_add_u32	s_save_alloc_size, s_save_alloc_size, m0
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0x00000000
++
++L_SAVE_SHARED_VGPR_WAVE64_LOOP:
++	v_movrels_b32	v0, v0							//v0 = v[0+m0]
++	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
++	s_add_u32	m0, m0, 1						//next vgpr index
++	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128
++	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_SAVE_SHARED_VGPR_WAVE64_LOOP				//SHARED_VGPR save is complete?
++
++L_SAVE_VGPR_END:
++	s_branch	L_END_PGM
++
++L_RESTORE:
++	/* Setup Resource Constants */
++	s_mov_b32	s_restore_buf_rsrc0, s_restore_spi_init_lo		//base_addr_lo
++	s_and_b32	s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF	//base_addr_hi
++	s_or_b32	s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
++	s_mov_b32	s_restore_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes)
++	s_mov_b32	s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
++
++	// Save s_restore_spi_init_hi for later use.
++	s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
++
++	//determine whether it is wave32 or wave64
++	get_wave_size2(s_restore_size)
++
++	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_RESTORE_VGPR
++
++	/* restore LDS */
++L_RESTORE_LDS:
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_RESTORE_LDS_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_RESTORE_LDS_NORMAL
++L_ENABLE_RESTORE_LDS_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_RESTORE_LDS_NORMAL:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
++	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//lds_size is zero?
++	s_cbranch_scc0	L_RESTORE_VGPR						//no lds used? jump to L_RESTORE_VGPR
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
++	s_mov_b32	s_restore_buf_rsrc2, s_restore_alloc_size		//NUM_RECORDS in bytes
++
++	// LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
++	//
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_mov_b32	m0, 0x0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64
++
++L_RESTORE_LDS_LOOP_W32:
++	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
++	s_wait_idle
++	ds_store_addtid_b32     v0
++	s_add_u32	m0, m0, 128						// 128 bytes per pass (32 lanes x 4 bytes)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128		//mem offset increased by 128 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W32					//LDS restore is complete?
++	s_branch	L_RESTORE_VGPR
++
++L_RESTORE_LDS_LOOP_W64:
++	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
++	s_wait_idle
++	ds_store_addtid_b32     v0
++	s_add_u32	m0, m0, 256						// 256 bytes per pass (64 lanes x 4 bytes)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256		//mem offset increased by 256 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64					//LDS restore is complete?
++
++	/* restore VGPRs */
++L_RESTORE_VGPR:
++	// VGPR SR memory offset : 0
++	s_mov_b32	s_restore_mem_offset, 0x0
++	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_ENABLE_RESTORE_VGPR_EXEC_HI
++	s_mov_b32	exec_hi, 0x00000000
++	s_branch	L_RESTORE_VGPR_NORMAL
++L_ENABLE_RESTORE_VGPR_EXEC_HI:
++	s_mov_b32	exec_hi, 0xFFFFFFFF
++L_RESTORE_VGPR_NORMAL:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, 1
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 2		//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
++	//determine whether it is wave32 or wave64
++	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
++	s_and_b32	m0, m0, 1
++	s_cmp_eq_u32	m0, 1
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR load using dw burst
++	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts with v4, v0 will be the last
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4
++	s_mov_b32	m0, 4							//VGPR initial index value = 4
++	s_cmp_lt_u32	m0, s_restore_alloc_size
++	s_cbranch_scc0	L_RESTORE_SGPR
++
++L_RESTORE_VGPR_WAVE32_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	v_movreld_b32	v1, v1
++	v_movreld_b32	v2, v2
++	v_movreld_b32	v3, v3
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4	//every buffer_load_dword does 128 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE32_LOOP				//VGPR restore (except v0) is complete?
++
++	/* VGPR restore on v0 */
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
++	s_wait_idle
++
++	s_branch	L_RESTORE_SGPR
++
++L_RESTORE_VGPR_WAVE64:
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// VGPR load using dw burst
++	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts with v4, v0 will be the last
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4
++	s_mov_b32	m0, 4							//VGPR initial index value = 4
++	s_cmp_lt_u32	m0, s_restore_alloc_size
++	s_cbranch_scc0	L_RESTORE_SHARED_VGPR
++
++L_RESTORE_VGPR_WAVE64_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	v_movreld_b32	v1, v1
++	v_movreld_b32	v2, v2
++	v_movreld_b32	v3, v3
++	s_add_u32	m0, m0, 4						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4	//every buffer_load_dword does 256 bytes
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64_LOOP				//VGPR restore (except v0) is complete?
++
++L_RESTORE_SHARED_VGPR:
++	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)	//shared_vgpr_size
++	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
++	s_cbranch_scc0	L_RESTORE_V0						//no shared_vgpr used?
++	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 3		//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
++	//m0 now holds the normal vgpr count; add the shared_vgpr count to get the total count.
++	//shared_vgpr restore starts at index m0
++	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, m0
++	s_mov_b32	exec_lo, 0xFFFFFFFF
++	s_mov_b32	exec_hi, 0x00000000
++L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
++	s_wait_idle
++	v_movreld_b32	v0, v0							//v[0+m0] = v0
++	s_add_u32	m0, m0, 1						//next vgpr index
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128
++	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
++	s_cbranch_scc1	L_RESTORE_SHARED_VGPR_WAVE64_LOOP			//VGPR restore (except v0) is complete?
++
++	s_mov_b32	exec_hi, 0xFFFFFFFF					//restore exec_hi before restoring V0!!
++
++	/* VGPR restore on v0 */
++L_RESTORE_V0:
++	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
++	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
++	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
++	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
++	s_wait_idle
++
++	/* restore SGPRs */
++	//restore pattern: 4+8+16*6 = 108 SGPRs
++	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
++L_RESTORE_SGPR:
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 is not saved
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	s_mov_b32	m0, s_sgpr_save_num
++
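++	// The SGPR save area is read back top-down: each reader below pre-decrements
++	// the offset before loading.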
++	read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 4						// Restore from S[0] to S[104]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++
++	read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 8						// Restore from S[0] to S[96]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++	s_movreld_b64	s4, s4
++	s_movreld_b64	s6, s6
++
++ L_RESTORE_SGPR_LOOP:
++	read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_sub_u32	m0, m0, 16						// Restore from S[n] to S[0]
++	s_nop		0							// hazard SALU M0=> S_MOVREL
++
++	s_movreld_b64	s0, s0							//s[0+m0] = s0
++	s_movreld_b64	s2, s2
++	s_movreld_b64	s4, s4
++	s_movreld_b64	s6, s6
++	s_movreld_b64	s8, s8
++	s_movreld_b64	s10, s10
++	s_movreld_b64	s12, s12
++	s_movreld_b64	s14, s14
++
++	s_cmp_eq_u32	m0, 0							//scc = (m0 == 0) ? 1 : 0
++	s_cbranch_scc0	L_RESTORE_SGPR_LOOP
++
++	// s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
++	// Clear DEBUG_EN before and restore MODE after the barrier.
++	s_setreg_imm32_b32	hwreg(HW_REG_WAVE_MODE), 0
++
++	/* restore HW registers */
++L_RESTORE_HWREG:
++	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
++	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
++	get_svgpr_size_bytes(s_restore_tmp)
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
++
++	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes
++
++	// Restore s_restore_spi_init_hi before the saved value gets clobbered.
++	s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
++
++	read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
++	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
++
++	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++	s_setreg_b32	hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
++
++	// Only the first wave needs to restore the workgroup barrier.
++	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++
++	// Skip over WAVE_STATUS, since there is no state to restore from it
++	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 4
++
++	read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
++	s_wait_idle
++
++	s_bitcmp1_b32	s_restore_tmp, BARRIER_STATE_VALID_OFFSET
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++
++	// extract the saved signal count from s_restore_tmp
++	s_lshr_b32	s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
++
++	// We need to call s_barrier_signal repeatedly to restore the signal
++	// count of the work group barrier.  The member count is already
++	// initialized with the number of waves in the work group.
++L_BARRIER_RESTORE_LOOP:
++	s_and_b32	s_restore_tmp, s_restore_tmp, s_restore_tmp
++	s_cbranch_scc0	L_SKIP_BARRIER_RESTORE
++	s_barrier_signal	-1
++	s_add_i32	s_restore_tmp, s_restore_tmp, -1
++	s_branch	L_BARRIER_RESTORE_LOOP
++
++L_SKIP_BARRIER_RESTORE:
++
++	s_mov_b32	m0, s_restore_m0
++	s_mov_b32	exec_lo, s_restore_exec_lo
++	s_mov_b32	exec_hi, s_restore_exec_hi
++
++	// EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
++	// Only restore the other fields to avoid clobbering them.
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
++	s_lshr_b32	s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
++	s_lshr_b32	s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
++	s_setreg_b32	hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_MODE), s_restore_mode
++
++	// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
++	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
++	get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
++	get_svgpr_size_bytes(s_restore_ttmps_hi)
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
++	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
++	s_addc_u32	s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
++	s_and_b32	s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
++	s_load_dwordx4	[ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
++	s_load_dwordx4	[ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
++	s_load_dword	ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
++	s_wait_idle
++
++	s_and_b32	s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff		//pc[47:32] //Do it here in order not to affect STATUS
++	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
++	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
++
++	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv	// SCC is included, which is changed by previous salu
++
++	// Make barrier and LDS state visible to all waves in the group.
++	// STATE_PRIV.BARRIER_COMPLETE may change after this point.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++
++	s_rfe_b64	s_restore_pc_lo						//Return to the main shader program and resume execution
++
++L_END_PGM:
++	// Make sure that no wave of the workgroup can exit the trap handler
++	// before the workgroup barrier state is saved.
++	s_barrier_signal	-2
++	s_barrier_wait	-2
++	s_endpgm_saved
++end
++
++function write_hwreg_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	v_writelane_b32	v2, s, m0
++	s_add_u32	m0, m0, 0x1
++end
++
++
++function write_16sgpr_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
++		v_writelane_b32	v2, s[sgpr_idx], ttmp13
++		s_add_u32	ttmp13, ttmp13, 0x1
++	end
++end
++
++function write_12sgpr_to_v2(s)
++	// Copy into VGPR for later TCP store.
++	for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
++		v_writelane_b32	v2, s[sgpr_idx], ttmp13
++		s_add_u32	ttmp13, ttmp13, 0x1
++	end
++end
++
++function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
++	s_buffer_load_dword	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++	s_add_u32	s_mem_offset, s_mem_offset, 4
++end
++
++function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*16
++	s_buffer_load_dwordx16	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*8
++	s_buffer_load_dwordx8	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
++	s_sub_u32	s_mem_offset, s_mem_offset, 4*4
++	s_buffer_load_dwordx4	s, s_rsrc, s_mem_offset scope:SCOPE_SYS
++end
++
++function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
++	s_getreg_b32	s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
++	s_add_u32	s_vgpr_size_byte, s_vgpr_size_byte, 1
++	s_bitcmp1_b32	s_size, S_WAVE_SIZE
++	s_cbranch_scc1	L_ENABLE_SHIFT_W64
++	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)		//Number of VGPRs = (vgpr_size + 1) * 4 * 32 * 4   (non-zero value)
++	s_branch	L_SHIFT_DONE
++L_ENABLE_SHIFT_W64:
++	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)		//Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4   (non-zero value)
++L_SHIFT_DONE:
++end
++
++function get_svgpr_size_bytes(s_svgpr_size_byte)
++	s_getreg_b32	s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
++	s_lshl_b32	s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
++end
++
++function get_sgpr_size_bytes
++	return 512
++end
++
++function get_hwreg_size_bytes
++	return 128
++end
++
++function get_wave_size2(s_reg)
++	s_getreg_b32	s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
++	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
++end
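The restore path above replays the saved barrier signal count by issuing
s_barrier_signal once per saved signal. Reduced to C-like pseudocode purely
for orientation (the real code is wave-level shader assembly;
barrier_signal() stands in for the hardware instruction):

        /* Sketch of L_BARRIER_RESTORE_LOOP above, illustrative only.
         * signal_count is the value extracted from the saved barrier
         * state; the member count needs no replay because hardware
         * re-initializes it with the number of waves in the workgroup. */
        while (signal_count != 0) {
                barrier_signal(-1);     /* re-arm one pending signal */
                signal_count--;
        }
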
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+index ab1132bc896a32..d9955c5d2e5ed5 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+@@ -174,7 +174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
+ ###############################################################################
+ # DCN35
+ ###############################################################################
+-CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
++CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o
+ 
+ AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+index 0e243f4344d050..4c3e58c730b11c 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+@@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
+ 			BREAK_TO_DEBUGGER();
+ 			return NULL;
+ 		}
++		if (ctx->dce_version == DCN_VERSION_3_51)
++			dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
++		else
++			dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ 
+-		dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ 		return &clk_mgr->base.base;
+ 	}
+ 	break;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+new file mode 100644
+index 00000000000000..6a6ae618650b6d
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+@@ -0,0 +1,140 @@
++/*
++ * Copyright 2024 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "core_types.h"
++#include "dcn35_clk_mgr.h"
++
++#define DCN_BASE__INST0_SEG1 0x000000C0
++#define mmCLK1_CLK_PLL_REQ 0x16E37
++
++#define mmCLK1_CLK0_DFS_CNTL 0x16E69
++#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
++#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
++#define mmCLK1_CLK3_DFS_CNTL 0x16E72
++#define mmCLK1_CLK4_DFS_CNTL 0x16E75
++#define mmCLK1_CLK5_DFS_CNTL 0x16E78
++
++#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC
++#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD
++#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE
++#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF
++#define mmCLK1_CLK4_CURRENT_CNT 0x16F00
++#define mmCLK1_CLK5_CURRENT_CNT 0x16F01
++
++#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
++#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
++#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
++#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
++#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
++#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
++
++#define mmCLK1_CLK0_DS_CNTL 0x16E83
++#define mmCLK1_CLK1_DS_CNTL 0x16E8C
++#define mmCLK1_CLK2_DS_CNTL 0x16E95
++#define mmCLK1_CLK3_DS_CNTL 0x16E9E
++#define mmCLK1_CLK4_DS_CNTL 0x16EA7
++#define mmCLK1_CLK5_DS_CNTL 0x16EB0
++
++#define mmCLK1_CLK0_ALLOW_DS 0x16E84
++#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
++#define mmCLK1_CLK2_ALLOW_DS 0x16E96
++#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
++#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
++#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
++
++#define mmCLK5_spll_field_8 0x1B04B
++#define mmDENTIST_DISPCLK_CNTL 0x0124
++#define regDENTIST_DISPCLK_CNTL 0x0064
++#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
++
++#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
++#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
++#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
++#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
++#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
++#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
++
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
++
++// DENTIST_DISPCLK_CNTL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
++
++#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
++
++#define REG(reg) \
++	(clk_mgr->regs->reg)
++
++#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
++
++#define BASE(seg) BASE_INNER(seg)
++
++#define SR(reg_name)\
++		.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \
++					reg ## reg_name
++
++#define CLK_SR_DCN35(reg_name)\
++	.reg_name = mm ## reg_name
++
++static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
++	CLK_REG_LIST_DCN35()
++};
++
++static const struct clk_mgr_shift clk_mgr_shift_dcn351 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
++};
++
++static const struct clk_mgr_mask clk_mgr_mask_dcn351 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
++};
++
++#define TO_CLK_MGR_DCN35(clk_mgr)\
++	container_of(clk_mgr, struct clk_mgr_dcn35, base)
++
++
++void dcn351_clk_mgr_construct(
++		struct dc_context *ctx,
++		struct clk_mgr_dcn35 *clk_mgr,
++		struct pp_smu_funcs *pp_smu,
++		struct dccg *dccg)
++{
++	/* register offsets changed */
++	clk_mgr->base.regs = &clk_mgr_regs_dcn351;
++	clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351;
++	clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351;
++
++	dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
++
++}
++
++
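The new DCN3.51 constructor works by substituting its own register tables
before delegating to the shared DCN3.5 constructor; all later register
traffic goes through the clk_mgr->regs indirection, so the same code reads
per-variant offsets without runtime version checks. A minimal sketch of that
indirection, using the REG()/REG_READ() accessors seen in these files (the
surrounding statement is illustrative):

        /* With clk_mgr->base.regs == &clk_mgr_regs_dcn351, the shared
         * DCN3.5 code resolves REG(CLK1_CLK_PLL_REQ) to the DCN3.51
         * mm* offset automatically. */
        u32 pll_req = REG_READ(CLK1_CLK_PLL_REQ);
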
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+index b77333817f1895..2e435ee363fede 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+@@ -36,15 +36,11 @@
+ #include "dcn20/dcn20_clk_mgr.h"
+ 
+ 
+-
+-
+ #include "reg_helper.h"
+ #include "core_types.h"
+ #include "dcn35_smu.h"
+ #include "dm_helpers.h"
+ 
+-/* TODO: remove this include once we ported over remaining clk mgr functions*/
+-#include "dcn30/dcn30_clk_mgr.h"
+ #include "dcn31/dcn31_clk_mgr.h"
+ 
+ #include "dc_dmub_srv.h"
+@@ -55,35 +51,102 @@
+ #define DC_LOGGER \
+ 	clk_mgr->base.base.ctx->logger
+ 
++#define DCN_BASE__INST0_SEG1 0x000000C0
++#define mmCLK1_CLK_PLL_REQ 0x16E37
++
++#define mmCLK1_CLK0_DFS_CNTL 0x16E69
++#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
++#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
++#define mmCLK1_CLK3_DFS_CNTL 0x16E72
++#define mmCLK1_CLK4_DFS_CNTL 0x16E75
++#define mmCLK1_CLK5_DFS_CNTL 0x16E78
++
++#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
++#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
++#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
++#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
++#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
++#define mmCLK1_CLK5_CURRENT_CNT 0x16F00
++
++#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
++#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
++#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
++#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
++#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
++#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
++
++#define mmCLK1_CLK0_DS_CNTL 0x16E83
++#define mmCLK1_CLK1_DS_CNTL 0x16E8C
++#define mmCLK1_CLK2_DS_CNTL 0x16E95
++#define mmCLK1_CLK3_DS_CNTL 0x16E9E
++#define mmCLK1_CLK4_DS_CNTL 0x16EA7
++#define mmCLK1_CLK5_DS_CNTL 0x16EB0
++
++#define mmCLK1_CLK0_ALLOW_DS 0x16E84
++#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
++#define mmCLK1_CLK2_ALLOW_DS 0x16E96
++#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
++#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
++#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
++
++#define mmCLK5_spll_field_8 0x1B24B
++#define mmDENTIST_DISPCLK_CNTL 0x0124
++#define regDENTIST_DISPCLK_CNTL 0x0064
++#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
++
++#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
++#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
++#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
++#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
++#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
++#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
++
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
++#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
++// DENTIST_DISPCLK_CNTL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
++#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
++
++#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
++
++#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
++#undef FN
++#define FN(reg_name, field_name) \
++	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+ 
+-#define regCLK1_CLK_PLL_REQ			0x0237
+-#define regCLK1_CLK_PLL_REQ_BASE_IDX		0
++#define REG(reg) \
++	(clk_mgr->regs->reg)
+ 
+-#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT	0x0
+-#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT	0xc
+-#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT	0x10
+-#define CLK1_CLK_PLL_REQ__FbMult_int_MASK	0x000001FFL
+-#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK	0x0000F000L
+-#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK	0xFFFF0000L
++#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+ 
+-#define regCLK1_CLK2_BYPASS_CNTL			0x029c
+-#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX	0
++#define BASE(seg) BASE_INNER(seg)
+ 
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT	0x0
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT	0x10
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK		0x00000007L
+-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK		0x000F0000L
++#define SR(reg_name)\
++		.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \
++					reg ## reg_name
+ 
+-#define regCLK5_0_CLK5_spll_field_8				0x464b
+-#define regCLK5_0_CLK5_spll_field_8_BASE_IDX	0
++#define CLK_SR_DCN35(reg_name)\
++	.reg_name = mm ## reg_name
+ 
+-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT	0xd
+-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK		0x00002000L
++static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
++	CLK_REG_LIST_DCN35()
++};
+ 
+-#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
++static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
++};
+ 
+-#define REG(reg_name) \
+-	(ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
++static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
++	CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
++};
+ 
+ #define TO_CLK_MGR_DCN35(clk_mgr)\
+ 	container_of(clk_mgr, struct clk_mgr_dcn35, base)
+@@ -452,7 +515,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+ 	struct fixed31_32 pll_req;
+ 	unsigned int fbmult_frac_val = 0;
+ 	unsigned int fbmult_int_val = 0;
+-	struct dc_context *ctx = clk_mgr->base.ctx;
+ 
+ 	/*
+ 	 * Register value of fbmult is in 8.16 format, we are converting to 314.32
+@@ -512,12 +574,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
+ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+ {
+ 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+-	struct dc_context *ctx = clk_mgr->base.ctx;
++
+ 	uint32_t ssc_enable;
+ 
+-	REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
++	ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ 
+-	return ssc_enable == 1;
++	return ssc_enable != 0;
+ }
+ 
+ static void init_clk_states(struct clk_mgr *clk_mgr)
+@@ -642,10 +704,10 @@ static struct dcn35_ss_info_table ss_info_table = {
+ 
+ static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+ {
+-	struct dc_context *ctx = clk_mgr->base.ctx;
+-	uint32_t clock_source;
++	uint32_t clock_source = 0;
++
++	clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;
+ 
+-	REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ 	// If it's DFS mode, clock_source is 0.
+ 	if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ 		clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+@@ -1112,6 +1174,12 @@ void dcn35_clk_mgr_construct(
+ 	clk_mgr->base.dprefclk_ss_divider = 1000;
+ 	clk_mgr->base.ss_on_dprefclk = false;
+ 	clk_mgr->base.dfs_ref_freq_khz = 48000;
++	if (ctx->dce_version == DCN_VERSION_3_5) {
++		clk_mgr->base.regs = &clk_mgr_regs_dcn35;
++		clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
++		clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
++	}
++
+ 
+ 	clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
+ 				clk_mgr->base.base.ctx,
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+index 1203dc605b12c4..a12a9bf90806ed 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+@@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx,
+ 
+ void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+ 
++void dcn351_clk_mgr_construct(struct dc_context *ctx,
++		struct clk_mgr_dcn35 *clk_mgr,
++		struct pp_smu_funcs *pp_smu,
++		struct dccg *dccg);
+ #endif //__DCN35_CLK_MGR_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+index c2dd061892f4d9..7a1ca1e98059b0 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+@@ -166,6 +166,41 @@ enum dentist_divider_range {
+     CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \
+     CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT)
+ 
++#define CLK_REG_LIST_DCN35()	  \
++	CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \
++	CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \
++	CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL),\
++	CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \
++	CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
++	CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
++	CLK_SR_DCN35(CLK5_spll_field_8), \
++	SR(DENTIST_DISPCLK_CNTL), \
++
+ #define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
+ 	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
+ 	CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\
+@@ -236,6 +271,7 @@ struct clk_mgr_registers {
+ 	uint32_t CLK1_CLK2_DFS_CNTL;
+ 	uint32_t CLK1_CLK3_DFS_CNTL;
+ 	uint32_t CLK1_CLK4_DFS_CNTL;
++	uint32_t CLK1_CLK5_DFS_CNTL;
+ 	uint32_t CLK2_CLK2_DFS_CNTL;
+ 
+ 	uint32_t CLK1_CLK0_CURRENT_CNT;
+@@ -243,11 +279,34 @@ struct clk_mgr_registers {
+     uint32_t CLK1_CLK2_CURRENT_CNT;
+     uint32_t CLK1_CLK3_CURRENT_CNT;
+     uint32_t CLK1_CLK4_CURRENT_CNT;
++	uint32_t CLK1_CLK5_CURRENT_CNT;
+ 
+ 	uint32_t CLK0_CLK0_DFS_CNTL;
+ 	uint32_t CLK0_CLK1_DFS_CNTL;
+ 	uint32_t CLK0_CLK3_DFS_CNTL;
+ 	uint32_t CLK0_CLK4_DFS_CNTL;
++	uint32_t CLK1_CLK0_BYPASS_CNTL;
++	uint32_t CLK1_CLK1_BYPASS_CNTL;
++	uint32_t CLK1_CLK2_BYPASS_CNTL;
++	uint32_t CLK1_CLK3_BYPASS_CNTL;
++	uint32_t CLK1_CLK4_BYPASS_CNTL;
++	uint32_t CLK1_CLK5_BYPASS_CNTL;
++
++	uint32_t CLK1_CLK0_DS_CNTL;
++	uint32_t CLK1_CLK1_DS_CNTL;
++	uint32_t CLK1_CLK2_DS_CNTL;
++	uint32_t CLK1_CLK3_DS_CNTL;
++	uint32_t CLK1_CLK4_DS_CNTL;
++	uint32_t CLK1_CLK5_DS_CNTL;
++
++	uint32_t CLK1_CLK0_ALLOW_DS;
++	uint32_t CLK1_CLK1_ALLOW_DS;
++	uint32_t CLK1_CLK2_ALLOW_DS;
++	uint32_t CLK1_CLK3_ALLOW_DS;
++	uint32_t CLK1_CLK4_ALLOW_DS;
++	uint32_t CLK1_CLK5_ALLOW_DS;
++	uint32_t CLK5_spll_field_8;
++
+ };
+ 
+ struct clk_mgr_shift {
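For reference, each CLK_SR_DCN35() entry in the new CLK_REG_LIST_DCN35()
relies on token pasting: it expands to a designated initializer binding a
clk_mgr_registers field to its mm-prefixed offset. One worked expansion,
using the defines from the clock-manager sources above:

        /* CLK_SR_DCN35(CLK1_CLK_PLL_REQ) expands to: */
        .CLK1_CLK_PLL_REQ = mmCLK1_CLK_PLL_REQ         /* = 0x16E37 */
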
+diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
+index ef2d490965ba20..bcf248f69252c2 100644
+--- a/drivers/gpu/drm/drm_panic_qr.rs
++++ b/drivers/gpu/drm/drm_panic_qr.rs
+@@ -931,7 +931,7 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
+ /// They must remain valid for the duration of the function call.
+ #[no_mangle]
+ pub unsafe extern "C" fn drm_panic_qr_generate(
+-    url: *const i8,
++    url: *const kernel::ffi::c_char,
+     data: *mut u8,
+     data_len: usize,
+     data_size: usize,
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 8a49f499e3fb3f..b40f1398f0f822 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -808,8 +808,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ 		/* select data lane width */
+ 		tmp = intel_de_read(display,
+ 				    TRANS_DDI_FUNC_CTL(display, dsi_trans));
+-		tmp &= ~DDI_PORT_WIDTH_MASK;
+-		tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
++		tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
++		tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
+ 
+ 		/* select input pipe */
+ 		tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 49b5cc01ce40ad..943b57835b3a69 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3399,7 +3399,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ 		intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ 			     XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
+ 
+-		buf_ctl |= DDI_PORT_WIDTH(lane_count);
++		buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
+ 
+ 		if (DISPLAY_VER(dev_priv) >= 20)
+ 			buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 863927f429aa73..9d9fe11dd0557a 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -6641,12 +6641,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
+ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
+ {
+ 	struct drm_i915_private *i915 = to_i915(state->base.dev);
++	const struct intel_plane_state *plane_state;
+ 	struct intel_crtc_state *crtc_state;
++	struct intel_plane *plane;
+ 	struct intel_crtc *crtc;
+ 	u8 affected_pipes = 0;
+ 	u8 modeset_pipes = 0;
+ 	int i;
+ 
++	/*
++	 * Any plane which is in use by the joiner needs its crtc.
++	 * Pull those in first as this will not have happened yet
++	 * if the plane remains disabled according to uapi.
++	 */
++	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
++		crtc = to_intel_crtc(plane_state->hw.crtc);
++		if (!crtc)
++			continue;
++
++		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
++		if (IS_ERR(crtc_state))
++			return PTR_ERR(crtc_state);
++	}
++
++	/* Now pull in all joined crtcs */
+ 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ 		affected_pipes |= crtc_state->joiner_pipes;
+ 		if (intel_crtc_needs_modeset(crtc_state))
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+index 397cc4ebae526a..bb70ba31efd9d6 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+@@ -1565,7 +1565,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ 
+ 	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ 		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
+-		return false;
++		goto out;
+ 	}
+ 
+ 	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
+@@ -1577,6 +1577,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ 	       passed ? "passed" : "failed",
+ 	       crtc_state->port_clock, crtc_state->lane_count);
+ 
++out:
++	/*
++	 * Ensure that the training pattern is set to TPS2 even on failure,
++	 * as happens at the end of a passing link training and as the
++	 * transcoder expects. Leaving TPS1 set (and
++	 * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
++	 * would result in a stuck transcoder HW state and flip-done timeouts
++	 * later in the modeset sequence.
++	 */
++	if (!passed)
++		intel_dp_program_link_training_pattern(intel_dp, crtc_state,
++						       DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
++
+ 	return passed;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 4b12a6c7c247bd..20b5890754aefb 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -3425,10 +3425,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
+ 	 */
+ 	ret = deregister_context(ce, ce->guc_id.id);
+ 	if (ret) {
+-		spin_lock(&ce->guc_state.lock);
++		spin_lock_irqsave(&ce->guc_state.lock, flags);
+ 		set_context_registered(ce);
+ 		clr_context_destroyed(ce);
+-		spin_unlock(&ce->guc_state.lock);
++		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ 		/*
+ 		 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
+ 		 * the wakeref immediately but per function spec usage call this after unlock.
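The guc_lrc_desc_unpin() change swaps a plain spin_lock for the irqsave
variant because guc_state.lock is also taken from interrupt context; holding
it with local interrupts enabled lets the handler spin on the same CPU and
deadlock. The general shape of the pattern, as a minimal sketch rather than
the driver code:

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(state_lock);

        static void update_state(void)
        {
                unsigned long flags;

                /* Disable local interrupts while holding a lock that an
                 * IRQ handler may also take; otherwise that handler can
                 * spin forever on the lock this CPU already holds. */
                spin_lock_irqsave(&state_lock, flags);
                /* ... modify shared state ... */
                spin_unlock_irqrestore(&state_lock, flags);
        }
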
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 22be4a731d27e6..2f0fc0dbd48477 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3917,7 +3917,7 @@ enum skl_power_gate {
+ #define  DDI_BUF_IS_IDLE			(1 << 7)
+ #define  DDI_BUF_CTL_TC_PHY_OWNERSHIP		REG_BIT(6)
+ #define  DDI_A_4_LANES				(1 << 4)
+-#define  DDI_PORT_WIDTH(width)			(((width) - 1) << 1)
++#define  DDI_PORT_WIDTH(width)			(((width) == 3 ? 4 : ((width) - 1)) << 1)
+ #define  DDI_PORT_WIDTH_MASK			(7 << 1)
+ #define  DDI_PORT_WIDTH_SHIFT			1
+ #define  DDI_INIT_DISPLAY_DETECTED		(1 << 0)
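The DDI_PORT_WIDTH() fix encodes the quirk that the 3-lane configuration
does not follow the "width - 1" rule in this register. The resulting field
values follow directly from the macro:

        /* DDI_PORT_WIDTH(width), value before the << 1 shift:
         *   width 1 -> 0
         *   width 2 -> 1
         *   width 3 -> 4    (special-cased by the fix)
         *   width 4 -> 3
         */
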
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+index 421afacb724803..36cc9dbc00b5c1 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+@@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+index 641023b102bf59..e8eacdb47967a2 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+@@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+index d039b96beb97cf..76f60a2df7a890 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
+ 	{
+ 		.name = "wb_2", .id = WB_2,
+ 		.base = 0x65000, .len = 0x2c8,
+-		.features = WB_SDM845_MASK,
++		.features = WB_SM8250_MASK,
+ 		.format_list = wb2_formats_rgb,
+ 		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ 		.clk_ctrl = DPU_CLK_CTRL_WB2,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 83de7564e2c1fe..67f5fc6fdae102 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+ 		}
+ 	}
+ 
++	if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
++		phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
++
+ 	/* reset the merge 3D HW block */
+ 	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
+ 		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+index 657200401f5763..cec6d4e8baec4d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	u32 slice_last_group_size;
+ 	u32 det_thresh_flatness;
+ 	bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
++	bool input_10_bits = dsc->bits_per_component == 10;
+ 
+ 	DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+ 
+@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ 	data |= (dsc->line_buf_depth << 3);
+ 	data |= (dsc->simple_422 << 2);
+ 	data |= (dsc->convert_rgb << 1);
+-	data |= dsc->bits_per_component;
++	data |= input_10_bits;
+ 
+ 	DPU_REG_WRITE(c, DSC_ENC, data);
+ 
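The dpu_hw_dsc_config() fix matters because bit 0 of DSC_ENC is a
"10-bit input" flag, not a bits-per-component field. OR-ing the raw bpc
value in smeared it across the neighboring bits assembled just above:

        /* Before: data |= 10 is data |= 0b1010, which also lands on the
         * convert_rgb (<<1) and line_buf_depth (<<3) bit positions.
         * After:  data |= (bits_per_component == 10) sets only bit 0. */
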
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+index ad19330de61abd..562a3f4c5238a3 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ 
+ 	if (cap & BIT(DPU_MDP_VSYNC_SEL))
+ 		ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
+-	else
++	else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
+ 		ops->setup_vsync_source = dpu_hw_setup_wd_timer;
+ 
+ 	ops->get_safe_status = dpu_hw_get_safe_status;
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index aff51bb973ebe0..6d69598e85c573 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -937,16 +937,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+-		return MODE_CLOCK_HIGH;
+-
+ 	msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ 	link_info = &msm_dp_display->panel->link_info;
+ 
+-	if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+-	    msm_dp_display->panel->vsc_sdp_supported)
++	if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
++	     msm_dp_display->panel->vsc_sdp_supported) ||
++	     msm_dp_wide_bus_available(dp))
+ 		mode_pclk_khz /= 2;
+ 
++	if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
++		return MODE_CLOCK_HIGH;
++
+ 	mode_bpp = dp->connector->display_info.bpc * num_components;
+ 	if (!mode_bpp)
+ 		mode_bpp = default_bpp;
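msm_dp_bridge_mode_valid() now halves the pixel clock before applying the
cap whenever widebus is available, extending the halving already done for
4:2:0-only modes, since a widebus link moves two pixels per clock cycle.
Symbolically:

        /* effective = widebus_or_420 ? mode_pclk_khz / 2 : mode_pclk_khz;
         * if (effective > DP_MAX_PIXEL_CLK_KHZ)
         *         return MODE_CLOCK_HIGH;
         * so modes up to twice the nominal limit pass on a widebus link. */
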
+diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
+index d3e241ea694161..16b7913d1eefa8 100644
+--- a/drivers/gpu/drm/msm/dp/dp_drm.c
++++ b/drivers/gpu/drm/msm/dp/dp_drm.c
+@@ -257,7 +257,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
++	if (msm_dp_wide_bus_available(dp))
++		mode_pclk_khz /= 2;
++
++	if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
+ 		return MODE_CLOCK_HIGH;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+index 031446c87daec0..798168180c1ab6 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
+ 	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ 	spinlock_t postdiv_lock;
+ 
++	/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
++	spinlock_t pclk_mux_lock;
++
+ 	struct pll_7nm_cached_state cached_state;
+ 
+ 	struct dsi_pll_7nm *slave;
+@@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+ 	ndelay(250);
+ }
+ 
+-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&pll->postdiv_lock, flags);
++	writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
++	spin_unlock_irqrestore(&pll->postdiv_lock, flags);
++}
++
++static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
++					u32 val)
++{
++	unsigned long flags;
+ 	u32 data;
+ 
++	spin_lock_irqsave(&pll->pclk_mux_lock, flags);
+ 	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	data &= ~mask;
++	data |= val & mask;
++
++	writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
++}
++
++static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
++{
++	dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
+ }
+ 
+ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+ {
+-	u32 data;
++	u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
+ 
+ 	writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
+-
+-	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
+ }
+ 
+ static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+@@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ {
+ 	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ 	struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+-	void __iomem *phy_base = pll_7nm->phy->base;
+ 	u32 val;
+ 	int ret;
+ 
+@@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ 	val |= cached->pll_out_div;
+ 	writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ 
+-	writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
+-	       phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+-
+-	val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+-	val &= ~0x3;
+-	val |= cached->pll_mux;
+-	writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg0_write(pll_7nm,
++				   DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
++				   DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
++	dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ 
+ 	ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+ 			pll_7nm->vco_current_rate,
+@@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ {
+ 	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+-	void __iomem *base = phy->base;
+ 	u32 data = 0x0;	/* internal PLL */
+ 
+ 	DBG("DSI PLL%d", pll_7nm->phy->id);
+@@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+ 	}
+ 
+ 	/* set PLL src */
+-	writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
++	dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
++				    DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
+ 
+ 	return 0;
+ }
+@@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
+ 					pll_by_2_bit,
+ 				}), 2, 0, pll_7nm->phy->base +
+ 					REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+-				0, 1, 0, NULL);
++				0, 1, 0, &pll_7nm->pclk_mux_lock);
+ 		if (IS_ERR(hw)) {
+ 			ret = PTR_ERR(hw);
+ 			goto fail;
+@@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+ 	pll_7nm_list[phy->id] = pll_7nm;
+ 
+ 	spin_lock_init(&pll_7nm->postdiv_lock);
++	spin_lock_init(&pll_7nm->pclk_mux_lock);
+ 
+ 	pll_7nm->phy = phy;
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index d8c9a1b192632d..f15962cfb373c5 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -530,15 +530,12 @@ static inline int align_pitch(int width, int bpp)
+ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+ {
+ 	ktime_t now = ktime_get();
+-	s64 remaining_jiffies;
+ 
+-	if (ktime_compare(*timeout, now) < 0) {
+-		remaining_jiffies = 0;
+-	} else {
+-		ktime_t rem = ktime_sub(*timeout, now);
+-		remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+-	}
++	if (ktime_compare(*timeout, now) <= 0)
++		return 0;
+ 
++	ktime_t rem = ktime_sub(*timeout, now);
++	s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+ 	return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
+ }
+ 
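The rewritten timeout_to_jiffies() returns 0 up front for expired timeouts
and clamps any non-zero remainder to at least one jiffy, so a nearly-expired
timeout still waits once instead of truncating to zero. A worked example,
assuming HZ == 100 (one jiffy == 10 ms) for illustration:

        /* 4 ms remaining:
         *   ktime_divns(rem, NSEC_PER_SEC / HZ)  == 0
         *   clamp(0, 1LL, (s64)INT_MAX)          == 1  -> one jiffy wait */
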
+diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+index d54b72f924493b..35f7f40e405b7d 100644
+--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
++++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+@@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+ 	<reg32 offset="0x00004" name="REVISION_ID1"/>
+ 	<reg32 offset="0x00008" name="REVISION_ID2"/>
+ 	<reg32 offset="0x0000c" name="REVISION_ID3"/>
+-	<reg32 offset="0x00010" name="CLK_CFG0"/>
+-	<reg32 offset="0x00014" name="CLK_CFG1"/>
++	<reg32 offset="0x00010" name="CLK_CFG0">
++		<bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
++		<bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
++	</reg32>
++	<reg32 offset="0x00014" name="CLK_CFG1">
++		<bitfield name="CLK_EN" pos="5" type="boolean"/>
++		<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
++		<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
++	</reg32>
+ 	<reg32 offset="0x00018" name="GLBL_CTRL"/>
+ 	<reg32 offset="0x0001c" name="RBUF_CTRL"/>
+ 	<reg32 offset="0x00020" name="VREG_CTRL_0"/>
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index b4da82ddbb6b2f..8ea98f06d39afc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 	unsigned long timeout =
+ 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ 	struct mm_struct *mm = svmm->notifier.mm;
++	struct folio *folio;
+ 	struct page *page;
+ 	unsigned long start = args->p.addr;
+ 	unsigned long notifier_seq;
+@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
++		folio = page_folio(page);
+ 
+ 		mutex_lock(&svmm->mutex);
+ 		if (!mmu_interval_read_retry(&notifier->notifier,
+ 					     notifier_seq))
+ 			break;
+ 		mutex_unlock(&svmm->mutex);
++
++		folio_unlock(folio);
++		folio_put(folio);
+ 	}
+ 
+ 	/* Map the page on the GPU. */
+@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
+ 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
+ 	mutex_unlock(&svmm->mutex);
+ 
+-	unlock_page(page);
+-	put_page(page);
++	folio_unlock(folio);
++	folio_put(folio);
+ 
+ out:
+ 	mmu_interval_notifier_remove(&notifier->notifier);
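The nouveau change converts the atomic fault path to folios and, more
importantly, releases the folio on the retry path: previously, when
mmu_interval_read_retry() detected a racing invalidation, the loop went
around with the page still locked and referenced. The corrected loop shape,
with acquire_exclusive_folio() as a hypothetical stand-in for the code that
returns a locked, referenced folio:

        for (;;) {
                folio = acquire_exclusive_folio(addr);  /* placeholder */
                mutex_lock(&svmm->mutex);
                if (!mmu_interval_read_retry(&notifier->notifier, seq))
                        break;                  /* still valid: map it */
                mutex_unlock(&svmm->mutex);
                folio_unlock(folio);            /* raced: drop and retry */
                folio_put(folio);
        }
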
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+index a6f410ba60bc94..d393bc540f8628 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
+ 	.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
+ };
+ 
+-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
++#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+ MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+index 45d09e6fa667fd..7d68a8acfe2ea4 100644
+--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
++++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
+ 	if (jadard->desc->lp11_to_reset_delay_ms)
+ 		msleep(jadard->desc->lp11_to_reset_delay_ms);
+ 
+-	gpiod_set_value(jadard->reset, 1);
++	gpiod_set_value(jadard->reset, 0);
+ 	msleep(5);
+ 
+-	gpiod_set_value(jadard->reset, 0);
++	gpiod_set_value(jadard->reset, 1);
+ 	msleep(10);
+ 
+-	gpiod_set_value(jadard->reset, 1);
++	gpiod_set_value(jadard->reset, 0);
+ 	msleep(130);
+ 
+ 	ret = jadard->desc->init(jadard);
+@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
+ 	dsi->format = desc->format;
+ 	dsi->lanes = desc->lanes;
+ 
+-	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(jadard->reset)) {
+ 		DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
+ 		return PTR_ERR(jadard->reset);
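The jadard reset rework is a logical-polarity fix: gpiod_set_value() takes
the logical line state (1 = reset asserted), with the physical inversion
supplied by the GPIO_ACTIVE_* flag in the device tree. Assuming the DT
describes the line's real polarity, the new sequence is:

        /* probe:   GPIOD_OUT_HIGH      -> start with reset asserted
         * prepare: 0 (5 ms)            -> release
         *          1 (10 ms)           -> assert reset pulse
         *          0 (130 ms), init()  -> release and initialize panel */
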
+diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
+index a7dbc6554d6944..ac4cda2d81c7a1 100644
+--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
++++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
+@@ -53,18 +53,7 @@ void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+ 
+ bool intel_irqs_enabled(struct xe_device *xe)
+ {
+-	/*
+-	 * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
+-	 * lock its transitions. Because of that, the irq.enabled sometimes
+-	 * is not read with the irq.lock in place.
+-	 * However, the most critical cases like vblank and page flips are
+-	 * properly using the locks.
+-	 * We cannot take the lock in here or run any kind of assert because
+-	 * of i915 inconsistency.
+-	 * But at this point the xe irq is better protected against races,
+-	 * although the full solution would be protecting the i915 side.
+-	 */
+-	return xe->irq.enabled;
++	return atomic_read(&xe->irq.enabled);
+ }
+ 
+ void intel_synchronize_irq(struct xe_device *xe)
+diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
+index 06d6db8b50f93f..7f902d50ebf696 100644
+--- a/drivers/gpu/drm/xe/xe_device.c
++++ b/drivers/gpu/drm/xe/xe_device.c
+@@ -324,7 +324,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
+ 	xe->info.revid = pdev->revision;
+ 	xe->info.force_execlist = xe_modparam.force_execlist;
+ 
+-	spin_lock_init(&xe->irq.lock);
++	err = xe_irq_init(xe);
++	if (err)
++		goto err;
+ 
+ 	init_waitqueue_head(&xe->ufence_wq);
+ 
+diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
+index f1fbfe91686782..fc3c2af3fb7fd1 100644
+--- a/drivers/gpu/drm/xe/xe_device.h
++++ b/drivers/gpu/drm/xe/xe_device.h
+@@ -157,8 +157,7 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
+ 
+ static inline bool xe_device_has_msix(struct xe_device *xe)
+ {
+-	/* TODO: change this when MSI-X support is fully integrated */
+-	return false;
++	return xe->irq.msix.nvec > 0;
+ }
+ 
+ static inline bool xe_device_has_memirq(struct xe_device *xe)
+diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
+index b9ea455d6f59fa..782eb224a46e7a 100644
+--- a/drivers/gpu/drm/xe/xe_device_types.h
++++ b/drivers/gpu/drm/xe/xe_device_types.h
+@@ -345,7 +345,13 @@ struct xe_device {
+ 		spinlock_t lock;
+ 
+ 		/** @irq.enabled: interrupts enabled on this device */
+-		bool enabled;
++		atomic_t enabled;
++
++		/** @irq.msix: irq info for platforms that support MSI-X */
++		struct {
++			/** @irq.msix.nvec: number of MSI-X interrupts */
++			u16 nvec;
++		} msix;
+ 	} irq;
+ 
+ 	/** @ttm: ttm device */
+diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
+index b7995ebd54abde..ca04327bd6dfbd 100644
+--- a/drivers/gpu/drm/xe/xe_irq.c
++++ b/drivers/gpu/drm/xe/xe_irq.c
+@@ -10,6 +10,7 @@
+ #include <drm/drm_managed.h>
+ 
+ #include "display/xe_display.h"
++#include "regs/xe_guc_regs.h"
+ #include "regs/xe_irq_regs.h"
+ #include "xe_device.h"
+ #include "xe_drv.h"
+@@ -29,6 +30,11 @@
+ #define IIR(offset)				XE_REG(offset + 0x8)
+ #define IER(offset)				XE_REG(offset + 0xc)
+ 
++static int xe_irq_msix_init(struct xe_device *xe);
++static void xe_irq_msix_free(struct xe_device *xe);
++static int xe_irq_msix_request_irqs(struct xe_device *xe);
++static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
++
+ static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
+ {
+ 	u32 val = xe_mmio_read32(mmio, reg);
+@@ -348,12 +354,8 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
+ 	unsigned long intr_dw[2];
+ 	u32 identity[32];
+ 
+-	spin_lock(&xe->irq.lock);
+-	if (!xe->irq.enabled) {
+-		spin_unlock(&xe->irq.lock);
++	if (!atomic_read(&xe->irq.enabled))
+ 		return IRQ_NONE;
+-	}
+-	spin_unlock(&xe->irq.lock);
+ 
+ 	master_ctl = xelp_intr_disable(xe);
+ 	if (!master_ctl) {
+@@ -417,12 +419,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
+ 
+ 	/* TODO: This really shouldn't be copied+pasted */
+ 
+-	spin_lock(&xe->irq.lock);
+-	if (!xe->irq.enabled) {
+-		spin_unlock(&xe->irq.lock);
++	if (!atomic_read(&xe->irq.enabled))
+ 		return IRQ_NONE;
+-	}
+-	spin_unlock(&xe->irq.lock);
+ 
+ 	master_tile_ctl = dg1_intr_disable(xe);
+ 	if (!master_tile_ctl) {
+@@ -580,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe)
+ 	if (IS_SRIOV_VF(xe))
+ 		return vf_irq_reset(xe);
+ 
++	if (xe_device_uses_memirq(xe)) {
++		for_each_tile(tile, xe, id)
++			xe_memirq_reset(&tile->memirq);
++	}
++
+ 	for_each_tile(tile, xe, id) {
+ 		if (GRAPHICS_VERx100(xe) >= 1210)
+ 			dg1_irq_reset(tile);
+@@ -622,6 +625,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
+ 	if (IS_SRIOV_VF(xe))
+ 		return vf_irq_postinstall(xe);
+ 
++	if (xe_device_uses_memirq(xe)) {
++		struct xe_tile *tile;
++		unsigned int id;
++
++		for_each_tile(tile, xe, id)
++			xe_memirq_postinstall(&tile->memirq);
++	}
++
+ 	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
+ 
+ 	/*
+@@ -644,12 +655,8 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
+ 	struct xe_tile *tile;
+ 	unsigned int id;
+ 
+-	spin_lock(&xe->irq.lock);
+-	if (!xe->irq.enabled) {
+-		spin_unlock(&xe->irq.lock);
++	if (!atomic_read(&xe->irq.enabled))
+ 		return IRQ_NONE;
+-	}
+-	spin_unlock(&xe->irq.lock);
+ 
+ 	for_each_tile(tile, xe, id)
+ 		xe_memirq_handler(&tile->memirq);
+@@ -668,87 +675,105 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
+ 		return xelp_irq_handler;
+ }
+ 
+-static void irq_uninstall(void *arg)
++static int xe_irq_msi_request_irqs(struct xe_device *xe)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++	irq_handler_t irq_handler;
++	int irq, err;
++
++	irq_handler = xe_irq_handler(xe);
++	if (!irq_handler) {
++		drm_err(&xe->drm, "No supported interrupt handler");
++		return -EINVAL;
++	}
++
++	irq = pci_irq_vector(pdev, 0);
++	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
++	if (err < 0) {
++		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
++		return err;
++	}
++
++	return 0;
++}
++
++static void xe_irq_msi_free(struct xe_device *xe)
+ {
+-	struct xe_device *xe = arg;
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ 	int irq;
+ 
+-	if (!xe->irq.enabled)
++	irq = pci_irq_vector(pdev, 0);
++	free_irq(irq, xe);
++}
++
++static void irq_uninstall(void *arg)
++{
++	struct xe_device *xe = arg;
++
++	if (!atomic_xchg(&xe->irq.enabled, 0))
+ 		return;
+ 
+-	xe->irq.enabled = false;
+ 	xe_irq_reset(xe);
+ 
+-	irq = pci_irq_vector(pdev, 0);
+-	free_irq(irq, xe);
++	if (xe_device_has_msix(xe))
++		xe_irq_msix_free(xe);
++	else
++		xe_irq_msi_free(xe);
++}
++
++int xe_irq_init(struct xe_device *xe)
++{
++	spin_lock_init(&xe->irq.lock);
++
++	return xe_irq_msix_init(xe);
+ }
+ 
+ int xe_irq_install(struct xe_device *xe)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+-	unsigned int irq_flags = PCI_IRQ_MSIX;
+-	irq_handler_t irq_handler;
+-	int err, irq, nvec;
+-
+-	irq_handler = xe_irq_handler(xe);
+-	if (!irq_handler) {
+-		drm_err(&xe->drm, "No supported interrupt handler");
+-		return -EINVAL;
+-	}
++	unsigned int irq_flags = PCI_IRQ_MSI;
++	int nvec = 1;
++	int err;
+ 
+ 	xe_irq_reset(xe);
+ 
+-	nvec = pci_msix_vec_count(pdev);
+-	if (nvec <= 0) {
+-		if (nvec == -EINVAL) {
+-			/* MSIX capability is not supported in the device, using MSI */
+-			irq_flags = PCI_IRQ_MSI;
+-			nvec = 1;
+-		} else {
+-			drm_err(&xe->drm, "MSIX: Failed getting count\n");
+-			return nvec;
+-		}
++	if (xe_device_has_msix(xe)) {
++		nvec = xe->irq.msix.nvec;
++		irq_flags = PCI_IRQ_MSIX;
+ 	}
+ 
+ 	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
+ 	if (err < 0) {
+-		drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
++		drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
+ 		return err;
+ 	}
+ 
+-	irq = pci_irq_vector(pdev, 0);
+-	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+-	if (err < 0) {
+-		drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
++	err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
++					xe_irq_msi_request_irqs(xe);
++	if (err)
+ 		return err;
+-	}
+ 
+-	xe->irq.enabled = true;
++	atomic_set(&xe->irq.enabled, 1);
+ 
+ 	xe_irq_postinstall(xe);
+ 
+-	err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
+-	if (err)
+-		goto free_irq_handler;
+-
+-	return 0;
+-
+-free_irq_handler:
+-	free_irq(irq, xe);
++	return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
++}
+ 
+-	return err;
++static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
++{
++	synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
+ }
+ 
+ void xe_irq_suspend(struct xe_device *xe)
+ {
+-	int irq = to_pci_dev(xe->drm.dev)->irq;
+-
+-	spin_lock_irq(&xe->irq.lock);
+-	xe->irq.enabled = false; /* no new irqs */
+-	spin_unlock_irq(&xe->irq.lock);
++	atomic_set(&xe->irq.enabled, 0); /* no new irqs */
+ 
+-	synchronize_irq(irq); /* flush irqs */
++	/* flush irqs */
++	if (xe_device_has_msix(xe))
++		xe_irq_msix_synchronize_irq(xe);
++	else
++		xe_irq_msi_synchronize_irq(xe);
+ 	xe_irq_reset(xe); /* turn irqs off */
+ }
+ 
+@@ -762,10 +787,149 @@ void xe_irq_resume(struct xe_device *xe)
+ 	 * 1. no irq will arrive before the postinstall
+ 	 * 2. display is not yet resumed
+ 	 */
+-	xe->irq.enabled = true;
++	atomic_set(&xe->irq.enabled, 1);
+ 	xe_irq_reset(xe);
+ 	xe_irq_postinstall(xe); /* turn irqs on */
+ 
+ 	for_each_gt(gt, xe, id)
+ 		xe_irq_enable_hwe(gt);
+ }
++
++/* MSI-X related definitions and functions below. */
++
++enum xe_irq_msix_static {
++	GUC2HOST_MSIX = 0,
++	DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
++	/* Must be last */
++	NUM_OF_STATIC_MSIX,
++};
++
++static int xe_irq_msix_init(struct xe_device *xe)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++	int nvec = pci_msix_vec_count(pdev);
++
++	if (nvec == -EINVAL)
++		return 0;  /* MSI */
++
++	if (nvec < 0) {
++		drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
++		return nvec;
++	}
++
++	xe->irq.msix.nvec = nvec;
++	return 0;
++}
++
++static irqreturn_t guc2host_irq_handler(int irq, void *arg)
++{
++	struct xe_device *xe = arg;
++	struct xe_tile *tile;
++	u8 id;
++
++	if (!atomic_read(&xe->irq.enabled))
++		return IRQ_NONE;
++
++	for_each_tile(tile, xe, id)
++		xe_guc_irq_handler(&tile->primary_gt->uc.guc,
++				   GUC_INTR_GUC2HOST);
++
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
++{
++	unsigned int tile_id, gt_id;
++	struct xe_device *xe = arg;
++	struct xe_memirq *memirq;
++	struct xe_hw_engine *hwe;
++	enum xe_hw_engine_id id;
++	struct xe_tile *tile;
++	struct xe_gt *gt;
++
++	if (!atomic_read(&xe->irq.enabled))
++		return IRQ_NONE;
++
++	for_each_tile(tile, xe, tile_id) {
++		memirq = &tile->memirq;
++		if (!memirq->bo)
++			continue;
++
++		for_each_gt(gt, xe, gt_id) {
++			if (gt->tile != tile)
++				continue;
++
++			for_each_hw_engine(hwe, gt, id)
++				xe_memirq_hwe_handler(memirq, hwe);
++		}
++	}
++
++	return IRQ_HANDLED;
++}
++
++static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler,
++				   const char *name, u16 msix)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++	int ret, irq;
++
++	irq = pci_irq_vector(pdev, msix);
++	if (irq < 0)
++		return irq;
++
++	ret = request_irq(irq, handler, IRQF_SHARED, name, xe);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
++static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++	int irq;
++
++	irq = pci_irq_vector(pdev, msix);
++	if (irq < 0) {
++		drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
++		return;
++	}
++
++	free_irq(irq, xe);
++}
++
++static int xe_irq_msix_request_irqs(struct xe_device *xe)
++{
++	int err;
++
++	err = xe_irq_msix_request_irq(xe, guc2host_irq_handler,
++				      DRIVER_NAME "-guc2host", GUC2HOST_MSIX);
++	if (err) {
++		drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", GUC2HOST_MSIX, err);
++		return err;
++	}
++
++	err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler,
++				      DRIVER_NAME "-default-msix", DEFAULT_MSIX);
++	if (err) {
++		drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", DEFAULT_MSIX, err);
++		xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
++		return err;
++	}
++
++	return 0;
++}
++
++static void xe_irq_msix_free(struct xe_device *xe)
++{
++	xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
++	xe_irq_msix_free_irq(xe, DEFAULT_MSIX);
++}
++
++static void xe_irq_msix_synchronize_irq(struct xe_device *xe)
++{
++	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
++
++	synchronize_irq(pci_irq_vector(pdev, GUC2HOST_MSIX));
++	synchronize_irq(pci_irq_vector(pdev, DEFAULT_MSIX));
++}
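With this restructuring, interrupt bring-up is split in two: xe_irq_init()
runs at device-creation time and only records the MSI-X vector count, while
xe_irq_install() allocates and requests the vectors; xe_device_has_msix()
is now derived from that count rather than hard-coded to false. Condensed
from the functions above:

        /* xe_irq_init():    nvec = pci_msix_vec_count(pdev)
         *                     -EINVAL -> MSI platform, msix.nvec stays 0
         *                     nvec>0  -> xe->irq.msix.nvec = nvec
         * xe_irq_install(): has_msix ? (PCI_IRQ_MSIX, nvec)
         *                            : (PCI_IRQ_MSI,  1)
         *                   then pci_alloc_irq_vectors() + request_irq() */
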
+diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
+index 067514e13675ba..24ff16111b9688 100644
+--- a/drivers/gpu/drm/xe/xe_irq.h
++++ b/drivers/gpu/drm/xe/xe_irq.h
+@@ -6,10 +6,13 @@
+ #ifndef _XE_IRQ_H_
+ #define _XE_IRQ_H_
+ 
++#define XE_IRQ_DEFAULT_MSIX 1
++
+ struct xe_device;
+ struct xe_tile;
+ struct xe_gt;
+ 
++int xe_irq_init(struct xe_device *xe);
+ int xe_irq_install(struct xe_device *xe);
+ void xe_irq_suspend(struct xe_device *xe);
+ void xe_irq_resume(struct xe_device *xe);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index bf5608a7405610..0f6cd44fff2921 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2462,6 +2462,7 @@ static int vmbus_bus_suspend(struct device *dev)
+ 
+ static int vmbus_bus_resume(struct device *dev)
+ {
++	struct vmbus_channel *channel;
+ 	struct vmbus_channel_msginfo *msginfo;
+ 	size_t msgsize;
+ 	int ret;
+@@ -2494,6 +2495,22 @@ static int vmbus_bus_resume(struct device *dev)
+ 
+ 	vmbus_request_offers();
+ 
++	mutex_lock(&vmbus_connection.channel_mutex);
++	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
++		if (channel->offermsg.child_relid != INVALID_RELID)
++			continue;
++
++		/* hvsock channels are not expected to be present. */
++		if (is_hvsock_channel(channel))
++			continue;
++
++		pr_err("channel %pUl/%pUl not present after resume.\n",
++		       &channel->offermsg.offer.if_type,
++		       &channel->offermsg.offer.if_instance);
++		/* TODO: clean up these channels here */
++	}
++	mutex_unlock(&vmbus_connection.channel_mutex);
++
+ 	/* Reset the event for the next suspend. */
+ 	reinit_completion(&vmbus_connection.ready_for_suspend_event);
+ 
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 76dce0aac24656..270d7a4d85a6d7 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
+ #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
+ #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
+ #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 2)
++#define FLAGS_WORKAROUND_INSECURE		(1ULL << 3)
+ 
+ #define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
+ 
+@@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+ #define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+ #define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+ 
++static bool nmi_support_forbidden;
++
+ /*
+  * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
+  * are potentially stolen by the secure side. Some code, especially code dealing
+@@ -163,21 +166,27 @@ static void __init gic_prio_init(void)
+ {
+ 	bool ds;
+ 
+-	ds = gic_dist_security_disabled();
+-	if (!ds) {
+-		u32 val;
+-
+-		val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+-		val |= GICD_CTLR_DS;
+-		writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
++	cpus_have_group0 = gic_has_group0();
+ 
+-		ds = gic_dist_security_disabled();
+-		if (ds)
+-			pr_warn("Broken GIC integration, security disabled");
++	ds = gic_dist_security_disabled();
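++	/*
++	 * On integrations flagged as insecure, either force GICD_CTLR.DS on
++	 * (when Group0 is usable) or give up on pseudo-NMI support entirely.
++	 */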
++	if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
++		if (cpus_have_group0) {
++			u32 val;
++
++			val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
++			val |= GICD_CTLR_DS;
++			writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
++
++			ds = gic_dist_security_disabled();
++			if (ds)
++				pr_warn("Broken GIC integration, security disabled\n");
++		} else {
++			pr_warn("Broken GIC integration, pNMI forbidden\n");
++			nmi_support_forbidden = true;
++		}
+ 	}
+ 
+ 	cpus_have_security_disabled = ds;
+-	cpus_have_group0 = gic_has_group0();
+ 
+ 	/*
+ 	 * How priority values are used by the GIC depends on two things:
+@@ -209,7 +218,7 @@ static void __init gic_prio_init(void)
+ 	 * be in the non-secure range, we program the non-secure values into
+ 	 * the distributor to match the PMR values we want.
+ 	 */
+-	if (cpus_have_group0 & !cpus_have_security_disabled) {
++	if (cpus_have_group0 && !cpus_have_security_disabled) {
+ 		dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
+ 		dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
+ 	}
+@@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
+ 	return true;
+ }
+ 
++static bool gic_enable_quirk_rk3399(void *data)
++{
++	struct gic_chip_data *d = data;
++
++	if (of_machine_is_compatible("rockchip,rk3399")) {
++		d->flags |= FLAGS_WORKAROUND_INSECURE;
++		return true;
++	}
++
++	return false;
++}
++
+ static bool rd_set_non_coherent(void *data)
+ {
+ 	struct gic_chip_data *d = data;
+@@ -1996,6 +2017,12 @@ static const struct gic_quirk gic_quirks[] = {
+ 		.property = "dma-noncoherent",
+ 		.init   = rd_set_non_coherent,
+ 	},
++	{
++		.desc	= "GICv3: Insecure RK3399 integration",
++		.iidr	= 0x0000043b,
++		.mask	= 0xff000fff,
++		.init	= gic_enable_quirk_rk3399,
++	},
+ 	{
+ 	}
+ };
+@@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void)
+ {
+ 	int i;
+ 
+-	if (!gic_prio_masking_enabled())
++	if (!gic_prio_masking_enabled() || nmi_support_forbidden)
+ 		return;
+ 
+ 	rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
+diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
+index b9dcc8e78c7501..1f613eb7b7f034 100644
+--- a/drivers/irqchip/irq-jcore-aic.c
++++ b/drivers/irqchip/irq-jcore-aic.c
+@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
+ static void handle_jcore_irq(struct irq_desc *desc)
+ {
+ 	if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+-		handle_percpu_irq(desc);
++		handle_percpu_devid_irq(desc);
+ 	else
+ 		handle_simple_irq(desc);
+ }
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 7049ec7fb8eb44..e8802309ed600b 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -386,10 +386,8 @@ static int raid0_set_limits(struct mddev *mddev)
+ 	lim.io_opt = lim.io_min * mddev->raid_disks;
+ 	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index a5cd6522fc2d4d..3c75a69376f470 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3219,10 +3219,8 @@ static int raid1_set_limits(struct mddev *mddev)
+ 	lim.max_write_zeroes_sectors = 0;
+ 	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index e1e6cd7fb125e1..8b736f30ef9262 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4020,10 +4020,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
+ 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ 	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+ 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
+-	if (err) {
+-		queue_limits_cancel_update(mddev->gendisk->queue);
++	if (err)
+ 		return err;
+-	}
+ 	return queue_limits_set(mddev->gendisk->queue, &lim);
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
+index 8d1d710e439dd3..0b2db4173e7230 100644
+--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
++++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
+@@ -471,6 +471,8 @@ struct cdns_nand_ctrl {
+ 	struct {
+ 		void __iomem *virt;
+ 		dma_addr_t dma;
++		dma_addr_t iova_dma;
++		u32 size;
+ 	} io;
+ 
+ 	int irq;
+@@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ 	}
+ 
+ 	if (dir == DMA_FROM_DEVICE) {
+-		src_dma = cdns_ctrl->io.dma;
++		src_dma = cdns_ctrl->io.iova_dma;
+ 		dst_dma = buf_dma;
+ 	} else {
+ 		src_dma = buf_dma;
+-		dst_dma = cdns_ctrl->io.dma;
++		dst_dma = cdns_ctrl->io.iova_dma;
+ 	}
+ 
+ 	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
+@@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
+ 	dma_async_issue_pending(cdns_ctrl->dmac);
+ 	wait_for_completion(&finished);
+ 
+-	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+ 
+ 	return 0;
+ 
+ err_unmap:
+-	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
++	dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
+ 
+ err:
+ 	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
+@@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
+ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ 	dma_cap_mask_t mask;
++	struct dma_device *dma_dev;
+ 	int ret;
+ 
+ 	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
+@@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ 	dma_cap_set(DMA_MEMCPY, mask);
+ 
+ 	if (cdns_ctrl->caps1->has_dma) {
+-		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
+-		if (!cdns_ctrl->dmac) {
+-			dev_err(cdns_ctrl->dev,
+-				"Unable to get a DMA channel\n");
+-			ret = -EBUSY;
++		cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
++		if (IS_ERR(cdns_ctrl->dmac)) {
++			ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
++					    "Failed to get a DMA channel\n");
+ 			goto disable_irq;
+ 		}
+ 	}
+ 
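++	/* The DMA channel must be acquired before its dma_device can be used. */
++	dma_dev = cdns_ctrl->dmac->device;
++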
++	cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
++						  cdns_ctrl->io.size,
++						  DMA_BIDIRECTIONAL, 0);
++
++	ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
++	if (ret) {
++		dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
++		goto dma_release_chnl;
++	}
++
+ 	nand_controller_init(&cdns_ctrl->controller);
+ 	INIT_LIST_HEAD(&cdns_ctrl->chips);
+ 
+@@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ 	if (ret) {
+ 		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
+ 			ret);
+-		goto dma_release_chnl;
++		goto unmap_dma_resource;
+ 	}
+ 
+ 	kfree(cdns_ctrl->buf);
+ 	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
+ 	if (!cdns_ctrl->buf) {
+ 		ret = -ENOMEM;
+-		goto dma_release_chnl;
++		goto unmap_dma_resource;
+ 	}
+ 
+ 	return 0;
+ 
++unmap_dma_resource:
++	dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
++			   cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
++
+ dma_release_chnl:
+ 	if (cdns_ctrl->dmac)
+ 		dma_release_channel(cdns_ctrl->dmac);
+@@ -2956,6 +2972,8 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
+ static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
+ {
+ 	cadence_nand_chips_cleanup(cdns_ctrl);
++	dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma,
++			   cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+ 	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
+ 	kfree(cdns_ctrl->buf);
+ 	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
+@@ -3020,7 +3038,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
+ 	cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
+ 	if (IS_ERR(cdns_ctrl->io.virt))
+ 		return PTR_ERR(cdns_ctrl->io.virt);
++
+ 	cdns_ctrl->io.dma = res->start;
++	cdns_ctrl->io.size = resource_size(res);
+ 
+ 	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
+ 	if (IS_ERR(dt->clk))
+diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
+index b5ad7118c49a2b..175211fe6a5ed2 100644
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ 	int ret;
+ 
+ 	nor->program_opcode = op;
+-	ret = spi_nor_write_data(nor, to, 1, buf);
++	ret = spi_nor_write_data(nor, to, len, buf);
+ 	if (ret < 0)
+ 		return ret;
+ 	WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 8167cc5fb0df13..78d2a19593d180 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -1116,6 +1116,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
+ 	return gve_xdp_tx_queue_id(priv, 0);
+ }
+ 
++static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
++{
++	switch (priv->queue_format) {
++	case GVE_GQI_QPL_FORMAT:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ /* gqi napi handler defined in gve_main.c */
+ int gve_napi_poll(struct napi_struct *napi, int budget);
+ 
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 533e659b15b31c..92237fb0b60c1e 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1903,6 +1903,8 @@ static void gve_turndown(struct gve_priv *priv)
+ 	/* Stop tx queues */
+ 	netif_tx_disable(priv->dev);
+ 
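++	/* Stop advertising XDP redirect-target support while queues are down. */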
++	xdp_features_clear_redirect_target(priv->dev);
++
+ 	gve_clear_napi_enabled(priv);
+ 	gve_clear_report_stats(priv);
+ 
+@@ -1972,6 +1974,9 @@ static void gve_turnup(struct gve_priv *priv)
+ 		napi_schedule(&block->napi);
+ 	}
+ 
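++	/* Re-advertise NDO_XMIT only for queue formats that support it. */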
++	if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
++		xdp_features_set_redirect_target(priv->dev, false);
++
+ 	gve_set_napi_enabled(priv);
+ }
+ 
+@@ -2246,7 +2251,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+ 	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+ 		xdp_features = NETDEV_XDP_ACT_BASIC;
+ 		xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+-		xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ 		xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ 	} else {
+ 		xdp_features = 0;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index e95ae0d39948c8..0676fc547b6f47 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2408,6 +2408,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	dma_addr_t data_dma_addr;
+ 	struct netdev_queue *txq;
+ 	unsigned long lpar_rc;
++	unsigned int skblen;
+ 	union sub_crq tx_crq;
+ 	unsigned int offset;
+ 	bool use_scrq_send_direct = false;
+@@ -2522,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	tx_buff->skb = skb;
+ 	tx_buff->index = bufidx;
+ 	tx_buff->pool_index = queue_num;
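++	/* Snapshot the length now; the skb may be freed before stats are updated. */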
++	skblen = skb->len;
+ 
+ 	memset(&tx_crq, 0, sizeof(tx_crq));
+ 	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
+@@ -2614,7 +2616,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		netif_stop_subqueue(netdev, queue_num);
+ 	}
+ 
+-	tx_bytes += skb->len;
++	tx_bytes += skblen;
+ 	txq_trans_cond_update(txq);
+ 	ret = NETDEV_TX_OK;
+ 	goto out;
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+index 2ec62c8d86e1c1..59486fe2ad18c2 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+ 	struct sk_buff *skb;
+ 
+ 	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
++	if (!skb)
++		return NULL;
+ 	skb_put(skb, size);
+ 
+ 	return skb;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index ae743991117c45..300cf7fed8bca0 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -2888,6 +2888,7 @@ static int axienet_probe(struct platform_device *pdev)
+ 
+ 	lp->phylink_config.dev = &ndev->dev;
+ 	lp->phylink_config.type = PHYLINK_NETDEV;
++	lp->phylink_config.mac_managed_pm = true;
+ 	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ 		MAC_10FD | MAC_100FD | MAC_1000FD;
+ 
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index bc658bc6088546..eea0875e4e5518 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
+ {
+ 	struct geneve_net *gn = net_generic(net, geneve_net_id);
+ 	struct geneve_dev *geneve, *next;
+-	struct net_device *dev, *aux;
+ 
+-	/* gather any geneve devices that were moved into this ns */
+-	for_each_netdev_safe(net, dev, aux)
+-		if (dev->rtnl_link_ops == &geneve_link_ops)
+-			unregister_netdevice_queue(dev, head);
+-
+-	/* now gather any other geneve devices that were created in this ns */
+-	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+-		/* If geneve->dev is in the same netns, it was already added
+-		 * to the list by the previous loop.
+-		 */
+-		if (!net_eq(dev_net(geneve->dev), net))
+-			unregister_netdevice_queue(geneve->dev, head);
+-	}
++	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
++		geneve_dellink(geneve->dev, head);
+ }
+ 
+ static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index fbabada7d3ba98..2cb13e092a856b 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -2479,11 +2479,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ 	list_for_each_entry(net, net_list, exit_list) {
+ 		struct gtp_net *gn = net_generic(net, gtp_net_id);
+ 		struct gtp_dev *gtp, *gtp_next;
+-		struct net_device *dev;
+-
+-		for_each_netdev(net, dev)
+-			if (dev->rtnl_link_ops == &gtp_link_ops)
+-				gtp_dellink(dev, dev_to_kill);
+ 
+ 		list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ 			gtp_dellink(gtp->dev, dev_to_kill);
+diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
+index 0af7db80b2f883..7cfc36cadb5761 100644
+--- a/drivers/net/pse-pd/pd692x0.c
++++ b/drivers/net/pse-pd/pd692x0.c
+@@ -999,13 +999,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+ 	return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
+ }
+ 
+-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+-					int id)
++static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
++				   int id)
+ {
+ 	struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ 	struct pd692x0_msg msg, buf = {0};
+-	int mW, uV, uA, ret;
+-	s64 tmp_64;
++	int ret;
+ 
+ 	msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ 	msg.sub[2] = id;
+@@ -1013,48 +1012,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+-	if (ret < 0)
+-		return ret;
+-	mW = ret;
+-
+-	ret = pd692x0_pi_get_voltage(pcdev, id);
+-	if (ret < 0)
+-		return ret;
+-	uV = ret;
+-
+-	tmp_64 = mW;
+-	tmp_64 *= 1000000000ull;
+-	/* uA = mW * 1000000000 / uV */
+-	uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+-	return uA;
++	return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
+ }
+ 
+-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
+-					int id, int max_uA)
++static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
++				   int id, int max_mW)
+ {
+ 	struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ 	struct device *dev = &priv->client->dev;
+ 	struct pd692x0_msg msg, buf = {0};
+-	int uV, ret, mW;
+-	s64 tmp_64;
++	int ret;
+ 
+ 	ret = pd692x0_fw_unavailable(priv);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = pd692x0_pi_get_voltage(pcdev, id);
+-	if (ret < 0)
+-		return ret;
+-	uV = ret;
+-
+ 	msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ 	msg.sub[2] = id;
+-	tmp_64 = uV;
+-	tmp_64 *= max_uA;
+-	/* mW = uV * uA / 1000000000 */
+-	mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+-	ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
++	ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1068,8 +1043,8 @@ static const struct pse_controller_ops pd692x0_ops = {
+ 	.pi_disable = pd692x0_pi_disable,
+ 	.pi_is_enabled = pd692x0_pi_is_enabled,
+ 	.pi_get_voltage = pd692x0_pi_get_voltage,
+-	.pi_get_current_limit = pd692x0_pi_get_current_limit,
+-	.pi_set_current_limit = pd692x0_pi_set_current_limit,
++	.pi_get_pw_limit = pd692x0_pi_get_pw_limit,
++	.pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ };
+ 
+ #define PD692X0_FW_LINE_MAX_SZ 0xff
+diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
+index 2906ce173f66cd..bb509d973e914e 100644
+--- a/drivers/net/pse-pd/pse_core.c
++++ b/drivers/net/pse-pd/pse_core.c
+@@ -291,32 +291,24 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
+ 	return ret;
+ }
+ 
+-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+-				   int id,
+-				   struct netlink_ext_ack *extack,
+-				   struct pse_control_status *status);
+-
+ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+ {
+ 	struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ 	const struct pse_controller_ops *ops;
+-	struct netlink_ext_ack extack = {};
+-	struct pse_control_status st = {};
+-	int id, uV, ret;
++	int id, uV, mW, ret;
+ 	s64 tmp_64;
+ 
+ 	ops = pcdev->ops;
+ 	id = rdev_get_id(rdev);
++	if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
++		return -EOPNOTSUPP;
++
+ 	mutex_lock(&pcdev->lock);
+-	if (ops->pi_get_current_limit) {
+-		ret = ops->pi_get_current_limit(pcdev, id);
++	ret = ops->pi_get_pw_limit(pcdev, id);
++	if (ret < 0)
+ 		goto out;
+-	}
++	mW = ret;
+ 
+-	/* If pi_get_current_limit() callback not populated get voltage
+-	 * from pi_get_voltage() and power limit from ethtool_get_status()
+-	 *  to calculate current limit.
+-	 */
+ 	ret = _pse_pi_get_voltage(rdev);
+ 	if (!ret) {
+ 		dev_err(pcdev->dev, "Voltage null\n");
+@@ -327,16 +319,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+ 		goto out;
+ 	uV = ret;
+ 
+-	ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
+-	if (ret)
+-		goto out;
+-
+-	if (!st.c33_avail_pw_limit) {
+-		ret = -ENODATA;
+-		goto out;
+-	}
+-
+-	tmp_64 = st.c33_avail_pw_limit;
++	tmp_64 = mW;
+ 	tmp_64 *= 1000000000ull;
+ 	/* uA = mW * 1000000000 / uV */
+ 	ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+@@ -351,15 +334,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ {
+ 	struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ 	const struct pse_controller_ops *ops;
+-	int id, ret;
++	int id, mW, ret;
++	s64 tmp_64;
+ 
+ 	ops = pcdev->ops;
+-	if (!ops->pi_set_current_limit)
++	if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
+ 		return -EOPNOTSUPP;
+ 
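++	/* Error out on requests above the maximum PI current. */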
++	if (max_uA > MAX_PI_CURRENT)
++		return -ERANGE;
++
+ 	id = rdev_get_id(rdev);
+ 	mutex_lock(&pcdev->lock);
+-	ret = ops->pi_set_current_limit(pcdev, id, max_uA);
++	ret = _pse_pi_get_voltage(rdev);
++	if (!ret) {
++		dev_err(pcdev->dev, "Voltage null\n");
++		ret = -ERANGE;
++		goto out;
++	}
++	if (ret < 0)
++		goto out;
++
++	tmp_64 = ret;
++	tmp_64 *= max_uA;
++	/* mW = uA * uV / 1000000000 */
++	mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
++	ret = ops->pi_set_pw_limit(pcdev, id, mW);
++out:
+ 	mutex_unlock(&pcdev->lock);
+ 
+ 	return ret;
+@@ -403,11 +404,9 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
+ 
+ 	rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+ 
+-	if (pcdev->ops->pi_set_current_limit) {
++	if (pcdev->ops->pi_set_pw_limit)
+ 		rinit_data->constraints.valid_ops_mask |=
+ 			REGULATOR_CHANGE_CURRENT;
+-		rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+-	}
+ 
+ 	rinit_data->supply_regulator = "vpwr";
+ 
+@@ -736,23 +735,6 @@ struct pse_control *of_pse_control_get(struct device_node *node)
+ }
+ EXPORT_SYMBOL_GPL(of_pse_control_get);
+ 
+-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+-				   int id,
+-				   struct netlink_ext_ack *extack,
+-				   struct pse_control_status *status)
+-{
+-	const struct pse_controller_ops *ops;
+-
+-	ops = pcdev->ops;
+-	if (!ops->ethtool_get_status) {
+-		NL_SET_ERR_MSG(extack,
+-			       "PSE driver does not support status report");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return ops->ethtool_get_status(pcdev, id, extack, status);
+-}
+-
+ /**
+  * pse_ethtool_get_status - get status of PSE control
+  * @psec: PSE control pointer
+@@ -765,11 +747,21 @@ int pse_ethtool_get_status(struct pse_control *psec,
+ 			   struct netlink_ext_ack *extack,
+ 			   struct pse_control_status *status)
+ {
++	const struct pse_controller_ops *ops;
++	struct pse_controller_dev *pcdev;
+ 	int err;
+ 
+-	mutex_lock(&psec->pcdev->lock);
+-	err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
+-	mutex_unlock(&psec->pcdev->lock);
++	pcdev = psec->pcdev;
++	ops = pcdev->ops;
++	if (!ops->ethtool_get_status) {
++		NL_SET_ERR_MSG(extack,
++			       "PSE driver does not support status report");
++		return -EOPNOTSUPP;
++	}
++
++	mutex_lock(&pcdev->lock);
++	err = ops->ethtool_get_status(pcdev, psec->id, extack, status);
++	mutex_unlock(&pcdev->lock);
+ 
+ 	return err;
+ }
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index e8930146847af4..b1b46c2713e1cc 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -283,8 +283,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ {
+ 	if (ns && nsid != ns->head->ns_id) {
+ 		dev_err(ctrl->device,
+-			"%s: nsid (%u) in cmd does not match nsid (%u)"
+-			"of namespace\n",
++			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
+ 			current->comm, nsid, ns->head->ns_id);
+ 		return false;
+ 	}
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 841238f38fddab..d7c193028e7c36 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1449,11 +1449,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+ 		msg.msg_control = cbuf;
+ 		msg.msg_controllen = sizeof(cbuf);
+ 	}
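++	/* With MSG_WAITALL, a short read of the icresp PDU means a dead connection. */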
++	msg.msg_flags = MSG_WAITALL;
+ 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ 			iov.iov_len, msg.msg_flags);
+-	if (ret < 0) {
++	if (ret < sizeof(*icresp)) {
+ 		pr_warn("queue %d: failed to receive icresp, error %d\n",
+ 			nvme_tcp_queue_id(queue), ret);
++		if (ret >= 0)
++			ret = -ECONNRESET;
+ 		goto free_icresp;
+ 	}
+ 	ret = -ENOTCONN;
+@@ -1565,7 +1568,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+ 			  ctrl->io_queues[HCTX_TYPE_POLL];
+ }
+ 
+-/**
++/*
+  * Track the number of queues assigned to each cpu using a global per-cpu
+  * counter and select the least used cpu from the mq_map. Our goal is to spread
+  * different controllers I/O threads across different cpu cores.
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index fde6c555af619e..56e3c870ab4c3a 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -606,6 +606,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
+ 			goto out_dev_put;
+ 	}
+ 
++	if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
++		goto out_pr_exit;
++
+ 	nvmet_ns_changed(subsys, ns->nsid);
+ 	ns->enabled = true;
+ 	xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
+@@ -613,6 +616,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
+ out_unlock:
+ 	mutex_unlock(&subsys->lock);
+ 	return ret;
++out_pr_exit:
++	if (ns->pr.enable)
++		nvmet_pr_exit_ns(ns);
+ out_dev_put:
+ 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+@@ -638,6 +644,19 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
+ 
+ 	mutex_unlock(&subsys->lock);
+ 
++	/*
++	 * Now that we removed the namespaces from the lookup list, we
++	 * can kill the per_cpu ref and wait for any remaining references
++	 * to be dropped, as well as a RCU grace period for anyone only
++	 * using the namespace under rcu_read_lock().  Note that we can't
++	 * use call_rcu here as we need to ensure the namespaces have
++	 * been fully destroyed before unloading the module.
++	 */
++	percpu_ref_kill(&ns->ref);
++	synchronize_rcu();
++	wait_for_completion(&ns->disable_done);
++	percpu_ref_exit(&ns->ref);
++
+ 	if (ns->pr.enable)
+ 		nvmet_pr_exit_ns(ns);
+ 
+@@ -660,22 +679,6 @@ void nvmet_ns_free(struct nvmet_ns *ns)
+ 	if (ns->nsid == subsys->max_nsid)
+ 		subsys->max_nsid = nvmet_max_nsid(subsys);
+ 
+-	mutex_unlock(&subsys->lock);
+-
+-	/*
+-	 * Now that we removed the namespaces from the lookup list, we
+-	 * can kill the per_cpu ref and wait for any remaining references
+-	 * to be dropped, as well as a RCU grace period for anyone only
+-	 * using the namepace under rcu_read_lock().  Note that we can't
+-	 * use call_rcu here as we need to ensure the namespaces have
+-	 * been fully destroyed before unloading the module.
+-	 */
+-	percpu_ref_kill(&ns->ref);
+-	synchronize_rcu();
+-	wait_for_completion(&ns->disable_done);
+-	percpu_ref_exit(&ns->ref);
+-
+-	mutex_lock(&subsys->lock);
+ 	subsys->nr_namespaces--;
+ 	mutex_unlock(&subsys->lock);
+ 
+@@ -705,9 +708,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+ 	ns->nsid = nsid;
+ 	ns->subsys = subsys;
+ 
+-	if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+-		goto out_free;
+-
+ 	if (ns->nsid > subsys->max_nsid)
+ 		subsys->max_nsid = nsid;
+ 
+@@ -730,8 +730,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+ 	return ns;
+ out_exit:
+ 	subsys->max_nsid = nvmet_max_nsid(subsys);
+-	percpu_ref_exit(&ns->ref);
+-out_free:
+ 	kfree(ns);
+ out_unlock:
+ 	mutex_unlock(&subsys->lock);
+diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
+index 3b59a86a764b11..1adebcb263bd08 100644
+--- a/drivers/pci/devres.c
++++ b/drivers/pci/devres.c
+@@ -411,46 +411,20 @@ static inline bool mask_contains_bar(int mask, int bar)
+ 	return mask & BIT(bar);
+ }
+ 
+-/*
+- * This is a copy of pci_intx() used to bypass the problem of recursive
+- * function calls due to the hybrid nature of pci_intx().
+- */
+-static void __pcim_intx(struct pci_dev *pdev, int enable)
+-{
+-	u16 pci_command, new;
+-
+-	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+-
+-	if (enable)
+-		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+-	else
+-		new = pci_command | PCI_COMMAND_INTX_DISABLE;
+-
+-	if (new != pci_command)
+-		pci_write_config_word(pdev, PCI_COMMAND, new);
+-}
+-
+ static void pcim_intx_restore(struct device *dev, void *data)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	struct pcim_intx_devres *res = data;
+ 
+-	__pcim_intx(pdev, res->orig_intx);
++	pci_intx(pdev, res->orig_intx);
+ }
+ 
+-static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
++static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
+ {
+-	struct pcim_intx_devres *res;
+-
+-	res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+-	if (res)
+-		return res;
++	u16 pci_command;
+ 
+-	res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+-	if (res)
+-		devres_add(dev, res);
+-
+-	return res;
++	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
++	res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
+ }
+ 
+ /**
+@@ -466,16 +440,28 @@ static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
+ int pcim_intx(struct pci_dev *pdev, int enable)
+ {
+ 	struct pcim_intx_devres *res;
++	struct device *dev = &pdev->dev;
+ 
+-	res = get_or_create_intx_devres(&pdev->dev);
+-	if (!res)
+-		return -ENOMEM;
++	/*
++	 * pcim_intx() must only restore the INTx value that existed before the
++	 * driver was loaded, i.e., before it called pcim_intx() for the
++	 * first time.
++	 */
++	res = devres_find(dev, pcim_intx_restore, NULL, NULL);
++	if (!res) {
++		res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
++		if (!res)
++			return -ENOMEM;
++
++		save_orig_intx(pdev, res);
++		devres_add(dev, res);
++	}
+ 
+-	res->orig_intx = !enable;
+-	__pcim_intx(pdev, enable);
++	pci_intx(pdev, enable);
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(pcim_intx);
+ 
+ static void pcim_disable_device(void *pdev_raw)
+ {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 661f98c6c63a39..b0ae4bc1a1bee0 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4488,11 +4488,6 @@ void pci_disable_parity(struct pci_dev *dev)
+  * @enable: boolean: whether to enable or disable PCI INTx
+  *
+  * Enables/disables PCI INTx for device @pdev
+- *
+- * NOTE:
+- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+- * when pcim_enable_device() has been called in advance. This hybrid feature is
+- * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
+  */
+ void pci_intx(struct pci_dev *pdev, int enable)
+ {
+@@ -4505,15 +4500,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
+ 	else
+ 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
+ 
+-	if (new != pci_command) {
+-		/* Preserve the "hybrid" behavior for backwards compatibility */
+-		if (pci_is_managed(pdev)) {
+-			WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+-			return;
+-		}
++	if (new == pci_command)
++		return;
+ 
+-		pci_write_config_word(pdev, PCI_COMMAND, new);
+-	}
++	pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
+ EXPORT_SYMBOL_GPL(pci_intx);
+ 
+diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
+index 49c383eb678541..13e37b49d9d01e 100644
+--- a/drivers/platform/cznic/Kconfig
++++ b/drivers/platform/cznic/Kconfig
+@@ -6,6 +6,7 @@
+ 
+ menuconfig CZNIC_PLATFORMS
+ 	bool "Platform support for CZ.NIC's Turris hardware"
++	depends on ARCH_MVEBU || COMPILE_TEST
+ 	help
+ 	  Say Y here to be able to choose driver support for CZ.NIC's Turris
+ 	  devices. This option alone does not add any kernel code.
+diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
+index fa27195f074e7d..3c3158f31a484d 100644
+--- a/drivers/power/supply/axp20x_battery.c
++++ b/drivers/power/supply/axp20x_battery.c
+@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+ 
+ 	/*
+ 	 * If a fault is detected it must also be cleared; if the
+-	 * condition persists it should reappear (This is an
+-	 * assumption, it's actually not documented). A restart was
+-	 * not sufficient to clear the bit in testing despite the
+-	 * register listed as POR.
++	 * condition persists it should reappear. A restart was not
++	 * sufficient to clear the bit in testing despite the register
++	 * listed as POR.
+ 	 */
+ 	case POWER_SUPPLY_PROP_HEALTH:
+ 		ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
+@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
+ 		switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
+ 		case AXP717_BATT_UVLO_2_5V:
+ 			val->intval = POWER_SUPPLY_HEALTH_DEAD;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_UVLO_2_5V,
+-					   AXP717_BATT_UVLO_2_5V);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_UVLO_2_5V,
++					  AXP717_BATT_UVLO_2_5V);
+ 			return 0;
+ 
+ 		case AXP717_BATT_OVER_TEMP:
+ 			val->intval = POWER_SUPPLY_HEALTH_HOT;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_OVER_TEMP,
+-					   AXP717_BATT_OVER_TEMP);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_OVER_TEMP,
++					  AXP717_BATT_OVER_TEMP);
+ 			return 0;
+ 
+ 		case AXP717_BATT_UNDER_TEMP:
+ 			val->intval = POWER_SUPPLY_HEALTH_COLD;
+-			regmap_update_bits(axp20x_batt->regmap,
+-					   AXP717_PMU_FAULT,
+-					   AXP717_BATT_UNDER_TEMP,
+-					   AXP717_BATT_UNDER_TEMP);
++			regmap_write_bits(axp20x_batt->regmap,
++					  AXP717_PMU_FAULT,
++					  AXP717_BATT_UNDER_TEMP,
++					  AXP717_BATT_UNDER_TEMP);
+ 			return 0;
+ 
+ 		default:
+diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
+index 652c1f213af1c2..4f28ef1bba1a3c 100644
+--- a/drivers/power/supply/da9150-fg.c
++++ b/drivers/power/supply/da9150-fg.c
+@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
+ 				      DA9150_QIF_SD_GAIN_SIZE);
+ 	da9150_fg_read_sync_end(fg);
+ 
+-	div = (u64) (sd_gain * shunt_val * 65536ULL);
++	div = 65536ULL * sd_gain * shunt_val;
+ 	do_div(div, 1000000);
+-	res = (u64) (iavg * 1000000ULL);
++	res = 1000000ULL * iavg;
+ 	do_div(res, div);
+ 
+ 	val->intval = (int) res;
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index e36e3ea165d3b2..2f34761e64135c 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -588,6 +588,15 @@ static int ism_dev_init(struct ism_dev *ism)
+ 	return ret;
+ }
+ 
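++/* Device release callback: frees the ism_dev once the last reference drops. */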
++static void ism_dev_release(struct device *dev)
++{
++	struct ism_dev *ism;
++
++	ism = container_of(dev, struct ism_dev, dev);
++
++	kfree(ism);
++}
++
+ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct ism_dev *ism;
+@@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	dev_set_drvdata(&pdev->dev, ism);
+ 	ism->pdev = pdev;
+ 	ism->dev.parent = &pdev->dev;
++	ism->dev.release = ism_dev_release;
+ 	device_initialize(&ism->dev);
+ 	dev_set_name(&ism->dev, dev_name(&pdev->dev));
+ 	ret = device_add(&ism->dev);
+@@ -637,7 +647,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	device_del(&ism->dev);
+ err_dev:
+ 	dev_set_drvdata(&pdev->dev, NULL);
+-	kfree(ism);
++	put_device(&ism->dev);
+ 
+ 	return ret;
+ }
+@@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev)
+ 	pci_disable_device(pdev);
+ 	device_del(&ism->dev);
+ 	dev_set_drvdata(&pdev->dev, NULL);
+-	kfree(ism);
++	put_device(&ism->dev);
+ }
+ 
+ static struct pci_driver ism_driver = {
+diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
+index ae42e3a9127fc1..16913c3ef65ca4 100644
+--- a/drivers/soc/loongson/loongson2_guts.c
++++ b/drivers/soc/loongson/loongson2_guts.c
+@@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
+ 	if (of_property_read_string(root, "model", &machine))
+ 		of_property_read_string_index(root, "compatible", 0, &machine);
+ 	of_node_put(root);
+-	if (machine)
++	if (machine) {
+ 		soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
++		if (!soc_dev_attr.machine)
++			return -ENOMEM;
++	}
+ 
+ 	svr = loongson2_guts_get_svr();
+ 	soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
+diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
+index 322a543b8c278a..d0f397c9024201 100644
+--- a/drivers/tee/optee/supp.c
++++ b/drivers/tee/optee/supp.c
+@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ 	struct optee *optee = tee_get_drvdata(ctx->teedev);
+ 	struct optee_supp *supp = &optee->supp;
+ 	struct optee_supp_req *req;
+-	bool interruptable;
+ 	u32 ret;
+ 
+ 	/*
+@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ 	/*
+ 	 * Wait for supplicant to process and return result, once we've
+ 	 * returned from wait_for_completion(&req->c) successfully we have
+-	 * exclusive access again.
++	 * exclusive access again. Allow the wait to be killable such that
++	 * exclusive access again. Allow the wait to be killable so that
++	 * it does not block indefinitely if the supplicant hangs for some
++	 * reason.
+-	while (wait_for_completion_interruptible(&req->c)) {
++	if (wait_for_completion_killable(&req->c)) {
+ 		mutex_lock(&supp->mutex);
+-		interruptable = !supp->ctx;
+-		if (interruptable) {
+-			/*
+-			 * There's no supplicant available and since the
+-			 * supp->mutex currently is held none can
+-			 * become available until the mutex released
+-			 * again.
+-			 *
+-			 * Interrupting an RPC to supplicant is only
+-			 * allowed as a way of slightly improving the user
+-			 * experience in case the supplicant hasn't been
+-			 * started yet. During normal operation the supplicant
+-			 * will serve all requests in a timely manner and
+-			 * interrupting then wouldn't make sense.
+-			 */
+-			if (req->in_queue) {
+-				list_del(&req->link);
+-				req->in_queue = false;
+-			}
++		if (req->in_queue) {
++			list_del(&req->link);
++			req->in_queue = false;
+ 		}
+ 		mutex_unlock(&supp->mutex);
+-
+-		if (interruptable) {
+-			req->ret = TEEC_ERROR_COMMUNICATION;
+-			break;
+-		}
++		req->ret = TEEC_ERROR_COMMUNICATION;
+ 	}
+ 
+ 	ret = req->ret;
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 0050d6253c05d1..1a050ec9912cb8 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -166,6 +166,7 @@ static struct sci_port sci_ports[SCI_NPORTS];
+ static unsigned long sci_ports_in_use;
+ static struct uart_driver sci_uart_driver;
+ static bool sci_uart_earlycon;
++static bool sci_uart_earlycon_dev_probing;
+ 
+ static inline struct sci_port *
+ to_sci_port(struct uart_port *uart)
+@@ -3057,10 +3058,6 @@ static int sci_init_single(struct platform_device *dev,
+ 		ret = sci_init_clocks(sci_port, &dev->dev);
+ 		if (ret < 0)
+ 			return ret;
+-
+-		port->dev = &dev->dev;
+-
+-		pm_runtime_enable(&dev->dev);
+ 	}
+ 
+ 	port->type		= p->type;
+@@ -3087,11 +3084,6 @@ static int sci_init_single(struct platform_device *dev,
+ 	return 0;
+ }
+ 
+-static void sci_cleanup_single(struct sci_port *port)
+-{
+-	pm_runtime_disable(port->port.dev);
+-}
+-
+ #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+     defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
+ static void serial_console_putchar(struct uart_port *port, unsigned char ch)
+@@ -3261,8 +3253,6 @@ static void sci_remove(struct platform_device *dev)
+ 	sci_ports_in_use &= ~BIT(port->port.line);
+ 	uart_remove_one_port(&sci_uart_driver, &port->port);
+ 
+-	sci_cleanup_single(port);
+-
+ 	if (port->port.fifosize > 1)
+ 		device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
+ 	if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
+@@ -3397,7 +3387,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ static int sci_probe_single(struct platform_device *dev,
+ 				      unsigned int index,
+ 				      struct plat_sci_port *p,
+-				      struct sci_port *sciport)
++				      struct sci_port *sciport,
++				      struct resource *sci_res)
+ {
+ 	int ret;
+ 
+@@ -3426,6 +3417,11 @@ static int sci_probe_single(struct platform_device *dev,
+ 	if (ret)
+ 		return ret;
+ 
++	sciport->port.dev = &dev->dev;
++	ret = devm_pm_runtime_enable(&dev->dev);
++	if (ret)
++		return ret;
++
+ 	sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
+ 	if (IS_ERR(sciport->gpios))
+ 		return PTR_ERR(sciport->gpios);
+@@ -3439,13 +3435,31 @@ static int sci_probe_single(struct platform_device *dev,
+ 		sciport->port.flags |= UPF_HARD_FLOW;
+ 	}
+ 
+-	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+-	if (ret) {
+-		sci_cleanup_single(sciport);
+-		return ret;
++	if (sci_uart_earlycon && sci_ports[0].port.mapbase == sci_res->start) {
++		/*
++		 * In case:
++		 * - this is the earlycon port (mapped on index 0 in sci_ports[]) and
++		 * - it now maps to an alias other than zero and
++		 * - the earlycon is still alive (e.g., "earlycon keep_bootcon" was
++		 *   passed in bootargs)
++		 *
++		 * we need to avoid disabling clocks and PM domains through the runtime
++		 * PM APIs called in __device_attach(). For this, increment the runtime
++		 * PM reference counter (the clocks and PM domains were already enabled
++		 * by the bootloader). Otherwise the earlycon may access the HW while
++		 * its clocks are disabled, leading to failures (an infinite loop in
++		 * sci_poll_put_char()).
++		 */
++		pm_runtime_get_noresume(&dev->dev);
++
++		/*
++		 * Skip cleaning up sci_ports[0] in early_console_exit(); this
++		 * port is the same as the earlycon one.
++		 */
++		sci_uart_earlycon_dev_probing = true;
+ 	}
+ 
+-	return 0;
++	return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ }
+ 
+ static int sci_probe(struct platform_device *dev)
+@@ -3503,7 +3517,7 @@ static int sci_probe(struct platform_device *dev)
+ 
+ 	platform_set_drvdata(dev, sp);
+ 
+-	ret = sci_probe_single(dev, dev_id, p, sp);
++	ret = sci_probe_single(dev, dev_id, p, sp, res);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3586,6 +3600,22 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver,
+ #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+ static struct plat_sci_port port_cfg;
+ 
++static int early_console_exit(struct console *co)
++{
++	struct sci_port *sci_port = &sci_ports[0];
++
++	/*
++	 * Clean the slot used by earlycon. A new SCI device might
++	 * map to this slot.
++	 */
++	if (!sci_uart_earlycon_dev_probing) {
++		memset(sci_port, 0, sizeof(*sci_port));
++		sci_uart_earlycon = false;
++	}
++
++	return 0;
++}
++
+ static int __init early_console_setup(struct earlycon_device *device,
+ 				      int type)
+ {
+@@ -3603,6 +3633,8 @@ static int __init early_console_setup(struct earlycon_device *device,
+ 		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);
+ 
+ 	device->con->write = serial_console_write;
++	device->con->exit = early_console_exit;
++
+ 	return 0;
+ }
+ static int __init sci_early_console_setup(struct earlycon_device *device,
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 47260d65066a89..da82598fcef8a8 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
+ 			/* Our transmit completed. See if there's more to go.
+ 			 * f_midi_transmit eats req, don't queue it again. */
+ 			req->length = 0;
+-			f_midi_transmit(midi);
++			queue_work(system_highpri_wq, &midi->work);
+ 			return;
+ 		}
+ 		break;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index d14ecbe24d7754..0dd24d12898638 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1145,14 +1145,19 @@ static bool find_next_delalloc_bitmap(struct folio *folio,
+ }
+ 
+ /*
+- * helper for extent_writepage(), doing all of the delayed allocation setup.
++ * Do all of the delayed allocation setup.
+  *
+- * This returns 1 if btrfs_run_delalloc_range function did all the work required
+- * to write the page (copy into inline extent).  In this case the IO has
+- * been started and the page is already unlocked.
++ * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
++ * The @folio should no longer be touched (treat it as already unlocked).
+  *
+- * This returns 0 if all went well (page still locked)
+- * This returns < 0 if there were errors (page still locked)
++ * Return 0 if there are still dirty blocks that need to be submitted through
++ * extent_writepage_io().
++ * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
++ * submitted, and @folio is still kept locked.
++ *
++ * Return <0 if any error was hit.
++ * Any allocated ordered extent range covering this folio will be marked
++ * finished (IOERR), and @folio is still kept locked.
+  */
+ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 						 struct folio *folio,
+@@ -1170,6 +1175,16 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 	 * last delalloc end.
+ 	 */
+ 	u64 last_delalloc_end = 0;
++	/*
++	 * The range end (exclusive) of the last successfully finished delalloc
++	 * range.
++	 * Any range covered by ordered extent must either be manually marked
++	 * finished (error handling), or has IO submitted (and finish the
++	 * ordered extent normally).
++	 *
++	 * This records the end of ordered extent cleanup if we hit an error.
++	 */
++	u64 last_finished_delalloc_end = page_start;
+ 	u64 delalloc_start = page_start;
+ 	u64 delalloc_end = page_end;
+ 	u64 delalloc_to_write = 0;
+@@ -1238,11 +1253,19 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 			found_len = last_delalloc_end + 1 - found_start;
+ 
+ 		if (ret >= 0) {
++			/*
++			 * Some delalloc ranges may have been created by previous folios.
++			 * Thus we still need to clean up this range during error
++			 * handling.
++			 */
++			last_finished_delalloc_end = found_start;
+ 			/* No errors hit so far, run the current delalloc range. */
+ 			ret = btrfs_run_delalloc_range(inode, folio,
+ 						       found_start,
+ 						       found_start + found_len - 1,
+ 						       wbc);
++			if (ret >= 0)
++				last_finished_delalloc_end = found_start + found_len;
+ 		} else {
+ 			/*
+ 			 * We've hit an error during previous delalloc range,
+@@ -1277,8 +1300,22 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
+ 
+ 		delalloc_start = found_start + found_len;
+ 	}
+-	if (ret < 0)
++	/*
++	 * It's possible we had some ordered extents created before we hit
++	 * an error, cleanup non-async successfully created delalloc ranges.
++	 */
++	if (unlikely(ret < 0)) {
++		unsigned int bitmap_size = min(
++				(last_finished_delalloc_end - page_start) >>
++				fs_info->sectorsize_bits,
++				fs_info->sectors_per_page);
++
++		for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
++			btrfs_mark_ordered_io_finished(inode, folio,
++				page_start + (bit << fs_info->sectorsize_bits),
++				fs_info->sectorsize, false);
+ 		return ret;
++	}
+ out:
+ 	if (last_delalloc_end)
+ 		delalloc_end = last_delalloc_end;
+@@ -1394,6 +1431,7 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	unsigned long range_bitmap = 0;
+ 	bool submitted_io = false;
++	bool error = false;
+ 	const u64 folio_start = folio_pos(folio);
+ 	u64 cur;
+ 	int bit;
+@@ -1436,11 +1474,26 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 			break;
+ 		}
+ 		ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
+-		if (ret < 0)
+-			goto out;
++		if (unlikely(ret < 0)) {
++			/*
++			 * bio_ctrl may contain a bio crossing several folios.
++			 * Submit it immediately so that the bio has a chance
++			 * to finish normally, other than marked as error.
++			 */
++			submit_one_bio(bio_ctrl);
++			/*
++			 * We failed to grab the extent map, which should be very rare.
++			 * Since there is no bio submitted to finish the ordered
++			 * extent, we have to manually finish this sector.
++			 */
++			btrfs_mark_ordered_io_finished(inode, folio, cur,
++						       fs_info->sectorsize, false);
++			error = true;
++			continue;
++		}
+ 		submitted_io = true;
+ 	}
+-out:
++
+ 	/*
+ 	 * If we didn't submitted any sector (>= i_size), folio dirty get
+ 	 * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
+@@ -1448,8 +1501,11 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+ 	 *
+ 	 * Here we set writeback and clear for the range. If the full folio
+ 	 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
++	 *
++	 * If we hit any error, the corresponding sector will still be dirty,
++	 * so there is no need to clear PAGECACHE_TAG_DIRTY.
+ 	 */
+-	if (!submitted_io) {
++	if (!submitted_io && !error) {
+ 		btrfs_folio_set_writeback(fs_info, folio, start, len);
+ 		btrfs_folio_clear_writeback(fs_info, folio, start, len);
+ 	}
+@@ -1467,15 +1523,14 @@ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
+  */
+ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
+ {
+-	struct inode *inode = folio->mapping->host;
+-	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+-	const u64 page_start = folio_pos(folio);
++	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ 	int ret;
+ 	size_t pg_offset;
+-	loff_t i_size = i_size_read(inode);
++	loff_t i_size = i_size_read(&inode->vfs_inode);
+ 	unsigned long end_index = i_size >> PAGE_SHIFT;
+ 
+-	trace_extent_writepage(folio, inode, bio_ctrl->wbc);
++	trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
+ 
+ 	WARN_ON(!folio_test_locked(folio));
+ 
+@@ -1499,13 +1554,13 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
+ 	if (ret < 0)
+ 		goto done;
+ 
+-	ret = writepage_delalloc(BTRFS_I(inode), folio, bio_ctrl);
++	ret = writepage_delalloc(inode, folio, bio_ctrl);
+ 	if (ret == 1)
+ 		return 0;
+ 	if (ret)
+ 		goto done;
+ 
+-	ret = extent_writepage_io(BTRFS_I(inode), folio, folio_pos(folio),
++	ret = extent_writepage_io(inode, folio, folio_pos(folio),
+ 				  PAGE_SIZE, bio_ctrl, i_size);
+ 	if (ret == 1)
+ 		return 0;
+@@ -1513,12 +1568,8 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
+ 	bio_ctrl->wbc->nr_to_write--;
+ 
+ done:
+-	if (ret) {
+-		btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
+-					       page_start, PAGE_SIZE, !ret);
++	if (ret < 0)
+ 		mapping_set_error(folio->mapping, ret);
+-	}
+-
+ 	/*
+ 	 * Only unlock ranges that are submitted. As there can be some async
+ 	 * submitted ranges inside the folio.
+@@ -2295,11 +2346,8 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
+ 		if (ret == 1)
+ 			goto next_page;
+ 
+-		if (ret) {
+-			btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
+-						       cur, cur_len, !ret);
++		if (ret)
+ 			mapping_set_error(mapping, ret);
+-		}
+ 		btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
+ 		if (ret < 0)
+ 			found_error = true;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d1c8f6730a5687..b4160b1c77573d 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2385,8 +2385,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_fol
+ 
+ out:
+ 	if (ret < 0)
+-		btrfs_cleanup_ordered_extents(inode, locked_folio, start,
+-					      end - start + 1);
++		btrfs_cleanup_ordered_extents(inode, NULL, start, end - start + 1);
+ 	return ret;
+ }
+ 
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index f146e06c97eb69..0d149b315a832e 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -1403,7 +1403,7 @@ int cifs_get_inode_info(struct inode **inode,
+ 	struct cifs_fattr fattr = {};
+ 	int rc;
+ 
+-	if (is_inode_cache_good(*inode)) {
++	if (!data && is_inode_cache_good(*inode)) {
+ 		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+ 		return 0;
+ 	}
+@@ -1502,7 +1502,7 @@ int smb311_posix_get_inode_info(struct inode **inode,
+ 	struct cifs_fattr fattr = {};
+ 	int rc;
+ 
+-	if (is_inode_cache_good(*inode)) {
++	if (!data && is_inode_cache_good(*inode)) {
+ 		cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
+ 		return 0;
+ 	}
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index a588f6b3f3b6a5..793e9b2b79d6f9 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4964,6 +4964,10 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ 			next_buffer = (char *)cifs_buf_get();
+ 		else
+ 			next_buffer = (char *)cifs_small_buf_get();
++		if (!next_buffer) {
++			cifs_server_dbg(VFS, "No memory for (large) SMB response\n");
++			return -1;
++		}
+ 		memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
+ 	}
+ 
+diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
+index 9ff3cafd867962..1182c6fa61807d 100644
+--- a/fs/xfs/scrub/common.h
++++ b/fs/xfs/scrub/common.h
+@@ -212,7 +212,6 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
+ bool xchk_dir_looks_zapped(struct xfs_inode *dp);
+ bool xchk_pptr_looks_zapped(struct xfs_inode *ip);
+ 
+-#ifdef CONFIG_XFS_ONLINE_REPAIR
+ /* Decide if a repair is required. */
+ static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
+ {
+@@ -232,10 +231,6 @@ static inline bool xchk_could_repair(const struct xfs_scrub *sc)
+ 	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
+ 		!(sc->flags & XREP_ALREADY_FIXED);
+ }
+-#else
+-# define xchk_needs_repair(sc)		(false)
+-# define xchk_could_repair(sc)		(false)
+-#endif /* CONFIG_XFS_ONLINE_REPAIR */
+ 
+ int xchk_metadata_inode_forks(struct xfs_scrub *sc);
+ 
+diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
+index b649da1a93eb8c..b3b1fe62814e7b 100644
+--- a/fs/xfs/scrub/repair.h
++++ b/fs/xfs/scrub/repair.h
+@@ -173,7 +173,16 @@ bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+ #else
+ 
+ #define xrep_ino_dqattach(sc)	(0)
+-#define xrep_will_attempt(sc)	(false)
++
++/*
++ * When online repair is not built into the kernel, we still want to attempt
++ * the repair so that the stub xrep_attempt below will return EOPNOTSUPP.
++ */
++static inline bool xrep_will_attempt(const struct xfs_scrub *sc)
++{
++	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) ||
++		xchk_needs_repair(sc->sm);
++}
+ 
+ static inline int
+ xrep_attempt(
+diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
+index 950f5a58dcd967..4ba02a490eface 100644
+--- a/fs/xfs/scrub/scrub.c
++++ b/fs/xfs/scrub/scrub.c
+@@ -149,6 +149,18 @@ xchk_probe(
+ 	if (xchk_should_terminate(sc, &error))
+ 		return error;
+ 
++	/*
++	 * If the caller is probing to see if repair works but repair isn't
++	 * built into the kernel, return EOPNOTSUPP because that's the signal
++	 * that userspace expects.  If online repair is built in, set the
++	 * CORRUPT flag (without any of the usual tracing/logging) to force us
++	 * into xrep_probe.
++	 */
++	if (xchk_could_repair(sc)) {
++		if (!IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR))
++			return -EOPNOTSUPP;
++		sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 332cee28566208..14fc1b39c0cf3e 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -873,10 +873,11 @@ struct mm_struct {
+ 		 */
+ 		unsigned int nr_cpus_allowed;
+ 		/**
+-		 * @max_nr_cid: Maximum number of concurrency IDs allocated.
++		 * @max_nr_cid: Maximum number of allowed concurrency
++		 *              IDs allocated.
+ 		 *
+-		 * Track the highest number of concurrency IDs allocated for the
+-		 * mm.
++		 * Track the highest number of allowed concurrency IDs
++		 * allocated for the mm.
+ 		 */
+ 		atomic_t max_nr_cid;
+ 		/**
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8268be0723eee9..bb71ad82b42ba8 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3138,6 +3138,8 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
+ }
+ 
+ int netdev_boot_setup_check(struct net_device *dev);
++struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
++				   const char *hwaddr);
+ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ 				       const char *hwaddr);
+ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index db9b47ce3eefdc..f05903dd7695ef 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2297,6 +2297,7 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
+ 				    struct pci_dev *dev) { }
+ #endif
+ 
++int pcim_intx(struct pci_dev *pdev, int enabled);
+ int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
+ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
+index 591a53e082e650..df1592022d938e 100644
+--- a/include/linux/pse-pd/pse.h
++++ b/include/linux/pse-pd/pse.h
+@@ -75,12 +75,8 @@ struct pse_control_status {
+  * @pi_disable: Configure the PSE PI as disabled.
+  * @pi_get_voltage: Return voltage similarly to get_voltage regulator
+  *		    callback.
+- * @pi_get_current_limit: Get the configured current limit similarly to
+- *			  get_current_limit regulator callback.
+- * @pi_set_current_limit: Configure the current limit similarly to
+- *			  set_current_limit regulator callback.
+- *			  Should not return an error in case of MAX_PI_CURRENT
+- *			  current value set.
++ * @pi_get_pw_limit: Get the configured power limit of the PSE PI.
++ * @pi_set_pw_limit: Configure the power limit of the PSE PI.
+  */
+ struct pse_controller_ops {
+ 	int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+@@ -91,10 +87,10 @@ struct pse_controller_ops {
+ 	int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
+ 	int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
+ 	int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
+-	int (*pi_get_current_limit)(struct pse_controller_dev *pcdev,
+-				    int id);
+-	int (*pi_set_current_limit)(struct pse_controller_dev *pcdev,
+-				    int id, int max_uA);
++	int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev,
++			       int id);
++	int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev,
++			       int id, int max_mW);
+ };
+ 
+ struct module;
+diff --git a/include/net/gro.h b/include/net/gro.h
+index b9b58c1f8d190b..7b548f91754bf3 100644
+--- a/include/net/gro.h
++++ b/include/net/gro.h
+@@ -11,6 +11,9 @@
+ #include <net/udp.h>
+ #include <net/hotdata.h>
+ 
++/* This should be increased if a protocol with a bigger head is added. */
++#define GRO_MAX_HEAD (MAX_HEADER + 128)
++
+ struct napi_gro_cb {
+ 	union {
+ 		struct {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index e9b37b76e894bb..bc04599547c36d 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -41,6 +41,7 @@
+ #include <net/inet_ecn.h>
+ #include <net/dst.h>
+ #include <net/mptcp.h>
++#include <net/xfrm.h>
+ 
+ #include <linux/seq_file.h>
+ #include <linux/memcontrol.h>
+@@ -683,6 +684,19 @@ void tcp_fin(struct sock *sk);
+ void tcp_check_space(struct sock *sk);
+ void tcp_sack_compress_send_ack(struct sock *sk);
+ 
++static inline void tcp_cleanup_skb(struct sk_buff *skb)
++{
++	skb_dst_drop(skb);
++	secpath_reset(skb);
++}
++
++static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
++{
++	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
++	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
++	__skb_queue_tail(&sk->sk_receive_queue, skb);
++}
++
+ /* tcp_timer.c */
+ void tcp_init_xmit_timers(struct sock *);
+ static inline void tcp_clear_xmit_timers(struct sock *sk)
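
The two tcp.h helpers above split one invariant into a setter and a checker:
tcp_cleanup_skb() drops the dst and secpath state in one place, and
tcp_add_receive_queue() asserts the skb is already clean before queueing it.
A rough user-space analogue of that contract, with made-up names standing in
for the skb machinery:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct pkt { void *dst; void *secpath; };

static void pkt_cleanup(struct pkt *p)          /* ~ tcp_cleanup_skb() */
{
	p->dst = NULL;                          /* ~ skb_dst_drop() */
	p->secpath = NULL;                      /* ~ secpath_reset() */
}

static void queue_add(struct pkt *p)            /* ~ tcp_add_receive_queue() */
{
	assert(p->dst == NULL);                 /* ~ DEBUG_NET_WARN_ON_ONCE() */
	assert(p->secpath == NULL);
	puts("queued");
}

int main(void)
{
	struct pkt p = { .dst = &p, .secpath = &p };

	pkt_cleanup(&p);
	queue_add(&p);
	return 0;
}
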
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index d062c5c69211ba..0b0dfef9348036 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2045,6 +2045,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 		req->opcode = 0;
+ 		return io_init_fail_req(req, -EINVAL);
+ 	}
++	opcode = array_index_nospec(opcode, IORING_OP_LAST);
++
+ 	def = &io_issue_defs[opcode];
+ 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
+ 		/* enforce forwards compatibility on users */
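
The array_index_nospec() line above is the stock Spectre-v1 mitigation: even
after the bounds check, the index is clamped again through a pure data
dependency, so a mispredicted branch cannot feed an out-of-range value into
the following table load.  A self-contained sketch of the masking idea; it
mirrors, but is not, the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* All-ones mask when idx < size, all-zeroes otherwise.  Assumes a 64-bit
 * size_t and relies on an arithmetic right shift of a negative value,
 * which mainstream compilers provide, much like the kernel's generic
 * helper does. */
static inline size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)((int64_t)(idx - size) >> 63);

	return idx & mask;
}

int main(void)
{
	size_t table_size = 8;

	printf("%zu %zu\n", index_nospec(5, table_size),   /* 5, in range  */
	       index_nospec(42, table_size));              /* clamped to 0 */
	return 0;
}
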
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 29bb3010f9c06d..64322f463c2bd4 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -866,7 +866,15 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+-	ret = io_iter_do_read(rw, &io->iter);
++	if (unlikely(req->opcode == IORING_OP_READ_MULTISHOT)) {
++		void *cb_copy = rw->kiocb.ki_complete;
++
++		rw->kiocb.ki_complete = NULL;
++		ret = io_iter_do_read(rw, &io->iter);
++		rw->kiocb.ki_complete = cb_copy;
++	} else {
++		ret = io_iter_do_read(rw, &io->iter);
++	}
+ 
+ 	/*
+ 	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
+@@ -891,7 +899,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
+ 	} else if (ret == -EIOCBQUEUED) {
+ 		return IOU_ISSUE_SKIP_COMPLETE;
+ 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
+-		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
++		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
++		   (issue_flags & IO_URING_F_MULTISHOT)) {
+ 		/* read all, failed, already did sync or don't want to retry */
+ 		goto done;
+ 	}
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 179848ad33e978..d9d55fa4d01a71 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -103,48 +103,50 @@ struct bsd_acct_struct {
+ 	atomic_long_t		count;
+ 	struct rcu_head		rcu;
+ 	struct mutex		lock;
+-	int			active;
++	bool			active;
++	bool			check_space;
+ 	unsigned long		needcheck;
+ 	struct file		*file;
+ 	struct pid_namespace	*ns;
+ 	struct work_struct	work;
+ 	struct completion	done;
++	acct_t			ac;
+ };
+ 
+-static void do_acct_process(struct bsd_acct_struct *acct);
++static void fill_ac(struct bsd_acct_struct *acct);
++static void acct_write_process(struct bsd_acct_struct *acct);
+ 
+ /*
+  * Check the amount of free space and suspend/resume accordingly.
+  */
+-static int check_free_space(struct bsd_acct_struct *acct)
++static bool check_free_space(struct bsd_acct_struct *acct)
+ {
+ 	struct kstatfs sbuf;
+ 
+-	if (time_is_after_jiffies(acct->needcheck))
+-		goto out;
++	if (!acct->check_space)
++		return acct->active;
+ 
+ 	/* May block */
+ 	if (vfs_statfs(&acct->file->f_path, &sbuf))
+-		goto out;
++		return acct->active;
+ 
+ 	if (acct->active) {
+ 		u64 suspend = sbuf.f_blocks * SUSPEND;
+ 		do_div(suspend, 100);
+ 		if (sbuf.f_bavail <= suspend) {
+-			acct->active = 0;
++			acct->active = false;
+ 			pr_info("Process accounting paused\n");
+ 		}
+ 	} else {
+ 		u64 resume = sbuf.f_blocks * RESUME;
+ 		do_div(resume, 100);
+ 		if (sbuf.f_bavail >= resume) {
+-			acct->active = 1;
++			acct->active = true;
+ 			pr_info("Process accounting resumed\n");
+ 		}
+ 	}
+ 
+ 	acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
+-out:
+ 	return acct->active;
+ }
+ 
+@@ -189,7 +191,11 @@ static void acct_pin_kill(struct fs_pin *pin)
+ {
+ 	struct bsd_acct_struct *acct = to_acct(pin);
+ 	mutex_lock(&acct->lock);
+-	do_acct_process(acct);
++	/*
++	 * Fill the accounting struct with the exiting task's info
++	 * before punting to the workqueue.
++	 */
++	fill_ac(acct);
+ 	schedule_work(&acct->work);
+ 	wait_for_completion(&acct->done);
+ 	cmpxchg(&acct->ns->bacct, pin, NULL);
+@@ -202,6 +208,9 @@ static void close_work(struct work_struct *work)
+ {
+ 	struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work);
+ 	struct file *file = acct->file;
++
++	/* We were fired by acct_pin_kill() which holds acct->lock. */
++	acct_write_process(acct);
+ 	if (file->f_op->flush)
+ 		file->f_op->flush(file, NULL);
+ 	__fput_sync(file);
+@@ -234,6 +243,20 @@ static int acct_on(struct filename *pathname)
+ 		return -EACCES;
+ 	}
+ 
++	/* Exclude kernel internal filesystems. */
++	if (file_inode(file)->i_sb->s_flags & (SB_NOUSER | SB_KERNMOUNT)) {
++		kfree(acct);
++		filp_close(file, NULL);
++		return -EINVAL;
++	}
++
++	/* Exclude procfs and sysfs. */
++	if (file_inode(file)->i_sb->s_iflags & SB_I_USERNS_VISIBLE) {
++		kfree(acct);
++		filp_close(file, NULL);
++		return -EINVAL;
++	}
++
+ 	if (!(file->f_mode & FMODE_CAN_WRITE)) {
+ 		kfree(acct);
+ 		filp_close(file, NULL);
+@@ -430,13 +453,27 @@ static u32 encode_float(u64 value)
+  *  do_exit() or when switching to a different output file.
+  */
+ 
+-static void fill_ac(acct_t *ac)
++static void fill_ac(struct bsd_acct_struct *acct)
+ {
+ 	struct pacct_struct *pacct = &current->signal->pacct;
++	struct file *file = acct->file;
++	acct_t *ac = &acct->ac;
+ 	u64 elapsed, run_time;
+ 	time64_t btime;
+ 	struct tty_struct *tty;
+ 
++	lockdep_assert_held(&acct->lock);
++
++	if (time_is_after_jiffies(acct->needcheck)) {
++		acct->check_space = false;
++
++		/* Don't fill in @ac if nothing will be written. */
++		if (!acct->active)
++			return;
++	} else {
++		acct->check_space = true;
++	}
++
+ 	/*
+ 	 * Fill the accounting struct with the needed info as recorded
+ 	 * by the different kernel functions.
+@@ -484,64 +521,61 @@ static void fill_ac(acct_t *ac)
+ 	ac->ac_majflt = encode_comp_t(pacct->ac_majflt);
+ 	ac->ac_exitcode = pacct->ac_exitcode;
+ 	spin_unlock_irq(&current->sighand->siglock);
+-}
+-/*
+- *  do_acct_process does all actual work. Caller holds the reference to file.
+- */
+-static void do_acct_process(struct bsd_acct_struct *acct)
+-{
+-	acct_t ac;
+-	unsigned long flim;
+-	const struct cred *orig_cred;
+-	struct file *file = acct->file;
+ 
+-	/*
+-	 * Accounting records are not subject to resource limits.
+-	 */
+-	flim = rlimit(RLIMIT_FSIZE);
+-	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+-	/* Perform file operations on behalf of whoever enabled accounting */
+-	orig_cred = override_creds(file->f_cred);
+-
+-	/*
+-	 * First check to see if there is enough free_space to continue
+-	 * the process accounting system.
+-	 */
+-	if (!check_free_space(acct))
+-		goto out;
+-
+-	fill_ac(&ac);
+ 	/* we really need to bite the bullet and change layout */
+-	ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid);
+-	ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid);
++	ac->ac_uid = from_kuid_munged(file->f_cred->user_ns, current_uid());
++	ac->ac_gid = from_kgid_munged(file->f_cred->user_ns, current_gid());
+ #if ACCT_VERSION == 1 || ACCT_VERSION == 2
+ 	/* backward-compatible 16 bit fields */
+-	ac.ac_uid16 = ac.ac_uid;
+-	ac.ac_gid16 = ac.ac_gid;
++	ac->ac_uid16 = ac->ac_uid;
++	ac->ac_gid16 = ac->ac_gid;
+ #elif ACCT_VERSION == 3
+ 	{
+ 		struct pid_namespace *ns = acct->ns;
+ 
+-		ac.ac_pid = task_tgid_nr_ns(current, ns);
++		ac->ac_pid = task_tgid_nr_ns(current, ns);
+ 		rcu_read_lock();
+-		ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent),
+-					     ns);
++		ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns);
+ 		rcu_read_unlock();
+ 	}
+ #endif
++}
++
++static void acct_write_process(struct bsd_acct_struct *acct)
++{
++	struct file *file = acct->file;
++	const struct cred *cred;
++	acct_t *ac = &acct->ac;
++
++	/* Perform file operations on behalf of whoever enabled accounting */
++	cred = override_creds(file->f_cred);
++
+ 	/*
+-	 * Get freeze protection. If the fs is frozen, just skip the write
+-	 * as we could deadlock the system otherwise.
++	 * First check to see if there is enough free_space to continue
++	 * the process accounting system. Then get freeze protection. If
++	 * the fs is frozen, just skip the write as we could deadlock
++	 * the system otherwise.
+ 	 */
+-	if (file_start_write_trylock(file)) {
++	if (check_free_space(acct) && file_start_write_trylock(file)) {
+ 		/* it's been opened O_APPEND, so position is irrelevant */
+ 		loff_t pos = 0;
+-		__kernel_write(file, &ac, sizeof(acct_t), &pos);
++		__kernel_write(file, ac, sizeof(acct_t), &pos);
+ 		file_end_write(file);
+ 	}
+-out:
++
++	revert_creds(cred);
++}
++
++static void do_acct_process(struct bsd_acct_struct *acct)
++{
++	unsigned long flim;
++
++	/* Accounting records are not subject to resource limits. */
++	flim = rlimit(RLIMIT_FSIZE);
++	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
++	fill_ac(acct);
++	acct_write_process(acct);
+ 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+-	revert_creds(orig_cred);
+ }
+ 
+ /**
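
The acct.c rework is a two-phase split: fill_ac() snapshots the exiting
task's data while acct->lock is held, and acct_write_process() later writes
the cached record from close_work(), where current is a kworker and must not
be consulted.  A toy user-space analogue of that shape; the names and the
single-threaded flow are illustrative only:

#include <pthread.h>
#include <stdio.h>

struct record { int uid; };
struct acct   { pthread_mutex_t lock; struct record ac; };

static void fill_record(struct acct *a, int uid)    /* ~ fill_ac() */
{
	/* caller holds a->lock, like lockdep_assert_held(&acct->lock) */
	a->ac.uid = uid;
}

static void write_record(struct acct *a)            /* ~ acct_write_process() */
{
	printf("writing uid=%d\n", a->ac.uid);      /* ~ __kernel_write() */
}

int main(void)
{
	struct acct a = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.lock);
	fill_record(&a, 1000);      /* in the kernel: the exiting task */
	write_record(&a);           /* in the kernel: later, from the workqueue */
	pthread_mutex_unlock(&a.lock);
	return 0;
}
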
+diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
+index 8caf56a308d964..eac5d1edefe97b 100644
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -39,7 +39,7 @@
+  */
+ 
+ /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
+-#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
++#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
+ #define KERN_VM_SZ (SZ_4G + GUARD_SZ)
+ 
+ struct bpf_arena {
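
The new GUARD_SZ arithmetic checks out by hand: a 16-bit off field gives a
raw guard of 1 << 16 = 64 KiB, and round_up() keeps it a multiple of two
pages.  With 4 KiB pages nothing changes; with 64 KiB pages the guard grows
to 128 KiB.  A quick illustrative computation:

#include <stdio.h>

#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long raw = 1UL << 16;                     /* 16-bit off field */

	printf("4K pages:  %lu\n", ROUND_UP(raw, 4096UL << 1));   /* 65536  */
	printf("64K pages: %lu\n", ROUND_UP(raw, 65536UL << 1));  /* 131072 */
	return 0;
}
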
+diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
+index 20f05de92e9c3d..7996fcea3755ec 100644
+--- a/kernel/bpf/bpf_cgrp_storage.c
++++ b/kernel/bpf/bpf_cgrp_storage.c
+@@ -154,7 +154,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
+ 
+ static void cgroup_storage_map_free(struct bpf_map *map)
+ {
+-	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
++	bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
+ }
+ 
+ /* *gfp_flags* is a hidden argument provided by the verifier */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 10d0975deadabe..c89604e6b6aabd 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -6507,6 +6507,8 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
+ 	/* rxrpc */
+ 	{ "rxrpc_recvdata", 0x1 },
+ 	{ "rxrpc_resend", 0x10 },
++	/* skb */
++	{"kfree_skb", 0x1000},
+ 	/* sunrpc */
+ 	{ "xs_stream_read_data", 0x1 },
+ 	/* ... from xprt_cong_event event class */
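
As I read the raw_tp_null_args encoding, the mask reserves four bits per
tracepoint argument, so 0x1000 marks argument 3 (0-based) of kfree_skb, the
rx_sk pointer, as possibly NULL.  An illustrative decode under that
assumption:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x1000;    /* the kfree_skb entry added above */

	for (int arg = 0; arg < 8; arg++)
		if (mask & (0x1u << (arg * 4)))
			printf("arg %d may be NULL\n", arg);   /* prints: arg 3 */
	return 0;
}
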
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index e1cfe890e0be64..1499d8caa9a351 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -268,8 +268,6 @@ static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma
+ 		/* allow writable mapping for the consumer_pos only */
+ 		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ 			return -EPERM;
+-	} else {
+-		vm_flags_clear(vma, VM_MAYWRITE);
+ 	}
+ 	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb,
+@@ -289,8 +287,6 @@ static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma
+ 			 * position, and the ring buffer data itself.
+ 			 */
+ 			return -EPERM;
+-	} else {
+-		vm_flags_clear(vma, VM_MAYWRITE);
+ 	}
+ 	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 5684e8ce132d54..36cb18b73e7251 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1031,7 +1031,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
+ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ 	struct bpf_map *map = filp->private_data;
+-	int err;
++	int err = 0;
+ 
+ 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
+ 		return -ENOTSUPP;
+@@ -1055,24 +1055,33 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
+ 			err = -EACCES;
+ 			goto out;
+ 		}
++		bpf_map_write_active_inc(map);
+ 	}
++out:
++	mutex_unlock(&map->freeze_mutex);
++	if (err)
++		return err;
+ 
+ 	/* set default open/close callbacks */
+ 	vma->vm_ops = &bpf_map_default_vmops;
+ 	vma->vm_private_data = map;
+ 	vm_flags_clear(vma, VM_MAYEXEC);
++	/* If mapping is read-only, then disallow potentially re-mapping with
++	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
++	 * means that as far as BPF map's memory-mapped VMAs are concerned,
++	 * VM_WRITE and VM_MAYWRITE are equivalent: if one of them is set,
++	 * both should be set, so we can forget about VM_MAYWRITE and always
++	 * check just VM_WRITE.
++	 */
+ 	if (!(vma->vm_flags & VM_WRITE))
+-		/* disallow re-mapping with PROT_WRITE */
+ 		vm_flags_clear(vma, VM_MAYWRITE);
+ 
+ 	err = map->ops->map_mmap(map, vma);
+-	if (err)
+-		goto out;
++	if (err) {
++		if (vma->vm_flags & VM_WRITE)
++			bpf_map_write_active_dec(map);
++	}
+ 
+-	if (vma->vm_flags & VM_MAYWRITE)
+-		bpf_map_write_active_inc(map);
+-out:
+-	mutex_unlock(&map->freeze_mutex);
+ 	return err;
+ }
+ 
+@@ -1964,8 +1973,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ 	return err;
+ }
+ 
+-#define MAP_LOOKUP_RETRIES 3
+-
+ int generic_map_lookup_batch(struct bpf_map *map,
+ 				    const union bpf_attr *attr,
+ 				    union bpf_attr __user *uattr)
+@@ -1975,8 +1982,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 	void __user *values = u64_to_user_ptr(attr->batch.values);
+ 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
+ 	void *buf, *buf_prevkey, *prev_key, *key, *value;
+-	int err, retry = MAP_LOOKUP_RETRIES;
+ 	u32 value_size, cp, max_count;
++	int err;
+ 
+ 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
+ 		return -EINVAL;
+@@ -2022,14 +2029,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 		err = bpf_map_copy_value(map, key, value,
+ 					 attr->batch.elem_flags);
+ 
+-		if (err == -ENOENT) {
+-			if (retry) {
+-				retry--;
+-				continue;
+-			}
+-			err = -EINTR;
+-			break;
+-		}
++		if (err == -ENOENT)
++			goto next_key;
+ 
+ 		if (err)
+ 			goto free_buf;
+@@ -2044,12 +2045,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
+ 			goto free_buf;
+ 		}
+ 
++		cp++;
++next_key:
+ 		if (!prev_key)
+ 			prev_key = buf_prevkey;
+ 
+ 		swap(prev_key, key);
+-		retry = MAP_LOOKUP_RETRIES;
+-		cp++;
+ 		cond_resched();
+ 	}
+ 
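
The generic_map_lookup_batch() change replaces the retry/-EINTR dance with a
plain skip: a key that disappears between get_next_key() and the lookup
(-ENOENT) simply advances to the next key and does not count toward the
copied total.  The control flow, reduced to a toy loop:

#include <stdio.h>

int main(void)
{
	int vals[] = { 10, -1, 30, -1, 50 };    /* -1 ~ key deleted (-ENOENT) */
	int copied = 0;

	for (int key = 0; key < 5; key++) {
		if (vals[key] < 0)
			continue;               /* ~ goto next_key */
		printf("key %d -> %d\n", key, vals[key]);
		copied++;                       /* ~ cp++ */
	}
	printf("copied %d\n", copied);          /* 3 */
	return 0;
}
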
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 66744d60904d57..f3e121888d050f 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -3666,10 +3666,28 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
+ {
+ 	struct cpumask *cidmask = mm_cidmask(mm);
+ 	struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
+-	int cid = __this_cpu_read(pcpu_cid->recent_cid);
++	int cid, max_nr_cid, allowed_max_nr_cid;
+ 
++	/*
++	 * After shrinking the number of threads or reducing the number
++	 * of allowed cpus, reduce the value of max_nr_cid so expansion
++	 * of cid allocation will preserve cache locality if the number
++	 * of threads or allowed cpus increase again.
++	 */
++	max_nr_cid = atomic_read(&mm->max_nr_cid);
++	while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed),
++					   atomic_read(&mm->mm_users))),
++	       max_nr_cid > allowed_max_nr_cid) {
++		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */
++		if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) {
++			max_nr_cid = allowed_max_nr_cid;
++			break;
++		}
++	}
+ 	/* Try to re-use recent cid. This improves cache locality. */
+-	if (!mm_cid_is_unset(cid) && !cpumask_test_and_set_cpu(cid, cidmask))
++	cid = __this_cpu_read(pcpu_cid->recent_cid);
++	if (!mm_cid_is_unset(cid) && cid < max_nr_cid &&
++	    !cpumask_test_and_set_cpu(cid, cidmask))
+ 		return cid;
+ 	/*
+ 	 * Expand cid allocation if the maximum number of concurrency
+@@ -3677,8 +3695,9 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
+ 	 * and number of threads. Expanding cid allocation as much as
+ 	 * possible improves cache locality.
+ 	 */
+-	cid = atomic_read(&mm->max_nr_cid);
++	cid = max_nr_cid;
+ 	while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) {
++		/* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. */
+ 		if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1))
+ 			continue;
+ 		if (!cpumask_test_and_set_cpu(cid, cidmask))
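
The clamp loop added to __mm_cid_try_get() is the standard try-cmpxchg
shape: on failure the helper reloads the current value into the local
variable, so each iteration re-evaluates against fresh state.  C11 atomics
expose the same semantics, which makes for a runnable sketch (the numbers
are arbitrary):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int max_nr_cid = 8;
	int allowed = 4;           /* ~ min(nr_cpus_allowed, mm_users) */
	int max = atomic_load(&max_nr_cid);

	while (max > allowed) {
		/* on failure, max is reloaded with the fresh value */
		if (atomic_compare_exchange_weak(&max_nr_cid, &max, allowed)) {
			max = allowed;
			break;
		}
	}
	printf("max_nr_cid clamped to %d\n", max);    /* 4 */
	return 0;
}
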
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 2e113f8b13a28d..b1861a57e2b062 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3238,15 +3238,22 @@ static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
+  *  The filter_hash update uses just the append_hash() function
+  *  and the notrace_hash does not.
+  */
+-static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash)
++static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
++		       int size_bits)
+ {
+ 	struct ftrace_func_entry *entry;
+ 	int size;
+ 	int i;
+ 
+-	/* An empty hash does everything */
+-	if (ftrace_hash_empty(*hash))
+-		return 0;
++	if (*hash) {
++		/* An empty hash does everything */
++		if (ftrace_hash_empty(*hash))
++			return 0;
++	} else {
++		*hash = alloc_ftrace_hash(size_bits);
++		if (!*hash)
++			return -ENOMEM;
++	}
+ 
+ 	/* If new_hash has everything make hash have everything */
+ 	if (ftrace_hash_empty(new_hash)) {
+@@ -3310,16 +3317,18 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has
+ /* Return a new hash that has a union of all @ops->filter_hash entries */
+ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
+ {
+-	struct ftrace_hash *new_hash;
++	struct ftrace_hash *new_hash = NULL;
+ 	struct ftrace_ops *subops;
++	int size_bits;
+ 	int ret;
+ 
+-	new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits);
+-	if (!new_hash)
+-		return NULL;
++	if (ops->func_hash->filter_hash)
++		size_bits = ops->func_hash->filter_hash->size_bits;
++	else
++		size_bits = FTRACE_HASH_DEFAULT_BITS;
+ 
+ 	list_for_each_entry(subops, &ops->subop_list, list) {
+-		ret = append_hash(&new_hash, subops->func_hash->filter_hash);
++		ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits);
+ 		if (ret < 0) {
+ 			free_ftrace_hash(new_hash);
+ 			return NULL;
+@@ -3328,7 +3337,8 @@ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
+ 		if (ftrace_hash_empty(new_hash))
+ 			break;
+ 	}
+-	return new_hash;
++	/* Can't return NULL as that means this failed */
++	return new_hash ? : EMPTY_HASH;
+ }
+ 
+ /* Make @ops trace everything except what all its subops do not trace */
+@@ -3523,7 +3533,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
+ 		filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
+ 		if (!filter_hash)
+ 			return -ENOMEM;
+-		ret = append_hash(&filter_hash, subops->func_hash->filter_hash);
++		ret = append_hash(&filter_hash, subops->func_hash->filter_hash,
++				  size_bits);
+ 		if (ret < 0) {
+ 			free_ftrace_hash(filter_hash);
+ 			return ret;
+@@ -5759,6 +5770,9 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+ 			return -ENOENT;
+ 		free_hash_entry(hash, entry);
+ 		return 0;
++	} else if (__ftrace_lookup_ip(hash, ip) != NULL) {
++		/* Already exists */
++		return 0;
+ 	}
+ 
+ 	entry = add_hash_entry(hash, ip);
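
The new_hash ? : EMPTY_HASH return above uses the GNU C conditional with an
omitted middle operand (the "Elvis" operator): the first operand is
evaluated once and yielded when non-zero, otherwise the second is.  It is a
gcc/clang extension, not ISO C:

#include <stdio.h>

int main(void)
{
	const char *name = NULL;

	/* equivalent to: name != NULL ? name : "(empty)", with name
	 * evaluated only once */
	printf("%s\n", name ? : "(empty)");
	return 0;
}
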
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d2267b4406cd8a..13f817afba4c2d 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -26,6 +26,7 @@
+ #include <linux/hardirq.h>
+ #include <linux/linkage.h>
+ #include <linux/uaccess.h>
++#include <linux/cleanup.h>
+ #include <linux/vmalloc.h>
+ #include <linux/ftrace.h>
+ #include <linux/module.h>
+@@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays);
+ int trace_array_get(struct trace_array *this_tr)
+ {
+ 	struct trace_array *tr;
+-	int ret = -ENODEV;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ 		if (tr == this_tr) {
+ 			tr->ref++;
+-			ret = 0;
+-			break;
++			return 0;
+ 		}
+ 	}
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return -ENODEV;
+ }
+ 
+ static void __trace_array_put(struct trace_array *this_tr)
+@@ -1443,22 +1441,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+ 				 cond_update_fn_t update)
+ {
+-	struct cond_snapshot *cond_snapshot;
+-	int ret = 0;
++	struct cond_snapshot *cond_snapshot __free(kfree) =
++		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
++	int ret;
+ 
+-	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
+ 	if (!cond_snapshot)
+ 		return -ENOMEM;
+ 
+ 	cond_snapshot->cond_data = cond_data;
+ 	cond_snapshot->update = update;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+-	if (tr->current_trace->use_max_tr) {
+-		ret = -EBUSY;
+-		goto fail_unlock;
+-	}
++	if (tr->current_trace->use_max_tr)
++		return -EBUSY;
+ 
+ 	/*
+ 	 * The cond_snapshot can only change to NULL without the
+@@ -1468,29 +1464,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+ 	 * do safely with only holding the trace_types_lock and not
+ 	 * having to take the max_lock.
+ 	 */
+-	if (tr->cond_snapshot) {
+-		ret = -EBUSY;
+-		goto fail_unlock;
+-	}
++	if (tr->cond_snapshot)
++		return -EBUSY;
+ 
+ 	ret = tracing_arm_snapshot_locked(tr);
+ 	if (ret)
+-		goto fail_unlock;
++		return ret;
+ 
+ 	local_irq_disable();
+ 	arch_spin_lock(&tr->max_lock);
+-	tr->cond_snapshot = cond_snapshot;
++	tr->cond_snapshot = no_free_ptr(cond_snapshot);
+ 	arch_spin_unlock(&tr->max_lock);
+ 	local_irq_enable();
+ 
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
+-
+- fail_unlock:
+-	mutex_unlock(&trace_types_lock);
+-	kfree(cond_snapshot);
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+ 
+@@ -2203,10 +2190,10 @@ static __init int init_trace_selftests(void)
+ 
+ 	selftests_can_run = true;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (list_empty(&postponed_selftests))
+-		goto out;
++		return 0;
+ 
+ 	pr_info("Running postponed tracer tests:\n");
+ 
+@@ -2235,9 +2222,6 @@ static __init int init_trace_selftests(void)
+ 	}
+ 	tracing_selftest_running = false;
+ 
+- out:
+-	mutex_unlock(&trace_types_lock);
+-
+ 	return 0;
+ }
+ core_initcall(init_trace_selftests);
+@@ -2807,7 +2791,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
+ 	int save_tracepoint_printk;
+ 	int ret;
+ 
+-	mutex_lock(&tracepoint_printk_mutex);
++	guard(mutex)(&tracepoint_printk_mutex);
+ 	save_tracepoint_printk = tracepoint_printk;
+ 
+ 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+@@ -2820,16 +2804,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
+ 		tracepoint_printk = 0;
+ 
+ 	if (save_tracepoint_printk == tracepoint_printk)
+-		goto out;
++		return ret;
+ 
+ 	if (tracepoint_printk)
+ 		static_key_enable(&tracepoint_printk_key.key);
+ 	else
+ 		static_key_disable(&tracepoint_printk_key.key);
+ 
+- out:
+-	mutex_unlock(&tracepoint_printk_mutex);
+-
+ 	return ret;
+ }
+ 
+@@ -5127,7 +5108,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
+ 	u32 tracer_flags;
+ 	int i;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
++
+ 	tracer_flags = tr->current_trace->flags->val;
+ 	trace_opts = tr->current_trace->flags->opts;
+ 
+@@ -5144,7 +5126,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
+ 		else
+ 			seq_printf(m, "no%s\n", trace_opts[i].name);
+ 	}
+-	mutex_unlock(&trace_types_lock);
+ 
+ 	return 0;
+ }
+@@ -5809,7 +5790,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
+ 		return;
+ 	}
+ 
+-	mutex_lock(&trace_eval_mutex);
++	guard(mutex)(&trace_eval_mutex);
+ 
+ 	if (!trace_eval_maps)
+ 		trace_eval_maps = map_array;
+@@ -5833,8 +5814,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
+ 		map_array++;
+ 	}
+ 	memset(map_array, 0, sizeof(*map_array));
+-
+-	mutex_unlock(&trace_eval_mutex);
+ }
+ 
+ static void trace_create_eval_file(struct dentry *d_tracer)
+@@ -5996,26 +5975,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ 				  unsigned long size, int cpu_id)
+ {
+-	int ret;
+-
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
+ 		/* make sure, this cpu is enabled in the mask */
+-		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
++			return -EINVAL;
+ 	}
+ 
+-	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
+-	if (ret < 0)
+-		ret = -ENOMEM;
+-
+-out:
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
++	return __tracing_resize_ring_buffer(tr, size, cpu_id);
+ }
+ 
+ static void update_last_data(struct trace_array *tr)
+@@ -6106,9 +6074,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ 	bool had_max_tr;
+ #endif
+-	int ret = 0;
++	int ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	update_last_data(tr);
+ 
+@@ -6116,7 +6084,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
+ 						RING_BUFFER_ALL_CPUS);
+ 		if (ret < 0)
+-			goto out;
++			return ret;
+ 		ret = 0;
+ 	}
+ 
+@@ -6124,43 +6092,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 		if (strcmp(t->name, buf) == 0)
+ 			break;
+ 	}
+-	if (!t) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!t)
++		return -EINVAL;
++
+ 	if (t == tr->current_trace)
+-		goto out;
++		return 0;
+ 
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ 	if (t->use_max_tr) {
+ 		local_irq_disable();
+ 		arch_spin_lock(&tr->max_lock);
+-		if (tr->cond_snapshot)
+-			ret = -EBUSY;
++		ret = tr->cond_snapshot ? -EBUSY : 0;
+ 		arch_spin_unlock(&tr->max_lock);
+ 		local_irq_enable();
+ 		if (ret)
+-			goto out;
++			return ret;
+ 	}
+ #endif
+ 	/* Some tracers won't work on kernel command line */
+ 	if (system_state < SYSTEM_RUNNING && t->noboot) {
+ 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
+ 			t->name);
+-		goto out;
++		return 0;
+ 	}
+ 
+ 	/* Some tracers are only allowed for the top level buffer */
+-	if (!trace_ok_for_array(t, tr)) {
+-		ret = -EINVAL;
+-		goto out;
+-	}
++	if (!trace_ok_for_array(t, tr))
++		return -EINVAL;
+ 
+ 	/* If trace pipe files are being read, we can't change the tracer */
+-	if (tr->trace_ref) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
++	if (tr->trace_ref)
++		return -EBUSY;
+ 
+ 	trace_branch_disable();
+ 
+@@ -6191,7 +6153,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 	if (!had_max_tr && t->use_max_tr) {
+ 		ret = tracing_arm_snapshot_locked(tr);
+ 		if (ret)
+-			goto out;
++			return ret;
+ 	}
+ #else
+ 	tr->current_trace = &nop_trace;
+@@ -6204,17 +6166,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 			if (t->use_max_tr)
+ 				tracing_disarm_snapshot(tr);
+ #endif
+-			goto out;
++			return ret;
+ 		}
+ 	}
+ 
+ 	tr->current_trace = t;
+ 	tr->current_trace->enabled++;
+ 	trace_branch_enable(tr);
+- out:
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static ssize_t
+@@ -6292,22 +6252,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf,
+ 	struct trace_array *tr = filp->private_data;
+ 	int ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	if (tr->current_trace->update_thresh) {
+ 		ret = tr->current_trace->update_thresh(tr);
+ 		if (ret < 0)
+-			goto out;
++			return ret;
+ 	}
+ 
+-	ret = cnt;
+-out:
+-	mutex_unlock(&trace_types_lock);
+-
+-	return ret;
++	return cnt;
+ }
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+@@ -6526,31 +6482,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	 * This is just a matter of traces coherency, the ring buffer itself
+ 	 * is protected.
+ 	 */
+-	mutex_lock(&iter->mutex);
++	guard(mutex)(&iter->mutex);
+ 
+ 	/* return any leftover data */
+ 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+ 	if (sret != -EBUSY)
+-		goto out;
++		return sret;
+ 
+ 	trace_seq_init(&iter->seq);
+ 
+ 	if (iter->trace->read) {
+ 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ 		if (sret)
+-			goto out;
++			return sret;
+ 	}
+ 
+ waitagain:
+ 	sret = tracing_wait_pipe(filp);
+ 	if (sret <= 0)
+-		goto out;
++		return sret;
+ 
+ 	/* stop when tracing is finished */
+-	if (trace_empty(iter)) {
+-		sret = 0;
+-		goto out;
+-	}
++	if (trace_empty(iter))
++		return 0;
+ 
+ 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
+ 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
+@@ -6614,9 +6568,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	if (sret == -EBUSY)
+ 		goto waitagain;
+ 
+-out:
+-	mutex_unlock(&iter->mutex);
+-
+ 	return sret;
+ }
+ 
+@@ -7208,25 +7159,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
+  */
+ int tracing_set_filter_buffering(struct trace_array *tr, bool set)
+ {
+-	int ret = 0;
+-
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	if (set && tr->no_filter_buffering_ref++)
+-		goto out;
++		return 0;
+ 
+ 	if (!set) {
+-		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
++		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
++			return -EINVAL;
+ 
+ 		--tr->no_filter_buffering_ref;
+ 	}
+- out:
+-	mutex_unlock(&trace_types_lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ struct ftrace_buffer_info {
+@@ -7302,12 +7247,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&trace_types_lock);
+ 
+-	if (tr->current_trace->use_max_tr) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
++	if (tr->current_trace->use_max_tr)
++		return -EBUSY;
+ 
+ 	local_irq_disable();
+ 	arch_spin_lock(&tr->max_lock);
+@@ -7316,24 +7259,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 	arch_spin_unlock(&tr->max_lock);
+ 	local_irq_enable();
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	switch (val) {
+ 	case 0:
+-		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+-			ret = -EINVAL;
+-			break;
+-		}
++		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
++			return -EINVAL;
+ 		if (tr->allocated_snapshot)
+ 			free_snapshot(tr);
+ 		break;
+ 	case 1:
+ /* Only allow per-cpu swap if the ring buffer supports it */
+ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
+-		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+-			ret = -EINVAL;
+-			break;
+-		}
++		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
++			return -EINVAL;
+ #endif
+ 		if (tr->allocated_snapshot)
+ 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
+@@ -7341,7 +7280,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 
+ 		ret = tracing_arm_snapshot_locked(tr);
+ 		if (ret)
+-			break;
++			return ret;
+ 
+ 		/* Now, we're going to swap */
+ 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+@@ -7368,8 +7307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		*ppos += cnt;
+ 		ret = cnt;
+ 	}
+-out:
+-	mutex_unlock(&trace_types_lock);
++
+ 	return ret;
+ }
+ 
+@@ -7755,12 +7693,11 @@ void tracing_log_err(struct trace_array *tr,
+ 
+ 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
+ 
+-	mutex_lock(&tracing_err_log_lock);
++	guard(mutex)(&tracing_err_log_lock);
++
+ 	err = get_tracing_log_err(tr, len);
+-	if (PTR_ERR(err) == -ENOMEM) {
+-		mutex_unlock(&tracing_err_log_lock);
++	if (PTR_ERR(err) == -ENOMEM)
+ 		return;
+-	}
+ 
+ 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
+ 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
+@@ -7771,7 +7708,6 @@ void tracing_log_err(struct trace_array *tr,
+ 	err->info.ts = local_clock();
+ 
+ 	list_add_tail(&err->list, &tr->err_log);
+-	mutex_unlock(&tracing_err_log_lock);
+ }
+ 
+ static void clear_tracing_err_log(struct trace_array *tr)
+@@ -9519,20 +9455,17 @@ static int instance_mkdir(const char *name)
+ 	struct trace_array *tr;
+ 	int ret;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	ret = -EEXIST;
+ 	if (trace_array_find(name))
+-		goto out_unlock;
++		return -EEXIST;
+ 
+ 	tr = trace_array_create(name);
+ 
+ 	ret = PTR_ERR_OR_ZERO(tr);
+ 
+-out_unlock:
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ 	return ret;
+ }
+ 
+@@ -9582,24 +9515,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
+ {
+ 	struct trace_array *tr;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+-		if (tr->name && strcmp(tr->name, name) == 0)
+-			goto out_unlock;
++		if (tr->name && strcmp(tr->name, name) == 0) {
++			tr->ref++;
++			return tr;
++		}
+ 	}
+ 
+ 	tr = trace_array_create_systems(name, systems, 0, 0);
+ 
+ 	if (IS_ERR(tr))
+ 		tr = NULL;
+-out_unlock:
+-	if (tr)
++	else
+ 		tr->ref++;
+ 
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ 	return tr;
+ }
+ EXPORT_SYMBOL_GPL(trace_array_get_by_name);
+@@ -9650,48 +9582,36 @@ static int __remove_instance(struct trace_array *tr)
+ int trace_array_destroy(struct trace_array *this_tr)
+ {
+ 	struct trace_array *tr;
+-	int ret;
+ 
+ 	if (!this_tr)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+-	ret = -ENODEV;
+ 
+ 	/* Making sure trace array exists before destroying it. */
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+-		if (tr == this_tr) {
+-			ret = __remove_instance(tr);
+-			break;
+-		}
++		if (tr == this_tr)
++			return __remove_instance(tr);
+ 	}
+ 
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+-
+-	return ret;
++	return -ENODEV;
+ }
+ EXPORT_SYMBOL_GPL(trace_array_destroy);
+ 
+ static int instance_rmdir(const char *name)
+ {
+ 	struct trace_array *tr;
+-	int ret;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+-	ret = -ENODEV;
+ 	tr = trace_array_find(name);
+-	if (tr)
+-		ret = __remove_instance(tr);
+-
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
++	if (!tr)
++		return -ENODEV;
+ 
+-	return ret;
++	return __remove_instance(tr);
+ }
+ 
+ static __init void create_trace_instances(struct dentry *d_tracer)
+@@ -9704,19 +9624,16 @@ static __init void create_trace_instances(struct dentry *d_tracer)
+ 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
+ 		return;
+ 
+-	mutex_lock(&event_mutex);
+-	mutex_lock(&trace_types_lock);
++	guard(mutex)(&event_mutex);
++	guard(mutex)(&trace_types_lock);
+ 
+ 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ 		if (!tr->name)
+ 			continue;
+ 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
+ 			     "Failed to create instance directory\n"))
+-			break;
++			return;
+ 	}
+-
+-	mutex_unlock(&trace_types_lock);
+-	mutex_unlock(&event_mutex);
+ }
+ 
+ static void
+@@ -9930,7 +9847,7 @@ static void trace_module_remove_evals(struct module *mod)
+ 	if (!mod->num_trace_evals)
+ 		return;
+ 
+-	mutex_lock(&trace_eval_mutex);
++	guard(mutex)(&trace_eval_mutex);
+ 
+ 	map = trace_eval_maps;
+ 
+@@ -9942,12 +9859,10 @@ static void trace_module_remove_evals(struct module *mod)
+ 		map = map->tail.next;
+ 	}
+ 	if (!map)
+-		goto out;
++		return;
+ 
+ 	*last = trace_eval_jmp_to_tail(map)->tail.next;
+ 	kfree(map);
+- out:
+-	mutex_unlock(&trace_eval_mutex);
+ }
+ #else
+ static inline void trace_module_remove_evals(struct module *mod) { }
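
Most of the trace.c churn converts open-coded lock/unlock pairs to the
scope-based guards from <linux/cleanup.h>: guard(mutex)() unlocks when the
guard variable leaves scope, __free(kfree) frees on every exit path, and
no_free_ptr() transfers ownership out by nulling the tracked pointer first.
The machinery rests on the compiler's cleanup attribute, which works in user
space too; a simplified sketch follows (the kernel macros additionally
generate unique variable names, which this one does not):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void unlock_cb(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }
static void free_cb(void **p) { free(*p); }

#define guard_mutex(m) \
	pthread_mutex_t *_guard __attribute__((cleanup(unlock_cb))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int lookup(int key)
{
	guard_mutex(&lock);                  /* ~ guard(mutex)(&trace_types_lock) */
	void *buf __attribute__((cleanup(free_cb))) = malloc(32); /* ~ __free(kfree) */

	if (!buf)
		return -1;   /* unlock runs automatically */
	if (key < 0)
		return -2;   /* unlock and free both run automatically */
	return 0;            /* ditto */
}

int main(void)
{
	printf("%d %d\n", lookup(1), lookup(-1));
	return 0;
}
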
+diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
+index d358c9935164de..df56f9b7601094 100644
+--- a/kernel/trace/trace_functions.c
++++ b/kernel/trace/trace_functions.c
+@@ -216,7 +216,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
+ 
+ 	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
+ 
+-	trace_ctx = tracing_gen_ctx();
++	trace_ctx = tracing_gen_ctx_dec();
+ 
+ 	data = this_cpu_ptr(tr->array_buffer.data);
+ 	if (!atomic_read(&data->disabled))
+@@ -321,7 +321,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ 	struct trace_array *tr = op->private;
+ 	struct trace_array_cpu *data;
+ 	unsigned int trace_ctx;
+-	unsigned long flags;
+ 	int bit;
+ 
+ 	if (unlikely(!tr->function_enabled))
+@@ -347,8 +346,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+ 	if (is_repeat_check(tr, last_info, ip, parent_ip))
+ 		goto out;
+ 
+-	local_save_flags(flags);
+-	trace_ctx = tracing_gen_ctx_flags(flags);
++	trace_ctx = tracing_gen_ctx_dec();
+ 	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+ 
+ 	trace_function(tr, ip, parent_ip, trace_ctx);
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 9ec806f989f258..65f550cb5081b9 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -1428,6 +1428,8 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ 	struct iovec *iov = *iovp;
+ 	ssize_t ret;
+ 
++	*iovp = NULL;
++
+ 	if (compat)
+ 		ret = copy_compat_iovec_from_user(iov, uvec, 1);
+ 	else
+@@ -1438,7 +1440,6 @@ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ 	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+ 	if (unlikely(ret))
+ 		return ret;
+-	*iovp = NULL;
+ 	return i->count;
+ }
+ 
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 0ceae57da7dad3..dcadd5b3457e78 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -928,7 +928,16 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
+ 			 */
+ 			end = vma->vm_end;
+ 		}
+-		VM_WARN_ON(start >= end);
++		/*
++		 * If the memory region between start and end was
++		 * originally backed by 4kB pages and then remapped to
++		 * be backed by hugepages while mmap_lock was dropped,
++		 * the adjustment for hugetlb vma above may have rounded
++		 * end down to the start address.
++		 */
++		if (start == end)
++			return 0;
++		VM_WARN_ON(start > end);
+ 	}
+ 
+ 	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
+diff --git a/mm/migrate_device.c b/mm/migrate_device.c
+index 9cf26592ac934d..5bd888223cc8b8 100644
+--- a/mm/migrate_device.c
++++ b/mm/migrate_device.c
+@@ -840,20 +840,15 @@ void migrate_device_finalize(unsigned long *src_pfns,
+ 			dst = src;
+ 		}
+ 
++		if (!folio_is_zone_device(dst))
++			folio_add_lru(dst);
+ 		remove_migration_ptes(src, dst, 0);
+ 		folio_unlock(src);
+-
+-		if (folio_is_zone_device(src))
+-			folio_put(src);
+-		else
+-			folio_putback_lru(src);
++		folio_put(src);
+ 
+ 		if (dst != src) {
+ 			folio_unlock(dst);
+-			if (folio_is_zone_device(dst))
+-				folio_put(dst);
+-			else
+-				folio_putback_lru(dst);
++			folio_put(dst);
+ 		}
+ 	}
+ }
+diff --git a/mm/zswap.c b/mm/zswap.c
+index b84c20d889b1b5..6e0c0fca583000 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1445,9 +1445,9 @@ static void shrink_worker(struct work_struct *w)
+ * main API
+ **********************************/
+ 
+-static ssize_t zswap_store_page(struct page *page,
+-				struct obj_cgroup *objcg,
+-				struct zswap_pool *pool)
++static bool zswap_store_page(struct page *page,
++			     struct obj_cgroup *objcg,
++			     struct zswap_pool *pool)
+ {
+ 	swp_entry_t page_swpentry = page_swap_entry(page);
+ 	struct zswap_entry *entry, *old;
+@@ -1456,7 +1456,7 @@ static ssize_t zswap_store_page(struct page *page,
+ 	entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
+ 	if (!entry) {
+ 		zswap_reject_kmemcache_fail++;
+-		return -EINVAL;
++		return false;
+ 	}
+ 
+ 	if (!zswap_compress(page, entry, pool))
+@@ -1483,13 +1483,17 @@ static ssize_t zswap_store_page(struct page *page,
+ 
+ 	/*
+ 	 * The entry is successfully compressed and stored in the tree, there is
+-	 * no further possibility of failure. Grab refs to the pool and objcg.
+-	 * These refs will be dropped by zswap_entry_free() when the entry is
+-	 * removed from the tree.
++	 * no further possibility of failure. Grab refs to the pool and objcg,
++	 * charge zswap memory, and increment zswap_stored_pages.
++	 * The opposite actions will be performed by zswap_entry_free()
++	 * when the entry is removed from the tree.
+ 	 */
+ 	zswap_pool_get(pool);
+-	if (objcg)
++	if (objcg) {
+ 		obj_cgroup_get(objcg);
++		obj_cgroup_charge_zswap(objcg, entry->length);
++	}
++	atomic_long_inc(&zswap_stored_pages);
+ 
+ 	/*
+ 	 * We finish initializing the entry while it's already in xarray.
+@@ -1510,13 +1514,13 @@ static ssize_t zswap_store_page(struct page *page,
+ 		zswap_lru_add(&zswap_list_lru, entry);
+ 	}
+ 
+-	return entry->length;
++	return true;
+ 
+ store_failed:
+ 	zpool_free(pool->zpool, entry->handle);
+ compress_failed:
+ 	zswap_entry_cache_free(entry);
+-	return -EINVAL;
++	return false;
+ }
+ 
+ bool zswap_store(struct folio *folio)
+@@ -1526,7 +1530,6 @@ bool zswap_store(struct folio *folio)
+ 	struct obj_cgroup *objcg = NULL;
+ 	struct mem_cgroup *memcg = NULL;
+ 	struct zswap_pool *pool;
+-	size_t compressed_bytes = 0;
+ 	bool ret = false;
+ 	long index;
+ 
+@@ -1564,20 +1567,14 @@ bool zswap_store(struct folio *folio)
+ 
+ 	for (index = 0; index < nr_pages; ++index) {
+ 		struct page *page = folio_page(folio, index);
+-		ssize_t bytes;
+ 
+-		bytes = zswap_store_page(page, objcg, pool);
+-		if (bytes < 0)
++		if (!zswap_store_page(page, objcg, pool))
+ 			goto put_pool;
+-		compressed_bytes += bytes;
+ 	}
+ 
+-	if (objcg) {
+-		obj_cgroup_charge_zswap(objcg, compressed_bytes);
++	if (objcg)
+ 		count_objcg_events(objcg, ZSWPOUT, nr_pages);
+-	}
+ 
+-	atomic_long_add(nr_pages, &zswap_stored_pages);
+ 	count_vm_events(ZSWPOUT, nr_pages);
+ 
+ 	ret = true;
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index 501ec4249fedc3..8612023bec60dc 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -660,12 +660,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
+ 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+ 	void *data;
+ 
+-	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
++	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	if (user_size > size)
+-		return ERR_PTR(-EMSGSIZE);
+-
+ 	size = SKB_DATA_ALIGN(size);
+ 	data = kzalloc(size + headroom + tailroom, GFP_USER);
+ 	if (!data)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index fbb796375aa0ef..2b09714761c62a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1012,6 +1012,12 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
+ 	return ret;
+ }
+ 
++static bool dev_addr_cmp(struct net_device *dev, unsigned short type,
++			 const char *ha)
++{
++	return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len);
++}
++
+ /**
+  *	dev_getbyhwaddr_rcu - find a device by its hardware address
+  *	@net: the applicable net namespace
+@@ -1020,7 +1026,7 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
+  *
+  *	Search for an interface by MAC address. Returns a pointer to the
+  *	device, or NULL if it is not found.
+- *	The caller must hold RCU or RTNL.
++ *	The caller must hold RCU.
+  *	The returned device has not had its ref count increased
+  *	and the caller must therefore be careful about locking
+  *
+@@ -1032,14 +1038,39 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ 	struct net_device *dev;
+ 
+ 	for_each_netdev_rcu(net, dev)
+-		if (dev->type == type &&
+-		    !memcmp(dev->dev_addr, ha, dev->addr_len))
++		if (dev_addr_cmp(dev, type, ha))
+ 			return dev;
+ 
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
+ 
++/**
++ * dev_getbyhwaddr() - find a device by its hardware address
++ * @net: the applicable net namespace
++ * @type: media type of device
++ * @ha: hardware address
++ *
++ * Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold
++ * rtnl_lock.
++ *
++ * Context: rtnl_lock() must be held.
++ * Return: pointer to the net_device, or NULL if not found
++ */
++struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
++				   const char *ha)
++{
++	struct net_device *dev;
++
++	ASSERT_RTNL();
++	for_each_netdev(net, dev)
++		if (dev_addr_cmp(dev, type, ha))
++			return dev;
++
++	return NULL;
++}
++EXPORT_SYMBOL(dev_getbyhwaddr);
++
+ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
+ {
+ 	struct net_device *dev, *ret = NULL;
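
dev_getbyhwaddr() is the rtnl_lock-holding twin of dev_getbyhwaddr_rcu();
its ASSERT_RTNL() turns a wrong-context call, like the ARP ioctl path
further below that had been using the RCU variant under rtnl_lock, into a
loud failure instead of a silent one.  The contract, reduced to a user-space
assertion sketch:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static int rtnl_held;                       /* toy stand-in for lockdep state */

static void assert_rtnl(void) { assert(rtnl_held); }    /* ~ ASSERT_RTNL() */

static const char *getbyhwaddr(void)
{
	assert_rtnl();      /* callers in the wrong context fail loudly */
	return "eth0";
}

int main(void)
{
	pthread_mutex_lock(&rtnl);
	rtnl_held = 1;
	printf("%s\n", getbyhwaddr());
	rtnl_held = 0;
	pthread_mutex_unlock(&rtnl);
	return 0;
}
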
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 6efd4cccc9ddd2..212f0a048cab68 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -1734,30 +1734,30 @@ static int __init init_net_drop_monitor(void)
+ 		return -ENOSPC;
+ 	}
+ 
+-	rc = genl_register_family(&net_drop_monitor_family);
+-	if (rc) {
+-		pr_err("Could not create drop monitor netlink family\n");
+-		return rc;
++	for_each_possible_cpu(cpu) {
++		net_dm_cpu_data_init(cpu);
++		net_dm_hw_cpu_data_init(cpu);
+ 	}
+-	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+ 
+ 	rc = register_netdevice_notifier(&dropmon_net_notifier);
+ 	if (rc < 0) {
+ 		pr_crit("Failed to register netdevice notifier\n");
++		return rc;
++	}
++
++	rc = genl_register_family(&net_drop_monitor_family);
++	if (rc) {
++		pr_err("Could not create drop monitor netlink family\n");
+ 		goto out_unreg;
+ 	}
++	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);
+ 
+ 	rc = 0;
+ 
+-	for_each_possible_cpu(cpu) {
+-		net_dm_cpu_data_init(cpu);
+-		net_dm_hw_cpu_data_init(cpu);
+-	}
+-
+ 	goto out;
+ 
+ out_unreg:
+-	genl_unregister_family(&net_drop_monitor_family);
++	WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+ out:
+ 	return rc;
+ }
+@@ -1766,19 +1766,18 @@ static void exit_net_drop_monitor(void)
+ {
+ 	int cpu;
+ 
+-	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+-
+ 	/*
+ 	 * Because of the module_get/put we do in the trace state change path
+ 	 * we are guaranteed not to have any current users when we get here
+ 	 */
++	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
++
++	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		net_dm_hw_cpu_data_fini(cpu);
+ 		net_dm_cpu_data_fini(cpu);
+ 	}
+-
+-	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+ }
+ 
+ module_init(init_net_drop_monitor);
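
The drop_monitor reordering follows the usual bring-up rule: internal state
(the per-CPU data) must be ready before any externally visible registration
can deliver callbacks into it, and the error and exit paths unwind in
exactly the reverse order.  The skeleton of that pattern:

#include <stdio.h>

static int  init_state(void)   { puts("per-CPU state ready"); return 0; }
static int  register_a(void)   { puts("notifier registered"); return 0; }
static int  register_b(void)   { puts("netlink family registered"); return 0; }
static void unregister_a(void) { puts("notifier unregistered"); }

static int init_all(void)
{
	int rc = init_state();          /* cannot fail in this toy version */

	if (rc)
		return rc;
	rc = register_a();
	if (rc)
		return rc;
	rc = register_b();
	if (rc)
		unregister_a();         /* unwind in reverse order */
	return rc;
}

int main(void)
{
	return init_all();
}
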
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 5db41bf2ed93e0..9cd8de6bebb543 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -853,23 +853,30 @@ __skb_flow_dissect_ports(const struct sk_buff *skb,
+ 			 void *target_container, const void *data,
+ 			 int nhoff, u8 ip_proto, int hlen)
+ {
+-	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
+-	struct flow_dissector_key_ports *key_ports;
++	struct flow_dissector_key_ports_range *key_ports_range = NULL;
++	struct flow_dissector_key_ports *key_ports = NULL;
++	__be32 ports;
+ 
+ 	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
+-		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
+-	else if (dissector_uses_key(flow_dissector,
+-				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
+-		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
++		key_ports = skb_flow_dissector_target(flow_dissector,
++						      FLOW_DISSECTOR_KEY_PORTS,
++						      target_container);
+ 
+-	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
++	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS_RANGE))
++		key_ports_range = skb_flow_dissector_target(flow_dissector,
++							    FLOW_DISSECTOR_KEY_PORTS_RANGE,
++							    target_container);
++
++	if (!key_ports && !key_ports_range)
+ 		return;
+ 
+-	key_ports = skb_flow_dissector_target(flow_dissector,
+-					      dissector_ports,
+-					      target_container);
+-	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+-						data, hlen);
++	ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
++
++	if (key_ports)
++		key_ports->ports = ports;
++
++	if (key_ports_range)
++		key_ports_range->tp.ports = ports;
+ }
+ 
+ static void
+@@ -924,6 +931,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 				     struct flow_dissector *flow_dissector,
+ 				     void *target_container)
+ {
++	struct flow_dissector_key_ports_range *key_ports_range = NULL;
+ 	struct flow_dissector_key_ports *key_ports = NULL;
+ 	struct flow_dissector_key_control *key_control;
+ 	struct flow_dissector_key_basic *key_basic;
+@@ -968,20 +976,21 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 	}
+ 
+-	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
++	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ 		key_ports = skb_flow_dissector_target(flow_dissector,
+ 						      FLOW_DISSECTOR_KEY_PORTS,
+ 						      target_container);
+-	else if (dissector_uses_key(flow_dissector,
+-				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
+-		key_ports = skb_flow_dissector_target(flow_dissector,
+-						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
+-						      target_container);
+-
+-	if (key_ports) {
+ 		key_ports->src = flow_keys->sport;
+ 		key_ports->dst = flow_keys->dport;
+ 	}
++	if (dissector_uses_key(flow_dissector,
++			       FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
++		key_ports_range = skb_flow_dissector_target(flow_dissector,
++							    FLOW_DISSECTOR_KEY_PORTS_RANGE,
++							    target_container);
++		key_ports_range->tp.src = flow_keys->sport;
++		key_ports_range->tp.dst = flow_keys->dport;
++	}
+ 
+ 	if (dissector_uses_key(flow_dissector,
+ 			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+diff --git a/net/core/gro.c b/net/core/gro.c
+index d1f44084e978fb..78b320b6317445 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -7,9 +7,6 @@
+ 
+ #define MAX_GRO_SKBS 8
+ 
+-/* This should be increased if a protocol with a bigger head is added. */
+-#define GRO_MAX_HEAD (MAX_HEADER + 128)
+-
+ static DEFINE_SPINLOCK(offload_lock);
+ 
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 6841e61a6bd0b6..f251a99f8d4217 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -69,6 +69,7 @@
+ #include <net/dst.h>
+ #include <net/sock.h>
+ #include <net/checksum.h>
++#include <net/gro.h>
+ #include <net/gso.h>
+ #include <net/hotdata.h>
+ #include <net/ip6_checksum.h>
+@@ -95,7 +96,9 @@
+ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
+ #endif
+ 
+-#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
++#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
++#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
++					       GRO_MAX_HEAD_PAD))
+ 
+ /* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
+  * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
+@@ -736,7 +739,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+ 	/* If requested length is either too small or too big,
+ 	 * we use kmalloc() for skb->head allocation.
+ 	 */
+-	if (len <= SKB_WITH_OVERHEAD(1024) ||
++	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
+ 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+@@ -816,7 +819,8 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
+ 	 * When the small frag allocator is available, prefer it over kmalloc
+ 	 * for small fragments
+ 	 */
+-	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
++	if ((!NAPI_HAS_SMALL_PAGE_FRAG &&
++	     len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)) ||
+ 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index f1b9b3958792cd..82a14f131d00c6 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -303,7 +303,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
+ 
+ 	write_lock_bh(&sk->sk_callback_lock);
+ 	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
+-		ret = sk_psock_init_strp(sk, psock);
++		if (sk_is_tcp(sk))
++			ret = sk_psock_init_strp(sk, psock);
++		else
++			ret = -EOPNOTSUPP;
+ 		if (ret) {
+ 			write_unlock_bh(&sk->sk_callback_lock);
+ 			sk_psock_put(sk, psock);
+@@ -541,6 +544,9 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
+ 		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
+ 	if (sk_is_stream_unix(sk))
+ 		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
++	if (sk_is_vsock(sk) &&
++	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET))
++		return (1 << sk->sk_state) & TCPF_ESTABLISHED;
+ 	return true;
+ }
+ 
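
The sock_map.c hunk above extends sock_map_sk_state_allowed() to vsock sockets using the kernel's usual state-bitmask idiom: shift 1 by the state number and AND it against a mask of allowed states. A standalone sketch of the idiom, with invented state values:

#include <stdio.h>

enum { ST_CLOSED = 1, ST_LISTEN = 2, ST_ESTABLISHED = 3 };	/* made up */

#define MASKF_LISTEN		(1 << ST_LISTEN)
#define MASKF_ESTABLISHED	(1 << ST_ESTABLISHED)

static int state_allowed(int state)
{
	return !!((1 << state) & (MASKF_ESTABLISHED | MASKF_LISTEN));
}

int main(void)
{
	printf("%d %d %d\n", state_allowed(ST_ESTABLISHED),
	       state_allowed(ST_LISTEN), state_allowed(ST_CLOSED));	/* 1 1 0 */
	return 0;
}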
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index f23a1ec6694cb2..814300eee39de1 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1077,7 +1077,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
+ 	__be32 mask = ((struct sockaddr_in *)&r->arp_netmask)->sin_addr.s_addr;
+ 
+ 	if (!dev && (r->arp_flags & ATF_COM)) {
+-		dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
++		dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
+ 				      r->arp_ha.sa_data);
+ 		if (!dev)
+ 			return -ENODEV;
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 0f523cbfe329ef..32b28fc21b63c0 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -178,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+ 	if (!skb)
+ 		return;
+ 
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
+ 	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
+ 	 * to avoid double counting.  Also, tcp_segs_in() expects
+@@ -195,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+ 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+ 
+ 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+-	__skb_queue_tail(&sk->sk_receive_queue, skb);
++	tcp_add_receive_queue(sk, skb);
+ 	tp->syn_data_acked = 1;
+ 
+ 	/* u64_stats_update_begin(&tp->syncp) not needed here,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4811727b8a0225..0ee22e10fcfae7 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -243,9 +243,15 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
+ 			do_div(val, skb->truesize);
+ 			tcp_sk(sk)->scaling_ratio = val ? val : 1;
+ 
+-			if (old_ratio != tcp_sk(sk)->scaling_ratio)
+-				WRITE_ONCE(tcp_sk(sk)->window_clamp,
+-					   tcp_win_from_space(sk, sk->sk_rcvbuf));
++			if (old_ratio != tcp_sk(sk)->scaling_ratio) {
++				struct tcp_sock *tp = tcp_sk(sk);
++
++				val = tcp_win_from_space(sk, sk->sk_rcvbuf);
++				tcp_set_window_clamp(sk, val);
++
++				if (tp->window_clamp < tp->rcvq_space.space)
++					tp->rcvq_space.space = tp->window_clamp;
++			}
+ 		}
+ 		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
+ 					       tcp_sk(sk)->advmss);
+@@ -4964,7 +4970,7 @@ static void tcp_ofo_queue(struct sock *sk)
+ 		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ 		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+ 		if (!eaten)
+-			__skb_queue_tail(&sk->sk_receive_queue, skb);
++			tcp_add_receive_queue(sk, skb);
+ 		else
+ 			kfree_skb_partial(skb, fragstolen);
+ 
+@@ -5156,7 +5162,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
+ 				  skb, fragstolen)) ? 1 : 0;
+ 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
+ 	if (!eaten) {
+-		__skb_queue_tail(&sk->sk_receive_queue, skb);
++		tcp_add_receive_queue(sk, skb);
+ 		skb_set_owner_r(skb, sk);
+ 	}
+ 	return eaten;
+@@ -5239,7 +5245,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+ 		__kfree_skb(skb);
+ 		return;
+ 	}
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
+ 
+ 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
+@@ -6208,7 +6214,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
+ 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
+ 
+ 			/* Bulk data transfer: receiver */
+-			skb_dst_drop(skb);
++			tcp_cleanup_skb(skb);
+ 			__skb_pull(skb, tcp_header_len);
+ 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
+ 
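
One detail of the tcp_measure_rcv_mss() hunk above: once the window clamp is recomputed from the new scaling ratio, the receive-queue space estimate must not exceed it, so it is clamped down as well. A sketch of that invariant with arbitrary numbers:

#include <stdio.h>

int main(void)
{
	unsigned int window_clamp = 65536;	/* arbitrary example values */
	unsigned int rcvq_space = 131072;

	if (window_clamp < rcvq_space)		/* same check as the patch */
		rcvq_space = window_clamp;

	printf("rcvq_space = %u\n", rcvq_space);	/* -> 65536 */
	return 0;
}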
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index c26f6c4b7bb4a3..96d68f9b1bb9de 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2025,7 +2025,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+ 	 */
+ 	skb_condense(skb);
+ 
+-	skb_dst_drop(skb);
++	tcp_cleanup_skb(skb);
+ 
+ 	if (unlikely(tcp_checksum_complete(skb))) {
+ 		bh_unlock_sock(sk);
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 8e47e5355be613..4f648af8cfaafe 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -97,7 +97,7 @@ tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
+ 
+ 	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
+ 			      n, xa_limit_32b, &next, GFP_KERNEL);
+-	if (err)
++	if (err < 0)
+ 		goto err_xa_alloc;
+ 
+ 	exts->miss_cookie_node = n;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 53a081d49d28ac..7e3db87ae4333c 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1189,6 +1189,9 @@ static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
+ {
+ 	struct vsock_sock *vsk = vsock_sk(sk);
+ 
++	if (WARN_ON_ONCE(!vsk->transport))
++		return -ENODEV;
++
+ 	return vsk->transport->read_skb(vsk, read_actor);
+ }
+ 
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index b58c3818f284f1..f0e48e6911fc46 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -670,6 +670,13 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
+ 	};
+ 	int ret;
+ 
++	mutex_lock(&vsock->rx_lock);
++	vsock->rx_buf_nr = 0;
++	vsock->rx_buf_max_nr = 0;
++	mutex_unlock(&vsock->rx_lock);
++
++	atomic_set(&vsock->queued_replies, 0);
++
+ 	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
+ 	if (ret < 0)
+ 		return ret;
+@@ -779,9 +786,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ 
+ 	vsock->vdev = vdev;
+ 
+-	vsock->rx_buf_nr = 0;
+-	vsock->rx_buf_max_nr = 0;
+-	atomic_set(&vsock->queued_replies, 0);
+ 
+ 	mutex_init(&vsock->tx_lock);
+ 	mutex_init(&vsock->rx_lock);
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index f201d9eca1df2f..07b96d56f3a577 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -87,7 +87,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	lock_sock(sk);
+ 	vsk = vsock_sk(sk);
+ 
+-	if (!vsk->transport) {
++	if (WARN_ON_ONCE(!vsk->transport)) {
+ 		copied = -ENODEV;
+ 		goto out;
+ 	}
+diff --git a/rust/ffi.rs b/rust/ffi.rs
+index be153c4d551b24..584f75b49862b3 100644
+--- a/rust/ffi.rs
++++ b/rust/ffi.rs
+@@ -10,4 +10,39 @@
+ 
+ #![no_std]
+ 
+-pub use core::ffi::*;
++macro_rules! alias {
++    ($($name:ident = $ty:ty;)*) => {$(
++        #[allow(non_camel_case_types, missing_docs)]
++        pub type $name = $ty;
++
++        // Check size compatibility with `core`.
++        const _: () = assert!(
++            core::mem::size_of::<$name>() == core::mem::size_of::<core::ffi::$name>()
++        );
++    )*}
++}
++
++alias! {
++    // `core::ffi::c_char` is either `i8` or `u8` depending on architecture. In the kernel, we use
++    // `-funsigned-char` so it's always mapped to `u8`.
++    c_char = u8;
++
++    c_schar = i8;
++    c_uchar = u8;
++
++    c_short = i16;
++    c_ushort = u16;
++
++    c_int = i32;
++    c_uint = u32;
++
++    // In the kernel, `intptr_t` is defined to be `long` in all platforms, so we can map the type to
++    // `isize`.
++    c_long = isize;
++    c_ulong = usize;
++
++    c_longlong = i64;
++    c_ulonglong = u64;
++}
++
++pub use core::ffi::c_void;
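
The alias! macro above pairs every type alias with a compile-time size check (const _: () = assert!(...)) so the build fails if an alias ever diverges from its core::ffi counterpart. C offers the same pattern through _Static_assert; a hedged analogue with an invented alias:

/* Hypothetical C analogue of the size check done by alias! above. */
typedef unsigned char kchar;	/* invented alias, mirroring c_char = u8 */

_Static_assert(sizeof(kchar) == sizeof(char), "kchar must match char in size");

int main(void)
{
	return 0;
}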
+diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
+index c926e0c2b8528c..d5e6a19ff6b7ba 100644
+--- a/rust/kernel/device.rs
++++ b/rust/kernel/device.rs
+@@ -173,10 +173,10 @@ unsafe fn printk(&self, klevel: &[u8], msg: fmt::Arguments<'_>) {
+         #[cfg(CONFIG_PRINTK)]
+         unsafe {
+             bindings::_dev_printk(
+-                klevel as *const _ as *const core::ffi::c_char,
++                klevel as *const _ as *const crate::ffi::c_char,
+                 self.as_raw(),
+                 c_str!("%pA").as_char_ptr(),
+-                &msg as *const _ as *const core::ffi::c_void,
++                &msg as *const _ as *const crate::ffi::c_void,
+             )
+         };
+     }
+diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
+index 52c5024324474f..5fece574ec023b 100644
+--- a/rust/kernel/error.rs
++++ b/rust/kernel/error.rs
+@@ -153,11 +153,8 @@ pub(crate) fn to_blk_status(self) -> bindings::blk_status_t {
+ 
+     /// Returns the error encoded as a pointer.
+     pub fn to_ptr<T>(self) -> *mut T {
+-        #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]
+         // SAFETY: `self.0` is a valid error due to its invariant.
+-        unsafe {
+-            bindings::ERR_PTR(self.0.get().into()) as *mut _
+-        }
++        unsafe { bindings::ERR_PTR(self.0.get() as _) as *mut _ }
+     }
+ 
+     /// Returns a string representing the error, if one exists.
+diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
+index 13a374a5cdb743..c5162fdc95ff05 100644
+--- a/rust/kernel/firmware.rs
++++ b/rust/kernel/firmware.rs
+@@ -12,7 +12,7 @@
+ /// One of the following: `bindings::request_firmware`, `bindings::firmware_request_nowarn`,
+ /// `bindings::firmware_request_platform`, `bindings::request_firmware_direct`.
+ struct FwFunc(
+-    unsafe extern "C" fn(*mut *const bindings::firmware, *const i8, *mut bindings::device) -> i32,
++    unsafe extern "C" fn(*mut *const bindings::firmware, *const u8, *mut bindings::device) -> i32,
+ );
+ 
+ impl FwFunc {
+diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
+index 7e2a79b3ae2636..8f88891fb1d20f 100644
+--- a/rust/kernel/miscdevice.rs
++++ b/rust/kernel/miscdevice.rs
+@@ -11,16 +11,12 @@
+ use crate::{
+     bindings,
+     error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
++    ffi::{c_int, c_long, c_uint, c_ulong},
+     prelude::*,
+     str::CStr,
+     types::{ForeignOwnable, Opaque},
+ };
+-use core::{
+-    ffi::{c_int, c_long, c_uint, c_ulong},
+-    marker::PhantomData,
+-    mem::MaybeUninit,
+-    pin::Pin,
+-};
++use core::{marker::PhantomData, mem::MaybeUninit, pin::Pin};
+ 
+ /// Options for creating a misc device.
+ #[derive(Copy, Clone)]
+@@ -229,7 +225,7 @@ impl<T: MiscDevice> VtableHelper<T> {
+     // SAFETY: Ioctl calls can borrow the private data of the file.
+     let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ 
+-    match T::ioctl(device, cmd, arg as usize) {
++    match T::ioctl(device, cmd, arg) {
+         Ok(ret) => ret as c_long,
+         Err(err) => err.to_errno() as c_long,
+     }
+@@ -249,7 +245,7 @@ impl<T: MiscDevice> VtableHelper<T> {
+     // SAFETY: Ioctl calls can borrow the private data of the file.
+     let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ 
+-    match T::compat_ioctl(device, cmd, arg as usize) {
++    match T::compat_ioctl(device, cmd, arg) {
+         Ok(ret) => ret as c_long,
+         Err(err) => err.to_errno() as c_long,
+     }
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index a28077a7cb3011..b19ee490be58fd 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -107,7 +107,7 @@ pub unsafe fn call_printk(
+     // SAFETY: TODO.
+     unsafe {
+         bindings::_printk(
+-            format_string.as_ptr() as _,
++            format_string.as_ptr(),
+             module_name.as_ptr(),
+             &args as *const _ as *const c_void,
+         );
+@@ -128,7 +128,7 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
+     #[cfg(CONFIG_PRINTK)]
+     unsafe {
+         bindings::_printk(
+-            format_strings::CONT.as_ptr() as _,
++            format_strings::CONT.as_ptr(),
+             &args as *const _ as *const c_void,
+         );
+     }
+diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
+index 2522868862a1bf..ea4c58c8170336 100644
+--- a/rust/kernel/security.rs
++++ b/rust/kernel/security.rs
+@@ -19,7 +19,7 @@
+ /// successful call to `security_secid_to_secctx`, that has not yet been destroyed by calling
+ /// `security_release_secctx`.
+ pub struct SecurityCtx {
+-    secdata: *mut core::ffi::c_char,
++    secdata: *mut crate::ffi::c_char,
+     seclen: usize,
+ }
+ 
+diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
+index 6ca29d576d029d..04947c6729792b 100644
+--- a/rust/kernel/seq_file.rs
++++ b/rust/kernel/seq_file.rs
+@@ -36,7 +36,7 @@ pub fn call_printf(&self, args: core::fmt::Arguments<'_>) {
+             bindings::seq_printf(
+                 self.inner.get(),
+                 c_str!("%pA").as_char_ptr(),
+-                &args as *const _ as *const core::ffi::c_void,
++                &args as *const _ as *const crate::ffi::c_void,
+             );
+         }
+     }
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index d04c12a1426d1c..0f2765463dc840 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -189,7 +189,7 @@ pub unsafe fn from_char_ptr<'a>(ptr: *const crate::ffi::c_char) -> &'a Self {
+         // to a `NUL`-terminated C string.
+         let len = unsafe { bindings::strlen(ptr) } + 1;
+         // SAFETY: Lifetime guaranteed by the safety precondition.
+-        let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len as _) };
++        let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len) };
+         // SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
+         // As we have added 1 to `len`, the last byte is known to be `NUL`.
+         unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
+@@ -248,7 +248,7 @@ pub unsafe fn from_bytes_with_nul_unchecked_mut(bytes: &mut [u8]) -> &mut CStr {
+     /// Returns a C pointer to the string.
+     #[inline]
+     pub const fn as_char_ptr(&self) -> *const crate::ffi::c_char {
+-        self.0.as_ptr() as _
++        self.0.as_ptr()
+     }
+ 
+     /// Convert the string to a byte slice without the trailing `NUL` byte.
+@@ -838,7 +838,7 @@ pub fn try_from_fmt(args: fmt::Arguments<'_>) -> Result<Self, Error> {
+         // SAFETY: The buffer is valid for read because `f.bytes_written()` is bounded by `size`
+         // (which is the minimum buffer size) and is non-zero (we wrote at least the `NUL` terminator)
+         // so `f.bytes_written() - 1` doesn't underflow.
+-        let ptr = unsafe { bindings::memchr(buf.as_ptr().cast(), 0, (f.bytes_written() - 1) as _) };
++        let ptr = unsafe { bindings::memchr(buf.as_ptr().cast(), 0, f.bytes_written() - 1) };
+         if !ptr.is_null() {
+             return Err(EINVAL);
+         }
+diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
+index 05b0b8d13b10da..cc044924867b89 100644
+--- a/rust/kernel/uaccess.rs
++++ b/rust/kernel/uaccess.rs
+@@ -8,7 +8,7 @@
+     alloc::Flags,
+     bindings,
+     error::Result,
+-    ffi::{c_ulong, c_void},
++    ffi::c_void,
+     prelude::*,
+     transmute::{AsBytes, FromBytes},
+ };
+@@ -224,13 +224,9 @@ pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+-        // SAFETY: `out_ptr` points into a mutable slice of length `len_ulong`, so we may write
++        // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
+         // that many bytes to it.
+-        let res =
+-            unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len_ulong) };
++        let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr as *const c_void, len) };
+         if res != 0 {
+             return Err(EFAULT);
+         }
+@@ -259,9 +255,6 @@ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+         let mut out: MaybeUninit<T> = MaybeUninit::uninit();
+         // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
+         //
+@@ -272,7 +265,7 @@ pub fn read<T: FromBytes>(&mut self) -> Result<T> {
+             bindings::_copy_from_user(
+                 out.as_mut_ptr().cast::<c_void>(),
+                 self.ptr as *const c_void,
+-                len_ulong,
++                len,
+             )
+         };
+         if res != 0 {
+@@ -335,12 +328,9 @@ pub fn write_slice(&mut self, data: &[u8]) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+-        // SAFETY: `data_ptr` points into an immutable slice of length `len_ulong`, so we may read
++        // SAFETY: `data_ptr` points into an immutable slice of length `len`, so we may read
+         // that many bytes from it.
+-        let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len_ulong) };
++        let res = unsafe { bindings::copy_to_user(self.ptr as *mut c_void, data_ptr, len) };
+         if res != 0 {
+             return Err(EFAULT);
+         }
+@@ -359,9 +349,6 @@ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+         if len > self.length {
+             return Err(EFAULT);
+         }
+-        let Ok(len_ulong) = c_ulong::try_from(len) else {
+-            return Err(EFAULT);
+-        };
+         // SAFETY: The reference points to a value of type `T`, so it is valid for reading
+         // `size_of::<T>()` bytes.
+         //
+@@ -372,7 +359,7 @@ pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
+             bindings::_copy_to_user(
+                 self.ptr as *mut c_void,
+                 (value as *const T).cast::<c_void>(),
+-                len_ulong,
++                len,
+             )
+         };
+         if res != 0 {
+diff --git a/samples/rust/rust_print_main.rs b/samples/rust/rust_print_main.rs
+index aed90a6feecfa7..7935b4772ec6ce 100644
+--- a/samples/rust/rust_print_main.rs
++++ b/samples/rust/rust_print_main.rs
+@@ -83,7 +83,7 @@ fn drop(&mut self) {
+ }
+ 
+ mod trace {
+-    use core::ffi::c_int;
++    use kernel::ffi::c_int;
+ 
+     kernel::declare_trace! {
+         /// # Safety
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 77b6ac9b5c11bc..9955c4d54e42a7 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -678,12 +678,18 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client,
+ 					  dest_port->time_real);
+ 
+ #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
+-	if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+-		if (snd_seq_ev_is_ump(event)) {
++	if (snd_seq_ev_is_ump(event)) {
++		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+ 			result = snd_seq_deliver_from_ump(client, dest, dest_port,
+ 							  event, atomic, hop);
+ 			goto __skip;
+-		} else if (snd_seq_client_is_ump(dest)) {
++		} else if (dest->type == USER_CLIENT &&
++			   !snd_seq_client_is_ump(dest)) {
++			result = 0; // drop the event
++			goto __skip;
++		}
++	} else if (snd_seq_client_is_ump(dest)) {
++		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
+ 			result = snd_seq_deliver_to_ump(client, dest, dest_port,
+ 							event, atomic, hop);
+ 			goto __skip;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 14763c0f31ad9f..46a2204049993d 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2470,7 +2470,9 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
+ 				break;
+ 			id = kctl->id;
+ 			id.index = spdif_index;
+-			snd_ctl_rename_id(codec->card, &kctl->id, &id);
++			err = snd_ctl_rename_id(codec->card, &kctl->id, &id);
++			if (err < 0)
++				return err;
+ 		}
+ 		bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
+ 	}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 538c37a78a56f7..84ab357b840d67 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1080,6 +1080,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c
+index 759f48038273df..621f947e38174d 100644
+--- a/sound/pci/hda/patch_cs8409-tables.c
++++ b/sound/pci/hda/patch_cs8409-tables.c
+@@ -121,7 +121,7 @@ static const struct cs8409_i2c_param cs42l42_init_reg_seq[] = {
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+-	{ CS42L42_HP_CTL, 0x03 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIC_DET_CTL1, 0xB6 },
+ 	{ CS42L42_TIPSENSE_CTL, 0xC2 },
+ 	{ CS42L42_HS_CLAMP_DISABLE, 0x01 },
+@@ -315,7 +315,7 @@ static const struct cs8409_i2c_param dolphin_c0_init_reg_seq[] = {
+ 	{ CS42L42_ASP_TX_SZ_EN, 0x01 },
+ 	{ CS42L42_PWR_CTL1, 0x0A },
+ 	{ CS42L42_PWR_CTL2, 0x84 },
+-	{ CS42L42_HP_CTL, 0x03 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+@@ -371,7 +371,7 @@ static const struct cs8409_i2c_param dolphin_c1_init_reg_seq[] = {
+ 	{ CS42L42_ASP_TX_SZ_EN, 0x00 },
+ 	{ CS42L42_PWR_CTL1, 0x0E },
+ 	{ CS42L42_PWR_CTL2, 0x84 },
+-	{ CS42L42_HP_CTL, 0x01 },
++	{ CS42L42_HP_CTL, 0x0D },
+ 	{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 	{ CS42L42_MIXER_CHB_VOL, 0x3F },
+ 	{ CS42L42_MIXER_ADC_VOL, 0x3f },
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index 614327218634c0..b760332a4e3577 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -876,7 +876,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 		{ CS42L42_DET_INT_STATUS2, 0x00 },
+ 		{ CS42L42_TSRS_PLUG_STATUS, 0x00 },
+ 	};
+-	int fsv_old, fsv_new;
++	unsigned int fsv;
+ 
+ 	/* Bring CS42L42 out of Reset */
+ 	spec->gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0);
+@@ -893,13 +893,15 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
+ 	/* Clear interrupts, by reading interrupt status registers */
+ 	cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
+ 
+-	fsv_old = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL);
+-	if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB)
+-		fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK;
+-	else
+-		fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK;
+-	if (fsv_new != fsv_old)
+-		cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv_new);
++	fsv = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL);
++	if (cs42l42->full_scale_vol) {
++		// Set the full scale volume bit
++		fsv |= CS42L42_FULL_SCALE_VOL_MASK;
++		cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv);
++	}
++	// Unmute analog channels A and B
++	fsv = (fsv & ~CS42L42_ANA_MUTE_AB);
++	cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv);
+ 
+ 	/* we have to explicitly allow unsol event handling even during the
+ 	 * resume phase so that the jack event is processed properly
+@@ -920,7 +922,7 @@ static void cs42l42_suspend(struct sub_codec *cs42l42)
+ 		{ CS42L42_MIXER_CHA_VOL, 0x3F },
+ 		{ CS42L42_MIXER_ADC_VOL, 0x3F },
+ 		{ CS42L42_MIXER_CHB_VOL, 0x3F },
+-		{ CS42L42_HP_CTL, 0x0F },
++		{ CS42L42_HP_CTL, 0x0D },
+ 		{ CS42L42_ASP_RX_DAI0_EN, 0x00 },
+ 		{ CS42L42_ASP_CLK_CFG, 0x00 },
+ 		{ CS42L42_PWR_CTL1, 0xFE },
+diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
+index 5e48115caf096b..14645d25e70fd2 100644
+--- a/sound/pci/hda/patch_cs8409.h
++++ b/sound/pci/hda/patch_cs8409.h
+@@ -230,9 +230,10 @@ enum cs8409_coefficient_index_registers {
+ #define CS42L42_PDN_TIMEOUT_US			(250000)
+ #define CS42L42_PDN_SLEEP_US			(2000)
+ #define CS42L42_INIT_TIMEOUT_MS			(45)
++#define CS42L42_ANA_MUTE_AB			(0x0C)
+ #define CS42L42_FULL_SCALE_VOL_MASK		(2)
+-#define CS42L42_FULL_SCALE_VOL_0DB		(1)
+-#define CS42L42_FULL_SCALE_VOL_MINUS6DB		(0)
++#define CS42L42_FULL_SCALE_VOL_0DB		(0)
++#define CS42L42_FULL_SCALE_VOL_MINUS6DB		(1)
+ 
+ /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6c352602987bac..ffe3de617d5ddb 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3790,6 +3790,7 @@ static void alc225_init(struct hda_codec *codec)
+ 				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ 
+ 		msleep(75);
++		alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+ 		alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
+ 	}
+ }
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 8c15389c9a04bc..5585f4c8f455a5 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -157,6 +157,8 @@ static int micfil_set_quality(struct fsl_micfil *micfil)
+ 	case QUALITY_VLOW2:
+ 		qsel = MICFIL_QSEL_VLOW2_QUALITY;
+ 		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+ 	return regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 231400661c9060..50ecc5f51100ee 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -23,7 +23,6 @@ struct imx_audmix {
+ 	struct snd_soc_card card;
+ 	struct platform_device *audmix_pdev;
+ 	struct platform_device *out_pdev;
+-	struct clk *cpu_mclk;
+ 	int num_dai;
+ 	struct snd_soc_dai_link *dai;
+ 	int num_dai_conf;
+@@ -32,34 +31,11 @@ struct imx_audmix {
+ 	struct snd_soc_dapm_route *dapm_routes;
+ };
+ 
+-static const u32 imx_audmix_rates[] = {
+-	8000, 12000, 16000, 24000, 32000, 48000, 64000, 96000,
+-};
+-
+-static const struct snd_pcm_hw_constraint_list imx_audmix_rate_constraints = {
+-	.count = ARRAY_SIZE(imx_audmix_rates),
+-	.list = imx_audmix_rates,
+-};
+-
+ static int imx_audmix_fe_startup(struct snd_pcm_substream *substream)
+ {
+-	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+-	struct imx_audmix *priv = snd_soc_card_get_drvdata(rtd->card);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+-	struct device *dev = rtd->card->dev;
+-	unsigned long clk_rate = clk_get_rate(priv->cpu_mclk);
+ 	int ret;
+ 
+-	if (clk_rate % 24576000 == 0) {
+-		ret = snd_pcm_hw_constraint_list(runtime, 0,
+-						 SNDRV_PCM_HW_PARAM_RATE,
+-						 &imx_audmix_rate_constraints);
+-		if (ret < 0)
+-			return ret;
+-	} else {
+-		dev_warn(dev, "mclk may be not supported %lu\n", clk_rate);
+-	}
+-
+ 	ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
+ 					   1, 8);
+ 	if (ret < 0)
+@@ -323,13 +299,6 @@ static int imx_audmix_probe(struct platform_device *pdev)
+ 	}
+ 	put_device(&cpu_pdev->dev);
+ 
+-	priv->cpu_mclk = devm_clk_get(&cpu_pdev->dev, "mclk1");
+-	if (IS_ERR(priv->cpu_mclk)) {
+-		ret = PTR_ERR(priv->cpu_mclk);
+-		dev_err(&cpu_pdev->dev, "failed to get DAI mclk1: %d\n", ret);
+-		return ret;
+-	}
+-
+ 	priv->audmix_pdev = audmix_pdev;
+ 	priv->out_pdev  = cpu_pdev;
+ 
+diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
+index acd75e48851fcf..7feefeb6b876dc 100644
+--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
++++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
+@@ -451,11 +451,11 @@ static int rockchip_i2s_tdm_set_fmt(struct snd_soc_dai *cpu_dai,
+ 			break;
+ 		case SND_SOC_DAIFMT_DSP_A:
+ 			val = I2S_TXCR_TFS_TDM_PCM;
+-			tdm_val = TDM_SHIFT_CTRL(0);
++			tdm_val = TDM_SHIFT_CTRL(2);
+ 			break;
+ 		case SND_SOC_DAIFMT_DSP_B:
+ 			val = I2S_TXCR_TFS_TDM_PCM;
+-			tdm_val = TDM_SHIFT_CTRL(2);
++			tdm_val = TDM_SHIFT_CTRL(4);
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index b55eb977e443d4..70b7bfb080f473 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -765,10 +765,16 @@ static int sof_ipc4_widget_setup_comp_dai(struct snd_sof_widget *swidget)
+ 		}
+ 
+ 		list_for_each_entry(w, &sdev->widget_list, list) {
+-			if (w->widget->sname &&
++			struct snd_sof_dai *alh_dai;
++
++			if (!WIDGET_IS_DAI(w->id) || !w->widget->sname ||
+ 			    strcmp(w->widget->sname, swidget->widget->sname))
+ 				continue;
+ 
++			alh_dai = w->private;
++			if (alh_dai->type != SOF_DAI_INTEL_ALH)
++				continue;
++
+ 			blob->alh_cfg.device_count++;
+ 		}
+ 
+@@ -2061,11 +2067,13 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
+ 			list_for_each_entry(w, &sdev->widget_list, list) {
+ 				u32 node_type;
+ 
+-				if (w->widget->sname &&
++				if (!WIDGET_IS_DAI(w->id) || !w->widget->sname ||
+ 				    strcmp(w->widget->sname, swidget->widget->sname))
+ 					continue;
+ 
+ 				dai = w->private;
++				if (dai->type != SOF_DAI_INTEL_ALH)
++					continue;
+ 				alh_copier = (struct sof_ipc4_copier *)dai->private;
+ 				alh_data = &alh_copier->data;
+ 				node_type = SOF_IPC4_GET_NODE_TYPE(alh_data->gtw_cfg.node_id);
+diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
+index 35a7462d8b6938..c5c6353f18ceef 100644
+--- a/sound/soc/sof/pcm.c
++++ b/sound/soc/sof/pcm.c
+@@ -511,6 +511,8 @@ static int sof_pcm_close(struct snd_soc_component *component,
+ 		 */
+ 	}
+ 
++	spcm->stream[substream->stream].substream = NULL;
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/sof/stream-ipc.c b/sound/soc/sof/stream-ipc.c
+index 794c7bbccbaf92..8262443ac89ad1 100644
+--- a/sound/soc/sof/stream-ipc.c
++++ b/sound/soc/sof/stream-ipc.c
+@@ -43,7 +43,7 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ 				return -ESTRPIPE;
+ 
+ 			posn_offset = stream->posn_offset;
+-		} else {
++		} else if (sps->cstream) {
+ 
+ 			struct sof_compr_stream *sstream = sps->cstream->runtime->private_data;
+ 
+@@ -51,6 +51,10 @@ int sof_ipc_msg_data(struct snd_sof_dev *sdev,
+ 				return -ESTRPIPE;
+ 
+ 			posn_offset = sstream->posn_offset;
++
++		} else {
++			dev_err(sdev->dev, "%s: No stream opened\n", __func__);
++			return -EINVAL;
+ 		}
+ 
+ 		snd_sof_dsp_mailbox_read(sdev, posn_offset, p, sz);



* [gentoo-commits] proj/linux-patches:6.13 commit in: /
@ 2025-03-07 18:21 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2025-03-07 18:21 UTC (permalink / raw
  To: gentoo-commits

commit:     d1474d248137c22303beb19171a76451f19b2f52
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  7 18:21:23 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  7 18:21:23 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d1474d24

Linux patch 6.13.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1005_linux-6.13.6.patch | 7697 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7701 insertions(+)

diff --git a/0000_README b/0000_README
index 51a3feed..e250f73b 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.13.5.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.13.5
 
+Patch:  1005_linux-6.13.6.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.13.6
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1005_linux-6.13.6.patch b/1005_linux-6.13.6.patch
new file mode 100644
index 00000000..abe283b7
--- /dev/null
+++ b/1005_linux-6.13.6.patch
@@ -0,0 +1,7697 @@
+diff --git a/Makefile b/Makefile
+index 56d5c11b6f1ec6..f49182f3bae143 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 13
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+ 
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index e18e9244d17a4f..c85aa4f1def810 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -1262,7 +1262,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
+ extern unsigned int __ro_after_init kvm_arm_vmid_bits;
+ int __init kvm_arm_vmid_alloc_init(void);
+ void __init kvm_arm_vmid_alloc_free(void);
+-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
++void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+ void kvm_arm_vmid_clear_active(void);
+ 
+ static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 7b2735ad32e911..3b3ecfed294f2b 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -580,6 +580,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	mmu = vcpu->arch.hw_mmu;
+ 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
+ 
++	/*
++	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
++	 * which happens eagerly in VHE.
++	 *
++	 * Also, the VMID allocator only preserves VMIDs that are active at the
++	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
++	 * this is called from kvm_sched_in().
++	 */
++	kvm_arm_vmid_update(&mmu->vmid);
++
+ 	/*
+ 	 * We guarantee that both TLBs and I-cache are private to each
+ 	 * vcpu. If detecting that a vcpu from the same VM has
+@@ -1147,18 +1157,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 		 */
+ 		preempt_disable();
+ 
+-		/*
+-		 * The VMID allocator only tracks active VMIDs per
+-		 * physical CPU, and therefore the VMID allocated may not be
+-		 * preserved on VMID roll-over if the task was preempted,
+-		 * making a thread's VMID inactive. So we need to call
+-		 * kvm_arm_vmid_update() in non-premptible context.
+-		 */
+-		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
+-		    has_vhe())
+-			__load_stage2(vcpu->arch.hw_mmu,
+-				      vcpu->arch.hw_mmu->arch);
+-
+ 		kvm_pmu_flush_hwstate(vcpu);
+ 
+ 		local_irq_disable();
+diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
+index 806223b7022afd..7fe8ba1a2851c5 100644
+--- a/arch/arm64/kvm/vmid.c
++++ b/arch/arm64/kvm/vmid.c
+@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
+ 	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
+ }
+ 
+-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
++void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+ {
+ 	unsigned long flags;
+ 	u64 vmid, old_active_vmid;
+-	bool updated = false;
+ 
+ 	vmid = atomic64_read(&kvm_vmid->id);
+ 
+@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+ 	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
+ 	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
+ 					  old_active_vmid, vmid))
+-		return false;
++		return;
+ 
+ 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+ 
+ 	/* Check that our VMID belongs to the current generation. */
+ 	vmid = atomic64_read(&kvm_vmid->id);
+-	if (!vmid_gen_match(vmid)) {
++	if (!vmid_gen_match(vmid))
+ 		vmid = new_vmid(kvm_vmid);
+-		updated = true;
+-	}
+ 
+ 	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
+ 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+-
+-	return updated;
+ }
+ 
+ /*
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 9c0b8d9558fc41..ccdef53872a0bf 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
+ 
+ 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ 		extern u16 memstart_offset_seed;
+-
+-		/*
+-		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
+-		 * map randomization can be enabled by shrinking the IPA space.
+-		 */
+-		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
++		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ 		int parange = cpuid_feature_extract_unsigned_field(
+ 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ 		s64 range = linear_region_size -
+diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
+index 4cadc56220feac..427c41dde64319 100644
+--- a/arch/riscv/include/asm/cmpxchg.h
++++ b/arch/riscv/include/asm/cmpxchg.h
+@@ -231,7 +231,7 @@
+ 		__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx,		\
+ 			       sc_prepend, sc_append,			\
+ 			       cas_prepend, cas_append,			\
+-			       __ret, __ptr, (long), __old, __new);	\
++			       __ret, __ptr, (long)(int)(long), __old, __new);	\
+ 		break;							\
+ 	case 8:								\
+ 		__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx,		\
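
Context for the cmpxchg.h change above: RISC-V word-sized atomics (lr.w, amocas.w) sign-extend their 32-bit result into the full 64-bit register, so the expected old value must be sign-extended the same way before comparison; a bare (long) cast applied to an unsigned 32-bit value would zero-extend instead and the comparison could never match. A userspace demonstration on a 64-bit host:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old = 0x80000000u;		/* sign bit of the 32-bit value set */

	long zero_ext = (long)old;		/* 0x0000000080000000 */
	long sign_ext = (long)(int)old;		/* 0xffffffff80000000 */

	printf("zero-extended: %lx\nsign-extended: %lx\n",
	       (unsigned long)zero_ext, (unsigned long)sign_ext);
	return 0;
}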
+diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
+index fc8130f995c1ee..6907c456ac8c05 100644
+--- a/arch/riscv/include/asm/futex.h
++++ b/arch/riscv/include/asm/futex.h
+@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
+ 		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
+ 	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+-	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
++	: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
+ 	: "memory");
+ 	__disable_user_access();
+ 
+diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
+index 2d40736fc37cec..26b085dbdd073f 100644
+--- a/arch/riscv/kernel/cacheinfo.c
++++ b/arch/riscv/kernel/cacheinfo.c
+@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
+ 	if (!np)
+ 		return -ENOENT;
+ 
+-	if (of_property_read_bool(np, "cache-size"))
++	if (of_property_present(np, "cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
+-	if (of_property_read_bool(np, "i-cache-size"))
++	if (of_property_present(np, "i-cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+-	if (of_property_read_bool(np, "d-cache-size"))
++	if (of_property_present(np, "d-cache-size"))
+ 		ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ 
+ 	prev = np;
+@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
+ 			break;
+ 		if (level <= levels)
+ 			break;
+-		if (of_property_read_bool(np, "cache-size"))
++		if (of_property_present(np, "cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
+-		if (of_property_read_bool(np, "i-cache-size"))
++		if (of_property_present(np, "i-cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+-		if (of_property_read_bool(np, "d-cache-size"))
++		if (of_property_present(np, "d-cache-size"))
+ 			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ 		levels = level;
+ 	}
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index c0916ed318c20e..aaf5fa57591bcd 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -475,7 +475,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
+ 			if (bit < RISCV_ISA_EXT_BASE)
+ 				*this_hwcap |= isa2hwcap[bit];
+ 		}
+-	} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
++	} while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
+ }
+ 
+ static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
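
The cpufeature.c fix swaps memcmp() for bitmap_equal() because a bitmap is only meaningful up to its bit count: memcmp() also compares whatever happens to sit in the unused tail bits of the last word. A single-word userspace sketch (NBITS chosen arbitrarily):

#include <stdio.h>
#include <string.h>

#define NBITS 40	/* anything that is not a multiple of 64 shows the problem */

static int bitmap_equal40(unsigned long long a, unsigned long long b)
{
	unsigned long long mask = (1ULL << NBITS) - 1;	/* low NBITS bits only */

	return (a & mask) == (b & mask);
}

int main(void)
{
	unsigned long long a = 0x123456789aULL;
	unsigned long long b = a | (0xaaULL << 48);	/* same bitmap, dirty tail */

	printf("memcmp equal: %d, masked compare: %d\n",
	       memcmp(&a, &b, sizeof(a)) == 0, bitmap_equal40(a, b));	/* 0, 1 */
	return 0;
}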
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 45010e71df86ce..01b0520fb18028 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -324,8 +324,8 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	riscv_init_cbo_blocksizes();
+ 	riscv_fill_hwcap();
+-	init_rt_signal_env();
+ 	apply_boot_alternatives();
++	init_rt_signal_env();
+ 
+ 	if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
+ 	    riscv_isa_extension_available(NULL, ZICBOM))
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index dcd28241945613..c3c517b9eee554 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
+ 		if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
+ 			total_context_size += riscv_v_sc_size;
+ 	}
+-	/*
+-	 * Preserved a __riscv_ctx_hdr for END signal context header if an
+-	 * extension uses __riscv_extra_ext_header
+-	 */
+-	if (total_context_size)
+-		total_context_size += sizeof(struct __riscv_ctx_hdr);
+ 
+ 	frame_size += total_context_size;
+ 
+diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
+index dce667f4b6ab08..3070bb31745de7 100644
+--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
++++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
+@@ -9,6 +9,7 @@
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
++#include <linux/wordpart.h>
+ #include <asm/sbi.h>
+ #include <asm/kvm_vcpu_sbi.h>
+ 
+@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+ 	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+ 	if (!target_vcpu)
+ 		return SBI_ERR_INVALID_PARAM;
+-	if (!kvm_riscv_vcpu_stopped(target_vcpu))
+-		return SBI_HSM_STATE_STARTED;
+-	else if (vcpu->stat.generic.blocking)
++	if (kvm_riscv_vcpu_stopped(target_vcpu))
++		return SBI_HSM_STATE_STOPPED;
++	else if (target_vcpu->stat.generic.blocking)
+ 		return SBI_HSM_STATE_SUSPENDED;
+ 	else
+-		return SBI_HSM_STATE_STOPPED;
++		return SBI_HSM_STATE_STARTED;
+ }
+ 
+ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 		}
+ 		return 0;
+ 	case SBI_EXT_HSM_HART_SUSPEND:
+-		switch (cp->a0) {
++		switch (lower_32_bits(cp->a0)) {
+ 		case SBI_HSM_SUSPEND_RET_DEFAULT:
+ 			kvm_riscv_vcpu_wfi(vcpu);
+ 			break;
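
On the HART_SUSPEND fix above: the SBI spec defines the suspend type as a 32-bit value, but on RV64 it arrives in a 64-bit register, so the upper half must be discarded before the switch. lower_32_bits() (from <linux/wordpart.h>, included by the hunk above) does exactly that; a minimal userspace equivalent:

#include <stdio.h>

/* Userspace equivalent of the kernel's lower_32_bits() helper. */
static unsigned int lower_32_bits(unsigned long long v)
{
	return (unsigned int)(v & 0xffffffffULL);
}

int main(void)
{
	unsigned long long a0 = 0xdeadbeef00000000ULL;	/* garbage upper half */

	printf("0x%x\n", lower_32_bits(a0));		/* -> 0x0 */
	return 0;
}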
+diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
+index 9c2ab3dfa93aa5..5fbf3f94f1e855 100644
+--- a/arch/riscv/kvm/vcpu_sbi_replace.c
++++ b/arch/riscv/kvm/vcpu_sbi_replace.c
+@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	u64 next_cycle;
+ 
+ 	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
+-		retdata->err_val = SBI_ERR_INVALID_PARAM;
++		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ 		return 0;
+ 	}
+ 
+@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ 	unsigned long hmask = cp->a0;
+ 	unsigned long hbase = cp->a1;
++	unsigned long hart_bit = 0, sentmask = 0;
+ 
+ 	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
+-		retdata->err_val = SBI_ERR_INVALID_PARAM;
++		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ 		return 0;
+ 	}
+ 
+@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 		if (hbase != -1UL) {
+ 			if (tmp->vcpu_id < hbase)
+ 				continue;
+-			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
++			hart_bit = tmp->vcpu_id - hbase;
++			if (hart_bit >= __riscv_xlen)
++				goto done;
++			if (!(hmask & (1UL << hart_bit)))
+ 				continue;
+ 		}
+ 		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
+ 		if (ret < 0)
+ 			break;
++		sentmask |= 1UL << hart_bit;
+ 		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
+ 	}
+ 
++done:
++	if (hbase != -1UL && (hmask ^ sentmask))
++		retdata->err_val = SBI_ERR_INVALID_PARAM;
++
+ 	return ret;
+ }
+ 
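
The IPI hunk above keeps a second mask of harts that were actually signalled; if it differs from the requested hmask (hmask ^ sentmask is non-zero), at least one requested hart id was out of range or invalid, and SBI_ERR_INVALID_PARAM is returned. A tiny sketch of the bookkeeping:

#include <stdio.h>

int main(void)
{
	unsigned long hmask = 0x0b;	/* harts 0, 1 and 3 requested */
	unsigned long sentmask = 0x03;	/* only harts 0 and 1 signalled */

	if (hmask ^ sentmask)
		printf("invalid param, harts 0x%lx not signalled\n",
		       hmask ^ sentmask);	/* -> 0x8 */
	return 0;
}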
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index c2fb8fe86a4559..757333fe82c763 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1336,6 +1336,7 @@ config X86_REBOOTFIXUPS
+ config MICROCODE
+ 	def_bool y
+ 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
++	select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
+ 
+ config MICROCODE_INITRD32
+ 	def_bool y
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index c75c482d4c52f7..c72e8b2b5ba99b 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
+ 	if (event->attr.type == event->pmu->type)
+ 		event->hw.config |= x86_pmu_get_event_config(event);
+ 
+-	if (event->attr.sample_period && x86_pmu.limit_period) {
++	if (!event->attr.freq && x86_pmu.limit_period) {
+ 		s64 left = event->attr.sample_period;
+ 		x86_pmu.limit_period(event, &left);
+ 		if (left > event->attr.sample_period)
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index 9651275aecd1bb..dfec2c61e3547d 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -153,8 +153,8 @@ static void geode_configure(void)
+ 	u8 ccr3;
+ 	local_irq_save(flags);
+ 
+-	/* Suspend on halt power saving and enable #SUSP pin */
+-	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
++	/* Suspend on halt power saving */
++	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
+ 
+ 	ccr3 = getCx86(CX86_CCR3);
+ 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index fb5d0c67fbab17..f5365b32582a5c 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -23,14 +23,18 @@
+ 
+ #include <linux/earlycpio.h>
+ #include <linux/firmware.h>
++#include <linux/bsearch.h>
+ #include <linux/uaccess.h>
+ #include <linux/vmalloc.h>
+ #include <linux/initrd.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ 
++#include <crypto/sha2.h>
++
+ #include <asm/microcode.h>
+ #include <asm/processor.h>
++#include <asm/cmdline.h>
+ #include <asm/setup.h>
+ #include <asm/cpu.h>
+ #include <asm/msr.h>
+@@ -145,6 +149,107 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
+  */
+ static u32 bsp_cpuid_1_eax __ro_after_init;
+ 
++static bool sha_check = true;
++
++struct patch_digest {
++	u32 patch_id;
++	u8 sha256[SHA256_DIGEST_SIZE];
++};
++
++#include "amd_shas.c"
++
++static int cmp_id(const void *key, const void *elem)
++{
++	struct patch_digest *pd = (struct patch_digest *)elem;
++	u32 patch_id = *(u32 *)key;
++
++	if (patch_id == pd->patch_id)
++		return 0;
++	else if (patch_id < pd->patch_id)
++		return -1;
++	else
++		return 1;
++}
++
++static bool need_sha_check(u32 cur_rev)
++{
++	switch (cur_rev >> 8) {
++	case 0x80012: return cur_rev <= 0x800126f; break;
++	case 0x83010: return cur_rev <= 0x830107c; break;
++	case 0x86001: return cur_rev <= 0x860010e; break;
++	case 0x86081: return cur_rev <= 0x8608108; break;
++	case 0x87010: return cur_rev <= 0x8701034; break;
++	case 0x8a000: return cur_rev <= 0x8a0000a; break;
++	case 0xa0011: return cur_rev <= 0xa0011da; break;
++	case 0xa0012: return cur_rev <= 0xa001243; break;
++	case 0xa1011: return cur_rev <= 0xa101153; break;
++	case 0xa1012: return cur_rev <= 0xa10124e; break;
++	case 0xa1081: return cur_rev <= 0xa108109; break;
++	case 0xa2010: return cur_rev <= 0xa20102f; break;
++	case 0xa2012: return cur_rev <= 0xa201212; break;
++	case 0xa6012: return cur_rev <= 0xa60120a; break;
++	case 0xa7041: return cur_rev <= 0xa704109; break;
++	case 0xa7052: return cur_rev <= 0xa705208; break;
++	case 0xa7080: return cur_rev <= 0xa708009; break;
++	case 0xa70c0: return cur_rev <= 0xa70C009; break;
++	case 0xaa002: return cur_rev <= 0xaa00218; break;
++	default: break;
++	}
++
++	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
++	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
++	return true;
++}
++
++static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
++{
++	struct patch_digest *pd = NULL;
++	u8 digest[SHA256_DIGEST_SIZE];
++	struct sha256_state s;
++	int i;
++
++	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
++	    x86_family(bsp_cpuid_1_eax) > 0x19)
++		return true;
++
++	if (!need_sha_check(cur_rev))
++		return true;
++
++	if (!sha_check)
++		return true;
++
++	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
++	if (!pd) {
++		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
++		return false;
++	}
++
++	sha256_init(&s);
++	sha256_update(&s, data, len);
++	sha256_final(&s, digest);
++
++	if (memcmp(digest, pd->sha256, sizeof(digest))) {
++		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
++
++		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
++			pr_cont("0x%x ", digest[i]);
++		pr_info("\n");
++
++		return false;
++	}
++
++	return true;
++}
++
++static u32 get_patch_level(void)
++{
++	u32 rev, dummy __always_unused;
++
++	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
++
++	return rev;
++}
++
+ static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
+ {
+ 	union zen_patch_rev p;
+@@ -246,8 +351,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
+  * On success, @sh_psize returns the patch size according to the section header,
+  * to the caller.
+  */
+-static bool
+-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
++static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
+ {
+ 	u32 p_type, p_size;
+ 	const u32 *hdr;
+@@ -484,10 +588,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
+ 	}
+ }
+ 
+-static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
++static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
++				  unsigned int psize)
+ {
+ 	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
+-	u32 rev, dummy;
++
++	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
++		return -1;
+ 
+ 	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+ 
+@@ -505,47 +612,13 @@ static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+ 	}
+ 
+ 	/* verify patch application was successful */
+-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-
+-	if (rev != mc->hdr.patch_id)
+-		return -1;
++	*cur_rev = get_patch_level();
++	if (*cur_rev != mc->hdr.patch_id)
++		return false;
+ 
+-	return 0;
++	return true;
+ }
+ 
+-/*
+- * Early load occurs before we can vmalloc(). So we look for the microcode
+- * patch container file in initrd, traverse equivalent cpu table, look for a
+- * matching microcode patch, and update, all in initrd memory in place.
+- * When vmalloc() is available for use later -- on 64-bit during first AP load,
+- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+- * load_microcode_amd() to save equivalent cpu table and microcode patches in
+- * kernel heap memory.
+- *
+- * Returns true if container found (sets @desc), false otherwise.
+- */
+-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
+-{
+-	struct cont_desc desc = { 0 };
+-	struct microcode_amd *mc;
+-	bool ret = false;
+-
+-	scan_containers(ucode, size, &desc);
+-
+-	mc = desc.mc;
+-	if (!mc)
+-		return ret;
+-
+-	/*
+-	 * Allow application of the same revision to pick up SMT-specific
+-	 * changes even if the revision of the other SMT thread is already
+-	 * up-to-date.
+-	 */
+-	if (old_rev > mc->hdr.patch_id)
+-		return ret;
+-
+-	return !__apply_microcode_amd(mc, desc.psize);
+-}
+ 
+ static bool get_builtin_microcode(struct cpio_data *cp)
+ {
+@@ -569,64 +642,74 @@ static bool get_builtin_microcode(struct cpio_data *cp)
+ 	return false;
+ }
+ 
+-static void __init find_blobs_in_containers(struct cpio_data *ret)
++static bool __init find_blobs_in_containers(struct cpio_data *ret)
+ {
+ 	struct cpio_data cp;
++	bool found;
+ 
+ 	if (!get_builtin_microcode(&cp))
+ 		cp = find_microcode_in_initrd(ucode_path);
+ 
+-	*ret = cp;
++	found = cp.data && cp.size;
++	if (found)
++		*ret = cp;
++
++	return found;
+ }
+ 
++/*
++ * Early load occurs before we can vmalloc(). So we look for the microcode
++ * patch container file in initrd, traverse equivalent cpu table, look for a
++ * matching microcode patch, and update, all in initrd memory in place.
++ * When vmalloc() is available for use later -- on 64-bit during first AP load,
++ * and on 32-bit during save_microcode_in_initrd() -- we can call
++ * load_microcode_amd() to save equivalent cpu table and microcode patches in
++ * kernel heap memory.
++ */
+ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
+ {
++	struct cont_desc desc = { };
++	struct microcode_amd *mc;
+ 	struct cpio_data cp = { };
+-	u32 dummy;
++	char buf[4];
++	u32 rev;
++
++	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
++		if (!strncmp(buf, "off", 3)) {
++			sha_check = false;
++			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
++			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
++		}
++	}
+ 
+ 	bsp_cpuid_1_eax = cpuid_1_eax;
+ 
+-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
++	rev = get_patch_level();
++	ed->old_rev = rev;
+ 
+ 	/* Needed in load_microcode_amd() */
+ 	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
+ 
+-	find_blobs_in_containers(&cp);
+-	if (!(cp.data && cp.size))
++	if (!find_blobs_in_containers(&cp))
+ 		return;
+ 
+-	if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
+-		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
+-}
+-
+-static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
+-
+-static int __init save_microcode_in_initrd(void)
+-{
+-	unsigned int cpuid_1_eax = native_cpuid_eax(1);
+-	struct cpuinfo_x86 *c = &boot_cpu_data;
+-	struct cont_desc desc = { 0 };
+-	enum ucode_state ret;
+-	struct cpio_data cp;
+-
+-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+-		return 0;
+-
+-	find_blobs_in_containers(&cp);
+-	if (!(cp.data && cp.size))
+-		return -EINVAL;
+-
+ 	scan_containers(cp.data, cp.size, &desc);
+-	if (!desc.mc)
+-		return -EINVAL;
+ 
+-	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+-	if (ret > UCODE_UPDATED)
+-		return -EINVAL;
++	mc = desc.mc;
++	if (!mc)
++		return;
+ 
+-	return 0;
++	/*
++	 * Allow application of the same revision to pick up SMT-specific
++	 * changes even if the revision of the other SMT thread is already
++	 * up-to-date.
++	 */
++	if (ed->old_rev > mc->hdr.patch_id)
++		return;
++
++	if (__apply_microcode_amd(mc, &rev, desc.psize))
++		ed->new_rev = rev;
+ }
+-early_initcall(save_microcode_in_initrd);
+ 
+ static inline bool patch_cpus_equivalent(struct ucode_patch *p,
+ 					 struct ucode_patch *n,
+@@ -727,14 +810,9 @@ static void free_cache(void)
+ static struct ucode_patch *find_patch(unsigned int cpu)
+ {
+ 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+-	u32 rev, dummy __always_unused;
+ 	u16 equiv_id = 0;
+ 
+-	/* fetch rev if not populated yet: */
+-	if (!uci->cpu_sig.rev) {
+-		rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-		uci->cpu_sig.rev = rev;
+-	}
++	uci->cpu_sig.rev = get_patch_level();
+ 
+ 	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
+ 		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
+@@ -757,22 +835,20 @@ void reload_ucode_amd(unsigned int cpu)
+ 
+ 	mc = p->data;
+ 
+-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+-
++	rev = get_patch_level();
+ 	if (rev < mc->hdr.patch_id) {
+-		if (!__apply_microcode_amd(mc, p->size))
+-			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
++		if (__apply_microcode_amd(mc, &rev, p->size))
++			pr_info_once("reload revision: 0x%08x\n", rev);
+ 	}
+ }
+ 
+ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
+ {
+-	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ 	struct ucode_patch *p;
+ 
+ 	csig->sig = cpuid_eax(0x00000001);
+-	csig->rev = c->microcode;
++	csig->rev = get_patch_level();
+ 
+ 	/*
+ 	 * a patch could have been loaded early, set uci->mc so that
+@@ -813,7 +889,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
+ 		goto out;
+ 	}
+ 
+-	if (__apply_microcode_amd(mc_amd, p->size)) {
++	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
+ 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+ 			cpu, mc_amd->hdr.patch_id);
+ 		return UCODE_ERROR;
+@@ -935,8 +1011,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
+ }
+ 
+ /* Scan the blob in @data and add microcode patches to the cache. */
+-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+-					     size_t size)
++static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
+ {
+ 	u8 *fw = (u8 *)data;
+ 	size_t offset;
+@@ -1011,6 +1086,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
+ 	return ret;
+ }
+ 
++static int __init save_microcode_in_initrd(void)
++{
++	unsigned int cpuid_1_eax = native_cpuid_eax(1);
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++	struct cont_desc desc = { 0 };
++	enum ucode_state ret;
++	struct cpio_data cp;
++
++	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
++		return 0;
++
++	if (!find_blobs_in_containers(&cp))
++		return -EINVAL;
++
++	scan_containers(cp.data, cp.size, &desc);
++	if (!desc.mc)
++		return -EINVAL;
++
++	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
++	if (ret > UCODE_UPDATED)
++		return -EINVAL;
++
++	return 0;
++}
++early_initcall(save_microcode_in_initrd);
++
+ /*
+  * AMD microcode firmware naming convention, up to family 15h they are in
+  * the legacy file:
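
Note that the scattered rdmsr(MSR_AMD64_PATCH_LEVEL, ...) reads in find_patch(), reload_ucode_amd() and collect_cpu_info_amd() are all replaced by calls to a get_patch_level() helper whose body is not shown in these hunks. A plausible sketch, assuming it is a thin wrapper around the same MSR read that the removed lines used:

  static u32 get_patch_level(void)
  {
  	u32 rev, dummy __always_unused;

  	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
  	return rev;
  }
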
+diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
+new file mode 100644
+index 00000000000000..2a1655b1fdd883
+--- /dev/null
++++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
+@@ -0,0 +1,444 @@
++/* Keep 'em sorted. */
++static const struct patch_digest phashes[] = {
++ { 0x8001227, {
++		0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
++		0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
++		0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
++		0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
++	}
++ },
++ { 0x8001250, {
++		0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
++		0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
++		0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
++		0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
++	}
++ },
++ { 0x800126e, {
++		0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
++		0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
++		0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
++		0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
++	}
++ },
++ { 0x800126f, {
++		0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
++		0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
++		0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
++		0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
++	}
++ },
++ { 0x800820d, {
++		0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
++		0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
++		0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
++		0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
++	}
++ },
++ { 0x8301025, {
++		0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
++		0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
++		0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
++		0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
++	}
++ },
++ { 0x8301055, {
++		0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
++		0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
++		0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
++		0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
++	}
++ },
++ { 0x8301072, {
++		0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
++		0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
++		0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
++		0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
++	}
++ },
++ { 0x830107a, {
++		0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
++		0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
++		0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
++		0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
++	}
++ },
++ { 0x830107b, {
++		0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
++		0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
++		0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
++		0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
++	}
++ },
++ { 0x830107c, {
++		0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
++		0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
++		0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
++		0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
++	}
++ },
++ { 0x860010d, {
++		0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
++		0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
++		0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
++		0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
++	}
++ },
++ { 0x8608108, {
++		0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
++		0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
++		0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
++		0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
++	}
++ },
++ { 0x8701034, {
++		0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
++		0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
++		0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
++		0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
++	}
++ },
++ { 0x8a00008, {
++		0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
++		0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
++		0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
++		0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
++	}
++ },
++ { 0x8a0000a, {
++		0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
++		0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
++		0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
++		0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
++	}
++ },
++ { 0xa00104c, {
++		0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
++		0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
++		0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
++		0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
++	}
++ },
++ { 0xa00104e, {
++		0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
++		0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
++		0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
++		0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
++	}
++ },
++ { 0xa001053, {
++		0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
++		0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
++		0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
++		0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
++	}
++ },
++ { 0xa001058, {
++		0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
++		0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
++		0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
++		0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
++	}
++ },
++ { 0xa001075, {
++		0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
++		0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
++		0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
++		0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
++	}
++ },
++ { 0xa001078, {
++		0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
++		0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
++		0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
++		0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
++	}
++ },
++ { 0xa001079, {
++		0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
++		0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
++		0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
++		0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
++	}
++ },
++ { 0xa00107a, {
++		0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
++		0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
++		0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
++		0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
++	}
++ },
++ { 0xa001143, {
++		0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
++		0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
++		0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
++		0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
++	}
++ },
++ { 0xa001144, {
++		0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
++		0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
++		0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
++		0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
++	}
++ },
++ { 0xa00115d, {
++		0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
++		0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
++		0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
++		0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
++	}
++ },
++ { 0xa001173, {
++		0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
++		0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
++		0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
++		0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
++	}
++ },
++ { 0xa0011a8, {
++		0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
++		0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
++		0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
++		0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
++	}
++ },
++ { 0xa0011ce, {
++		0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
++		0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
++		0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
++		0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
++	}
++ },
++ { 0xa0011d1, {
++		0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
++		0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
++		0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
++		0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
++	}
++ },
++ { 0xa0011d3, {
++		0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
++		0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
++		0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
++		0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
++	}
++ },
++ { 0xa0011d5, {
++		0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
++		0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
++		0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
++		0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
++	}
++ },
++ { 0xa001223, {
++		0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
++		0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
++		0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
++		0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
++	}
++ },
++ { 0xa001224, {
++		0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
++		0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
++		0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
++		0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
++	}
++ },
++ { 0xa001227, {
++		0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
++		0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
++		0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
++		0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
++	}
++ },
++ { 0xa001229, {
++		0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
++		0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
++		0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
++		0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
++	}
++ },
++ { 0xa00122e, {
++		0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
++		0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
++		0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
++		0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
++	}
++ },
++ { 0xa001231, {
++		0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
++		0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
++		0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
++		0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
++	}
++ },
++ { 0xa001234, {
++		0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
++		0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
++		0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
++		0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
++	}
++ },
++ { 0xa001236, {
++		0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
++		0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
++		0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
++		0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
++	}
++ },
++ { 0xa001238, {
++		0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
++		0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
++		0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
++		0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
++	}
++ },
++ { 0xa00820c, {
++		0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
++		0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
++		0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
++		0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
++	}
++ },
++ { 0xa10113e, {
++		0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
++		0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
++		0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
++		0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
++	}
++ },
++ { 0xa101144, {
++		0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
++		0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
++		0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
++		0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
++	}
++ },
++ { 0xa101148, {
++		0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
++		0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
++		0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
++		0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
++	}
++ },
++ { 0xa10123e, {
++		0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
++		0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
++		0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
++		0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
++	}
++ },
++ { 0xa101244, {
++		0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
++		0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
++		0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
++		0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
++	}
++ },
++ { 0xa101248, {
++		0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
++		0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
++		0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
++		0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
++	}
++ },
++ { 0xa108108, {
++		0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
++		0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
++		0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
++		0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
++	}
++ },
++ { 0xa20102d, {
++		0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
++		0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
++		0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
++		0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
++	}
++ },
++ { 0xa201210, {
++		0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
++		0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
++		0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
++		0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
++	}
++ },
++ { 0xa404107, {
++		0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
++		0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
++		0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
++		0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
++	}
++ },
++ { 0xa500011, {
++		0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
++		0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
++		0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
++		0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
++	}
++ },
++ { 0xa601209, {
++		0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
++		0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
++		0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
++		0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
++	}
++ },
++ { 0xa704107, {
++		0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
++		0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
++		0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
++		0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
++	}
++ },
++ { 0xa705206, {
++		0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
++		0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
++		0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
++		0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
++	}
++ },
++ { 0xa708007, {
++		0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
++		0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
++		0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
++		0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
++	}
++ },
++ { 0xa70c005, {
++		0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
++		0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
++		0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
++		0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
++	}
++ },
++ { 0xaa00116, {
++		0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
++		0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
++		0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
++		0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
++	}
++ },
++ { 0xaa00212, {
++		0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
++		0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
++		0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
++		0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
++	}
++ },
++ { 0xaa00213, {
++		0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
++		0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
++		0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
++		0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
++	}
++ },
++ { 0xaa00215, {
++		0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
++		0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
++		0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
++		0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
++	}
++ },
++};
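
The "Keep 'em sorted" comment matters: ordering by patch_id lets the loader look a revision up with a binary search rather than a linear scan. A minimal sketch of such a lookup — the struct layout and cmp helper here are assumptions for illustration; the consuming code is not part of this hunk:

  #include <linux/bsearch.h>
  #include <linux/kernel.h>	/* ARRAY_SIZE */

  struct patch_digest {
  	u32 patch_id;
  	u8 sha256[32];		/* SHA256_DIGEST_SIZE */
  };

  static int cmp_id(const void *key, const void *elem)
  {
  	const struct patch_digest *pd = elem;
  	u32 patch_id = *(const u32 *)key;

  	if (patch_id == pd->patch_id)
  		return 0;
  	return patch_id < pd->patch_id ? -1 : 1;
  }

  /* Returns the expected digest for @patch_id, or NULL if unlisted. */
  static const struct patch_digest *find_digest(u32 patch_id)
  {
  	return bsearch(&patch_id, phashes, ARRAY_SIZE(phashes),
  		       sizeof(phashes[0]), cmp_id);
  }
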
+diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
+index 21776c529fa97a..5df621752fefac 100644
+--- a/arch/x86/kernel/cpu/microcode/internal.h
++++ b/arch/x86/kernel/cpu/microcode/internal.h
+@@ -100,14 +100,12 @@ extern bool force_minrev;
+ #ifdef CONFIG_CPU_SUP_AMD
+ void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
+ void load_ucode_amd_ap(unsigned int family);
+-int save_microcode_in_initrd_amd(unsigned int family);
+ void reload_ucode_amd(unsigned int cpu);
+ struct microcode_ops *init_amd_microcode(void);
+ void exit_amd_microcode(void);
+ #else /* CONFIG_CPU_SUP_AMD */
+ static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
+ static inline void load_ucode_amd_ap(unsigned int family) { }
+-static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+ static inline void reload_ucode_amd(unsigned int cpu) { }
+ static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
+ static inline void exit_amd_microcode(void) { }
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index c964c6b667809c..b3dea22d51eba8 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -414,13 +414,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
+ 		}
+ 	}
+ 	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
++	atomic_inc(&disk->nr_zone_wplugs);
+ 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ 
+ 	return true;
+ }
+ 
+-static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+-						  sector_t sector)
++static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
++							 sector_t sector)
+ {
+ 	unsigned int zno = disk_zone_no(disk, sector);
+ 	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
+@@ -441,6 +442,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ 	return NULL;
+ }
+ 
++static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
++							 sector_t sector)
++{
++	if (!atomic_read(&disk->nr_zone_wplugs))
++		return NULL;
++
++	return disk_get_hashed_zone_wplug(disk, sector);
++}
++
+ static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
+ {
+ 	struct blk_zone_wplug *zwplug =
+@@ -505,6 +515,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
+ 	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
+ 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+ 	hlist_del_init_rcu(&zwplug->node);
++	atomic_dec(&disk->nr_zone_wplugs);
+ 	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+ 	disk_put_zone_wplug(zwplug);
+ }
+@@ -594,6 +605,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
+ {
+ 	struct bio *bio;
+ 
++	if (bio_list_empty(&zwplug->bio_list))
++		return;
++
++	pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
++			    zwplug->disk->disk_name, zwplug->zone_no);
+ 	while ((bio = bio_list_pop(&zwplug->bio_list)))
+ 		blk_zone_wplug_bio_io_error(zwplug, bio);
+ }
+@@ -1042,6 +1058,47 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
+ 	return true;
+ }
+ 
++static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
++{
++	struct gendisk *disk = bio->bi_bdev->bd_disk;
++	struct blk_zone_wplug *zwplug;
++	unsigned long flags;
++
++	/*
++	 * We have native support for zone append operations, so we are not
++	 * going to handle @bio through plugging. However, we may already have a
++	 * zone write plug for the target zone if that zone was previously
++	 * partially written using regular writes. In such case, we risk leaving
++	 * the plug in the disk hash table if the zone is fully written using
++	 * zone append operations. Avoid this by removing the zone write plug.
++	 */
++	zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
++	if (likely(!zwplug))
++		return;
++
++	spin_lock_irqsave(&zwplug->lock, flags);
++
++	/*
++	 * We are about to remove the zone write plug. But if the user
++	 * (mistakenly) has issued regular writes together with native zone
++	 * append, we must abort the writes, as otherwise the plugged BIOs would
++	 * not be executed by the plug BIO work as disk_get_zone_wplug() will
++	 * return NULL after the plug is removed. Aborting the plugged write
++	 * BIOs is consistent with the fact that these writes will most likely
++	 * fail anyway as there are no ordering guarantees between zone append
++	 * operations and regular write operations.
++	 */
++	if (!bio_list_empty(&zwplug->bio_list)) {
++		pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
++				    disk->disk_name, zwplug->zone_no);
++		disk_zone_wplug_abort(zwplug);
++	}
++	disk_remove_zone_wplug(disk, zwplug);
++	spin_unlock_irqrestore(&zwplug->lock, flags);
++
++	disk_put_zone_wplug(zwplug);
++}
++
+ /**
+  * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
+  * @bio: The BIO being submitted
+@@ -1098,8 +1155,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+ 	 */
+ 	switch (bio_op(bio)) {
+ 	case REQ_OP_ZONE_APPEND:
+-		if (!bdev_emulates_zone_append(bdev))
++		if (!bdev_emulates_zone_append(bdev)) {
++			blk_zone_wplug_handle_native_zone_append(bio);
+ 			return false;
++		}
+ 		fallthrough;
+ 	case REQ_OP_WRITE:
+ 	case REQ_OP_WRITE_ZEROES:
+@@ -1286,6 +1345,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
+ {
+ 	unsigned int i;
+ 
++	atomic_set(&disk->nr_zone_wplugs, 0);
+ 	disk->zone_wplugs_hash_bits =
+ 		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
+ 
+@@ -1340,6 +1400,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
+ 		}
+ 	}
+ 
++	WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
+ 	kfree(disk->zone_wplugs_hash);
+ 	disk->zone_wplugs_hash = NULL;
+ 	disk->zone_wplugs_hash_bits = 0;
+@@ -1552,11 +1613,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
+ 	}
+ 
+ 	/*
+-	 * We need to track the write pointer of all zones that are not
+-	 * empty nor full. So make sure we have a zone write plug for
+-	 * such zone if the device has a zone write plug hash table.
++	 * If the device needs zone append emulation, we need to track the
++	 * write pointer of all zones that are not empty nor full. So make sure
++	 * we have a zone write plug for such a zone if the device has a zone
++	 * write plug hash table.
+ 	 */
+-	if (!disk->zone_wplugs_hash)
++	if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
+ 		return 0;
+ 
+ 	disk_zone_wplug_sync_wp_offset(disk, zone);
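
The new nr_zone_wplugs counter gives disk_get_zone_wplug() a lock-free early-out: when nothing is plugged — the steady state for drives using native zone append — the RCU hash walk is skipped entirely. The same pattern in a stripped-down form (illustrative names, not the block-layer types):

  #include <linux/atomic.h>
  #include <linux/spinlock.h>

  struct plug_table {
  	spinlock_t lock;
  	atomic_t nr_entries;		/* mirrors the hash population */
  	/* ... RCU hash buckets ... */
  };

  static void plug_table_insert(struct plug_table *t)
  {
  	spin_lock(&t->lock);
  	/* hlist_add_head_rcu(...); */
  	atomic_inc(&t->nr_entries);	/* keep counter in step */
  	spin_unlock(&t->lock);
  }

  static bool plug_table_maybe_contains(struct plug_table *t)
  {
  	/* Lock-free early-out: nothing inserted, nothing to find. */
  	return atomic_read(&t->nr_entries) != 0;
  }
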
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
+index 5365e9a4300070..42433c19eb3088 100644
+--- a/drivers/firmware/cirrus/cs_dsp.c
++++ b/drivers/firmware/cirrus/cs_dsp.c
+@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 				goto out_fw;
+ 			}
+ 
+-			ret = regmap_raw_write_async(regmap, reg, buf->buf,
+-						     le32_to_cpu(region->len));
++			ret = regmap_raw_write(regmap, reg, buf->buf,
++					       le32_to_cpu(region->len));
+ 			if (ret != 0) {
+ 				cs_dsp_err(dsp,
+ 					   "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 		regions++;
+ 	}
+ 
+-	ret = regmap_async_complete(regmap);
+-	if (ret != 0) {
+-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+-		goto out_fw;
+-	}
+-
+ 	if (pos > firmware->size)
+ 		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ 			    file, regions, pos - firmware->size);
+@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ 	cs_dsp_debugfs_save_wmfwname(dsp, file);
+ 
+ out_fw:
+-	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 
+ 	if (ret == -EOVERFLOW)
+@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 			cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ 				   file, blocks, le32_to_cpu(blk->len),
+ 				   reg);
+-			ret = regmap_raw_write_async(regmap, reg, buf->buf,
+-						     le32_to_cpu(blk->len));
++			ret = regmap_raw_write(regmap, reg, buf->buf,
++					       le32_to_cpu(blk->len));
+ 			if (ret != 0) {
+ 				cs_dsp_err(dsp,
+ 					   "%s.%d: Failed to write to %x in %s: %d\n",
+@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 		blocks++;
+ 	}
+ 
+-	ret = regmap_async_complete(regmap);
+-	if (ret != 0)
+-		cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+-
+ 	if (pos > firmware->size)
+ 		cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ 			    file, blocks, pos - firmware->size);
+@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
+ 	cs_dsp_debugfs_save_binname(dsp, file);
+ 
+ out_fw:
+-	regmap_async_complete(regmap);
+ 	cs_dsp_buf_free(&buf_list);
+ 
+ 	if (ret == -EOVERFLOW)
+@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+ {
+ 	int ret;
+ 
+-	ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+-				       ADSP2_SYS_ENA, ADSP2_SYS_ENA);
++	ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
++				 ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ 	if (ret != 0)
+ 		return ret;
+ 
+diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
+index 5ed0602c2f75f0..4eb0dff4dfaf8b 100644
+--- a/drivers/firmware/efi/mokvar-table.c
++++ b/drivers/firmware/efi/mokvar-table.c
+@@ -103,9 +103,7 @@ void __init efi_mokvar_table_init(void)
+ 	void *va = NULL;
+ 	unsigned long cur_offset = 0;
+ 	unsigned long offset_limit;
+-	unsigned long map_size = 0;
+ 	unsigned long map_size_needed = 0;
+-	unsigned long size;
+ 	struct efi_mokvar_table_entry *mokvar_entry;
+ 	int err;
+ 
+@@ -134,48 +132,34 @@ void __init efi_mokvar_table_init(void)
+ 	 */
+ 	err = -EINVAL;
+ 	while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
+-		mokvar_entry = va + cur_offset;
+-		map_size_needed = cur_offset + sizeof(*mokvar_entry);
+-		if (map_size_needed > map_size) {
+-			if (va)
+-				early_memunmap(va, map_size);
+-			/*
+-			 * Map a little more than the fixed size entry
+-			 * header, anticipating some data. It's safe to
+-			 * do so as long as we stay within current memory
+-			 * descriptor.
+-			 */
+-			map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
+-				       offset_limit);
+-			va = early_memremap(efi.mokvar_table, map_size);
+-			if (!va) {
+-				pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
+-				       efi.mokvar_table, map_size);
+-				return;
+-			}
+-			mokvar_entry = va + cur_offset;
++		if (va)
++			early_memunmap(va, sizeof(*mokvar_entry));
++		va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
++		if (!va) {
++			pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
++			       efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
++			return;
+ 		}
++		mokvar_entry = va;
+ 
+ 		/* Check for last sentinel entry */
+ 		if (mokvar_entry->name[0] == '\0') {
+ 			if (mokvar_entry->data_size != 0)
+ 				break;
+ 			err = 0;
++			map_size_needed = cur_offset + sizeof(*mokvar_entry);
+ 			break;
+ 		}
+ 
+-		/* Sanity check that the name is null terminated */
+-		size = strnlen(mokvar_entry->name,
+-			       sizeof(mokvar_entry->name));
+-		if (size >= sizeof(mokvar_entry->name))
+-			break;
++		/* Enforce that the name is NUL terminated */
++		mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
+ 
+ 		/* Advance to the next entry */
+-		cur_offset = map_size_needed + mokvar_entry->data_size;
++		cur_offset += sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ 	}
+ 
+ 	if (va)
+-		early_memunmap(va, map_size);
++		early_memunmap(va, sizeof(*mokvar_entry));
+ 	if (err) {
+ 		pr_err("EFI MOKvar config table is not valid\n");
+ 		return;
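
The rewritten loop drops the old grow-the-mapping strategy in favour of one fixed-size early_memremap() window per entry header, which stays valid no matter where an entry's variable-length data lands. The shape of that pattern, with a hypothetical entry layout, table_pa and limit standing in for the real efi_mokvar_table_entry and its bounds:

  struct entry {			/* hypothetical layout */
  	char name[256];
  	u64  data_size;
  	u8   data[];			/* data_size bytes follow */
  };

  unsigned long off = 0;
  void *va = NULL;

  while (off + sizeof(struct entry) <= limit) {
  	struct entry *e;

  	if (va)
  		early_memunmap(va, sizeof(*e));
  	va = early_memremap(table_pa + off, sizeof(*e));
  	if (!va)
  		break;			/* mapping failed */
  	e = va;

  	if (e->name[0] == '\0')		/* sentinel terminates the table */
  		break;

  	off += sizeof(*e) + e->data_size;
  }
  if (va)
  	early_memunmap(va, sizeof(struct entry));
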
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cd4fac12083422..3780d50fd3ae83 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1635,6 +1635,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
+ 	if (amdgpu_sriov_vf(adev))
+ 		return 0;
+ 
++	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
++	if ((amdgpu_runtime_pm != 0) &&
++	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
++	    adev->pdev->device == 0x731f &&
++	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
++		return 0;
++
+ 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+ 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+ 		DRM_WARN("System can't access extended configuration space, please check!!\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index 9a4dad3e415290..7ec35e36775851 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1593,22 +1593,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
+ 	}
+ 
+ 	mutex_lock(&adev->enforce_isolation_mutex);
+-
+ 	for (i = 0; i < num_partitions; i++) {
+-		if (adev->enforce_isolation[i] && !partition_values[i]) {
++		if (adev->enforce_isolation[i] && !partition_values[i])
+ 			/* Going from enabled to disabled */
+ 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
+-			amdgpu_mes_set_enforce_isolation(adev, i, false);
+-		} else if (!adev->enforce_isolation[i] && partition_values[i]) {
++		else if (!adev->enforce_isolation[i] && partition_values[i])
+ 			/* Going from disabled to enabled */
+ 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+-			amdgpu_mes_set_enforce_isolation(adev, i, true);
+-		}
+ 		adev->enforce_isolation[i] = partition_values[i];
+ 	}
+-
+ 	mutex_unlock(&adev->enforce_isolation_mutex);
+ 
++	amdgpu_mes_update_enforce_isolation(adev);
++
+ 	return count;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 59ec20b07a6af3..452ca07e7e7d2f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1679,7 +1679,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+ }
+ 
+ /* Fix me -- node_id is used to identify the correct MES instances in the future */
+-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
++static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
++					    uint32_t node_id, bool enable)
+ {
+ 	struct mes_misc_op_input op_input = {0};
+ 	int r;
+@@ -1701,6 +1702,23 @@ int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_i
+ 	return r;
+ }
+ 
++int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
++{
++	int i, r = 0;
++
++	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
++		mutex_lock(&adev->enforce_isolation_mutex);
++		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
++			if (adev->enforce_isolation[i])
++				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
++			else
++				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
++		}
++		mutex_unlock(&adev->enforce_isolation_mutex);
++	}
++	return r;
++}
++
+ #if defined(CONFIG_DEBUG_FS)
+ 
+ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+index c6f93cbd6739f0..f089c5087c63db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
+ 
+ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
+ 
+-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
++int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
+ 
+ #endif /* __AMDGPU_MES_H__ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 2890f54339ad0c..e7fad5587f69e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2280,7 +2280,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ 	struct amdgpu_res_cursor cursor;
+ 	u64 addr;
+-	int r;
++	int r = 0;
+ 
+ 	if (!adev->mman.buffer_funcs_enabled)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 9c905b9e937637..40750e5478efb5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -1635,6 +1635,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
+ 		goto failure;
+ 	}
+ 
++	r = amdgpu_mes_update_enforce_isolation(adev);
++	if (r)
++		goto failure;
++
+ out:
+ 	/*
+ 	 * Disable KIQ ring usage from the driver once MES is enabled.
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+index 9ecc5d61e49ba3..0921fd8c050dae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -1590,6 +1590,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
+ 		goto failure;
+ 	}
+ 
++	r = amdgpu_mes_update_enforce_isolation(adev);
++	if (r)
++		goto failure;
++
+ out:
+ 	/*
+ 	 * Disable KIQ ring usage from the driver once MES is enabled.
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+index 2eff37aaf8273b..1695dd78ede8e6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
++
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+index 68dbc0399c87aa..3c0ae28c5923b5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+index 2b72d5b4949b6c..565858b9044d46 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |=
+ 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
+ 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 84e8ea3a8a0c94..217af36dc0976f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -182,6 +182,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
+ 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+ 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ 
++	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
++
+ 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+ 
+ 	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+@@ -244,7 +247,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
+ 
+ 	m = get_mqd(mqd);
+ 
+-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
++	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
+ 	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
+ 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ 
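
All four MQD managers (v9 through v12) get the same treatment: the invariant bits of cp_hqd_pq_control — RPTR block size and unordered dispatch — are now programmed once in init_mqd(), and update_mqd() only swaps the queue-size field via a read-modify-write instead of rebuilding the register from scratch. Worked numbers for that RMW, with an assumed 6-bit QUEUE_SIZE field at bits [5:0] (the mask value is illustrative):

  #include <linux/log2.h>

  #define QUEUE_SIZE_MASK	0x3f	/* assumed field: bits [5:0] */

  u32 ctl = 0x50000100;			/* bits set once at init_mqd() */
  u32 queue_size = 4096;		/* ring size in bytes */

  /* log2 of the ring size in dwords, minus one (as in update_mqd) */
  u32 size_field = order_base_2(queue_size / 4) - 1;	/* = 9 */

  ctl &= ~QUEUE_SIZE_MASK;		/* clear only the size field */
  ctl |= size_field;			/* ctl == 0x50000109 */
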
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f7c0d7625ff12d..ca6b9a585aba90 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1617,75 +1617,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
+ 	return false;
+ }
+ 
+-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
++struct amdgpu_dm_quirks {
++	bool aux_hpd_discon;
++	bool support_edp0_on_dp1;
++};
++
++static struct amdgpu_dm_quirks quirk_entries = {
++	.aux_hpd_discon = false,
++	.support_edp0_on_dp1 = false
++};
++
++static int edp0_on_dp1_callback(const struct dmi_system_id *id)
++{
++	quirk_entries.support_edp0_on_dp1 = true;
++	return 0;
++}
++
++static int aux_hpd_discon_callback(const struct dmi_system_id *id)
++{
++	quirk_entries.aux_hpd_discon = true;
++	return 0;
++}
++
++static const struct dmi_system_id dmi_quirk_table[] = {
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ 		},
+ 	},
+ 	{
++		.callback = aux_hpd_discon_callback,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ 		},
+ 	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
++		},
++	},
++	{
++		.callback = edp0_on_dp1_callback,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
++		},
++	},
+ 	{}
+ 	/* TODO: refactor this from a fixed table to a dynamic option */
+ };
+ 
+-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
++static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
+ {
+-	const struct dmi_system_id *dmi_id;
++	int dmi_id;
++	struct drm_device *dev = dm->ddev;
+ 
+ 	dm->aux_hpd_discon_quirk = false;
++	init_data->flags.support_edp0_on_dp1 = false;
++
++	dmi_id = dmi_check_system(dmi_quirk_table);
++
++	if (!dmi_id)
++		return;
+ 
+-	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+-	if (dmi_id) {
++	if (quirk_entries.aux_hpd_discon) {
+ 		dm->aux_hpd_discon_quirk = true;
+-		DRM_INFO("aux_hpd_discon_quirk attached\n");
++		drm_info(dev, "aux_hpd_discon_quirk attached\n");
++	}
++	if (quirk_entries.support_edp0_on_dp1) {
++		init_data->flags.support_edp0_on_dp1 = true;
++		drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ 	}
+ }
+ 
+@@ -1993,7 +2048,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ 		init_data.num_virtual_links = 1;
+ 
+-	retrieve_dmi_info(&adev->dm);
++	retrieve_dmi_info(&adev->dm, &init_data);
+ 
+ 	if (adev->dm.bb_from_dmub)
+ 		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
+@@ -7180,8 +7235,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+ 	struct dc_link *dc_link = aconnector->dc_link;
+ 	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ 	const struct drm_edid *drm_edid;
++	struct i2c_adapter *ddc;
++
++	if (dc_link && dc_link->aux_mode)
++		ddc = &aconnector->dm_dp_aux.aux.ddc;
++	else
++		ddc = &aconnector->i2c->base;
+ 
+-	drm_edid = drm_edid_read(connector);
++	drm_edid = drm_edid_read_ddc(connector, ddc);
+ 	drm_edid_connector_update(connector, drm_edid);
+ 	if (!drm_edid) {
+ 		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+@@ -7226,14 +7287,21 @@ static int get_modes(struct drm_connector *connector)
+ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+ {
+ 	struct drm_connector *connector = &aconnector->base;
++	struct dc_link *dc_link = aconnector->dc_link;
+ 	struct dc_sink_init_data init_params = {
+ 			.link = aconnector->dc_link,
+ 			.sink_signal = SIGNAL_TYPE_VIRTUAL
+ 	};
+ 	const struct drm_edid *drm_edid;
+ 	const struct edid *edid;
++	struct i2c_adapter *ddc;
++
++	if (dc_link && dc_link->aux_mode)
++		ddc = &aconnector->dm_dp_aux.aux.ddc;
++	else
++		ddc = &aconnector->i2c->base;
+ 
+-	drm_edid = drm_edid_read(connector);
++	drm_edid = drm_edid_read_ddc(connector, ddc);
+ 	drm_edid_connector_update(connector, drm_edid);
+ 	if (!drm_edid) {
+ 		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 3390f0d8420a05..c4a7fd453e5fc0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
++	int i;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
++
++	/* Update reference counts for HPDs */
++	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
++		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
++			drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
++	}
+ }
+ 
+ /**
+@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 	struct drm_device *dev = adev_to_drm(adev);
+ 	struct drm_connector *connector;
+ 	struct drm_connector_list_iter iter;
++	int i;
+ 
+ 	drm_connector_list_iter_begin(dev, &iter);
+ 	drm_for_each_connector_iter(connector, &iter) {
+@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&iter);
++
++	/* Update reference counts for HPDs */
++	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
++		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
++			drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
++	}
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 45858bf1523d8f..e140b7a04d7246 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
+ 	if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+ 		return false;
+ 
+-	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
++	/* Temporarily disable PSR-SU to avoid glitches */
++	return false;
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+index 8908646ad620d4..4323b601c9aa6a 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
+ 	if (!amdgpu_dpm)
+ 		return 0;
+ 
++	mutex_lock(&adev->pm.mutex);
+ 	kv_dpm_setup_asic(adev);
+ 	ret = kv_dpm_enable(adev);
+ 	if (ret)
+@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
+ 	else
+ 		adev->pm.dpm_enabled = true;
+ 	amdgpu_legacy_dpm_compute_clocks(adev);
++	mutex_unlock(&adev->pm.mutex);
++
+ 	return ret;
+ }
+ 
+@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
+ {
+ 	struct amdgpu_device *adev = ip_block->adev;
+ 
++	cancel_work_sync(&adev->pm.dpm.thermal.work);
++
+ 	if (adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
++		adev->pm.dpm_enabled = false;
+ 		/* disable dpm */
+ 		kv_dpm_disable(adev);
+ 		/* reset the power state */
+ 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+ 	return 0;
+ }
+ 
+ static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
+ {
+-	int ret;
++	int ret = 0;
+ 	struct amdgpu_device *adev = ip_block->adev;
+ 
+-	if (adev->pm.dpm_enabled) {
++	if (!amdgpu_dpm)
++		return 0;
++
++	if (!adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
+ 		/* asic init will reset to the boot state */
+ 		kv_dpm_setup_asic(adev);
+ 		ret = kv_dpm_enable(adev);
+-		if (ret)
++		if (ret) {
+ 			adev->pm.dpm_enabled = false;
+-		else
++		} else {
+ 			adev->pm.dpm_enabled = true;
+-		if (adev->pm.dpm_enabled)
+ 			amdgpu_legacy_dpm_compute_clocks(adev);
++		}
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ static bool kv_dpm_is_idle(void *handle)
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+index e861355ebd75b9..c7518b13e78795 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+ 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ 	int temp, size = sizeof(temp);
+ 
+-	if (!adev->pm.dpm_enabled)
+-		return;
++	mutex_lock(&adev->pm.mutex);
+ 
++	if (!adev->pm.dpm_enabled) {
++		mutex_unlock(&adev->pm.mutex);
++		return;
++	}
+ 	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ 				   AMDGPU_PP_SENSOR_GPU_TEMP,
+ 				   (void *)&temp,
+@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+ 	adev->pm.dpm.state = dpm_state;
+ 
+ 	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
++	mutex_unlock(&adev->pm.mutex);
+ }
+diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+index ee23a0f897c50c..5fb2409b6fc206 100644
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -7785,6 +7785,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
+ 	if (!amdgpu_dpm)
+ 		return 0;
+ 
++	mutex_lock(&adev->pm.mutex);
+ 	si_dpm_setup_asic(adev);
+ 	ret = si_dpm_enable(adev);
+ 	if (ret)
+@@ -7792,6 +7793,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
+ 	else
+ 		adev->pm.dpm_enabled = true;
+ 	amdgpu_legacy_dpm_compute_clocks(adev);
++	mutex_unlock(&adev->pm.mutex);
+ 	return ret;
+ }
+ 
+@@ -7809,32 +7811,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
+ {
+ 	struct amdgpu_device *adev = ip_block->adev;
+ 
++	cancel_work_sync(&adev->pm.dpm.thermal.work);
++
+ 	if (adev->pm.dpm_enabled) {
++		mutex_lock(&adev->pm.mutex);
++		adev->pm.dpm_enabled = false;
+ 		/* disable dpm */
+ 		si_dpm_disable(adev);
+ 		/* reset the power state */
+ 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
++		mutex_unlock(&adev->pm.mutex);
+ 	}
++
+ 	return 0;
+ }
+ 
+ static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
+ {
+-	int ret;
++	int ret = 0;
+ 	struct amdgpu_device *adev = ip_block->adev;
+ 
+-	if (adev->pm.dpm_enabled) {
++	if (!amdgpu_dpm)
++		return 0;
++
++	if (!adev->pm.dpm_enabled) {
+ 		/* asic init will reset to the boot state */
++		mutex_lock(&adev->pm.mutex);
+ 		si_dpm_setup_asic(adev);
+ 		ret = si_dpm_enable(adev);
+-		if (ret)
++		if (ret) {
+ 			adev->pm.dpm_enabled = false;
+-		else
++		} else {
+ 			adev->pm.dpm_enabled = true;
+-		if (adev->pm.dpm_enabled)
+ 			amdgpu_legacy_dpm_compute_clocks(adev);
++		}
++		mutex_unlock(&adev->pm.mutex);
+ 	}
+-	return 0;
++
++	return ret;
+ }
+ 
+ static bool si_dpm_is_idle(void *handle)
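
kv_dpm and si_dpm now share the same suspend discipline: flush the thermal worker first, then flip dpm_enabled and tear down hardware state under adev->pm.mutex, with the worker itself re-checking the flag under the same mutex. A condensed sketch of the suspend side (names as in the patch, bodies elided):

  static int dpm_suspend(struct amdgpu_ip_block *ip_block)
  {
  	struct amdgpu_device *adev = ip_block->adev;

  	/* 1. Make sure the thermal worker is not mid-flight. */
  	cancel_work_sync(&adev->pm.dpm.thermal.work);

  	if (adev->pm.dpm_enabled) {
  		mutex_lock(&adev->pm.mutex);
  		/* 2. Clear the flag first so a late worker bails out. */
  		adev->pm.dpm_enabled = false;
  		/* 3. ...then disable DPM and reset the power state. */
  		mutex_unlock(&adev->pm.mutex);
  	}
  	return 0;
  }
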
+diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
+index b14b581c059d3d..02a516e7719274 100644
+--- a/drivers/gpu/drm/drm_fbdev_dma.c
++++ b/drivers/gpu/drm/drm_fbdev_dma.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: MIT
+ 
+ #include <linux/fb.h>
++#include <linux/vmalloc.h>
+ 
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_dma.h>
+@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
+ 	.fb_destroy = drm_fbdev_dma_fb_destroy,
+ };
+ 
+-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
++FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
+ 				   drm_fb_helper_damage_range,
+ 				   drm_fb_helper_damage_area);
+ 
+-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
+ {
+ 	struct drm_fb_helper *fb_helper = info->par;
+-	struct drm_framebuffer *fb = fb_helper->fb;
+-	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
++	void *shadow = info->screen_buffer;
++
++	if (!fb_helper->dev)
++		return;
+ 
+-	if (!dma->map_noncoherent)
+-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++	if (info->fbdefio)
++		fb_deferred_io_cleanup(info);
++	drm_fb_helper_fini(fb_helper);
++	vfree(shadow);
+ 
+-	return fb_deferred_io_mmap(info, vma);
++	drm_client_buffer_vunmap(fb_helper->buffer);
++	drm_client_framebuffer_delete(fb_helper->buffer);
++	drm_client_release(&fb_helper->client);
++	drm_fb_helper_unprepare(fb_helper);
++	kfree(fb_helper);
+ }
+ 
+-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
++static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
+ 	.owner = THIS_MODULE,
+ 	.fb_open = drm_fbdev_dma_fb_open,
+ 	.fb_release = drm_fbdev_dma_fb_release,
+-	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
++	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
+ 	DRM_FB_HELPER_DEFAULT_OPS,
+-	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
+-	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
+-	.fb_destroy = drm_fbdev_dma_fb_destroy,
++	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
+ };
+ 
+ /*
+  * struct drm_fb_helper
+  */
+ 
++static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
++					   struct drm_clip_rect *clip,
++					   struct iosys_map *dst)
++{
++	struct drm_framebuffer *fb = fb_helper->fb;
++	size_t offset = clip->y1 * fb->pitches[0];
++	size_t len = clip->x2 - clip->x1;
++	unsigned int y;
++	void *src;
++
++	switch (drm_format_info_bpp(fb->format, 0)) {
++	case 1:
++		offset += clip->x1 / 8;
++		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
++		break;
++	case 2:
++		offset += clip->x1 / 4;
++		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
++		break;
++	case 4:
++		offset += clip->x1 / 2;
++		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
++		break;
++	default:
++		offset += clip->x1 * fb->format->cpp[0];
++		len *= fb->format->cpp[0];
++		break;
++	}
++
++	src = fb_helper->info->screen_buffer + offset;
++	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
++
++	for (y = clip->y1; y < clip->y2; y++) {
++		iosys_map_memcpy_to(dst, 0, src, len);
++		iosys_map_incr(dst, fb->pitches[0]);
++		src += fb->pitches[0];
++	}
++}
++
++static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
++				     struct drm_clip_rect *clip)
++{
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct iosys_map dst;
++
++	/*
++	 * For fbdev emulation, we only have to protect against fbdev modeset
++	 * operations. Nothing else will involve the client buffer's BO. So it
++	 * is sufficient to acquire struct drm_fb_helper.lock here.
++	 */
++	mutex_lock(&fb_helper->lock);
++
++	dst = buffer->map;
++	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
++
++	mutex_unlock(&fb_helper->lock);
++
++	return 0;
++}
+ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ 					 struct drm_clip_rect *clip)
+ {
+@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ 		return 0;
+ 
+ 	if (helper->fb->funcs->dirty) {
++		ret = drm_fbdev_dma_damage_blit(helper, clip);
++		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
++			return ret;
++
+ 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ 			return ret;
+@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+  * struct drm_fb_helper
+  */
+ 
++static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
++						 struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_device *dev = fb_helper->dev;
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
++	struct drm_framebuffer *fb = fb_helper->fb;
++	struct fb_info *info = fb_helper->info;
++	struct iosys_map map = buffer->map;
++
++	info->fbops = &drm_fbdev_dma_fb_ops;
++
++	/* screen */
++	info->flags |= FBINFO_VIRTFB; /* system memory */
++	if (dma_obj->map_noncoherent)
++		info->flags |= FBINFO_READS_FAST; /* signal caching */
++	info->screen_size = sizes->surface_height * fb->pitches[0];
++	info->screen_buffer = map.vaddr;
++	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
++		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
++			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
++	}
++	info->fix.smem_len = info->screen_size;
++
++	return 0;
++}
++
++static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
++							  struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_client_buffer *buffer = fb_helper->buffer;
++	struct fb_info *info = fb_helper->info;
++	size_t screen_size = buffer->gem->size;
++	void *screen_buffer;
++	int ret;
++
++	/*
++	 * Deferred I/O requires struct page for framebuffer memory,
++	 * which is not guaranteed for all DMA ranges. We thus create
++	 * a shadow buffer in system memory.
++	 */
++	screen_buffer = vzalloc(screen_size);
++	if (!screen_buffer)
++		return -ENOMEM;
++
++	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
++
++	/* screen */
++	info->flags |= FBINFO_VIRTFB; /* system memory */
++	info->flags |= FBINFO_READS_FAST; /* signal caching */
++	info->screen_buffer = screen_buffer;
++	info->fix.smem_len = screen_size;
++
++	fb_helper->fbdefio.delay = HZ / 20;
++	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
++
++	info->fbdefio = &fb_helper->fbdefio;
++	ret = fb_deferred_io_init(info);
++	if (ret)
++		goto err_vfree;
++
++	return 0;
++
++err_vfree:
++	vfree(screen_buffer);
++	return ret;
++}
++
+ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 				     struct drm_fb_helper_surface_size *sizes)
+ {
+ 	struct drm_client_dev *client = &fb_helper->client;
+ 	struct drm_device *dev = fb_helper->dev;
+-	bool use_deferred_io = false;
+ 	struct drm_client_buffer *buffer;
+-	struct drm_gem_dma_object *dma_obj;
+ 	struct drm_framebuffer *fb;
+ 	struct fb_info *info;
+ 	u32 format;
+@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 					       sizes->surface_height, format);
+ 	if (IS_ERR(buffer))
+ 		return PTR_ERR(buffer);
+-	dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ 
+ 	fb = buffer->fb;
+ 
+-	/*
+-	 * Deferred I/O requires struct page for framebuffer memory,
+-	 * which is not guaranteed for all DMA ranges. We thus only
+-	 * install deferred I/O if we have a framebuffer that requires
+-	 * it.
+-	 */
+-	if (fb->funcs->dirty)
+-		use_deferred_io = true;
+-
+ 	ret = drm_client_buffer_vmap(buffer, &map);
+ 	if (ret) {
+ 		goto err_drm_client_buffer_delete;
+@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ 
+ 	drm_fb_helper_fill_info(info, fb_helper, sizes);
+ 
+-	if (use_deferred_io)
+-		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
++	if (fb->funcs->dirty)
++		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
+ 	else
+-		info->fbops = &drm_fbdev_dma_fb_ops;
+-
+-	/* screen */
+-	info->flags |= FBINFO_VIRTFB; /* system memory */
+-	if (dma_obj->map_noncoherent)
+-		info->flags |= FBINFO_READS_FAST; /* signal caching */
+-	info->screen_size = sizes->surface_height * fb->pitches[0];
+-	info->screen_buffer = map.vaddr;
+-	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+-		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+-			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+-	}
+-	info->fix.smem_len = info->screen_size;
+-
+-	/*
+-	 * Only set up deferred I/O if the screen buffer supports
+-	 * it. If this disagrees with the previous test for ->dirty,
+-	 * mmap on the /dev/fb file might not work correctly.
+-	 */
+-	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
+-		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
+-
+-		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
+-			use_deferred_io = false;
+-	}
+-
+-	/* deferred I/O */
+-	if (use_deferred_io) {
+-		fb_helper->fbdefio.delay = HZ / 20;
+-		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+-
+-		info->fbdefio = &fb_helper->fbdefio;
+-		ret = fb_deferred_io_init(info);
+-		if (ret)
+-			goto err_drm_fb_helper_release_info;
+-	}
++		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
++	if (ret)
++		goto err_drm_fb_helper_release_info;
+ 
+ 	return 0;
+ 
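The reworked probe above funnels into one of two tails depending on whether the framebuffer has a dirty() callback: a direct tail that exposes the DMA mapping, and a shadowed tail that allocates page-backed system memory for deferred I/O. A minimal standalone sketch of that shape, with every name illustrative rather than the DRM API:

/* Illustrative only: models the two-tail probe split, not the DRM API. */
#include <stdlib.h>

struct fake_fb {
	int (*dirty)(void);	/* non-NULL => needs deferred I/O */
	size_t size;
	void *screen;		/* direct DMA mapping */
	void *shadow;		/* page-backed system-memory copy */
};

static int probe_tail_direct(struct fake_fb *fb)
{
	fb->shadow = NULL;	/* expose the DMA mapping itself */
	return fb->screen ? 0 : -1;
}

static int probe_tail_shadowed(struct fake_fb *fb)
{
	/* Deferred I/O needs struct-page-backed memory, which DMA
	 * ranges don't guarantee, hence a system-memory shadow. */
	fb->shadow = calloc(1, fb->size);
	return fb->shadow ? 0 : -1;
}

static int probe(struct fake_fb *fb)
{
	return fb->dirty ? probe_tail_shadowed(fb) : probe_tail_direct(fb);
}

int main(void)
{
	static char vram[4096];
	struct fake_fb fb = { .size = sizeof(vram), .screen = vram };

	return probe(&fb);	/* no dirty() callback: direct tail */
}

Keeping both tails behind one fb->funcs->dirty check also removes the earlier mismatch where the dirty test and the deferred-I/O setup could disagree.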
+diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+index 7c78496e6213cc..192e571348f6b3 100644
+--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
++++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+@@ -53,7 +53,6 @@
+ 
+ #define RING_CTL(base)				XE_REG((base) + 0x3c)
+ #define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
+-#define   RING_CTL_SIZE(size)			((size) - PAGE_SIZE) /* in bytes -> pages */
+ 
+ #define RING_START_UDW(base)			XE_REG((base) + 0x48)
+ 
+diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
+index 913f6ba606370b..5c50ca8cd8e78f 100644
+--- a/drivers/gpu/drm/xe/xe_oa.c
++++ b/drivers/gpu/drm/xe/xe_oa.c
+@@ -1766,7 +1766,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
+ 	stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
+ 
+ 	stream->sample = param->sample;
+-	stream->periodic = param->period_exponent > 0;
++	stream->periodic = param->period_exponent >= 0;
+ 	stream->period_exponent = param->period_exponent;
+ 	stream->no_preempt = param->no_preempt;
+ 	stream->wait_num_reports = param->wait_num_reports;
+@@ -2058,6 +2058,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 	}
+ 
+ 	param.xef = xef;
++	param.period_exponent = -1;
+ 	ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
+ 	if (ret)
+ 		return ret;
+@@ -2112,7 +2113,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
+ 		goto err_exec_q;
+ 	}
+ 
+-	if (param.period_exponent > 0) {
++	if (param.period_exponent >= 0) {
+ 		u64 oa_period, oa_freq_hz;
+ 
+ 		/* Requesting samples from OAG buffer is a privileged operation */
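The two ">= 0" changes work together with the new "param.period_exponent = -1" default: exponent 0 is a valid sampling period, so "unset" has to be encoded out-of-band. A tiny standalone illustration of the sentinel change (not the xe_oa API itself):

/* Illustrative only: why "unset" must be -1 once 0 is a valid exponent. */
#include <stdio.h>

#define PERIOD_UNSET (-1)

struct params { int period_exponent; };

static void open_stream(const struct params *p)
{
	if (p->period_exponent >= 0)	/* was "> 0", which dropped exponent 0 */
		printf("periodic sampling, exponent %d\n", p->period_exponent);
	else
		printf("no periodic sampling requested\n");
}

int main(void)
{
	struct params p = { .period_exponent = PERIOD_UNSET };

	open_stream(&p);		/* not periodic */
	p.period_exponent = 0;
	open_stream(&p);		/* exponent 0 is now honoured */
	return 0;
}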
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index c99380271de62f..5693b337f5dffe 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -667,20 +667,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 
+ 	/* Collect invalidated userptrs */
+ 	spin_lock(&vm->userptr.invalidated_lock);
++	xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+ 	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ 				 userptr.invalidate_link) {
+ 		list_del_init(&uvma->userptr.invalidate_link);
+-		list_move_tail(&uvma->userptr.repin_link,
+-			       &vm->userptr.repin_list);
++		list_add_tail(&uvma->userptr.repin_link,
++			      &vm->userptr.repin_list);
+ 	}
+ 	spin_unlock(&vm->userptr.invalidated_lock);
+ 
+-	/* Pin and move to temporary list */
++	/* Pin and move to bind list */
+ 	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ 				 userptr.repin_link) {
+ 		err = xe_vma_userptr_pin_pages(uvma);
+ 		if (err == -EFAULT) {
+ 			list_del_init(&uvma->userptr.repin_link);
++			/*
++			 * We might have already done the pin once, but then
++			 * had to retry before the re-bind happened due to
++			 * some other condition in the caller. In the meantime
++			 * the userptr got dinged by the notifier, so we need
++			 * to revalidate here, and this time we hit the
++			 * EFAULT. In such a case make sure we remove
++			 * ourselves from the rebind list to avoid going down
++			 * in flames.
++			 */
++			if (!list_empty(&uvma->vma.combined_links.rebind))
++				list_del_init(&uvma->vma.combined_links.rebind);
+ 
+ 			/* Wait for pending binds */
+ 			xe_vm_lock(vm, false);
+@@ -691,10 +704,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 			err = xe_vm_invalidate_vma(&uvma->vma);
+ 			xe_vm_unlock(vm);
+ 			if (err)
+-				return err;
++				break;
+ 		} else {
+-			if (err < 0)
+-				return err;
++			if (err)
++				break;
+ 
+ 			list_del_init(&uvma->userptr.repin_link);
+ 			list_move_tail(&uvma->vma.combined_links.rebind,
+@@ -702,7 +715,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
+ 		}
+ 	}
+ 
+-	return 0;
++	if (err) {
++		down_write(&vm->userptr.notifier_lock);
++		spin_lock(&vm->userptr.invalidated_lock);
++		list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
++					 userptr.repin_link) {
++			list_del_init(&uvma->userptr.repin_link);
++			list_move_tail(&uvma->userptr.invalidate_link,
++				       &vm->userptr.invalidated);
++		}
++		spin_unlock(&vm->userptr.invalidated_lock);
++		up_write(&vm->userptr.notifier_lock);
++	}
++	return err;
+ }
+ 
+ /**
+@@ -1066,6 +1091,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
+ 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
+ 
+ 		spin_lock(&vm->userptr.invalidated_lock);
++		xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
+ 		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
+ 		spin_unlock(&vm->userptr.invalidated_lock);
+ 	} else if (!xe_vma_is_null(vma)) {
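On failure the pin loop now breaks instead of returning, and the new tail moves every entry still on the repin list back onto the invalidated list under the notifier lock, so the next pass retries them. A standalone sketch of that "requeue the remainder on error" shape, using arrays in place of the kernel's locked list_move_tail() calls:

/* Illustrative only: process a batch; on error, requeue the rest. */
#include <stdio.h>

#define N 4

static int pin(int item)
{
	return item == 2 ? -14 : 0;	/* pretend item 2 faults */
}

int main(void)
{
	int repin[N] = { 0, 1, 2, 3 };
	int invalidated[N], ninval = 0;
	int err = 0, i;

	for (i = 0; i < N; i++) {
		err = pin(repin[i]);
		if (err)
			break;		/* stop at the first failure */
	}

	if (err) {			/* requeue unprocessed entries */
		for (; i < N; i++)
			invalidated[ninval++] = repin[i];
		printf("err=%d, requeued %d entries\n", err, ninval);
	}
	return 0;
}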
+diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
+index 7512614bf4b733..93ebec162c6ddb 100644
+--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
++++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
+@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
+ 		amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
+ 	}
+ 
++	iowrite32(irq, dev->eoi_base);
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
+index 8821cac3897b69..b475dd27b7af94 100644
+--- a/drivers/i2c/busses/i2c-ls2x.c
++++ b/drivers/i2c/busses/i2c-ls2x.c
+@@ -10,6 +10,7 @@
+  * Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
+  */
+ 
++#include <linux/bitfield.h>
+ #include <linux/bits.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+@@ -26,7 +27,8 @@
+ #include <linux/units.h>
+ 
+ /* I2C Registers */
+-#define I2C_LS2X_PRER		0x0 /* Freq Division Register(16 bits) */
++#define I2C_LS2X_PRER_LO	0x0 /* Freq Division Low Byte Register */
++#define I2C_LS2X_PRER_HI	0x1 /* Freq Division High Byte Register */
+ #define I2C_LS2X_CTR		0x2 /* Control Register */
+ #define I2C_LS2X_TXR		0x3 /* Transport Data Register */
+ #define I2C_LS2X_RXR		0x3 /* Receive Data Register */
+@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
+  */
+ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
+ {
++	u16 val;
+ 	struct i2c_timings *t = &priv->i2c_t;
+ 	struct device *dev = priv->adapter.dev.parent;
+ 	u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
+@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
+ 	else
+ 		t->bus_freq_hz = LS2X_I2C_FREQ_STD;
+ 
+-	/* Calculate and set i2c frequency. */
+-	writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
+-	       priv->base + I2C_LS2X_PRER);
++	/*
++	 * According to the chip manual, we can only access the registers as bytes,
++	 * otherwise the high bits will be truncated.
++	 * So set the I2C frequency with two sequential writeb() calls instead of one writew().
++	 */
++	val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
++	writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
++	writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
+ }
+ 
+ static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
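Since the hunk does not show LS2X_I2C_PCLK_FREQ, the clock numbers in this standalone check are invented; it only verifies that splitting the 16-bit divisor into two byte writes preserves both halves, re-deriving GENMASK()/FIELD_GET() for a userspace build:

/* Illustrative only: byte-split of the 16-bit prescaler value. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))

int main(void)
{
	uint32_t pclk = 50000000, bus_freq = 400000;	/* invented clocks */
	uint16_t val = pclk / (5 * bus_freq) - 1;	/* divisor = 24 */
	uint8_t lo = FIELD_GET(GENMASK(7, 0), val);	/* -> PRER_LO */
	uint8_t hi = FIELD_GET(GENMASK(15, 8), val);	/* -> PRER_HI */

	printf("val=0x%04x lo=0x%02x hi=0x%02x\n", val, lo, hi);
	return (uint16_t)(lo | (hi << 8)) == val ? 0 : 1;
}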
+diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
+index 482a0074d448cb..da12e5af9064ce 100644
+--- a/drivers/i2c/busses/i2c-npcm7xx.c
++++ b/drivers/i2c/busses/i2c-npcm7xx.c
+@@ -2329,6 +2329,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return irq;
+ 
++	/*
++	 * Disable the interrupt so that a stale asynchronous interrupt
++	 * status cannot trigger the handler: the machine might have done a
++	 * warm reset during the last SMBus/I2C transfer session.
++	 */
++	npcm_i2c_int_enable(bus, false);
++
+ 	ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
+ 			       dev_name(bus->dev), bus);
+ 	if (ret)
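The ordering matters because devm_request_irq() can deliver a pending interrupt immediately, before the handler's state is valid. A standalone model of "quiesce before install" (all names illustrative):

/* Illustrative only: silence stale interrupt state before installing. */
#include <stdio.h>

struct model_bus { int int_enabled; int stale_status; };

static void model_int_enable(struct model_bus *b, int on)
{
	b->int_enabled = on;
}

static int model_request_irq(struct model_bus *b)
{
	/* request_irq() may deliver a pending interrupt right away;
	 * model that by checking the stale status bit here. */
	if (b->int_enabled && b->stale_status)
		printf("spurious IRQ into a half-initialised driver!\n");
	return 0;
}

int main(void)
{
	/* warm-reset residue: status set, interrupts still enabled */
	struct model_bus bus = { .int_enabled = 1, .stale_status = 1 };

	model_int_enable(&bus, 0);	/* the fix: silence stale state first */
	model_request_irq(&bus);	/* no spurious IRQ now */
	model_int_enable(&bus, 1);	/* enable once initialisation is done */
	return 0;
}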
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index ac4d8faa3886c8..51293efd02ea1b 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -55,6 +55,7 @@
+ #include <asm/intel-family.h>
+ #include <asm/mwait.h>
+ #include <asm/spec-ctrl.h>
++#include <asm/tsc.h>
+ #include <asm/fpu/api.h>
+ 
+ #define INTEL_IDLE_VERSION "0.5.1"
+@@ -1797,6 +1798,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+ 		if (intel_idle_state_needs_timer_stop(state))
+ 			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ 
++		if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
++			mark_tsc_unstable("TSC halts in idle");
++
+ 		state->enter = intel_idle;
+ 		state->enter_s2idle = intel_idle_s2idle;
+ 	}
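The new check encodes a simple rule: any C-state deeper than C1 on a CPU without X86_FEATURE_NONSTOP_TSC means the TSC can halt, so it must be flagged unstable. A standalone illustration of that gating:

/* Illustrative only: gate trust in a clock on a CPU feature bit. */
#include <stdbool.h>
#include <stdio.h>

enum { STATE_C1 = 1, STATE_C3 = 3 };

static void register_state(int cstate, bool nonstop_tsc)
{
	if (cstate > STATE_C1 && !nonstop_tsc)
		printf("TSC halts in idle: mark it unstable\n");
}

int main(void)
{
	register_state(STATE_C3, false);	/* deep idle, old CPU: unusable */
	register_state(STATE_C3, true);		/* modern CPU: TSC keeps running */
	return 0;
}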
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 2975b11b79bf77..22c98c155bd3c1 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -204,7 +204,7 @@ struct bnxt_re_dev {
+ 	struct bnxt_re_nq_record	*nqr;
+ 
+ 	/* Device Resources */
+-	struct bnxt_qplib_dev_attr	dev_attr;
++	struct bnxt_qplib_dev_attr	*dev_attr;
+ 	struct bnxt_qplib_ctx		qplib_ctx;
+ 	struct bnxt_qplib_res		qplib_res;
+ 	struct bnxt_qplib_dpi		dpi_privileged;
+diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+index 1e63f809174837..f51adb0a97e667 100644
+--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
++++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+@@ -357,8 +357,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ 			goto done;
+ 		}
+ 		bnxt_re_copy_err_stats(rdev, stats, err_s);
+-		if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+-		    !rdev->is_virtfn) {
++		if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
++					     rdev->is_virtfn)) {
+ 			rc = bnxt_re_get_ext_stat(rdev, stats);
+ 			if (rc) {
+ 				clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 1ff2e176b0369c..0ed62d3e494c0e 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -161,7 +161,7 @@ static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
+ static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ 						   struct bnxt_qplib_mrw *qplib_mr)
+ {
+-	if (_is_relaxed_ordering_supported(rdev->dev_attr.dev_cap_flags2) &&
++	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
+ 	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ 		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+ }
+@@ -186,7 +186,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
+ 			 struct ib_udata *udata)
+ {
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 
+ 	memset(ib_attr, 0, sizeof(*ib_attr));
+ 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+@@ -275,7 +275,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
+ 		       struct ib_port_attr *port_attr)
+ {
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	int rc;
+ 
+ 	memset(port_attr, 0, sizeof(*port_attr));
+@@ -333,8 +333,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ 
+ 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+-		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+-		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
++		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
++		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
+ }
+ 
+ int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
+@@ -585,7 +585,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+ 	mr->qplib_mr.pd = &pd->qplib_pd;
+ 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+-	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
++	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+@@ -1057,7 +1057,7 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	sq = &qplqp->sq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	align = sizeof(struct sq_send_hdr);
+ 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+@@ -1277,7 +1277,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	rq = &qplqp->rq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (init_attr->srq) {
+ 		struct bnxt_re_srq *srq;
+@@ -1314,7 +1314,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
+@@ -1340,7 +1340,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+ 	sq = &qplqp->sq;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	sq->max_sge = init_attr->cap.max_send_sge;
+ 	entries = init_attr->cap.max_send_wr;
+@@ -1393,7 +1393,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
+@@ -1442,7 +1442,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ 
+ 	rdev = qp->rdev;
+ 	qplqp = &qp->qplib_qp;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 
+ 	/* Setup misc params */
+ 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+@@ -1612,7 +1612,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+ 	ib_pd = ib_qp->pd;
+ 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ 	rdev = pd->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ 
+ 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+@@ -1840,7 +1840,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ 	ib_pd = ib_srq->pd;
+ 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ 	rdev = pd->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ 
+ 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+@@ -1872,6 +1872,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+ 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ 	srq->srq_limit = srq_init_attr->attr.srq_limit;
+ 	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
++	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
++	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
+ 	nq = &rdev->nqr->nq[0];
+ 
+ 	if (udata) {
+@@ -2044,7 +2046,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ {
+ 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ 	struct bnxt_re_dev *rdev = qp->rdev;
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	enum ib_qp_state curr_qp_state, new_qp_state;
+ 	int rc, entries;
+ 	unsigned int flags;
+@@ -3091,7 +3093,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	struct ib_udata *udata = &attrs->driver_udata;
+ 	struct bnxt_re_ucontext *uctx =
+ 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	struct bnxt_qplib_chip_ctx *cctx;
+ 	int cqe = attr->cqe;
+ 	int rc, entries;
+@@ -3226,7 +3228,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+ 
+ 	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
+ 	rdev = cq->rdev;
+-	dev_attr = &rdev->dev_attr;
++	dev_attr = rdev->dev_attr;
+ 	if (!ibcq->uobject) {
+ 		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
+ 		return -EOPNOTSUPP;
+@@ -4199,7 +4201,7 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
+ 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ 
+-	if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
++	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ 		if (rc) {
+ 			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+@@ -4291,7 +4293,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+ 	struct bnxt_re_ucontext *uctx =
+ 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
+ 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
++	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ 	struct bnxt_re_user_mmap_entry *entry;
+ 	struct bnxt_re_uctx_resp resp = {};
+ 	struct bnxt_re_uctx_req ureq = {};
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index c143f273b75967..b29687ec2ea31d 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -153,6 +153,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+ 
+ 	if (!rdev->chip_ctx)
+ 		return;
++
++	kfree(rdev->dev_attr);
++	rdev->dev_attr = NULL;
++
+ 	chip_ctx = rdev->chip_ctx;
+ 	rdev->chip_ctx = NULL;
+ 	rdev->rcfw.res = NULL;
+@@ -166,7 +170,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ {
+ 	struct bnxt_qplib_chip_ctx *chip_ctx;
+ 	struct bnxt_en_dev *en_dev;
+-	int rc;
++	int rc = -ENOMEM;
+ 
+ 	en_dev = rdev->en_dev;
+ 
+@@ -182,7 +186,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ 
+ 	rdev->qplib_res.cctx = rdev->chip_ctx;
+ 	rdev->rcfw.res = &rdev->qplib_res;
+-	rdev->qplib_res.dattr = &rdev->dev_attr;
++	rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
++	if (!rdev->dev_attr)
++		goto free_chip_ctx;
++	rdev->qplib_res.dattr = rdev->dev_attr;
+ 	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
+ 	rdev->qplib_res.en_dev = en_dev;
+ 
+@@ -190,16 +197,20 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ 
+ 	bnxt_re_set_db_offset(rdev);
+ 	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+-	if (rc) {
+-		kfree(rdev->chip_ctx);
+-		rdev->chip_ctx = NULL;
+-		return rc;
+-	}
++	if (rc)
++		goto free_dev_attr;
+ 
+ 	if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ 		ibdev_info(&rdev->ibdev,
+ 			   "platform doesn't support global atomics.");
+ 	return 0;
++free_dev_attr:
++	kfree(rdev->dev_attr);
++	rdev->dev_attr = NULL;
++free_chip_ctx:
++	kfree(rdev->chip_ctx);
++	rdev->chip_ctx = NULL;
++	return rc;
+ }
+ 
+ /* SR-IOV helper functions */
+@@ -221,7 +232,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+ 	struct bnxt_qplib_ctx *ctx;
+ 	int i;
+ 
+-	attr = &rdev->dev_attr;
++	attr = rdev->dev_attr;
+ 	ctx = &rdev->qplib_ctx;
+ 
+ 	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+@@ -235,7 +246,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+ 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ 		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ 			rdev->qplib_ctx.tqm_ctx.qcount[i] =
+-			rdev->dev_attr.tqm_alloc_reqs[i];
++			rdev->dev_attr->tqm_alloc_reqs[i];
+ }
+ 
+ static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+@@ -321,6 +332,8 @@ static void bnxt_re_stop_irq(void *handle)
+ 	int indx;
+ 
+ 	rdev = en_info->rdev;
++	if (!rdev)
++		return;
+ 	rcfw = &rdev->rcfw;
+ 
+ 	for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
+@@ -341,6 +354,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+ 	int indx, rc;
+ 
+ 	rdev = en_info->rdev;
++	if (!rdev)
++		return;
+ 	msix_ent = rdev->nqr->msix_entries;
+ 	rcfw = &rdev->rcfw;
+ 	if (!ent) {
+@@ -1627,12 +1642,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
+ 
+ 	/* Configure and allocate resources for qplib */
+ 	rdev->qplib_res.rcfw = &rdev->rcfw;
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ 	if (rc)
+ 		goto fail;
+ 
+-	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
+-				  rdev->netdev, &rdev->dev_attr);
++	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
+ 	if (rc)
+ 		goto fail;
+ 
+@@ -2032,7 +2046,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
+ 			rdev->pacing.dbr_pacing = false;
+ 		}
+ 	}
+-	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
++	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ 	if (rc)
+ 		goto disable_rcfw;
+ 
+@@ -2356,6 +2370,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
+ 	ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
+ 		   __func__, en_dev->en_state);
+ 	bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
++	bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ 	mutex_unlock(&bnxt_re_mutex);
+ 
+ 	return 0;
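The setup function now follows the usual goto-unwind shape: each resource gets a cleanup label, and a failure jumps to the label that releases everything allocated so far, in reverse order. A standalone sketch, with malloc/free standing in for the kernel allocations:

/* Illustrative only: goto-based error unwinding. */
#include <stdlib.h>

struct ctx { int *chip; int *attr; };

static int setup(struct ctx *c)
{
	int rc = -1;		/* -ENOMEM in the kernel version */

	c->chip = malloc(sizeof(*c->chip));
	if (!c->chip)
		return rc;

	c->attr = malloc(sizeof(*c->attr));
	if (!c->attr)
		goto free_chip;

	if (0)			/* a later step, e.g. mapping the BAR, failing */
		goto free_attr;

	return 0;

free_attr:
	free(c->attr);
	c->attr = NULL;
free_chip:
	free(c->chip);
	c->chip = NULL;
	return rc;
}

int main(void)
{
	struct ctx c = { 0 };
	int rc = setup(&c);

	free(c.attr);
	free(c.chip);
	return rc;
}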
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 96ceec1e8199a6..02922a0987ad7a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -876,14 +876,13 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
+ 	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
+ }
+ 
+-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
+-			 struct net_device *netdev,
+-			 struct bnxt_qplib_dev_attr *dev_attr)
++int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
+ {
++	struct bnxt_qplib_dev_attr *dev_attr;
+ 	int rc;
+ 
+-	res->pdev = pdev;
+ 	res->netdev = netdev;
++	dev_attr = res->dattr;
+ 
+ 	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
+ 	if (rc)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index cbfc49a1a56d7c..711990232de1c9 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -424,9 +424,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
+ int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
+ void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
+-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
+-			 struct net_device *netdev,
+-			 struct bnxt_qplib_dev_attr *dev_attr);
++int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
+ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
+ 			 struct bnxt_qplib_ctx *ctx);
+ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
+@@ -549,6 +547,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+ 		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+ }
+ 
++static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
++					   u16 flags, bool virtfn)
++{
++	/* ext stats are supported if the cap flag is set AND the function is a PF or a Thor2 VF */
++	return (_is_ext_stats_supported(flags) &&
++		((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
++}
++
+ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+ {
+ 	return dev_cap_flags &
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 9df3e3271577de..2e09616736bc7b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -88,9 +88,9 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ 	fw_ver[3] = resp.fw_rsvd;
+ }
+ 
+-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr)
++int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
+ {
++	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
+ 	struct creq_query_func_resp resp = {};
+ 	struct bnxt_qplib_cmdqmsg msg = {};
+ 	struct creq_query_func_resp_sb *sb;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index e6beeb514b7dd8..a1878eec7ba622 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -325,8 +325,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ 			   struct bnxt_qplib_gid *gid, u16 gid_idx,
+ 			   const u8 *smac);
+-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+-			    struct bnxt_qplib_dev_attr *attr);
++int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
+ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ 				  struct bnxt_qplib_rcfw *rcfw,
+ 				  struct bnxt_qplib_ctx *ctx);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 0144e7210d05a1..f5c3e560df58d7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+ 	return tx_timeout;
+ }
+ 
+-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
++static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
+ {
+-	struct hns_roce_v2_priv *priv = hr_dev->priv;
+-	u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ 	u32 timeout = 0;
+ 
+ 	do {
+@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+ 	} while (++timeout < tx_timeout);
+ }
+ 
+-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+-			       struct hns_roce_cmq_desc *desc, int num)
++static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
++				   struct hns_roce_cmq_desc *desc,
++				   int num, u32 tx_timeout)
+ {
+ 	struct hns_roce_v2_priv *priv = hr_dev->priv;
+ 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 	int ret;
+ 	int i;
+ 
+-	spin_lock_bh(&csq->lock);
+-
+ 	tail = csq->head;
+ 
+ 	for (i = 0; i < num; i++) {
+@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 
+ 	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
+ 
+-	hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
++	hns_roce_wait_csq_done(hr_dev, tx_timeout);
+ 	if (hns_roce_cmq_csq_done(hr_dev)) {
+ 		ret = 0;
+ 		for (i = 0; i < num; i++) {
+ 			/* check the result of hardware write back */
+-			desc[i] = csq->desc[tail++];
++			desc_ret = le16_to_cpu(csq->desc[tail++].retval);
+ 			if (tail == csq->desc_num)
+ 				tail = 0;
+-
+-			desc_ret = le16_to_cpu(desc[i].retval);
+ 			if (likely(desc_ret == CMD_EXEC_SUCCESS))
+ 				continue;
+ 
+-			dev_err_ratelimited(hr_dev->dev,
+-					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
+-					    desc->opcode, desc_ret);
+ 			ret = hns_roce_cmd_err_convert_errno(desc_ret);
+ 		}
+ 	} else {
+@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 		ret = -EAGAIN;
+ 	}
+ 
+-	spin_unlock_bh(&csq->lock);
+-
+ 	if (ret)
+ 		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
+ 
+ 	return ret;
+ }
+ 
++static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++			       struct hns_roce_cmq_desc *desc, int num)
++{
++	struct hns_roce_v2_priv *priv = hr_dev->priv;
++	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
++	u16 opcode = le16_to_cpu(desc->opcode);
++	u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
++	u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
++	u32 rsv_tail;
++	int ret;
++	int i;
++
++	while (try_cnt) {
++		try_cnt--;
++
++		spin_lock_bh(&csq->lock);
++		rsv_tail = csq->head;
++		ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
++		if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
++		    try_cnt) {
++			spin_unlock_bh(&csq->lock);
++			mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
++			continue;
++		}
++
++		for (i = 0; i < num; i++) {
++			desc[i] = csq->desc[rsv_tail++];
++			if (rsv_tail == csq->desc_num)
++				rsv_tail = 0;
++		}
++		spin_unlock_bh(&csq->lock);
++		break;
++	}
++
++	if (ret)
++		dev_err_ratelimited(hr_dev->dev,
++				    "Cmdq IO error, opcode = 0x%x, return = %d.\n",
++				    opcode, ret);
++
++	return ret;
++}
++
+ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 			     struct hns_roce_cmq_desc *desc, int num)
+ {
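The new wrapper keeps the per-attempt send in __hns_roce_cmq_send_one() and retries only the POST_MB opcode on -ETIME, up to HNS_ROCE_OPC_POST_MB_TRY_CNT times with a delay in between. A standalone model of that bounded retry loop:

/* Illustrative only: bounded retry with a gap between attempts. */
#include <stdio.h>

#define ETIME	62
#define TRY_CNT	8

static int fails_left = 2;	/* pretend the hardware times out twice */

static int send_one(void)
{
	return fails_left-- > 0 ? -ETIME : 0;
}

int main(void)
{
	int try_cnt = TRY_CNT, ret;

	while (try_cnt) {
		try_cnt--;
		ret = send_one();
		if (ret == -ETIME && try_cnt) {
			/* the kernel sleeps here: mdelay(RETRY_GAP_MSEC) */
			continue;
		}
		break;		/* success, fatal error, or out of tries */
	}
	printf("ret=%d after %d attempt(s)\n", ret, TRY_CNT - try_cnt);
	return ret ? 1 : 0;
}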
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index cbdbc9edbce6ec..91a5665465ffba 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
+ };
+ 
+ #define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
++#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
++#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
+ struct hns_roce_cmdq_tx_timeout_map {
+ 	u16 opcode;
+ 	u32 tx_timeout;
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 67c2d43135a8af..457cea6d990958 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
+ 
+ 	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+ 	req.num_resources = 1;
+-	req.alignment = 1;
++	req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
+ 
+ 	/* Have GDMA start searching from 0 */
+ 	req.allocated_resources = 0;
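req.alignment is expressed in device (GDMA) pages, so hardcoding 1 was only correct when the CPU page size equalled the device page size. Assuming MANA_PAGE_SIZE is 4 KiB, the conversion looks like this standalone check:

/* Illustrative only: alignment expressed in device-page units. */
#include <stdio.h>

#define MANA_PAGE_SIZE 4096u	/* assumed device page size */

int main(void)
{
	unsigned int page_size = 65536;	/* e.g. a 64K-page arm64 kernel */
	unsigned int alignment = page_size / MANA_PAGE_SIZE;

	printf("request %u device page(s) of alignment\n", alignment);	/* 16 */
	return 0;
}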
+diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
+index 505bc47fd575d5..99036afb3aef0b 100644
+--- a/drivers/infiniband/hw/mlx5/ah.c
++++ b/drivers/infiniband/hw/mlx5/ah.c
+@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ 		ah->av.tclass = grh->traffic_class;
+ 	}
+ 
+-	ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
++	ah->av.stat_rate_sl =
++		(mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
+ 
+ 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ 		if (init_attr->xmit_slave)
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 4f6c1968a2ee3c..81cfa74147a183 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 				   struct ib_qp *qp)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
++	bool new = false;
+ 	int err;
+ 
+ 	if (!counter->id) {
+@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 			return err;
+ 		counter->id =
+ 			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
++		new = true;
+ 	}
+ 
+ 	err = mlx5_ib_qp_set_counter(qp, counter);
+@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ 	return 0;
+ 
+ fail_set_counter:
+-	mlx5_ib_counter_dealloc(counter);
+-	counter->id = 0;
++	if (new) {
++		mlx5_ib_counter_dealloc(counter);
++		counter->id = 0;
++	}
+ 
+ 	return err;
+ }
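The new "new" flag fixes an ownership bug: the error path used to free the counter even when the caller had supplied a pre-existing one. A standalone sketch of "only undo what this call allocated":

/* Illustrative only: track ownership so error paths free only their own. */
#include <stdbool.h>
#include <stdio.h>

struct counter { int id; };

static int bind(struct counter *c, bool bind_fails)
{
	bool new = false;	/* same trick as the patch; 'new' is valid C */

	if (!c->id) {
		c->id = 42;	/* allocated by this call */
		new = true;
	}

	if (bind_fails) {
		if (new)	/* only undo our own allocation */
			c->id = 0;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct counter pre = { .id = 7 };

	bind(&pre, true);
	printf("pre-existing id after failed bind: %d\n", pre.id);	/* still 7 */
	return 0;
}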
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index bb02b6adbf2c21..753faa9ad06a88 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+ 
+ 	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+ 
+-	if (!umem_dmabuf->sgt)
++	if (!umem_dmabuf->sgt || !mr)
+ 		return;
+ 
+ 	mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+@@ -1935,7 +1935,8 @@ mlx5_alloc_priv_descs(struct ib_device *device,
+ static void
+ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+ {
+-	if (!mr->umem && !mr->data_direct && mr->descs) {
++	if (!mr->umem && !mr->data_direct &&
++	    mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
+ 		struct ib_device *device = mr->ibmr.device;
+ 		int size = mr->max_descs * mr->desc_size;
+ 		struct mlx5_ib_dev *dev = to_mdev(device);
+@@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ 	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ 	bool is_odp = is_odp_mr(mr);
++	bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
++			!to_ib_umem_dmabuf(mr->umem)->pinned;
+ 	int ret = 0;
+ 
+ 	if (is_odp)
+ 		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 
++	if (is_odp_dma_buf)
++		dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
++
+ 	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ 		ent = mr->mmkey.cache_ent;
+ 		/* upon storing to a clean temp entry - schedule its cleanup */
+@@ -2054,6 +2060,12 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+ 		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ 	}
+ 
++	if (is_odp_dma_buf) {
++		if (!ret)
++			to_ib_umem_dmabuf(mr->umem)->private = NULL;
++		dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
++	}
++
+ 	return ret;
+ }
+ 
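Note how the locking condition is latched into is_odp_dma_buf once, up front, and the same local gates both the lock and the unlock; the pair can therefore never desynchronise even if the MR's state changes in between. A standalone model with a pthread mutex standing in for the dma_resv lock:

/* Illustrative only: conditional lock/unlock gated by one latched flag. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t resv = PTHREAD_MUTEX_INITIALIZER;

struct mr { bool is_odp_dmabuf; void *private; };

static int revoke(struct mr *mr)
{
	bool is_odp_dma_buf = mr->is_odp_dmabuf;	/* latch once */
	int ret = 0;

	if (is_odp_dma_buf)
		pthread_mutex_lock(&resv);

	/* ... revoke work that may change mr state ... */

	if (is_odp_dma_buf) {
		if (!ret)
			mr->private = NULL;
		pthread_mutex_unlock(&resv);
	}
	return ret;
}

int main(void)
{
	struct mr mr = { .is_odp_dmabuf = true, .private = &resv };

	return revoke(&mr);
}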
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 1d3bf56157702d..b4e2a6f9cb9c3d 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+ 	if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
+ 	    mr) {
+ 		xa_unlock(&imr->implicit_children);
++		mlx5r_deref_odp_mkey(&imr->mmkey);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index a43eba9d3572ce..88724d15705d4b 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
+ 	return 0;
+ }
+ 
+-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
++int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
+ {
+ 	u32 stat_rate_support;
+ 
+-	if (rate == IB_RATE_PORT_CURRENT)
++	if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
+ 		return 0;
+ 
+ 	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
+@@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ 		       sizeof(grh->dgid.raw));
+ 	}
+ 
+-	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
++	err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
+ 	if (err < 0)
+ 		return err;
+ 	MLX5_SET(ads, path, stat_rate, err);
+@@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+ 		MLX5_SET(dctc, dctc, counter_set_id, set_id);
++
++		qp->port = attr->port_num;
+ 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ 		struct mlx5_ib_modify_qp_resp resp = {};
+ 		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+@@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ 	}
+ 
+ 	if (qp_attr_mask & IB_QP_PORT)
+-		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
++		qp_attr->port_num = mqp->port;
+ 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ 		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ 	if (qp_attr_mask & IB_QP_AV) {
+diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
+index b6ee7c3ee1ca1b..2530e7730635f3 100644
+--- a/drivers/infiniband/hw/mlx5/qp.h
++++ b/drivers/infiniband/hw/mlx5/qp.h
+@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+ int mlx5_ib_qp_event_init(void);
+ void mlx5_ib_qp_event_cleanup(void);
++int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
+ #endif /* _MLX5_IB_QP_H */
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index 887fd6fa3ba930..793f3c5c4d0126 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
+ 	ib_dealloc_pd(dev->umrc.pd);
+ }
+ 
+-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+-{
+-	struct umr_common *umrc = &dev->umrc;
+-	struct ib_qp_attr attr;
+-	int err;
+-
+-	attr.qp_state = IB_QPS_RESET;
+-	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+-	if (err) {
+-		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+-		goto err;
+-	}
+-
+-	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+-	if (err)
+-		goto err;
+-
+-	umrc->state = MLX5_UMR_STATE_ACTIVE;
+-	return 0;
+-
+-err:
+-	umrc->state = MLX5_UMR_STATE_ERR;
+-	return err;
+-}
+ 
+ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ 			       struct mlx5r_umr_wqe *wqe, bool with_data)
+@@ -302,6 +278,61 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ 	return err;
+ }
+ 
++static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
++			     struct mlx5r_umr_context *umr_context,
++			     struct mlx5r_umr_wqe *wqe, bool with_data)
++{
++	struct umr_common *umrc = &dev->umrc;
++	struct ib_qp_attr attr;
++	int err;
++
++	mutex_lock(&umrc->lock);
++	/* Preventing any further WRs to be sent now */
++	if (umrc->state != MLX5_UMR_STATE_RECOVER) {
++		mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
++			     umrc->state);
++		umrc->state = MLX5_UMR_STATE_RECOVER;
++	}
++	mutex_unlock(&umrc->lock);
++
++	/* Send a final/barrier WR (the failed one) and wait for its completion.
++	 * This ensures that all the previous WRs got a completion before
++	 * we set the QP state to RESET.
++	 */
++	err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
++				  with_data);
++	if (err) {
++		mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
++		goto err;
++	}
++
++	/* Since the QP is in an error state, it will only receive
++	 * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier,
++	 * we don't care about its status.
++	 */
++	wait_for_completion(&umr_context->done);
++
++	attr.qp_state = IB_QPS_RESET;
++	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
++	if (err) {
++		mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
++		goto err;
++	}
++
++	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
++	if (err) {
++		mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
++		goto err;
++	}
++
++	umrc->state = MLX5_UMR_STATE_ACTIVE;
++	return 0;
++
++err:
++	umrc->state = MLX5_UMR_STATE_ERR;
++	return err;
++}
++
+ static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct mlx5_ib_umr_context *context =
+@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+ 		mlx5_ib_warn(dev,
+ 			"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
+ 			umr_context.status, mkey);
+-		mutex_lock(&umrc->lock);
+-		err = mlx5r_umr_recover(dev);
+-		mutex_unlock(&umrc->lock);
++		err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
+ 		if (err)
+ 			mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ 				     err);
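The recovery comment describes a barrier trick: because the QP completes work requests in order, re-posting the failed WR and waiting for its (flush-error) completion proves every earlier WR has completed, making it safe to reset the QP. A standalone toy queue showing that ordering argument:

/* Illustrative only: an in-order queue makes one marker a full barrier. */
#include <stdio.h>

#define QLEN	8
#define BARRIER	(-1)

int main(void)
{
	int queue[QLEN], head = 0, tail = 0;

	queue[head++] = 1;		/* WRs already in flight */
	queue[head++] = 2;
	queue[head++] = BARRIER;	/* re-post the failed WR as a barrier */

	while (tail < head) {		/* wait_for_completion() */
		int done = queue[tail++];

		if (done == BARRIER)
			printf("barrier flushed: earlier WRs done, safe to reset\n");
	}
	return 0;
}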
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 9f424acf474e94..e540092d664d27 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
+ 	/*
+ 	 * Enable fault control interrupt.
+ 	 */
++	guard(rwsem_read)(&dmar_global_lock);
+ 	for_each_iommu(iommu, drhd) {
+ 		u32 fault_status;
+ 		int ret;
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 8f75c11a3ec48e..9ab5371c35389d 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3155,7 +3155,14 @@ int __init intel_iommu_init(void)
+ 		iommu_device_sysfs_add(&iommu->iommu, NULL,
+ 				       intel_iommu_groups,
+ 				       "%s", iommu->name);
++		/*
++		 * The iommu device probe is protected by the iommu_probe_device_lock.
++		 * Release the dmar_global_lock before entering the device probe path
++		 * to avoid an unnecessary lock-order splat.
++		 */
++		up_read(&dmar_global_lock);
+ 		iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
++		down_read(&dmar_global_lock);
+ 
+ 		iommu_pmu_register(iommu);
+ 	}
+@@ -4380,9 +4387,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
+ {
+ 	struct device *dev = data;
+ 
+-	if (dev != &pdev->dev)
+-		return 0;
+-
+ 	return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
+ }
+ 
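The init path deliberately drops dmar_global_lock around iommu_device_register(), because device probe takes iommu_probe_device_lock and holding both would establish a needless lock ordering. A standalone model of the drop-call-retake shape, which is only safe when, as here, the loop state survives the unlocked window:

/* Illustrative only: drop an outer lock around a nested-lock call. */
#include <pthread.h>

static pthread_rwlock_t global_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;

static void register_device(void)
{
	pthread_mutex_lock(&probe_lock);	/* inner lock, own ordering */
	/* ... device probe ... */
	pthread_mutex_unlock(&probe_lock);
}

static void init_all(void)
{
	pthread_rwlock_rdlock(&global_lock);
	/* ... per-unit setup under the global lock ... */
	pthread_rwlock_unlock(&global_lock);	/* avoid global->probe nesting */
	register_device();
	pthread_rwlock_rdlock(&global_lock);	/* re-take and carry on */
	pthread_rwlock_unlock(&global_lock);
}

int main(void)
{
	init_all();
	return 0;
}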
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index ee9f7cecd78e0e..555dc06b942287 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3790,10 +3790,6 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		break;
+ 
+ 	case STATUSTYPE_TABLE: {
+-		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+-
+-		watermark_percentage += ic->journal_entries / 2;
+-		do_div(watermark_percentage, ic->journal_entries);
+ 		arg_count = 3;
+ 		arg_count += !!ic->meta_dev;
+ 		arg_count += ic->sectors_per_block != 1;
+@@ -3826,6 +3822,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
+ 		if (ic->mode == 'J') {
++			__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
++
++			watermark_percentage += ic->journal_entries / 2;
++			do_div(watermark_percentage, ic->journal_entries);
+ 			DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
+ 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ 		}
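Moving the watermark computation inside the mode == 'J' branch is not just cosmetic: outside journal mode ic->journal_entries can plausibly be zero, and the do_div() would divide by zero. A standalone illustration of guarding the division behind the mode check:

/* Illustrative only: compute the watermark only when a journal exists. */
#include <stdio.h>

int main(void)
{
	char mode = 'B';			/* e.g. bitmap mode: no journal */
	unsigned int journal_entries = 0;	/* plausible outside 'J' mode */
	unsigned int threshold = 0;

	if (mode == 'J') {			/* guard the division */
		unsigned long long pct =
			(unsigned long long)(journal_entries - threshold) * 100;

		pct += journal_entries / 2;
		pct /= journal_entries;		/* safe: 'J' implies entries > 0 */
		printf("journal_watermark:%llu\n", pct);
	}
	return 0;
}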
+diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
+index b6f8e2dc7729fb..3f3d29af1be474 100644
+--- a/drivers/md/dm-vdo/dedupe.c
++++ b/drivers/md/dm-vdo/dedupe.c
+@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
+ 
+ 	vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
+ 	vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
++	spin_lock_init(&zones->lock);
+ 
+ 	/*
+ 	 * Since we will save up the timeouts that would have been reported but were ratelimited,
+diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
+index 6989972eebc306..10687722d14c08 100644
+--- a/drivers/net/dsa/realtek/Kconfig
++++ b/drivers/net/dsa/realtek/Kconfig
+@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
+ 	help
+ 	  Select to enable support for Realtek RTL8366RB.
+ 
++config NET_DSA_REALTEK_RTL8366RB_LEDS
++	bool "Support RTL8366RB LED control"
++	depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
++	depends on NET_DSA_REALTEK_RTL8366RB
++	default NET_DSA_REALTEK_RTL8366RB
++
+ endif
+diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
+index 35491dc20d6d6e..17367bcba496c1 100644
+--- a/drivers/net/dsa/realtek/Makefile
++++ b/drivers/net/dsa/realtek/Makefile
+@@ -12,4 +12,7 @@ endif
+ 
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+ rtl8366-objs 				:= rtl8366-core.o rtl8366rb.o
++ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
++rtl8366-objs 				+= rtl8366rb-leds.o
++endif
+ obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
+diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+new file mode 100644
+index 00000000000000..99c890681ae607
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
+@@ -0,0 +1,177 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bitops.h>
++#include <linux/regmap.h>
++#include <net/dsa.h>
++#include "rtl83xx.h"
++#include "rtl8366rb.h"
++
++static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
++{
++	switch (led_group) {
++	case 0:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 1:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 2:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	case 3:
++		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
++	default:
++		return 0;
++	}
++}
++
++static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
++{
++	struct realtek_priv *priv = led->priv;
++	u8 led_group = led->led_group;
++	u8 port_num = led->port_num;
++	int ret;
++	u32 val;
++
++	ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
++			  &val);
++	if (ret) {
++		dev_err(priv->dev, "error reading LED on port %d group %d\n",
++			led_group, port_num);
++		return ret;
++	}
++
++	return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
++}
++
++static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
++{
++	struct realtek_priv *priv = led->priv;
++	u8 led_group = led->led_group;
++	u8 port_num = led->port_num;
++	int ret;
++
++	ret = regmap_update_bits(priv->map,
++				 RTL8366RB_LED_X_X_CTRL_REG(led_group),
++				 rtl8366rb_led_group_port_mask(led_group,
++							       port_num),
++				 enable ? 0xffff : 0);
++	if (ret) {
++		dev_err(priv->dev, "error updating LED on port %d group %d\n",
++			led_group, port_num);
++		return ret;
++	}
++
++	/* Change the LED group to manually controlled LEDs if required */
++	ret = rb8366rb_set_ledgroup_mode(priv, led_group,
++					 RTL8366RB_LEDGROUP_FORCE);
++
++	if (ret) {
++		dev_err(priv->dev, "error updating LED GROUP group %d\n",
++			led_group);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int
++rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
++				       enum led_brightness brightness)
++{
++	struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
++						 cdev);
++
++	return rb8366rb_set_port_led(led, brightness == LED_ON);
++}
++
++static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
++			       struct fwnode_handle *led_fwnode)
++{
++	struct rtl8366rb *rb = priv->chip_data;
++	struct led_init_data init_data = { };
++	enum led_default_state state;
++	struct rtl8366rb_led *led;
++	u32 led_group;
++	int ret;
++
++	ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
++	if (ret)
++		return ret;
++
++	if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
++		dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
++			 led_group, dp->index);
++		return -EINVAL;
++	}
++
++	led = &rb->leds[dp->index][led_group];
++	led->port_num = dp->index;
++	led->led_group = led_group;
++	led->priv = priv;
++
++	state = led_init_default_state_get(led_fwnode);
++	switch (state) {
++	case LEDS_DEFSTATE_ON:
++		led->cdev.brightness = 1;
++		rb8366rb_set_port_led(led, 1);
++		break;
++	case LEDS_DEFSTATE_KEEP:
++		led->cdev.brightness =
++			rb8366rb_get_port_led(led);
++		break;
++	case LEDS_DEFSTATE_OFF:
++	default:
++		led->cdev.brightness = 0;
++		rb8366rb_set_port_led(led, 0);
++	}
++
++	led->cdev.max_brightness = 1;
++	led->cdev.brightness_set_blocking =
++		rtl8366rb_cled_brightness_set_blocking;
++	init_data.fwnode = led_fwnode;
++	init_data.devname_mandatory = true;
++
++	init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
++					 dp->ds->index, dp->index, led_group);
++	if (!init_data.devicename)
++		return -ENOMEM;
++
++	ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
++	if (ret) {
++		dev_warn(priv->dev, "Failed to init LED %d for port %d",
++			 led_group, dp->index);
++		return ret;
++	}
++
++	return 0;
++}
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++	struct dsa_switch *ds = &priv->ds;
++	struct device_node *leds_np;
++	struct dsa_port *dp;
++	int ret = 0;
++
++	dsa_switch_for_each_port(dp, ds) {
++		if (!dp->dn)
++			continue;
++
++		leds_np = of_get_child_by_name(dp->dn, "leds");
++		if (!leds_np) {
++			dev_dbg(priv->dev, "No leds defined for port %d",
++				dp->index);
++			continue;
++		}
++
++		for_each_child_of_node_scoped(leds_np, led_np) {
++			ret = rtl8366rb_setup_led(priv, dp,
++						  of_fwnode_handle(led_np));
++			if (ret)
++				break;
++		}
++
++		of_node_put(leds_np);
++		if (ret)
++			return ret;
++	}
++	return 0;
++}
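The brightness callback recovers the driver's per-LED state from the embedded led_classdev with container_of(), the standard pattern for subsystem callbacks that only hand back the embedded object. A standalone, compilable version of that pattern:

/* Illustrative only: recover the wrapper struct from an embedded member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct led_classdev { int brightness; };

struct rtl_led {
	int port_num;
	int led_group;
	struct led_classdev cdev;	/* embedded subsystem object */
};

static int set_brightness(struct led_classdev *ldev, int value)
{
	struct rtl_led *led = container_of(ldev, struct rtl_led, cdev);

	printf("port %d group %d -> %d\n", led->port_num, led->led_group, value);
	return 0;
}

int main(void)
{
	struct rtl_led led = { .port_num = 2, .led_group = 1 };

	return set_brightness(&led.cdev, 1);
}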
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
+index 23374178a17607..6bf8427f14fbd7 100644
+--- a/drivers/net/dsa/realtek/rtl8366rb.c
++++ b/drivers/net/dsa/realtek/rtl8366rb.c
+@@ -26,11 +26,7 @@
+ #include "realtek-smi.h"
+ #include "realtek-mdio.h"
+ #include "rtl83xx.h"
+-
+-#define RTL8366RB_PORT_NUM_CPU		5
+-#define RTL8366RB_NUM_PORTS		6
+-#define RTL8366RB_PHY_NO_MAX		4
+-#define RTL8366RB_PHY_ADDR_MAX		31
++#include "rtl8366rb.h"
+ 
+ /* Switch Global Configuration register */
+ #define RTL8366RB_SGCR				0x0000
+@@ -175,39 +171,6 @@
+  */
+ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG	0x037f
+ 
+-/* LED control registers */
+-/* The LED blink rate is global; it is used by all triggers in all groups. */
+-#define RTL8366RB_LED_BLINKRATE_REG		0x0430
+-#define RTL8366RB_LED_BLINKRATE_MASK		0x0007
+-#define RTL8366RB_LED_BLINKRATE_28MS		0x0000
+-#define RTL8366RB_LED_BLINKRATE_56MS		0x0001
+-#define RTL8366RB_LED_BLINKRATE_84MS		0x0002
+-#define RTL8366RB_LED_BLINKRATE_111MS		0x0003
+-#define RTL8366RB_LED_BLINKRATE_222MS		0x0004
+-#define RTL8366RB_LED_BLINKRATE_446MS		0x0005
+-
+-/* LED trigger event for each group */
+-#define RTL8366RB_LED_CTRL_REG			0x0431
+-#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
+-	(4 * (led_group))
+-#define RTL8366RB_LED_CTRL_MASK(led_group)	\
+-	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+-
+-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+- */
+-#define RTL8366RB_LED_0_1_CTRL_REG		0x0432
+-#define RTL8366RB_LED_2_3_CTRL_REG		0x0433
+-#define RTL8366RB_LED_X_X_CTRL_REG(led_group)	\
+-	((led_group) <= 1 ? \
+-		RTL8366RB_LED_0_1_CTRL_REG : \
+-		RTL8366RB_LED_2_3_CTRL_REG)
+-#define RTL8366RB_LED_0_X_CTRL_MASK		GENMASK(5, 0)
+-#define RTL8366RB_LED_X_1_CTRL_MASK		GENMASK(11, 6)
+-#define RTL8366RB_LED_2_X_CTRL_MASK		GENMASK(5, 0)
+-#define RTL8366RB_LED_X_3_CTRL_MASK		GENMASK(11, 6)
+-
+ #define RTL8366RB_MIB_COUNT			33
+ #define RTL8366RB_GLOBAL_MIB_COUNT		1
+ #define RTL8366RB_MIB_COUNTER_PORT_OFFSET	0x0050
+@@ -243,7 +206,6 @@
+ #define RTL8366RB_PORT_STATUS_AN_MASK		0x0080
+ 
+ #define RTL8366RB_NUM_VLANS		16
+-#define RTL8366RB_NUM_LEDGROUPS		4
+ #define RTL8366RB_NUM_VIDS		4096
+ #define RTL8366RB_PRIORITYMAX		7
+ #define RTL8366RB_NUM_FIDS		8
+@@ -350,46 +312,6 @@
+ #define RTL8366RB_GREEN_FEATURE_TX	BIT(0)
+ #define RTL8366RB_GREEN_FEATURE_RX	BIT(2)
+ 
+-enum rtl8366_ledgroup_mode {
+-	RTL8366RB_LEDGROUP_OFF			= 0x0,
+-	RTL8366RB_LEDGROUP_DUP_COL		= 0x1,
+-	RTL8366RB_LEDGROUP_LINK_ACT		= 0x2,
+-	RTL8366RB_LEDGROUP_SPD1000		= 0x3,
+-	RTL8366RB_LEDGROUP_SPD100		= 0x4,
+-	RTL8366RB_LEDGROUP_SPD10		= 0x5,
+-	RTL8366RB_LEDGROUP_SPD1000_ACT		= 0x6,
+-	RTL8366RB_LEDGROUP_SPD100_ACT		= 0x7,
+-	RTL8366RB_LEDGROUP_SPD10_ACT		= 0x8,
+-	RTL8366RB_LEDGROUP_SPD100_10_ACT	= 0x9,
+-	RTL8366RB_LEDGROUP_FIBER		= 0xa,
+-	RTL8366RB_LEDGROUP_AN_FAULT		= 0xb,
+-	RTL8366RB_LEDGROUP_LINK_RX		= 0xc,
+-	RTL8366RB_LEDGROUP_LINK_TX		= 0xd,
+-	RTL8366RB_LEDGROUP_MASTER		= 0xe,
+-	RTL8366RB_LEDGROUP_FORCE		= 0xf,
+-
+-	__RTL8366RB_LEDGROUP_MODE_MAX
+-};
+-
+-struct rtl8366rb_led {
+-	u8 port_num;
+-	u8 led_group;
+-	struct realtek_priv *priv;
+-	struct led_classdev cdev;
+-};
+-
+-/**
+- * struct rtl8366rb - RTL8366RB-specific data
+- * @max_mtu: per-port max MTU setting
+- * @pvid_enabled: if PVID is set for respective port
+- * @leds: per-port and per-ledgroup led info
+- */
+-struct rtl8366rb {
+-	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+-	bool pvid_enabled[RTL8366RB_NUM_PORTS];
+-	struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+-};
+-
+ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ 	{ 0,  0, 4, "IfInOctets"				},
+ 	{ 0,  4, 4, "EtherStatsOctets"				},
+@@ -830,9 +752,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ 	return 0;
+ }
+ 
+-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+-				      u8 led_group,
+-				      enum rtl8366_ledgroup_mode mode)
++/* This code is also used with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++			       u8 led_group,
++			       enum rtl8366_ledgroup_mode mode)
+ {
+ 	int ret;
+ 	u32 val;
+@@ -849,144 +772,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ 	return 0;
+ }
+ 
+-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+-{
+-	switch (led_group) {
+-	case 0:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 1:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 2:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	case 3:
+-		return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+-{
+-	struct realtek_priv *priv = led->priv;
+-	u8 led_group = led->led_group;
+-	u8 port_num = led->port_num;
+-	int ret;
+-	u32 val;
+-
+-	ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+-			  &val);
+-	if (ret) {
+-		dev_err(priv->dev, "error reading LED on port %d group %d\n",
+-			led_group, port_num);
+-		return ret;
+-	}
+-
+-	return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+-}
+-
+-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+-{
+-	struct realtek_priv *priv = led->priv;
+-	u8 led_group = led->led_group;
+-	u8 port_num = led->port_num;
+-	int ret;
+-
+-	ret = regmap_update_bits(priv->map,
+-				 RTL8366RB_LED_X_X_CTRL_REG(led_group),
+-				 rtl8366rb_led_group_port_mask(led_group,
+-							       port_num),
+-				 enable ? 0xffff : 0);
+-	if (ret) {
+-		dev_err(priv->dev, "error updating LED on port %d group %d\n",
+-			led_group, port_num);
+-		return ret;
+-	}
+-
+-	/* Change the LED group to manual controlled LEDs if required */
+-	ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+-					 RTL8366RB_LEDGROUP_FORCE);
+-
+-	if (ret) {
+-		dev_err(priv->dev, "error updating LED GROUP group %d\n",
+-			led_group);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static int
+-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+-				       enum led_brightness brightness)
+-{
+-	struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+-						 cdev);
+-
+-	return rb8366rb_set_port_led(led, brightness == LED_ON);
+-}
+-
+-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+-			       struct fwnode_handle *led_fwnode)
+-{
+-	struct rtl8366rb *rb = priv->chip_data;
+-	struct led_init_data init_data = { };
+-	enum led_default_state state;
+-	struct rtl8366rb_led *led;
+-	u32 led_group;
+-	int ret;
+-
+-	ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+-	if (ret)
+-		return ret;
+-
+-	if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+-		dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+-			 led_group, dp->index);
+-		return -EINVAL;
+-	}
+-
+-	led = &rb->leds[dp->index][led_group];
+-	led->port_num = dp->index;
+-	led->led_group = led_group;
+-	led->priv = priv;
+-
+-	state = led_init_default_state_get(led_fwnode);
+-	switch (state) {
+-	case LEDS_DEFSTATE_ON:
+-		led->cdev.brightness = 1;
+-		rb8366rb_set_port_led(led, 1);
+-		break;
+-	case LEDS_DEFSTATE_KEEP:
+-		led->cdev.brightness =
+-			rb8366rb_get_port_led(led);
+-		break;
+-	case LEDS_DEFSTATE_OFF:
+-	default:
+-		led->cdev.brightness = 0;
+-		rb8366rb_set_port_led(led, 0);
+-	}
+-
+-	led->cdev.max_brightness = 1;
+-	led->cdev.brightness_set_blocking =
+-		rtl8366rb_cled_brightness_set_blocking;
+-	init_data.fwnode = led_fwnode;
+-	init_data.devname_mandatory = true;
+-
+-	init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+-					 dp->ds->index, dp->index, led_group);
+-	if (!init_data.devicename)
+-		return -ENOMEM;
+-
+-	ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+-	if (ret) {
+-		dev_warn(priv->dev, "Failed to init LED %d for port %d",
+-			 led_group, dp->index);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
++/* This code is also used with LEDs disabled */
+ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ {
+ 	int ret = 0;
+@@ -1007,38 +793,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+ 	return ret;
+ }
+ 
+-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
+-{
+-	struct dsa_switch *ds = &priv->ds;
+-	struct device_node *leds_np;
+-	struct dsa_port *dp;
+-	int ret = 0;
+-
+-	dsa_switch_for_each_port(dp, ds) {
+-		if (!dp->dn)
+-			continue;
+-
+-		leds_np = of_get_child_by_name(dp->dn, "leds");
+-		if (!leds_np) {
+-			dev_dbg(priv->dev, "No leds defined for port %d",
+-				dp->index);
+-			continue;
+-		}
+-
+-		for_each_child_of_node_scoped(leds_np, led_np) {
+-			ret = rtl8366rb_setup_led(priv, dp,
+-						  of_fwnode_handle(led_np));
+-			if (ret)
+-				break;
+-		}
+-
+-		of_node_put(leds_np);
+-		if (ret)
+-			return ret;
+-	}
+-	return 0;
+-}
+-
+ static int rtl8366rb_setup(struct dsa_switch *ds)
+ {
+ 	struct realtek_priv *priv = ds->priv;
+diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
+new file mode 100644
+index 00000000000000..685ff3275faa17
+--- /dev/null
++++ b/drivers/net/dsa/realtek/rtl8366rb.h
+@@ -0,0 +1,107 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef _RTL8366RB_H
++#define _RTL8366RB_H
++
++#include "realtek.h"
++
++#define RTL8366RB_PORT_NUM_CPU		5
++#define RTL8366RB_NUM_PORTS		6
++#define RTL8366RB_PHY_NO_MAX		4
++#define RTL8366RB_NUM_LEDGROUPS		4
++#define RTL8366RB_PHY_ADDR_MAX		31
++
++/* LED control registers */
++/* The LED blink rate is global; it is used by all triggers in all groups. */
++#define RTL8366RB_LED_BLINKRATE_REG		0x0430
++#define RTL8366RB_LED_BLINKRATE_MASK		0x0007
++#define RTL8366RB_LED_BLINKRATE_28MS		0x0000
++#define RTL8366RB_LED_BLINKRATE_56MS		0x0001
++#define RTL8366RB_LED_BLINKRATE_84MS		0x0002
++#define RTL8366RB_LED_BLINKRATE_111MS		0x0003
++#define RTL8366RB_LED_BLINKRATE_222MS		0x0004
++#define RTL8366RB_LED_BLINKRATE_446MS		0x0005
++
++/* LED trigger event for each group */
++#define RTL8366RB_LED_CTRL_REG			0x0431
++#define RTL8366RB_LED_CTRL_OFFSET(led_group)	\
++	(4 * (led_group))
++#define RTL8366RB_LED_CTRL_MASK(led_group)	\
++	(0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
++
++/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
++ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
++ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
++ */
++#define RTL8366RB_LED_0_1_CTRL_REG		0x0432
++#define RTL8366RB_LED_2_3_CTRL_REG		0x0433
++#define RTL8366RB_LED_X_X_CTRL_REG(led_group)	\
++	((led_group) <= 1 ? \
++		RTL8366RB_LED_0_1_CTRL_REG : \
++		RTL8366RB_LED_2_3_CTRL_REG)
++#define RTL8366RB_LED_0_X_CTRL_MASK		GENMASK(5, 0)
++#define RTL8366RB_LED_X_1_CTRL_MASK		GENMASK(11, 6)
++#define RTL8366RB_LED_2_X_CTRL_MASK		GENMASK(5, 0)
++#define RTL8366RB_LED_X_3_CTRL_MASK		GENMASK(11, 6)
++
++enum rtl8366_ledgroup_mode {
++	RTL8366RB_LEDGROUP_OFF			= 0x0,
++	RTL8366RB_LEDGROUP_DUP_COL		= 0x1,
++	RTL8366RB_LEDGROUP_LINK_ACT		= 0x2,
++	RTL8366RB_LEDGROUP_SPD1000		= 0x3,
++	RTL8366RB_LEDGROUP_SPD100		= 0x4,
++	RTL8366RB_LEDGROUP_SPD10		= 0x5,
++	RTL8366RB_LEDGROUP_SPD1000_ACT		= 0x6,
++	RTL8366RB_LEDGROUP_SPD100_ACT		= 0x7,
++	RTL8366RB_LEDGROUP_SPD10_ACT		= 0x8,
++	RTL8366RB_LEDGROUP_SPD100_10_ACT	= 0x9,
++	RTL8366RB_LEDGROUP_FIBER		= 0xa,
++	RTL8366RB_LEDGROUP_AN_FAULT		= 0xb,
++	RTL8366RB_LEDGROUP_LINK_RX		= 0xc,
++	RTL8366RB_LEDGROUP_LINK_TX		= 0xd,
++	RTL8366RB_LEDGROUP_MASTER		= 0xe,
++	RTL8366RB_LEDGROUP_FORCE		= 0xf,
++
++	__RTL8366RB_LEDGROUP_MODE_MAX
++};
++
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++
++struct rtl8366rb_led {
++	u8 port_num;
++	u8 led_group;
++	struct realtek_priv *priv;
++	struct led_classdev cdev;
++};
++
++int rtl8366rb_setup_leds(struct realtek_priv *priv);
++
++#else
++
++static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
++{
++	return 0;
++}
++
++#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
++
++/**
++ * struct rtl8366rb - RTL8366RB-specific data
++ * @max_mtu: per-port max MTU setting
++ * @pvid_enabled: if PVID is set for respective port
++ * @leds: per-port and per-ledgroup led info
++ */
++struct rtl8366rb {
++	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
++	bool pvid_enabled[RTL8366RB_NUM_PORTS];
++#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
++	struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
++#endif
++};
++
++/* This code is also used with LEDs disabled */
++int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
++			       u8 led_group,
++			       enum rtl8366_ledgroup_mode mode);
++
++#endif /* _RTL8366RB_H */
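
The new header compiles the LED support out entirely when CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS is unset: callers keep calling rtl8366rb_setup_leds(), which collapses into a static inline stub, so no #ifdef is needed at any call site. A minimal standalone sketch of that stub pattern (FEATURE_LEDS and setup_leds() are illustrative names, not from the patch):

#include <stdio.h>

#define FEATURE_LEDS 0	/* stand-in for the Kconfig symbol */

#if FEATURE_LEDS
/* Real implementation, built only when the feature is enabled. */
int setup_leds(void)
{
	puts("configuring LEDs");
	return 0;
}
#else
/* Stub: compiles to a no-op, so callers never need an #ifdef. */
static inline int setup_leds(void)
{
	return 0;
}
#endif

int main(void)
{
	return setup_leds();	/* identical call site either way */
}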
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 5740c98d8c9f03..2847278d9cd48e 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -1279,6 +1279,8 @@ struct macb {
+ 	struct clk		*rx_clk;
+ 	struct clk		*tsu_clk;
+ 	struct net_device	*dev;
++	/* Protects hw_stats and ethtool_stats */
++	spinlock_t		stats_lock;
+ 	union {
+ 		struct macb_stats	macb;
+ 		struct gem_stats	gem;
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index daa416fb1724e9..af2debcaf7dcce 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1987,10 +1987,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ 
+ 		if (status & MACB_BIT(ISR_ROVR)) {
+ 			/* We missed at least one packet */
++			spin_lock(&bp->stats_lock);
+ 			if (macb_is_gem(bp))
+ 				bp->hw_stats.gem.rx_overruns++;
+ 			else
+ 				bp->hw_stats.macb.rx_overruns++;
++			spin_unlock(&bp->stats_lock);
+ 
+ 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
+@@ -3111,6 +3113,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ 	if (!netif_running(bp->dev))
+ 		return nstat;
+ 
++	spin_lock_irq(&bp->stats_lock);
+ 	gem_update_stats(bp);
+ 
+ 	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+@@ -3140,6 +3143,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ 	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ 	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ 	nstat->tx_fifo_errors = hwstat->tx_underrun;
++	spin_unlock_irq(&bp->stats_lock);
+ 
+ 	return nstat;
+ }
+@@ -3147,12 +3151,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
+ static void gem_get_ethtool_stats(struct net_device *dev,
+ 				  struct ethtool_stats *stats, u64 *data)
+ {
+-	struct macb *bp;
++	struct macb *bp = netdev_priv(dev);
+ 
+-	bp = netdev_priv(dev);
++	spin_lock_irq(&bp->stats_lock);
+ 	gem_update_stats(bp);
+ 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ 			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
++	spin_unlock_irq(&bp->stats_lock);
+ }
+ 
+ static int gem_get_sset_count(struct net_device *dev, int sset)
+@@ -3202,6 +3207,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ 		return gem_get_stats(bp);
+ 
+ 	/* read stats from hardware */
++	spin_lock_irq(&bp->stats_lock);
+ 	macb_update_stats(bp);
+ 
+ 	/* Convert HW stats into netdevice stats */
+@@ -3235,6 +3241,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
+ 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ 	nstat->tx_fifo_errors = hwstat->tx_underruns;
+ 	/* Don't know about heartbeat or window errors... */
++	spin_unlock_irq(&bp->stats_lock);
+ 
+ 	return nstat;
+ }
+@@ -5106,6 +5113,7 @@ static int macb_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 	spin_lock_init(&bp->lock);
++	spin_lock_init(&bp->stats_lock);
+ 
+ 	/* setup capabilities */
+ 	macb_configure_caps(bp, macb_config);
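
Note the lock-flavor asymmetry in the hunks above: macb_interrupt() takes plain spin_lock() because it already runs in hard-IRQ context, while the process-context stats readers take spin_lock_irq() so the interrupt handler cannot fire on the same CPU while the lock is held and deadlock on it. A rough userspace analogue, with a signal standing in for the hardware interrupt (illustrative only, not from the patch):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t rx_overruns;	/* updated by the "interrupt" */

static void on_alarm(int signo)
{
	(void)signo;
	rx_overruns++;		/* interrupt context: no masking needed */
}

int main(void)
{
	sigset_t block, old;
	int snapshot;

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	signal(SIGALRM, on_alarm);
	alarm(1);

	/* Process context: mask the asynchronous updater around the
	 * critical section, as spin_lock_irq() does in gem_get_stats(). */
	sigprocmask(SIG_BLOCK, &block, &old);
	snapshot = rx_overruns;		/* consistent view */
	sigprocmask(SIG_SETMASK, &old, NULL);

	sleep(2);
	printf("snapshot=%d, final=%d\n", snapshot, (int)rx_overruns);
	return 0;
}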
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 535969fa0fdbef..a30a5d3ce59e9d 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -146,6 +146,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ 	return 0;
+ }
+ 
++/**
++ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
++ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
++ * @count: Number of Tx buffer descriptors which need to be unmapped
++ * @i: Index of the last successfully mapped Tx buffer descriptor
++ */
++static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
++{
++	while (count--) {
++		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
++
++		enetc_free_tx_frame(tx_ring, tx_swbd);
++		if (i == 0)
++			i = tx_ring->bd_count;
++		i--;
++	}
++}
++
+ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ {
+ 	bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+@@ -236,9 +254,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ 		}
+ 
+ 		if (do_onestep_tstamp) {
+-			u32 lo, hi, val;
+-			u64 sec, nsec;
++			__be32 new_sec_l, new_nsec;
++			u32 lo, hi, nsec, val;
++			__be16 new_sec_h;
+ 			u8 *data;
++			u64 sec;
+ 
+ 			lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ 			hi = enetc_rd_hot(hw, ENETC_SICTR1);
+@@ -252,13 +272,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ 			/* Update originTimestamp field of Sync packet
+ 			 * - 48 bits seconds field
+ 			 * - 32 bits nanoseconds field
++			 *
++			 * In addition, the UDP checksum needs to be updated
++			 * In addition, the UDP checksum must be updated by
++			 * software after the originTimestamp field has been
++			 * updated; otherwise the hardware will calculate a
++			 * wrong checksum when it updates the correction
++			 * field and writes it into the packet.
+ 			data = skb_mac_header(skb);
+-			*(__be16 *)(data + offset2) =
+-				htons((sec >> 32) & 0xffff);
+-			*(__be32 *)(data + offset2 + 2) =
+-				htonl(sec & 0xffffffff);
+-			*(__be32 *)(data + offset2 + 6) = htonl(nsec);
++			new_sec_h = htons((sec >> 32) & 0xffff);
++			new_sec_l = htonl(sec & 0xffffffff);
++			new_nsec = htonl(nsec);
++			if (udp) {
++				struct udphdr *uh = udp_hdr(skb);
++				__be32 old_sec_l, old_nsec;
++				__be16 old_sec_h;
++
++				old_sec_h = *(__be16 *)(data + offset2);
++				inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
++							 new_sec_h, false);
++
++				old_sec_l = *(__be32 *)(data + offset2 + 2);
++				inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
++							 new_sec_l, false);
++
++				old_nsec = *(__be32 *)(data + offset2 + 6);
++				inet_proto_csum_replace4(&uh->check, skb, old_nsec,
++							 new_nsec, false);
++			}
++
++			*(__be16 *)(data + offset2) = new_sec_h;
++			*(__be32 *)(data + offset2 + 2) = new_sec_l;
++			*(__be32 *)(data + offset2 + 6) = new_nsec;
+ 
+ 			/* Configure single-step register */
+ 			val = ENETC_PM0_SINGLE_STEP_EN;
+@@ -329,25 +374,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+ dma_err:
+ 	dev_err(tx_ring->dev, "DMA map error");
+ 
+-	do {
+-		tx_swbd = &tx_ring->tx_swbd[i];
+-		enetc_free_tx_frame(tx_ring, tx_swbd);
+-		if (i == 0)
+-			i = tx_ring->bd_count;
+-		i--;
+-	} while (count--);
++	enetc_unwind_tx_frame(tx_ring, count, i);
+ 
+ 	return 0;
+ }
+ 
+-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+-				 struct enetc_tx_swbd *tx_swbd,
+-				 union enetc_tx_bd *txbd, int *i, int hdr_len,
+-				 int data_len)
++static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
++				struct enetc_tx_swbd *tx_swbd,
++				union enetc_tx_bd *txbd, int *i, int hdr_len,
++				int data_len)
+ {
+ 	union enetc_tx_bd txbd_tmp;
+ 	u8 flags = 0, e_flags = 0;
+ 	dma_addr_t addr;
++	int count = 1;
+ 
+ 	enetc_clear_tx_bd(&txbd_tmp);
+ 	addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+@@ -390,7 +430,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ 		/* Write the BD */
+ 		txbd_tmp.ext.e_flags = e_flags;
+ 		*txbd = txbd_tmp;
++		count++;
+ 	}
++
++	return count;
+ }
+ 
+ static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+@@ -522,9 +565,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 
+ 		/* compute the csum over the L4 header */
+ 		csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+-		enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
++		count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
++					      &i, hdr_len, data_len);
+ 		bd_data_num = 0;
+-		count++;
+ 
+ 		while (data_len > 0) {
+ 			int size;
+@@ -548,8 +591,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 			err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ 						    tso.data, size,
+ 						    size == data_len);
+-			if (err)
++			if (err) {
++				if (i == 0)
++					i = tx_ring->bd_count;
++				i--;
++
+ 				goto err_map_data;
++			}
+ 
+ 			data_len -= size;
+ 			count++;
+@@ -578,13 +626,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
+ 	dev_err(tx_ring->dev, "DMA map error");
+ 
+ err_chained_bd:
+-	do {
+-		tx_swbd = &tx_ring->tx_swbd[i];
+-		enetc_free_tx_frame(tx_ring, tx_swbd);
+-		if (i == 0)
+-			i = tx_ring->bd_count;
+-		i--;
+-	} while (count--);
++	enetc_unwind_tx_frame(tx_ring, count, i);
+ 
+ 	return 0;
+ }
+@@ -1625,7 +1667,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ 				enetc_xdp_drop(rx_ring, orig_i, i);
+ 				tx_ring->stats.xdp_tx_drops++;
+ 			} else {
+-				tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
++				tx_ring->stats.xdp_tx++;
+ 				rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ 				xdp_tx_frm_cnt++;
+ 				/* The XDP_TX enqueue was successful, so we
+@@ -2938,6 +2980,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+ 		new_offloads |= ENETC_F_TX_TSTAMP;
+ 		break;
+ 	case HWTSTAMP_TX_ONESTEP_SYNC:
++		if (!enetc_si_is_pf(priv->si))
++			return -EOPNOTSUPP;
++
+ 		new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ 		new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ 		break;
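
The one-step timestamp fix above patches the UDP checksum incrementally with inet_proto_csum_replace2/4() instead of recomputing it over the whole packet. A self-contained sketch of the underlying RFC 1624 update rule (the helper names here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Fold carries until the sum fits in 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over 16-bit words, for reference. */
static uint16_t csum_full(const uint16_t *words, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *words++;
	return (uint16_t)~csum_fold(sum);
}

/* RFC 1624: HC' = ~(~HC + ~m + m') when word m is replaced by m'. */
static uint16_t csum_replace(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0xaaaa, 0x00ff, 0xbeef };
	uint16_t check = csum_full(pkt, 4);

	/* Rewrite one word (a timestamp field, say) and patch the sum. */
	check = csum_replace(check, pkt[2], 0x1111);
	pkt[2] = 0x1111;

	/* Both values agree, so the incremental update is consistent. */
	printf("incremental=0x%04x full=0x%04x\n", check, csum_full(pkt, 4));
	return 0;
}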
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+index fc41078c4f5da6..73ac8c6afb3ad8 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+@@ -672,7 +672,6 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
+ err_alloc_msix:
+ err_config_si:
+ err_clk_get:
+-	mutex_destroy(&priv->mm_lock);
+ 	free_netdev(ndev);
+ 
+ 	return err;
+@@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
+ 	struct net_device *ndev = si->ndev;
+ 
+ 	unregister_netdev(ndev);
++	enetc4_link_deinit(priv);
+ 	enetc_free_msix(priv);
+ 	free_netdev(ndev);
+ }
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+index bf34b5bb1e3581..ece3ae28ba827f 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+@@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
+ static int enetc_get_ts_info(struct net_device *ndev,
+ 			     struct kernel_ethtool_ts_info *info)
+ {
++	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ 	int *phc_idx;
+ 
+ 	phc_idx = symbol_get(enetc_phc_index);
+@@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
+ 				SOF_TIMESTAMPING_TX_SOFTWARE;
+ 
+ 	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+-			 (1 << HWTSTAMP_TX_ON) |
+-			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
++			 (1 << HWTSTAMP_TX_ON);
++
++	if (enetc_si_is_pf(priv->si))
++		info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ 
+ 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ 			   (1 << HWTSTAMP_FILTER_ALL);
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index 8ac0047f1ada11..f0674a44356708 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -109,10 +109,12 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
+ {
+ 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
++	struct gve_rx_ring *rx = &priv->rx[idx];
+ 
+ 	if (!gve_rx_was_added_to_block(priv, idx))
+ 		return;
+ 
++	page_pool_disable_direct_recycling(rx->dqo.page_pool);
+ 	gve_remove_napi(priv, ntfy_idx);
+ 	gve_rx_remove_from_block(priv, idx);
+ 	gve_rx_reset_ring_dqo(priv, idx);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index fb527434b58b15..d649c197cf673f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
+ 	if (ice_vsi_add_vlan_zero(uplink_vsi))
+ 		goto err_vlan_zero;
+ 
+-	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+-			     ICE_FLTR_RX))
++	if (ice_set_dflt_vsi(uplink_vsi))
+ 		goto err_def_rx;
+ 
+ 	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index b83f99c01d91b9..8aabf7749aa5e0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
+ 
+ 	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ 		hash_del_rcu(&vf->entry);
++		ice_deinitialize_vf_entry(vf);
+ 		ice_put_vf(vf);
+ 	}
+ }
+@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
+ 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ 		}
+ 
+-		/* clear malicious info since the VF is getting released */
+-		if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+-			list_del(&vf->mbx_info.list_entry);
+-
+ 		mutex_unlock(&vf->cfg_lock);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+index c7c0c2f50c2652..815ad0bfe8326b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
+ 	mutex_init(&vf->cfg_lock);
+ }
+ 
++void ice_deinitialize_vf_entry(struct ice_vf *vf)
++{
++	struct ice_pf *pf = vf->pf;
++
++	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
++		list_del(&vf->mbx_info.list_entry);
++}
++
+ /**
+  * ice_dis_vf_qs - Disable the VF queues
+  * @vf: pointer to the VF structure
+diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+index 0c7e77c0a09fa6..5392b040498621 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
++++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+@@ -24,6 +24,7 @@
+ #endif
+ 
+ void ice_initialize_vf_entry(struct ice_vf *vf);
++void ice_deinitialize_vf_entry(struct ice_vf *vf);
+ void ice_dis_vf_qs(struct ice_vf *vf);
+ int ice_check_vf_init(struct ice_vf *vf);
+ enum virtchnl_status_code ice_err_to_virt_err(int err);
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 9be6a6b59c4e14..977741c4149805 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 	skb_shinfo(skb)->gso_size = rsc_seg_len;
+ 
+ 	skb_reset_network_header(skb);
+-	len = skb->len - skb_transport_offset(skb);
+ 
+ 	if (ipv4) {
+ 		struct iphdr *ipv4h = ip_hdr(skb);
+@@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 
+ 		/* Reset and set transport header offset in skb */
+ 		skb_set_transport_header(skb, sizeof(struct iphdr));
++		len = skb->len - skb_transport_offset(skb);
+ 
+ 		/* Compute the TCP pseudo header checksum */
+ 		tcp_hdr(skb)->check =
+@@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ 
+ 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
++		len = skb->len - skb_transport_offset(skb);
+ 		tcp_hdr(skb)->check =
+ 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
+ 	}
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index 1641791a2d5b4e..8ed83fb9886243 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 		       MVPP2_PRS_RI_VLAN_MASK),
+ 	/* Non IP flow, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+-		       MVPP22_CLS_HEK_OPT_VLAN,
++		       MVPP22_CLS_HEK_TAGGED,
+ 		       0, 0),
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 8b7c843446e115..823c1ba456cd18 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -564,6 +564,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
+ 		return err;
+ 
+ 	esw_qos_normalize_min_rate(parent->esw, parent, extack);
++	trace_mlx5_esw_vport_qos_create(vport->dev, vport,
++					vport->qos.sched_node->max_rate,
++					vport->qos.sched_node->bw_share);
+ 
+ 	return 0;
+ }
+@@ -591,8 +594,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
+ 	sched_node->vport = vport;
+ 	vport->qos.sched_node = sched_node;
+ 	err = esw_qos_vport_enable(vport, parent, extack);
+-	if (err)
++	if (err) {
++		__esw_qos_free_node(sched_node);
+ 		esw_qos_put(esw);
++		vport->qos.sched_node = NULL;
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+index 7db9cab9bedf69..d9362eabc6a1ca 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
+ 	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
+ 	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
+ 	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
+-		      name, size, start);
++		      name ? name : "mlx5_pcif_pool", size, start);
+ 	return pool;
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+index bfe6e2d631bdf5..f5acfb7d4ff655 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -516,6 +516,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ 	return 0;
+ }
+ 
++/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
++static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
++{
++	u32 value = readl(ioaddr + DMA_BUS_MODE);
++
++	value |= DMA_BUS_MODE_SFT_RESET;
++	writel(value, ioaddr + DMA_BUS_MODE);
++
++	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
++				  !(value & DMA_BUS_MODE_SFT_RESET),
++				  10000, 2000000);
++}
++
+ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct plat_stmmacenet_data *plat;
+@@ -566,6 +579,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
+ 
+ 	plat->bsp_priv = ld;
+ 	plat->setup = loongson_dwmac_setup;
++	plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ 	ld->dev = &pdev->dev;
+ 	ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
+ 
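
loongson_dwmac_fix_reset() above sets DMA_BUS_MODE_SFT_RESET and then polls the register for up to two seconds via readl_poll_timeout(). A hedged userspace sketch of that poll-until-clear-or-timeout shape (fake_mmio and the helper names are invented for the demo):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SFT_RESET 0x1u

static volatile uint32_t fake_mmio = SFT_RESET;	/* pretend hardware */

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll until the reset bit clears, sleeping sleep_us between reads and
 * giving up after timeout_us. Returns 0 on success, -1 on timeout. */
static int poll_reset_done(uint64_t sleep_us, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		if (!(fake_mmio & SFT_RESET))
			return 0;
		if (now_us() > deadline)
			return -1;
		usleep(sleep_us);
	}
}

int main(void)
{
	fake_mmio &= ~SFT_RESET;	/* "hardware" finishes immediately */
	printf("reset %s\n", poll_reset_done(10000, 2000000) ? "timed out"
							     : "complete");
	return 0;
}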
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index 0d5a862cd78a6c..3a13d60a947a81 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
+ 	select NET_DEVLINK
+ 	select TI_DAVINCI_MDIO
+ 	select PHYLINK
++	select PAGE_POOL
+ 	select TI_K3_CPPI_DESC_POOL
+ 	imply PHY_TI_GMII_SEL
+ 	depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 768578c0d9587d..d59c1744840af2 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ static int icss_iep_perout_enable(struct icss_iep *iep,
+ 				  struct ptp_perout_request *req, int on)
+ {
+-	int ret = 0;
+-
+-	mutex_lock(&iep->ptp_clk_mutex);
+-
+-	if (iep->pps_enabled) {
+-		ret = -EBUSY;
+-		goto exit;
+-	}
+-
+-	if (iep->perout_enabled == !!on)
+-		goto exit;
+-
+-	ret = icss_iep_perout_enable_hw(iep, req, on);
+-	if (!ret)
+-		iep->perout_enabled = !!on;
+-
+-exit:
+-	mutex_unlock(&iep->ptp_clk_mutex);
+-
+-	return ret;
++	return -EOPNOTSUPP;
+ }
+ 
+ static void icss_iep_cap_cmp_work(struct work_struct *work)
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index fd591ddb3884df..ca62188a317ad4 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
+ 
+ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ {
+-	const struct iphdr *ip4h = ip_hdr(skb);
+ 	struct net_device *dev = skb->dev;
+ 	struct net *net = dev_net(dev);
+-	struct rtable *rt;
+ 	int err, ret = NET_XMIT_DROP;
++	const struct iphdr *ip4h;
++	struct rtable *rt;
+ 	struct flowi4 fl4 = {
+ 		.flowi4_oif = dev->ifindex,
+-		.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
+ 		.flowi4_flags = FLOWI_FLAG_ANYSRC,
+ 		.flowi4_mark = skb->mark,
+-		.daddr = ip4h->daddr,
+-		.saddr = ip4h->saddr,
+ 	};
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++		goto err;
++
++	ip4h = ip_hdr(skb);
++	fl4.daddr = ip4h->daddr;
++	fl4.saddr = ip4h->saddr;
++	fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
++
+ 	rt = ip_route_output_flow(net, &fl4, NULL);
+ 	if (IS_ERR(rt))
+ 		goto err;
+@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+ 	struct net_device *dev = skb->dev;
+ 	int err, ret = NET_XMIT_DROP;
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
++		DEV_STATS_INC(dev, tx_errors);
++		kfree_skb(skb);
++		return ret;
++	}
++
+ 	err = ipvlan_route_v6_outbound(dev, skb);
+ 	if (unlikely(err)) {
+ 		DEV_STATS_INC(dev, tx_errors);
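
Both ipvlan hunks add a pskb_network_may_pull() check so the full IPv4/IPv6 header is known to be in linear data before ip_hdr()/ipv6_hdr() fields are read; a truncated packet would otherwise be parsed out of bounds. The same defensive shape over a plain byte buffer (struct and function names here are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ipv4_min {		/* fixed 20-byte IPv4 header */
	uint8_t  ver_ihl;
	uint8_t  tos;
	uint16_t tot_len;
	uint8_t  rest[16];	/* remaining fields through daddr */
};

/* Refuse to parse unless the full fixed header is actually present. */
static int parse_daddr(const uint8_t *pkt, size_t len, uint32_t *daddr)
{
	struct ipv4_min h;

	if (len < sizeof(h))	/* the pskb_network_may_pull() analogue */
		return -1;
	memcpy(&h, pkt, sizeof(h));
	memcpy(daddr, h.rest + 12, sizeof(*daddr));	/* header bytes 16..19 */
	return 0;
}

int main(void)
{
	uint8_t short_pkt[8] = { 0x45 };	/* far too short */
	uint32_t daddr;

	printf("short packet: %s\n",
	       parse_daddr(short_pkt, sizeof(short_pkt), &daddr) ?
	       "rejected" : "parsed");
	return 0;
}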
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 1993b90b1a5f90..491e56b3263fd5 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ }
+ 
++static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
++{
++	kfree_skb(skb);
++	return 0;
++}
++
++static int blackhole_neigh_construct(struct net_device *dev,
++				     struct neighbour *n)
++{
++	n->output = blackhole_neigh_output;
++	return 0;
++}
++
+ static const struct net_device_ops blackhole_netdev_ops = {
+ 	.ndo_start_xmit = blackhole_netdev_xmit,
++	.ndo_neigh_construct = blackhole_neigh_construct,
+ };
+ 
+ /* This is a dst-dummy device used specifically for invalidated
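
The blackhole device gains a ndo_neigh_construct hook that points every neighbour's output function at a dropper, so traffic queued against an invalidated dst is freed rather than transmitted. A minimal sketch of that function-pointer override (the types are invented stand-ins for struct neighbour and sk_buff):

#include <stdio.h>
#include <stdlib.h>

struct pkt { char data[64]; };

struct neigh {
	int (*output)(struct neigh *n, struct pkt *p);
};

static int drop_output(struct neigh *n, struct pkt *p)
{
	(void)n;
	free(p);		/* the kfree_skb() analogue */
	return 0;
}

/* "Constructor" hook: runs when a neighbour is attached to the
 * blackhole device and swaps in the dropping output path. */
static void blackhole_construct(struct neigh *n)
{
	n->output = drop_output;
}

int main(void)
{
	struct neigh n;
	struct pkt *p = malloc(sizeof(*p));

	blackhole_construct(&n);
	printf("output -> %d (packet dropped)\n", n.output(&n, p));
	return 0;
}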
+diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
+index bd8a51ec0ecd6a..ec336c3e338d6c 100644
+--- a/drivers/net/phy/qcom/qca807x.c
++++ b/drivers/net/phy/qcom/qca807x.c
+@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
+ 	control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ 	if (!priv->dac_full_amplitude)
+ 		control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+-	if (!priv->dac_full_amplitude)
++	if (!priv->dac_full_bias_current)
+ 		control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ 	if (!priv->dac_disable_bias_current_tweak)
+ 		control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
+index 46af78caf457a6..0bfa37c1405918 100644
+--- a/drivers/net/usb/gl620a.c
++++ b/drivers/net/usb/gl620a.c
+@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ 	dev->hard_mtu = GL_RCV_BUF_SIZE;
+ 	dev->net->hard_header_len += 4;
+-	dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
+-	dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
+-	return 0;
++	return usbnet_get_endpoints(dev, intf);
+ }
+ 
+ static const struct driver_info	genelink_info = {
+diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
+index 2f7a05f21dc595..dcb8e1628632e6 100644
+--- a/drivers/phy/rockchip/Kconfig
++++ b/drivers/phy/rockchip/Kconfig
+@@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP
+ 	depends on ARCH_ROCKCHIP && OF
+ 	depends on TYPEC
+ 	select GENERIC_PHY
++	select USB_COMMON
+ 	help
+ 	  Enable this to support the Rockchip USB3.0/DP combo PHY with
+ 	  Samsung IP block. This is required for USB3 support on RK3588.
+diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+index 2eb3329ca23f67..1ef6d9630f7e09 100644
+--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
++++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+@@ -309,7 +309,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
+ 
+ 	priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
+ 
+-	priv->phy_rst = devm_reset_control_get(dev, "phy");
++	priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
++	/* fallback to old behaviour */
++	if (PTR_ERR(priv->phy_rst) == -ENOENT)
++		priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
+ 	if (IS_ERR(priv->phy_rst))
+ 		return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
+ 
+diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+index c421b495eb0fe4..46b8f6987c62c3 100644
+--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
++++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+@@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
+ 	reg |=	PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ 
+ 	/* FSEL settings corresponding to reference clock */
+-	reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
+-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
+-		PHYCLKRST_SSC_REFCLKSEL_MASK;
++	reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
++		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
++		 PHYCLKRST_SSC_REFCLKSEL_MASK);
+ 	switch (phy_drd->extrefclk) {
+ 	case EXYNOS5_FSEL_50MHZ:
+ 		reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
+@@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
+ 	reg &= ~PHYCLKRST_REFCLKSEL_MASK;
+ 	reg |=	PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ 
+-	reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
+-		PHYCLKRST_MPLL_MULTIPLIER_MASK |
+-		PHYCLKRST_SSC_REFCLKSEL_MASK;
++	reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
++		 PHYCLKRST_MPLL_MULTIPLIER_MASK |
++		 PHYCLKRST_SSC_REFCLKSEL_MASK);
+ 	reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
+ 
+ 	return reg;
+@@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
+ 	struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ 	int ret;
+ 
++	if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
++		ret = exynos850_usbdrd_phy_exit(phy);
++		if (ret)
++			return ret;
++	}
++
++	exynos5_usbdrd_phy_isol(inst, true);
++
+ 	if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
+ 		return 0;
+ 
+-	ret = exynos850_usbdrd_phy_exit(phy);
+-	if (ret)
+-		return ret;
+-
+-	exynos5_usbdrd_phy_isol(inst, true);
+ 	return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
+ 				      phy_drd->regulators);
+ }
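
The two exynos5 hunks fix a precedence bug: since `~` binds tighter than `|`, `reg &= ~A | B | C` clears only the A field, while the intended `reg &= ~(A | B | C)` clears all three. A short demonstration with made-up mask values:

#include <stdint.h>
#include <stdio.h>

#define FSEL_MASK 0x00f0u
#define MPLL_MASK 0x0f00u
#define SSC_MASK  0xf000u

int main(void)
{
	uint32_t buggy = 0xffffu, fixed = 0xffffu;

	buggy &= ~FSEL_MASK | MPLL_MASK | SSC_MASK;	/* == (~FSEL) | ... */
	fixed &= ~(FSEL_MASK | MPLL_MASK | SSC_MASK);

	/* buggy leaves the MPLL/SSC bits set; fixed clears all three. */
	printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
	return 0;
}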
+diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c
+index 49e9fa90a68199..607b4d607eb5e6 100644
+--- a/drivers/phy/st/phy-stm32-combophy.c
++++ b/drivers/phy/st/phy-stm32-combophy.c
+@@ -111,6 +111,7 @@ static const struct clk_impedance imp_lookup[] = {
+ 	{ 4204000, { 511000, 609000, 706000, 802000 } },
+ 	{ 3999000, { 571000, 648000, 726000, 803000 } }
+ };
++#define DEFAULT_IMP_INDEX 3 /* Default impedance is 50 Ohm */
+ 
+ static int stm32_impedance_tune(struct stm32_combophy *combophy)
+ {
+@@ -119,10 +120,9 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
+ 	u8 imp_of, vswing_of;
+ 	u32 max_imp = imp_lookup[0].microohm;
+ 	u32 min_imp = imp_lookup[imp_size - 1].microohm;
+-	u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1];
++	u32 max_vswing;
+ 	u32 min_vswing = imp_lookup[0].vswing[0];
+ 	u32 val;
+-	u32 regval;
+ 
+ 	if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) {
+ 		if (val < min_imp || val > max_imp) {
+@@ -130,45 +130,43 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
+ 			return -EINVAL;
+ 		}
+ 
+-		regval = 0;
+-		for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) {
+-			if (imp_lookup[imp_of].microohm <= val) {
+-				regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of);
++		for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++)
++			if (imp_lookup[imp_of].microohm <= val)
+ 				break;
+-			}
+-		}
++
++		if (WARN_ON(imp_of == ARRAY_SIZE(imp_lookup)))
++			return -EINVAL;
+ 
+ 		dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n",
+ 			imp_lookup[imp_of].microohm);
+ 
+ 		regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
+ 				   STM32MP25_PCIEPRG_IMPCTRL_OHM,
+-				   regval);
+-	} else {
+-		regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val);
+-		imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val);
+-	}
++				   FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of));
++	} else
++		imp_of = DEFAULT_IMP_INDEX;
+ 
+ 	if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) {
++		max_vswing = imp_lookup[imp_of].vswing[vswing_size - 1];
++
+ 		if (val < min_vswing || val > max_vswing) {
+ 			dev_err(combophy->dev, "Invalid value %u for output vswing\n", val);
+ 			return -EINVAL;
+ 		}
+ 
+-		regval = 0;
+-		for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) {
+-			if (imp_lookup[imp_of].vswing[vswing_of] >= val) {
+-				regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of);
++		for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++)
++			if (imp_lookup[imp_of].vswing[vswing_of] >= val)
+ 				break;
+-			}
+-		}
++
++		if (WARN_ON(vswing_of == ARRAY_SIZE(imp_lookup[imp_of].vswing)))
++			return -EINVAL;
+ 
+ 		dev_dbg(combophy->dev, "Set %u microvolt swing\n",
+ 			 imp_lookup[imp_of].vswing[vswing_of]);
+ 
+ 		regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
+ 				   STM32MP25_PCIEPRG_IMPCTRL_VSWING,
+-				   regval);
++				   FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of));
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
+index 0f60d5d1c1678d..fae6242aa730e0 100644
+--- a/drivers/phy/tegra/xusb-tegra186.c
++++ b/drivers/phy/tegra/xusb-tegra186.c
+@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
+ 	unsigned int index = lane->index;
+ 	struct device *dev = padctl->dev;
+ 	int err;
++	u32 reg;
+ 
+ 	port = tegra_xusb_find_usb2_port(padctl, index);
+ 	if (!port) {
+@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
+ 		return -ENODEV;
+ 	}
+ 
++	if (port->mode == USB_DR_MODE_OTG ||
++	    port->mode == USB_DR_MODE_PERIPHERAL) {
++		/* reset VBUS&ID OVERRIDE */
++		reg = padctl_readl(padctl, USB2_VBUS_ID);
++		reg &= ~VBUS_OVERRIDE;
++		reg &= ~ID_OVERRIDE(~0);
++		reg |= ID_OVERRIDE_FLOATING;
++		padctl_writel(padctl, reg, USB2_VBUS_ID);
++	}
++
+ 	if (port->supply && port->mode == USB_DR_MODE_HOST) {
+ 		err = regulator_enable(port->supply);
+ 		if (err) {
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 5f9b107ae267f1..43766589bfc6ec 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1656,13 +1656,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
+ 	if (in_flight)
+ 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+ 
+-	/*
+-	 * Only clear the driver-private command data if the LLD does not supply
+-	 * a function to initialize that data.
+-	 */
+-	if (!shost->hostt->init_cmd_priv)
+-		memset(cmd + 1, 0, shost->hostt->cmd_size);
+-
+ 	cmd->prot_op = SCSI_PROT_NORMAL;
+ 	if (blk_rq_bytes(req))
+ 		cmd->sc_data_direction = rq_dma_dir(req);
+@@ -1829,6 +1822,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
+ 		goto out_dec_target_busy;
+ 
++	/*
++	 * Only clear the driver-private command data if the LLD does not supply
++	 * a function to initialize that data.
++	 */
++	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
++		memset(cmd + 1, 0, shost->hostt->cmd_size);
++
+ 	if (!(req->rq_flags & RQF_DONTPREP)) {
+ 		ret = scsi_prepare_cmd(req);
+ 		if (ret != BLK_STS_OK)
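
The scsi_lib change defers zeroing of the LLD-private area from scsi_prepare_cmd() to scsi_queue_rq(), and only does it when cmd_size is non-zero and the driver supplies no init_cmd_priv(). The `cmd + 1` idiom works because that private area is allocated directly behind the fixed command structure; a hedged sketch of the layout (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd {			/* fixed header, scsi_cmnd analogue */
	int tag;
};

struct drv_priv {		/* LLD-private data, cmd_size bytes */
	int retries;
};

int main(void)
{
	/* One allocation: header immediately followed by the private area. */
	struct cmd *cmd = malloc(sizeof(*cmd) + sizeof(struct drv_priv));
	struct drv_priv *priv;

	if (!cmd)
		return 1;
	priv = (struct drv_priv *)(cmd + 1);
	priv->retries = 42;	/* stale data from a previous use */

	/* The "clear unless the driver initializes it itself" step. */
	memset(cmd + 1, 0, sizeof(struct drv_priv));
	printf("retries after clear: %d\n", priv->retries);

	free(cmd);
	return 0;
}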
+diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
+index 3b644de3292e2e..0d9f636c80f4dc 100644
+--- a/drivers/thermal/gov_power_allocator.c
++++ b/drivers/thermal/gov_power_allocator.c
+@@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
+ 
+ 	for (i = 0; i < num_actors; i++) {
+ 		struct power_actor *pa = &power[i];
+-		u64 req_range = (u64)pa->req_power * power_range;
++		u64 req_range = (u64)pa->weighted_req_power * power_range;
+ 
+ 		pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
+ 							  total_req_power);
+@@ -641,6 +641,22 @@ static int allocate_actors_buffer(struct power_allocator_params *params,
+ 	return ret;
+ }
+ 
++static void power_allocator_update_weight(struct power_allocator_params *params)
++{
++	const struct thermal_trip_desc *td;
++	struct thermal_instance *instance;
++
++	if (!params->trip_max)
++		return;
++
++	td = trip_to_trip_desc(params->trip_max);
++
++	params->total_weight = 0;
++	list_for_each_entry(instance, &td->thermal_instances, trip_node)
++		if (power_actor_is_valid(instance))
++			params->total_weight += instance->weight;
++}
++
+ static void power_allocator_update_tz(struct thermal_zone_device *tz,
+ 				      enum thermal_notify_event reason)
+ {
+@@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
+ 			if (power_actor_is_valid(instance))
+ 				num_actors++;
+ 
+-		if (num_actors == params->num_actors)
+-			return;
++		if (num_actors != params->num_actors)
++			allocate_actors_buffer(params, num_actors);
+ 
+-		allocate_actors_buffer(params, num_actors);
+-		break;
++		fallthrough;
+ 	case THERMAL_INSTANCE_WEIGHT_CHANGED:
+-		params->total_weight = 0;
+-		list_for_each_entry(instance, &td->thermal_instances, trip_node)
+-			if (power_actor_is_valid(instance))
+-				params->total_weight += instance->weight;
++		power_allocator_update_weight(params);
+ 		break;
+ 	default:
+ 		break;
+@@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
+ 
+ 	tz->governor_data = params;
+ 
++	power_allocator_update_weight(params);
++
+ 	return 0;
+ 
+ free_params:
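
With the divvy_up_power() fix above, each actor's grant is proportional to its weighted request: granted_i = round(weighted_req_i * power_range / total_req_power). A small self-contained sketch of that split (the numbers are made up):

#include <stdint.h>
#include <stdio.h>

/* DIV_ROUND_CLOSEST_ULL analogue. */
static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint64_t weighted_req[3] = { 300, 500, 200 };	/* mW, illustrative */
	uint64_t total = 300 + 500 + 200;
	uint64_t power_range = 750;			/* budget to share */

	/* Grants come out as 225, 375 and 150 mW, summing to the budget. */
	for (int i = 0; i < 3; i++)
		printf("actor %d granted %llu mW\n", i,
		       (unsigned long long)div_round_closest(
				weighted_req[i] * power_range, total));
	return 0;
}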
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 5ab4ce4daaebdf..5401f03d6b6c14 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -274,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
+ 	return true;
+ }
+ 
++static bool thermal_of_cm_lookup(struct device_node *cm_np,
++				 const struct thermal_trip *trip,
++				 struct thermal_cooling_device *cdev,
++				 struct cooling_spec *c)
++{
++	for_each_child_of_node_scoped(cm_np, child) {
++		struct device_node *tr_np;
++		int count, i;
++
++		tr_np = of_parse_phandle(child, "trip", 0);
++		if (tr_np != trip->priv)
++			continue;
++
++		/* The trip has been found, look up the cdev. */
++		count = of_count_phandle_with_args(child, "cooling-device",
++						   "#cooling-cells");
++		if (count <= 0)
++			pr_err("Add a cooling_device property with at least one device\n");
++
++		for (i = 0; i < count; i++) {
++			if (thermal_of_get_cooling_spec(child, i, cdev, c))
++				return true;
++		}
++	}
++
++	return false;
++}
++
+ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
+ 				   const struct thermal_trip *trip,
+ 				   struct thermal_cooling_device *cdev,
+@@ -293,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
+ 		goto out;
+ 
+ 	/* Look up the trip and the cdev in the cooling maps. */
+-	for_each_child_of_node_scoped(cm_np, child) {
+-		struct device_node *tr_np;
+-		int count, i;
+-
+-		tr_np = of_parse_phandle(child, "trip", 0);
+-		if (tr_np != trip->priv)
+-			continue;
+-
+-		/* The trip has been found, look up the cdev. */
+-		count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
+-		if (count <= 0)
+-			pr_err("Add a cooling_device property with at least one device\n");
+-
+-		for (i = 0; i < count; i++) {
+-			result = thermal_of_get_cooling_spec(child, i, cdev, c);
+-			if (result)
+-				break;
+-		}
+-
+-		break;
+-	}
++	result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
+ 
+ 	of_node_put(cm_np);
+ out:
+diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
+index 8d4ad0a3f2cf02..252186124669a8 100644
+--- a/drivers/ufs/core/ufs_bsg.c
++++ b/drivers/ufs/core/ufs_bsg.c
+@@ -194,10 +194,12 @@ static int ufs_bsg_request(struct bsg_job *job)
+ 	ufshcd_rpm_put_sync(hba);
+ 	kfree(buff);
+ 	bsg_reply->result = ret;
+-	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
+ 	/* complete the job here only if no error */
+-	if (ret == 0)
++	if (ret == 0) {
++		job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
++					sizeof(struct ufs_bsg_reply);
+ 		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 56b32d245c2ee6..a5bb6ea96460cc 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
+ 
+ static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+ {
+-	return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
++	return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
+ }
+ 
+ static const struct ufs_dev_quirk ufs_fixups[] = {
+@@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
+ 	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
+ 
+ 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+-	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+-		hba->outstanding_reqs, hba->outstanding_tasks);
++	dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
++		scsi_host_busy(hba->host), hba->outstanding_tasks);
+ 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+ 		hba->saved_err, hba->saved_uic_err);
+ 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+@@ -8944,7 +8944,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+ 	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
+ 		 __func__, hba->outstanding_tasks);
+ 
+-	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
++	return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ }
+ 
+ static const struct attribute_group *ufshcd_driver_groups[] = {
+@@ -10494,6 +10494,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 	 */
+ 	spin_lock_init(&hba->clk_gating.lock);
+ 
++	/*
++	 * Set the default power management level for runtime and system PM.
++	 * Host controller drivers can override them in their
++	 * 'ufs_hba_variant_ops::init' callback.
++	 *
++	 * Default power saving mode is to keep UFS link in Hibern8 state
++	 * and UFS device in sleep state.
++	 */
++	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++						UFS_SLEEP_PWR_MODE,
++						UIC_LINK_HIBERN8_STATE);
++	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
++						UFS_SLEEP_PWR_MODE,
++						UIC_LINK_HIBERN8_STATE);
++
+ 	err = ufshcd_hba_init(hba);
+ 	if (err)
+ 		goto out_error;
+@@ -10607,21 +10622,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ 		goto out_disable;
+ 	}
+ 
+-	/*
+-	 * Set the default power management level for runtime and system PM if
+-	 * not set by the host controller drivers.
+-	 * Default power saving mode is to keep UFS link in Hibern8 state
+-	 * and UFS device in sleep state.
+-	 */
+-	if (!hba->rpm_lvl)
+-		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+-						UFS_SLEEP_PWR_MODE,
+-						UIC_LINK_HIBERN8_STATE);
+-	if (!hba->spm_lvl)
+-		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+-						UFS_SLEEP_PWR_MODE,
+-						UIC_LINK_HIBERN8_STATE);
+-
+ 	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
+ 	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
+ 
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index 038f9d0ae3af8e..4504e16b458cc1 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell,
+ 	rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ 	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ 
++	afs_get_cell(cell, afs_cell_trace_get_server);
++
+ added_dup:
+ 	write_seqlock(&net->fs_addr_lock);
+ 	estate = rcu_dereference_protected(server->endpoint_state,
+@@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
+ 			 atomic_read(&server->active), afs_server_trace_free);
+ 	afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
+ 			       afs_estate_trace_put_server);
++	afs_put_cell(server->cell, afs_cell_trace_put_server);
+ 	kfree(server);
+ }
+ 
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index 7e7e567a7f8a20..d20cd902ef949a 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
+ 				break;
+ 		if (j < slist->nr_servers) {
+ 			if (slist->servers[j].server == server) {
+-				afs_put_server(volume->cell->net, server,
+-					       afs_server_trace_put_slist_isort);
++				afs_unuse_server(volume->cell->net, server,
++						 afs_server_trace_put_slist_isort);
+ 				continue;
+ 			}
+ 
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 67ce85ff0ae251..7f46abbd6311b2 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -1128,6 +1128,8 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
+ 	long nr_dropped = 0;
+ 	struct rb_node *node;
+ 
++	lockdep_assert_held_write(&tree->lock);
++
+ 	/*
+ 	 * Take the mmap lock so that we serialize with the inode logging phase
+ 	 * of fsync because we may need to set the full sync flag on the inode,
+@@ -1139,28 +1141,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
+ 	 * to find new extents, which may not be there yet because ordered
+ 	 * extents haven't completed yet.
+ 	 *
+-	 * We also do a try lock because otherwise we could deadlock. This is
+-	 * because the shrinker for this filesystem may be invoked while we are
+-	 * in a path that is holding the mmap lock in write mode. For example in
+-	 * a reflink operation while COWing an extent buffer, when allocating
+-	 * pages for a new extent buffer and under memory pressure, the shrinker
+-	 * may be invoked, and therefore we would deadlock by attempting to read
+-	 * lock the mmap lock while we are holding already a write lock on it.
++	 * We also do a try lock because we don't want to block for too long and
++	 * we are holding the extent map tree's lock in write mode.
+ 	 */
+ 	if (!down_read_trylock(&inode->i_mmap_lock))
+ 		return 0;
+ 
+-	/*
+-	 * We want to be fast so if the lock is busy we don't want to spend time
+-	 * waiting for it - either some task is about to do IO for the inode or
+-	 * we may have another task shrinking extent maps, here in this code, so
+-	 * skip this inode.
+-	 */
+-	if (!write_trylock(&tree->lock)) {
+-		up_read(&inode->i_mmap_lock);
+-		return 0;
+-	}
+-
+ 	node = rb_first(&tree->root);
+ 	while (node) {
+ 		struct rb_node *next = rb_next(node);
+@@ -1201,12 +1187,61 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
+ 			break;
+ 		node = next;
+ 	}
+-	write_unlock(&tree->lock);
+ 	up_read(&inode->i_mmap_lock);
+ 
+ 	return nr_dropped;
+ }
+ 
++static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
++						      u64 min_ino)
++{
++	struct btrfs_inode *inode;
++	unsigned long from = min_ino;
++
++	xa_lock(&root->inodes);
++	while (true) {
++		struct extent_map_tree *tree;
++
++		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
++		if (!inode)
++			break;
++
++		tree = &inode->extent_tree;
++
++		/*
++		 * We want to be fast so if the lock is busy we don't want to
++		 * spend time waiting for it (some task is about to do IO for
++		 * the inode).
++		 */
++		if (!write_trylock(&tree->lock))
++			goto next;
++
++		/*
++		 * Skip inode if it doesn't have loaded extent maps, so we avoid
++		 * getting a reference and doing an iput later. This includes
++		 * cases like files that were opened for things like stat(2), or
++		 * files with all extent maps previously released through the
++		 * release folio callback (btrfs_release_folio()) or released in
++		 * a previous run, or directories which never have extent maps.
++		 */
++		if (RB_EMPTY_ROOT(&tree->root)) {
++			write_unlock(&tree->lock);
++			goto next;
++		}
++
++		if (igrab(&inode->vfs_inode))
++			break;
++
++		write_unlock(&tree->lock);
++next:
++		from = btrfs_ino(inode) + 1;
++		cond_resched_lock(&root->inodes.xa_lock);
++	}
++	xa_unlock(&root->inodes);
++
++	return inode;
++}
++
+ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -1214,21 +1249,21 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
+ 	long nr_dropped = 0;
+ 	u64 min_ino = fs_info->em_shrinker_last_ino + 1;
+ 
+-	inode = btrfs_find_first_inode(root, min_ino);
++	inode = find_first_inode_to_shrink(root, min_ino);
+ 	while (inode) {
+ 		nr_dropped += btrfs_scan_inode(inode, ctx);
++		write_unlock(&inode->extent_tree.lock);
+ 
+ 		min_ino = btrfs_ino(inode) + 1;
+ 		fs_info->em_shrinker_last_ino = btrfs_ino(inode);
+-		btrfs_add_delayed_iput(inode);
++		iput(&inode->vfs_inode);
+ 
+-		if (ctx->scanned >= ctx->nr_to_scan ||
+-		    btrfs_fs_closing(inode->root->fs_info))
++		if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
+ 			break;
+ 
+ 		cond_resched();
+ 
+-		inode = btrfs_find_first_inode(root, min_ino);
++		inode = find_first_inode_to_shrink(root, min_ino);
+ 	}
+ 
+ 	if (inode) {
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 6542ee00bf3979..088ef0b4d56ba0 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1127,7 +1127,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
+ 	u64 lockend;
+ 	size_t num_written = 0;
+ 	ssize_t ret;
+-	loff_t old_isize = i_size_read(inode);
++	loff_t old_isize;
+ 	unsigned int ilock_flags = 0;
+ 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
+ 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
+@@ -1140,6 +1140,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/*
++	 * We can only trust the isize with the inode lock held; otherwise it
++	 * can race with other buffered writes and cause an incorrect call to
++	 * pagecache_isize_extended() that overwrites existing data.
++	 */
++	old_isize = i_size_read(inode);
++
+ 	ret = generic_write_checks(iocb, i);
+ 	if (ret <= 0)
+ 		goto out;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 27ccae63495d14..9ce6d1c6cac159 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -836,6 +836,12 @@ static int fuse_check_folio(struct folio *folio)
+ 	return 0;
+ }
+ 
++/*
++ * Attempt to steal a page from the splice() pipe and move it into the
++ * pagecache. If successful, the pointer in @pagep will be updated. The
++ * folio that was originally in @pagep will lose a reference and the new
++ * folio returned in @pagep will carry a reference.
++ */
+ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ {
+ 	int err;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 7d92a547999858..d63e56fd3dd207 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -955,8 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
+ 		fuse_invalidate_atime(inode);
+ 	}
+ 
+-	for (i = 0; i < ap->num_folios; i++)
++	for (i = 0; i < ap->num_folios; i++) {
+ 		folio_end_read(ap->folios[i], !err);
++		folio_put(ap->folios[i]);
++	}
+ 	if (ia->ff)
+ 		fuse_file_put(ia->ff, false);
+ 
+@@ -1048,7 +1050,14 @@ static void fuse_readahead(struct readahead_control *rac)
+ 		ap = &ia->ap;
+ 
+ 		while (ap->num_folios < cur_pages) {
+-			folio = readahead_folio(rac);
++			/*
++			 * This returns a folio with a ref held on it.
++			 * The ref needs to be held until the request is
++			 * completed, since the splice case (see
++			 * fuse_try_move_page()) drops the ref after it's
++			 * replaced in the page cache.
++			 */
++			folio = __readahead_folio(rac);
+ 			ap->folios[ap->num_folios] = folio;
+ 			ap->descs[ap->num_folios].length = folio_size(folio);
+ 			ap->num_folios++;
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 035ba52742a504..4db912f5623055 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -780,6 +780,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
+ 	return 0;
+ }
+ 
++/**
++ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
++ * @inode: inode to process
++ *
++ * This routine is called to request that the delegation be returned as soon
++ * as the file is closed. If the file is already closed, the delegation is
++ * immediately returned.
++ */
++void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
++{
++	struct nfs_delegation *delegation;
++	struct nfs_delegation *ret = NULL;
++
++	if (!inode)
++		return;
++	rcu_read_lock();
++	delegation = nfs4_get_valid_delegation(inode);
++	if (!delegation)
++		goto out;
++	spin_lock(&delegation->lock);
++	if (!delegation->inode)
++		goto out_unlock;
++	if (list_empty(&NFS_I(inode)->open_files) &&
++	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
++		/* Refcount matched in nfs_end_delegation_return() */
++		ret = nfs_get_delegation(delegation);
++	} else
++		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
++out_unlock:
++	spin_unlock(&delegation->lock);
++	if (ret)
++		nfs_clear_verifier_delegated(inode);
++out:
++	rcu_read_unlock();
++	nfs_end_delegation_return(inode, ret, 0);
++}
++
+ /**
+  * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
+  * @inode: inode to process
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 71524d34ed207c..8ff5ab9c5c2565 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
+ 				  unsigned long pagemod_limit, u32 deleg_type);
+ int nfs4_inode_return_delegation(struct inode *inode);
+ void nfs4_inode_return_delegation_on_close(struct inode *inode);
++void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
+ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+ void nfs_inode_evict_delegation(struct inode *inode);
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index b08dbe96bc5796..6a6e7588413363 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -56,6 +56,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/atomic.h>
+ 
++#include "delegation.h"
+ #include "internal.h"
+ #include "iostat.h"
+ #include "pnfs.h"
+@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
+ 		dreq->count = req_start;
+ }
+ 
++static void nfs_direct_file_adjust_size_locked(struct inode *inode,
++					       loff_t offset, size_t count)
++{
++	loff_t newsize = offset + (loff_t)count;
++	loff_t oldsize = i_size_read(inode);
++
++	if (newsize > oldsize) {
++		i_size_write(inode, newsize);
++		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
++		trace_nfs_size_grow(inode, newsize);
++		nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
++	}
++}
++
+ /**
+  * nfs_swap_rw - NFS address space operation for swap I/O
+  * @iocb: target I/O control block
+@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+ 	nfs_direct_count_bytes(dreq, hdr);
+ 	spin_unlock(&dreq->lock);
+ 
++	nfs_update_delegated_atime(dreq->inode);
++
+ 	while (!list_empty(&hdr->pages)) {
+ 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ 		struct page *page = req->wb_page;
+@@ -740,6 +757,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	struct nfs_direct_req *dreq = hdr->dreq;
+ 	struct nfs_commit_info cinfo;
+ 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
++	struct inode *inode = dreq->inode;
+ 	int flags = NFS_ODIRECT_DONE;
+ 
+ 	trace_nfs_direct_write_completion(dreq);
+@@ -761,6 +779,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 	}
+ 	spin_unlock(&dreq->lock);
+ 
++	spin_lock(&inode->i_lock);
++	nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
++	nfs_update_delegated_mtime_locked(dreq->inode);
++	spin_unlock(&inode->i_lock);
++
+ 	while (!list_empty(&hdr->pages)) {
+ 
+ 		req = nfs_list_entry(hdr->pages.next);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 405f17e6e0b45b..e7bc99c69743cf 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3898,8 +3898,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+ 
+ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
+ {
++	struct dentry *dentry = ctx->dentry;
+ 	if (ctx->state == NULL)
+ 		return;
++	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
++		nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
+ 	if (is_sync)
+ 		nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
+ 	else
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 0c28e5fa340775..d7310fcf38881e 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -618,7 +618,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ 	err = PTR_ERR(upper);
+ 	if (!IS_ERR(upper)) {
+ 		err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
+-		dput(upper);
+ 
+ 		if (!err) {
+ 			/* Restore timestamps on parent (best effort) */
+@@ -626,6 +625,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
+ 			ovl_dentry_set_upper_alias(c->dentry);
+ 			ovl_dentry_update_reval(c->dentry, upper);
+ 		}
++		dput(upper);
+ 	}
+ 	inode_unlock(udir);
+ 	if (err)
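
The overlayfs hunk above is a classic put-ordering fix: dput() previously ran before the last uses of "upper", so those uses could touch a freed dentry. The generic shape, with a hypothetical put/get API rather than the dcache:

#include <stdio.h>
#include <stdlib.h>

struct ref { int refs; int data; };

static void put(struct ref *r)
{
	if (--r->refs == 0)
		free(r);
}

static void use(struct ref *r)
{
	printf("data=%d\n", r->data);
}

int main(void)
{
	struct ref *r = calloc(1, sizeof(*r));
	r->refs = 1;
	r->data = 42;

	/* buggy order: put(r); use(r); -- use-after-free once refs hit 0 */
	use(r);		/* fixed order: finish all uses first ... */
	put(r);		/* ... then drop the reference */
	return 0;
}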
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 54504013c74915..337d3336e17565 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -457,7 +457,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
+ 	. = ALIGN((align));						\
+ 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
+ 		__start_rodata = .;					\
+-		*(.rodata) *(.rodata.*)					\
++		*(.rodata) *(.rodata.*) *(.data.rel.ro*)		\
+ 		SCHED_DATA						\
+ 		RO_AFTER_INIT_DATA	/* Read only after init */	\
+ 		. = ALIGN(8);						\
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 495813277597fb..315899779af16a 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -196,10 +196,11 @@ struct gendisk {
+ 	unsigned int		zone_capacity;
+ 	unsigned int		last_zone_capacity;
+ 	unsigned long __rcu	*conv_zones_bitmap;
+-	unsigned int            zone_wplugs_hash_bits;
+-	spinlock_t              zone_wplugs_lock;
++	unsigned int		zone_wplugs_hash_bits;
++	atomic_t		nr_zone_wplugs;
++	spinlock_t		zone_wplugs_lock;
+ 	struct mempool_s	*zone_wplugs_pool;
+-	struct hlist_head       *zone_wplugs_hash;
++	struct hlist_head	*zone_wplugs_hash;
+ 	struct workqueue_struct *zone_wplugs_wq;
+ #endif /* CONFIG_BLK_DEV_ZONED */
+ 
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index d0ed9583743fc2..c9b58188ec61e7 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -52,18 +52,6 @@
+  */
+ #define barrier_before_unreachable() asm volatile("")
+ 
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- */
+-#define unreachable() \
+-	do {					\
+-		annotate_unreachable();		\
+-		barrier_before_unreachable();	\
+-		__builtin_unreachable();	\
+-	} while (0)
+-
+ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+ #define __HAVE_BUILTIN_BSWAP32__
+ #define __HAVE_BUILTIN_BSWAP64__
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 7af999a131cb23..d004f9b5528d7a 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -109,44 +109,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 
+ /* Unreachable code */
+ #ifdef CONFIG_OBJTOOL
+-/*
+- * These macros help objtool understand GCC code flow for unreachable code.
+- * The __COUNTER__ based labels are a hack to make each instance of the macros
+- * unique, to convince GCC not to merge duplicate inline asm statements.
+- */
+-#define __stringify_label(n) #n
+-
+-#define __annotate_reachable(c) ({					\
+-	asm volatile(__stringify_label(c) ":\n\t"			\
+-			".pushsection .discard.reachable\n\t"		\
+-			".long " __stringify_label(c) "b - .\n\t"	\
+-			".popsection\n\t");				\
+-})
+-#define annotate_reachable() __annotate_reachable(__COUNTER__)
+-
+-#define __annotate_unreachable(c) ({					\
+-	asm volatile(__stringify_label(c) ":\n\t"			\
+-		     ".pushsection .discard.unreachable\n\t"		\
+-		     ".long " __stringify_label(c) "b - .\n\t"		\
+-		     ".popsection\n\t" : : "i" (c));			\
+-})
+-#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+-
+ /* Annotate a C jump table to allow objtool to follow the code flow */
+-#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
+-
++#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
+ #else /* !CONFIG_OBJTOOL */
+-#define annotate_reachable()
+-#define annotate_unreachable()
+ #define __annotate_jump_table
+ #endif /* CONFIG_OBJTOOL */
+ 
+-#ifndef unreachable
+-# define unreachable() do {		\
+-	annotate_unreachable();		\
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ */
++#define unreachable() do {		\
++	barrier_before_unreachable();	\
+ 	__builtin_unreachable();	\
+ } while (0)
+-#endif
+ 
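
With the objtool annotations gone, unreachable() reduces to the compiler barrier plus __builtin_unreachable(). A userspace approximation of how callers use it (the barrier is elided here; __builtin_unreachable() is a GCC/Clang builtin):

#include <stdio.h>

#define unreachable() __builtin_unreachable()

static int sign(int x)
{
	if (x > 0)
		return 1;
	if (x < 0)
		return -1;
	if (x == 0)
		return 0;
	unreachable();	/* silences "control reaches end of non-void function" */
}

int main(void)
{
	printf("%d\n", sign(-5));
	return 0;
}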
+ /*
+  * KENTRY - kernel entry point
+diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
+index 2c8bfd0f1b6b3a..6322d8c1c6b429 100644
+--- a/include/linux/rcuref.h
++++ b/include/linux/rcuref.h
+@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
+ 	return rcuref_get_slowpath(ref);
+ }
+ 
+-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
++extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
+ 
+ /*
+  * Internal helper. Do not invoke directly.
+  */
+ static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
+ {
++	int cnt;
++
+ 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+ 			 "suspicious rcuref_put_rcusafe() usage");
+ 	/*
+ 	 * Unconditionally decrease the reference count. The saturation and
+ 	 * dead zones provide enough tolerance for this.
+ 	 */
+-	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
++	cnt = atomic_sub_return_release(1, &ref->refcnt);
++	if (likely(cnt >= 0))
+ 		return false;
+ 
+ 	/*
+ 	 * Handle the last reference drop and cases inside the saturation
+ 	 * and dead zones.
+ 	 */
+-	return rcuref_put_slowpath(ref);
++	return rcuref_put_slowpath(ref, cnt);
+ }
+ 
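
The rcuref change is subtle: the slowpath must judge the value that this CPU's decrement itself produced, not a later re-read that a concurrent get()/put() pair may have moved past the interesting zone. A C11-atomics sketch of the fixed fastpath, assuming rcuref's encoding where 0 means one live reference (this is not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcnt = 0;	/* rcuref-style: 0 encodes one live reference */

static bool put_slowpath(int cnt)
{
	return cnt == -1;	/* analogue of RCUREF_NOREF: last ref was dropped */
}

static bool ref_put(void)
{
	/* capture the result of this CPU's own decrement ... */
	int cnt = atomic_fetch_sub_explicit(&refcnt, 1,
					    memory_order_release) - 1;
	if (cnt >= 0)
		return false;
	/* ... and hand it to the slowpath instead of re-reading */
	return put_slowpath(cnt);
}

int main(void)
{
	printf("last ref: %s\n", ref_put() ? "yes" : "no");
	return 0;
}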
+ /**
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index d18cc47e89bd01..c3322eb3d6865d 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -392,6 +392,8 @@ struct ucred {
+ 
+ extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
+ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
++extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++			    void *data);
+ 
+ struct timespec64;
+ struct __kernel_timespec;
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index fec1e8a1570c36..eac57914dcf320 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -158,7 +158,6 @@ enum {
+ 	RPC_TASK_NEED_XMIT,
+ 	RPC_TASK_NEED_RECV,
+ 	RPC_TASK_MSG_PIN_WAIT,
+-	RPC_TASK_SIGNALLED,
+ };
+ 
+ #define rpc_test_and_set_running(t) \
+@@ -171,7 +170,7 @@ enum {
+ 
+ #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
+ 
+-#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
++#define RPC_SIGNALLED(t)	(READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)
+ 
+ /*
+  * Task priorities.
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 44be742cf4d604..47181fd749b256 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -295,6 +295,7 @@ static inline int check_net(const struct net *net)
+ }
+ 
+ void net_drop_ns(void *);
++void net_passive_dec(struct net *net);
+ 
+ #else
+ 
+@@ -324,8 +325,18 @@ static inline int check_net(const struct net *net)
+ }
+ 
+ #define net_drop_ns NULL
++
++static inline void net_passive_dec(struct net *net)
++{
++	refcount_dec(&net->passive);
++}
+ #endif
+ 
++static inline void net_passive_inc(struct net *net)
++{
++	refcount_inc(&net->passive);
++}
++
+ /* Returns true if the netns initialization is completed successfully */
+ static inline bool net_initialized(const struct net *net)
+ {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 691ca7695d1db6..d3efb581c2ff4f 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1750,6 +1750,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
+ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ 		      struct proto *prot, int kern);
+ void sk_free(struct sock *sk);
++void sk_net_refcnt_upgrade(struct sock *sk);
+ void sk_destruct(struct sock *sk);
+ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+ void sk_free_unlock_clone(struct sock *sk);
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 3dc7a1551ac350..5d653a3491d073 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -12,6 +12,7 @@
+ #include <linux/firmware/cirrus/cs_dsp.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/regmap.h>
++#include <linux/spi/spi.h>
+ #include <sound/cs-amp-lib.h>
+ 
+ #define CS35L56_DEVID					0x0000000
+@@ -61,6 +62,7 @@
+ #define CS35L56_IRQ1_MASK_8				0x000E0AC
+ #define CS35L56_IRQ1_MASK_18				0x000E0D4
+ #define CS35L56_IRQ1_MASK_20				0x000E0DC
++#define CS35L56_DSP_MBOX_1_RAW				0x0011000
+ #define CS35L56_DSP_VIRTUAL1_MBOX_1			0x0011020
+ #define CS35L56_DSP_VIRTUAL1_MBOX_2			0x0011024
+ #define CS35L56_DSP_VIRTUAL1_MBOX_3			0x0011028
+@@ -224,6 +226,7 @@
+ #define CS35L56_HALO_STATE_SHUTDOWN			1
+ #define CS35L56_HALO_STATE_BOOT_DONE			2
+ 
++#define CS35L56_MBOX_CMD_PING				0x0A000000
+ #define CS35L56_MBOX_CMD_AUDIO_PLAY			0x0B000001
+ #define CS35L56_MBOX_CMD_AUDIO_PAUSE			0x0B000002
+ #define CS35L56_MBOX_CMD_AUDIO_REINIT			0x0B000003
+@@ -254,6 +257,16 @@
+ #define CS35L56_NUM_BULK_SUPPLIES			3
+ #define CS35L56_NUM_DSP_REGIONS				5
+ 
++/* Additional margin for SYSTEM_RESET to control port ready on SPI */
++#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
++
++struct cs35l56_spi_payload {
++	__be32	addr;
++	__be16	pad;
++	__be32	value;
++} __packed;
++static_assert(sizeof(struct cs35l56_spi_payload) == 10);
++
+ struct cs35l56_base {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+@@ -269,6 +282,7 @@ struct cs35l56_base {
+ 	s8 cal_index;
+ 	struct cirrus_amp_cal_data cal_data;
+ 	struct gpio_desc *reset_gpio;
++	struct cs35l56_spi_payload *spi_payload_buf;
+ };
+ 
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+@@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
+ 	return (reg >> 16) == 3;
+ }
+ 
++static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
++					      struct spi_device *spi)
++{
++	cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
++						sizeof(*cs35l56->spi_payload_buf),
++						GFP_KERNEL | GFP_DMA);
++	if (!cs35l56->spi_payload_buf)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
++{
++	return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
++}
++
+ extern const struct regmap_config cs35l56_regmap_i2c;
+ extern const struct regmap_config cs35l56_regmap_spi;
+ extern const struct regmap_config cs35l56_regmap_sdw;
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 9a75590227f262..3dddfc6abf0ee3 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -173,6 +173,7 @@ enum yfs_cm_operation {
+ 	EM(afs_cell_trace_get_queue_dns,	"GET q-dns ") \
+ 	EM(afs_cell_trace_get_queue_manage,	"GET q-mng ") \
+ 	EM(afs_cell_trace_get_queue_new,	"GET q-new ") \
++	EM(afs_cell_trace_get_server,		"GET server") \
+ 	EM(afs_cell_trace_get_vol,		"GET vol   ") \
+ 	EM(afs_cell_trace_insert,		"INSERT    ") \
+ 	EM(afs_cell_trace_manage,		"MANAGE    ") \
+@@ -180,6 +181,7 @@ enum yfs_cm_operation {
+ 	EM(afs_cell_trace_put_destroy,		"PUT destry") \
+ 	EM(afs_cell_trace_put_queue_work,	"PUT q-work") \
+ 	EM(afs_cell_trace_put_queue_fail,	"PUT q-fail") \
++	EM(afs_cell_trace_put_server,		"PUT server") \
+ 	EM(afs_cell_trace_put_vol,		"PUT vol   ") \
+ 	EM(afs_cell_trace_see_source,		"SEE source") \
+ 	EM(afs_cell_trace_see_ws,		"SEE ws    ") \
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index b13dc275ef4a79..851841336ee65c 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
+ 		{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" },			\
+ 		{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" },		\
+ 		{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" },		\
+-		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" },	\
+-		{ (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
++		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
+ 
+ DECLARE_EVENT_CLASS(rpc_task_running,
+ 
+diff --git a/io_uring/net.c b/io_uring/net.c
+index b01bf900e3b940..96af3408792bb8 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -334,7 +334,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ 		if (unlikely(ret))
+ 			return ret;
+ 
+-		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
++		sr->msg_control = iomsg->msg.msg_control_user;
++		return ret;
+ 	}
+ #endif
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e9f698c08dc179..0e6e16eb2d106d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4950,7 +4950,7 @@ static struct perf_event_pmu_context *
+ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ 		     struct perf_event *event)
+ {
+-	struct perf_event_pmu_context *new = NULL, *epc;
++	struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
+ 	void *task_ctx_data = NULL;
+ 
+ 	if (!ctx->task) {
+@@ -5007,12 +5007,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
+ 			atomic_inc(&epc->refcount);
+ 			goto found_epc;
+ 		}
++		/* Make sure the pmu_ctx_list is sorted by PMU type: */
++		if (!pos && epc->pmu->type > pmu->type)
++			pos = epc;
+ 	}
+ 
+ 	epc = new;
+ 	new = NULL;
+ 
+-	list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
++	if (!pos)
++		list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
++	else
++		list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
++
+ 	epc->ctx = ctx;
+ 
+ found_epc:
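
The perf hunk above keeps pmu_ctx_list sorted by PMU type: the scan remembers the first entry with a larger type and the new context is inserted before it, or appended if none is found. The same logic on a plain singly linked list (not the kernel list API):

#include <stdio.h>

struct node { int type; struct node *next; };

static void insert_sorted(struct node **head, struct node *n)
{
	struct node **pos = head;

	/* find the first entry with a larger key, as the loop above does */
	while (*pos && (*pos)->type <= n->type)
		pos = &(*pos)->next;
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	struct node a = { .type = 1 }, b = { .type = 5 }, c = { .type = 3 };
	struct node *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->type);	/* prints 1 3 5 */
	return 0;
}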
+@@ -5962,14 +5969,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
+ 	if (!value)
+ 		return -EINVAL;
+ 
+-	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+-		return -EINVAL;
+-
+-	if (perf_event_check_period(event, value))
+-		return -EINVAL;
+-
+-	if (!event->attr.freq && (value & (1ULL << 63)))
+-		return -EINVAL;
++	if (event->attr.freq) {
++		if (value > sysctl_perf_event_sample_rate)
++			return -EINVAL;
++	} else {
++		if (perf_event_check_period(event, value))
++			return -EINVAL;
++		if (value & (1ULL << 63))
++			return -EINVAL;
++	}
+ 
+ 	event_function_call(event, __perf_event_period, &value);
+ 
+@@ -8277,7 +8285,8 @@ void perf_event_exec(void)
+ 
+ 	perf_event_enable_on_exec(ctx);
+ 	perf_event_remove_on_exec(ctx);
+-	perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
++	scoped_guard(rcu)
++		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ 
+ 	perf_unpin_context(ctx);
+ 	put_ctx(ctx);
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 7f1a95b4f14de8..3c34761c9ae731 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -495,6 +495,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ 	if (ret <= 0)
+ 		goto put_old;
+ 
++	if (is_zero_page(old_page)) {
++		ret = -EINVAL;
++		goto put_old;
++	}
++
+ 	if (WARN(!is_register && PageCompound(old_page),
+ 		 "uprobe unregister should never work on compound page\n")) {
+ 		ret = -EINVAL;
+@@ -762,10 +767,14 @@ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
+ 	enum hprobe_state hstate;
+ 
+ 	/*
+-	 * return_instance's hprobe is protected by RCU.
+-	 * Underlying uprobe is itself protected from reuse by SRCU.
++	 * Caller should guarantee that return_instance is not going to be
++	 * freed from under us. This can be achieved either through holding
++	 * rcu_read_lock() or by owning return_instance in the first place.
++	 *
++	 * Underlying uprobe is itself protected from reuse by SRCU, so ensure
++	 * SRCU lock is held properly.
+ 	 */
+-	lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));
++	lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
+ 
+ 	hstate = READ_ONCE(hprobe->state);
+ 	switch (hstate) {
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index aeba4e92010205..86cb6db0816804 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7278,7 +7278,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+ #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+ int __sched __cond_resched(void)
+ {
+-	if (should_resched(0)) {
++	if (should_resched(0) && !irqs_disabled()) {
+ 		preempt_schedule_common();
+ 		return 1;
+ 	}
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index c1dec2453af432..5ccd46124ff077 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3097,7 +3097,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
+ {
+ 	struct task_struct *prev = rq->curr;
+ 	struct task_struct *p;
+-	bool prev_on_scx = prev->sched_class == &ext_sched_class;
+ 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
+ 	bool kick_idle = false;
+ 
+@@ -3117,14 +3116,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
+ 	 * if pick_task_scx() is called without preceding balance_scx().
+ 	 */
+ 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
+-		if (prev_on_scx) {
++		if (prev->scx.flags & SCX_TASK_QUEUED) {
+ 			keep_prev = true;
+ 		} else {
+ 			keep_prev = false;
+ 			kick_idle = true;
+ 		}
+-	} else if (unlikely(keep_prev && !prev_on_scx)) {
+-		/* only allowed during transitions */
++	} else if (unlikely(keep_prev &&
++			    prev->sched_class != &ext_sched_class)) {
++		/*
++		 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
++		 * conditional on scx_enabled() and may have been skipped.
++		 */
+ 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
+ 		keep_prev = false;
+ 	}
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index b1861a57e2b062..14d6c8e542f117 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -541,6 +541,7 @@ static int function_stat_show(struct seq_file *m, void *v)
+ 	static struct trace_seq s;
+ 	unsigned long long avg;
+ 	unsigned long long stddev;
++	unsigned long long stddev_denom;
+ #endif
+ 	mutex_lock(&ftrace_profile_lock);
+ 
+@@ -562,23 +563,19 @@ static int function_stat_show(struct seq_file *m, void *v)
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ 	seq_puts(m, "    ");
+ 
+-	/* Sample standard deviation (s^2) */
+-	if (rec->counter <= 1)
+-		stddev = 0;
+-	else {
+-		/*
+-		 * Apply Welford's method:
+-		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+-		 */
++	/*
++	 * Variance formula:
++	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
++	 * Maybe Welford's method is better here?
++	 * Divide only by 1000 for ns^2 -> us^2 conversion.
++	 * trace_print_graph_duration will divide by 1000 again.
++	 */
++	stddev = 0;
++	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
++	if (stddev_denom) {
+ 		stddev = rec->counter * rec->time_squared -
+ 			 rec->time * rec->time;
+-
+-		/*
+-		 * Divide only 1000 for ns^2 -> us^2 conversion.
+-		 * trace_print_graph_duration will divide 1000 again.
+-		 */
+-		stddev = div64_ul(stddev,
+-				  rec->counter * (rec->counter - 1) * 1000);
++		stddev = div64_ul(stddev, stddev_denom);
+ 	}
+ 
+ 	trace_seq_init(&s);
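
The rewritten ftrace code folds the n <= 1 case into a single "denominator is zero" test for the variance expression s^2 = (n * sum(x_i^2) - (sum x_i)^2) / (n * (n - 1)). A standalone check of that arithmetic (userspace, without the extra /1000 unit conversion the kernel applies):

#include <stdio.h>

static unsigned long long variance(unsigned long long n,
				   unsigned long long sum,
				   unsigned long long sum_sq)
{
	unsigned long long denom = n * (n - 1);

	if (!denom)			/* n <= 1: no spread to report */
		return 0;
	return (n * sum_sq - sum * sum) / denom;
}

int main(void)
{
	/* samples 2, 4, 6: sum 12, sum of squares 56, sample variance 4 */
	printf("%llu\n", variance(3, 12, 56));
	return 0;
}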
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 9c058aa8baf332..89e5bcb9156283 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -6649,27 +6649,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
+ 	if (existing_hist_update_only(glob, trigger_data, file))
+ 		goto out_free;
+ 
+-	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+-	if (ret < 0)
+-		goto out_free;
++	if (!get_named_trigger_data(trigger_data)) {
+ 
+-	if (get_named_trigger_data(trigger_data))
+-		goto enable;
++		ret = create_actions(hist_data);
++		if (ret)
++			goto out_free;
+ 
+-	ret = create_actions(hist_data);
+-	if (ret)
+-		goto out_unreg;
++		if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
++			ret = save_hist_vars(hist_data);
++			if (ret)
++				goto out_free;
++		}
+ 
+-	if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+-		ret = save_hist_vars(hist_data);
++		ret = tracing_map_init(hist_data->map);
+ 		if (ret)
+-			goto out_unreg;
++			goto out_free;
+ 	}
+ 
+-	ret = tracing_map_init(hist_data->map);
+-	if (ret)
+-		goto out_unreg;
+-enable:
++	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
++	if (ret < 0)
++		goto out_free;
++
+ 	ret = hist_trigger_enable(trigger_data, file);
+ 	if (ret)
+ 		goto out_unreg;
+diff --git a/lib/rcuref.c b/lib/rcuref.c
+index 97f300eca927ce..5bd726b71e3936 100644
+--- a/lib/rcuref.c
++++ b/lib/rcuref.c
+@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+ /**
+  * rcuref_put_slowpath - Slowpath of __rcuref_put()
+  * @ref:	Pointer to the reference count
++ * @cnt:	The resulting value of the fastpath decrement
+  *
+  * Invoked when the reference count is outside of the valid zone.
+  *
+@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+  *	with a concurrent get()/put() pair. Caller is not allowed to
+  *	deconstruct the protected object.
+  */
+-bool rcuref_put_slowpath(rcuref_t *ref)
++bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
+ {
+-	unsigned int cnt = atomic_read(&ref->refcnt);
+-
+ 	/* Did this drop the last reference? */
+ 	if (likely(cnt == RCUREF_NOREF)) {
+ 		/*
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 27b4c4a2ba1fdd..728a5ce9b50587 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -636,7 +636,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+ 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
+ 		hci_conn_hold(conn->hcon);
+ 
+-	list_add(&chan->list, &conn->chan_l);
++	/* Append to the list since the order matters for ECRED */
++	list_add_tail(&chan->list, &conn->chan_l);
+ }
+ 
+ void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+@@ -3776,7 +3777,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+ 	struct l2cap_ecred_conn_rsp *rsp_flex =
+ 		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
+ 
+-	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++	/* Check if the channel is for an outgoing connection or if it wasn't
++	 * deferred, since in those cases it must be skipped.
++	 */
++	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
++	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
+ 		return;
+ 
+ 	/* Reset ident so only one response is sent */
+diff --git a/net/core/gro.c b/net/core/gro.c
+index 78b320b6317445..0ad549b07e0399 100644
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -653,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ 	skb->pkt_type = PACKET_HOST;
+ 
+ 	skb->encapsulation = 0;
++	skb->ip_summed = CHECKSUM_NONE;
+ 	skb_shinfo(skb)->gso_type = 0;
+ 	skb_shinfo(skb)->gso_size = 0;
+ 	if (unlikely(skb->slow_gro)) {
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index b5cd3ae4f04cf2..b71aa96eeee23b 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -464,7 +464,7 @@ static void net_complete_free(void)
+ 
+ }
+ 
+-static void net_free(struct net *net)
++void net_passive_dec(struct net *net)
+ {
+ 	if (refcount_dec_and_test(&net->passive)) {
+ 		kfree(rcu_access_pointer(net->gen));
+@@ -482,7 +482,7 @@ void net_drop_ns(void *p)
+ 	struct net *net = (struct net *)p;
+ 
+ 	if (net)
+-		net_free(net);
++		net_passive_dec(net);
+ }
+ 
+ struct net *copy_net_ns(unsigned long flags,
+@@ -523,7 +523,7 @@ struct net *copy_net_ns(unsigned long flags,
+ 		key_remove_domain(net->key_domain);
+ #endif
+ 		put_user_ns(user_ns);
+-		net_free(net);
++		net_passive_dec(net);
+ dec_ucounts:
+ 		dec_net_namespaces(ucounts);
+ 		return ERR_PTR(rv);
+@@ -668,7 +668,7 @@ static void cleanup_net(struct work_struct *work)
+ 		key_remove_domain(net->key_domain);
+ #endif
+ 		put_user_ns(net->user_ns);
+-		net_free(net);
++		net_passive_dec(net);
+ 	}
+ }
+ 
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 4f6a14babe5ae3..733c0cbd393d24 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -282,6 +282,16 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ }
+ EXPORT_SYMBOL(put_cmsg);
+ 
++int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
++		     void *data)
++{
++	/* Don't produce truncated CMSGs */
++	if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
++		return -ETOOSMALL;
++
++	return put_cmsg(msg, level, type, len, data);
++}
++
+ void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
+ {
+ 	struct scm_timestamping64 tss;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index f251a99f8d4217..bed75273f8c478 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6127,11 +6127,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	skb->offload_fwd_mark = 0;
+ 	skb->offload_l3_fwd_mark = 0;
+ #endif
++	ipvs_reset(skb);
+ 
+ 	if (!xnet)
+ 		return;
+ 
+-	ipvs_reset(skb);
+ 	skb->mark = 0;
+ 	skb_clear_tstamp(skb);
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index be84885f9290a6..9d5dd99cc58178 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2233,6 +2233,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ 			get_net_track(net, &sk->ns_tracker, priority);
+ 			sock_inuse_add(net, 1);
+ 		} else {
++			net_passive_inc(net);
+ 			__netns_tracker_alloc(net, &sk->ns_tracker,
+ 					      false, priority);
+ 		}
+@@ -2257,6 +2258,7 @@ EXPORT_SYMBOL(sk_alloc);
+ static void __sk_destruct(struct rcu_head *head)
+ {
+ 	struct sock *sk = container_of(head, struct sock, sk_rcu);
++	struct net *net = sock_net(sk);
+ 	struct sk_filter *filter;
+ 
+ 	if (sk->sk_destruct)
+@@ -2288,14 +2290,28 @@ static void __sk_destruct(struct rcu_head *head)
+ 	put_cred(sk->sk_peer_cred);
+ 	put_pid(sk->sk_peer_pid);
+ 
+-	if (likely(sk->sk_net_refcnt))
+-		put_net_track(sock_net(sk), &sk->ns_tracker);
+-	else
+-		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+-
++	if (likely(sk->sk_net_refcnt)) {
++		put_net_track(net, &sk->ns_tracker);
++	} else {
++		__netns_tracker_free(net, &sk->ns_tracker, false);
++		net_passive_dec(net);
++	}
+ 	sk_prot_free(sk->sk_prot_creator, sk);
+ }
+ 
++void sk_net_refcnt_upgrade(struct sock *sk)
++{
++	struct net *net = sock_net(sk);
++
++	WARN_ON_ONCE(sk->sk_net_refcnt);
++	__netns_tracker_free(net, &sk->ns_tracker, false);
++	net_passive_dec(net);
++	sk->sk_net_refcnt = 1;
++	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
++	sock_inuse_add(net, 1);
++}
++EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
++
+ void sk_destruct(struct sock *sk)
+ {
+ 	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+@@ -2392,6 +2408,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 		 * is not properly dismantling its kernel sockets at netns
+ 		 * destroy time.
+ 		 */
++		net_passive_inc(sock_net(newsk));
+ 		__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
+ 				      false, priority);
+ 	}
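
The new sk_net_refcnt_upgrade() above exists mainly as a consolidation: mptcp, rds, smc, and the two sunrpc call sites further down all open-coded the same upgrade steps, so the net_passive_dec() fix only has to live in one place. A minimal illustration of that refactor shape (hypothetical sock type, not the kernel struct):

#include <stdio.h>

struct sk { int net_refcnt; };

static void sk_net_refcnt_upgrade(struct sk *s)
{
	/* single home for the sequence the five call sites used to repeat;
	 * any future fix to the sequence lands here once */
	s->net_refcnt = 1;
	printf("upgraded to full netns ref\n");
}

int main(void)
{
	struct sk s = { 0 };
	sk_net_refcnt_upgrade(&s);
	return 0;
}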
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index ad2741f1346af2..c7769ee0d9c553 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ static int max_skb_frags = MAX_SKB_FRAGS;
+ static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
++static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -587,7 +588,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= SYSCTL_ZERO,
++		.extra1		= &netdev_budget_usecs_min,
+ 	},
+ 	{
+ 		.procname	= "fb_tunnels_only_for_init_net",
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0d704bda6c416b..d74281eca14f0b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2438,14 +2438,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ 			 */
+ 			memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ 			dmabuf_cmsg.frag_size = copy;
+-			err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
+-				       sizeof(dmabuf_cmsg), &dmabuf_cmsg);
+-			if (err || msg->msg_flags & MSG_CTRUNC) {
+-				msg->msg_flags &= ~MSG_CTRUNC;
+-				if (!err)
+-					err = -ETOOSMALL;
++			err = put_cmsg_notrunc(msg, SOL_SOCKET,
++					       SO_DEVMEM_LINEAR,
++					       sizeof(dmabuf_cmsg),
++					       &dmabuf_cmsg);
++			if (err)
+ 				goto out;
+-			}
+ 
+ 			sent += copy;
+ 
+@@ -2499,16 +2497,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ 				offset += copy;
+ 				remaining_len -= copy;
+ 
+-				err = put_cmsg(msg, SOL_SOCKET,
+-					       SO_DEVMEM_DMABUF,
+-					       sizeof(dmabuf_cmsg),
+-					       &dmabuf_cmsg);
+-				if (err || msg->msg_flags & MSG_CTRUNC) {
+-					msg->msg_flags &= ~MSG_CTRUNC;
+-					if (!err)
+-						err = -ETOOSMALL;
++				err = put_cmsg_notrunc(msg, SOL_SOCKET,
++						       SO_DEVMEM_DMABUF,
++						       sizeof(dmabuf_cmsg),
++						       &dmabuf_cmsg);
++				if (err)
+ 					goto out;
+-				}
+ 
+ 				atomic_long_inc(&niov->pp_ref_count);
+ 				tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 7121d8573928cb..789e495d3bd6a2 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -810,12 +810,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 
+ 	/* In sequence, PAWS is OK. */
+ 
+-	/* TODO: We probably should defer ts_recent change once
+-	 * we take ownership of @req.
+-	 */
+-	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+-		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
+-
+ 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
+ 		/* Truncate SYN, it is out of window starting
+ 		   at tcp_rsk(req)->rcv_isn + 1. */
+@@ -864,6 +858,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 	if (!child)
+ 		goto listen_overflow;
+ 
++	if (own_req && tmp_opt.saw_tstamp &&
++	    !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
++		tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
++
+ 	if (own_req && rsk_drop_req(req)) {
+ 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
+ 		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index 0ac4283acdf20c..7c05ac846646f3 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb)
+ {
+ 	struct dst_entry *orig_dst = skb_dst(skb);
+ 	struct dst_entry *dst = NULL;
++	struct lwtunnel_state *lwtst;
+ 	struct rpl_lwt *rlwt;
+ 	int err;
+ 
+-	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
++	/* We cannot dereference "orig_dst" once ip6_route_input() or
++	 * skb_dst_drop() is called. However, in order to detect a dst loop, we
++	 * need the address of its lwtstate. So, save the address of lwtstate
++	 * now and use it later as a comparison.
++	 */
++	lwtst = orig_dst->lwtstate;
++
++	rlwt = rpl_lwt_lwtunnel(lwtst);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&rlwt->cache);
+@@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb)
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+-		if (!dst->error) {
++
++		/* cache only if we don't create a dst reference loop */
++		if (!dst->error && lwtst != dst->lwtstate) {
+ 			local_bh_disable();
+ 			dst_cache_set_ip6(&rlwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 33833b2064c072..51583461ae29ba 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ {
+ 	struct dst_entry *orig_dst = skb_dst(skb);
+ 	struct dst_entry *dst = NULL;
++	struct lwtunnel_state *lwtst;
+ 	struct seg6_lwt *slwt;
+ 	int err;
+ 
+-	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
++	/* We cannot dereference "orig_dst" once ip6_route_input() or
++	 * skb_dst_drop() is called. However, in order to detect a dst loop, we
++	 * need the address of its lwtstate. So, save the address of lwtstate
++	 * now and use it later as a comparison.
++	 */
++	lwtst = orig_dst->lwtstate;
++
++	slwt = seg6_lwt_lwtunnel(lwtst);
+ 
+ 	local_bh_disable();
+ 	dst = dst_cache_get(&slwt->cache);
+@@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
+ 	if (!dst) {
+ 		ip6_route_input(skb);
+ 		dst = skb_dst(skb);
+-		if (!dst->error) {
++
++		/* cache only if we don't create a dst reference loop */
++		if (!dst->error && lwtst != dst->lwtstate) {
+ 			local_bh_disable();
+ 			dst_cache_set_ip6(&slwt->cache, dst,
+ 					  &ipv6_hdr(skb)->saddr);
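
Both the rpl and seg6 hunks use the same trick: the original dst may be freed by the route lookup, so only its saved lwtstate address, never the pointer itself, is dereferenced afterwards, and the result is not cached when the lookup hands back the state we started from. The shape in miniature, with hypothetical types:

#include <stdio.h>

struct state { int id; };
struct route { struct state *lwtstate; };

static int should_cache(struct route *old, struct route *next)
{
	struct state *saved = old->lwtstate;	/* save before "old" may die */

	/* ... "old" may be dropped/freed here by the route lookup ... */

	return next->lwtstate != saved;	/* same state again => dst loop */
}

int main(void)
{
	struct state s = { 1 };
	struct route a = { &s }, b = { &s };

	printf("cache? %d (expect 0: loop)\n", should_cache(&a, &b));
	return 0;
}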
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index cc7db93e745c7c..2b1982fe16322f 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1514,11 +1514,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 		if (mptcp_pm_is_userspace(msk))
+ 			goto next;
+ 
+-		if (list_empty(&msk->conn_list)) {
+-			mptcp_pm_remove_anno_addr(msk, addr, false);
+-			goto next;
+-		}
+-
+ 		lock_sock(sk);
+ 		remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ 		mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index fd021cf8286eff..9f18217dddc865 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1142,7 +1142,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (data_len == 0) {
+ 		pr_debug("infinite mapping received\n");
+ 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
+-		subflow->map_data_len = 0;
+ 		return MAPPING_INVALID;
+ 	}
+ 
+@@ -1286,18 +1285,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
+ 		mptcp_schedule_work(sk);
+ }
+ 
+-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+-{
+-	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-
+-	if (subflow->mp_join)
+-		return false;
+-	else if (READ_ONCE(msk->csum_enabled))
+-		return !subflow->valid_csum_seen;
+-	else
+-		return READ_ONCE(msk->allow_infinite_fallback);
+-}
+-
+ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+@@ -1393,7 +1380,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 			return true;
+ 		}
+ 
+-		if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
++		if (!READ_ONCE(msk->allow_infinite_fallback)) {
+ 			/* fatal protocol error, close the socket.
+ 			 * subflow_error_report() will introduce the appropriate barriers
+ 			 */
+@@ -1772,10 +1759,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+ 	 * needs it.
+ 	 * Update ns_tracker to current stack trace and refcounted tracker.
+ 	 */
+-	__netns_tracker_free(net, &sf->sk->ns_tracker, false);
+-	sf->sk->sk_net_refcnt = 1;
+-	get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
+-	sock_inuse_add(net, 1);
++	sk_net_refcnt_upgrade(sf->sk);
+ 	err = tcp_set_ulp(sf->sk, "mptcp");
+ 	if (err)
+ 		goto err_free;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index f4e7b5e4bb59fd..e88a1ac160bc4f 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -795,16 +795,6 @@ static int netlink_release(struct socket *sock)
+ 
+ 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
+ 
+-	/* Because struct net might disappear soon, do not keep a pointer. */
+-	if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
+-		__netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+-		/* Because of deferred_put_nlk_sk and use of work queue,
+-		 * it is possible  netns will be freed before this socket.
+-		 */
+-		sock_net_set(sk, &init_net);
+-		__netns_tracker_alloc(&init_net, &sk->ns_tracker,
+-				      false, GFP_KERNEL);
+-	}
+ 	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
+ 	return 0;
+ }
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 0581c53e651704..3cc2f303bf7865 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock)
+ 			release_sock(sk);
+ 			return false;
+ 		}
+-		/* Update ns_tracker to current stack trace and refcounted tracker */
+-		__netns_tracker_free(net, &sk->ns_tracker, false);
+-
+-		sk->sk_net_refcnt = 1;
+-		netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		sk_net_refcnt_upgrade(sk);
++		put_net(net);
+ 	}
+ 	rtn = net_generic(net, rds_tcp_netid);
+ 	if (rtn->sndbuf_size > 0) {
+diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
+index 085e7892d31040..b1536da2246b82 100644
+--- a/net/rxrpc/rxperf.c
++++ b/net/rxrpc/rxperf.c
+@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
+ 		call->unmarshal++;
+ 		fallthrough;
+ 	case 2:
++		ret = rxperf_extract_data(call, true);
++		if (ret < 0)
++			return ret;
++
++		/* Deal with the terminal magic cookie. */
++		call->iov_len = 4;
++		call->kvec[0].iov_len	= call->iov_len;
++		call->kvec[0].iov_base	= call->tmp;
++		iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
++		call->unmarshal++;
++		fallthrough;
++	case 3:
+ 		ret = rxperf_extract_data(call, false);
+ 		if (ret < 0)
+ 			return ret;
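
The rxperf fix slots a new stage into a resumable switch/fallthrough unmarshaller: each case extracts one piece, may return early if the data has not arrived yet, and falls through to the next stage once its piece is complete, with call->unmarshal recording where to resume. A minimal parser in the same style (hypothetical protocol, not rxperf):

#include <stdio.h>

struct call { int unmarshal; };

static int extract(struct call *c, const char *what)
{
	printf("stage %d: %s\n", c->unmarshal, what);
	return 0;	/* 0 = piece complete; <0 would mean "come back later" */
}

static int deliver(struct call *c)
{
	int ret;

	switch (c->unmarshal) {
	case 0:
		ret = extract(c, "header");
		if (ret < 0)
			return ret;
		c->unmarshal++;
		/* fall through */
	case 1:
		ret = extract(c, "body");
		if (ret < 0)
			return ret;
		c->unmarshal++;
		/* fall through */
	case 2:
		return extract(c, "terminal cookie");
	}
	return 0;
}

int main(void)
{
	struct call c = { 0 };
	return deliver(&c);
}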
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index ebc41a7b13dbec..ba834cefb17730 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -3334,10 +3334,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
+ 	 * which need net ref.
+ 	 */
+ 	sk = smc->clcsock->sk;
+-	__netns_tracker_free(net, &sk->ns_tracker, false);
+-	sk->sk_net_refcnt = 1;
+-	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+-	sock_inuse_add(net, 1);
++	sk_net_refcnt_upgrade(sk);
+ 	return 0;
+ }
+ 
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 059f6ef1ad1898..7fcb0574fc79e7 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1669,12 +1669,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
+ 	}
+ }
+ 
+-#ifdef CONFIG_PROC_FS
+ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+ {
+ 	struct proc_dir_entry *p;
+ 	struct sunrpc_net *sn;
+ 
++	if (!IS_ENABLED(CONFIG_PROC_FS))
++		return 0;
++
+ 	sn = net_generic(net, sunrpc_net_id);
+ 	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
+ 	if (cd->procfs == NULL)
+@@ -1702,12 +1704,6 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+ 	remove_cache_proc_entries(cd);
+ 	return -ENOMEM;
+ }
+-#else /* CONFIG_PROC_FS */
+-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
+-{
+-	return 0;
+-}
+-#endif
+ 
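
The sunrpc cleanup above swaps an #ifdef pair for a single IS_ENABLED() test. The win is that the disabled branch stays visible to the compiler, so it is always type-checked, while still being eliminated as dead code. A userspace stand-in for the pattern (plain macro instead of the kernel's IS_ENABLED):

#include <stdio.h>

#define CONFIG_PROC_FS 1	/* flip to 0: the code still compiles, the branch vanishes */

static int create_entries(void)
{
	if (!CONFIG_PROC_FS)
		return 0;	/* compiled out, but never bit-rots silently */

	printf("creating /proc entries\n");
	return 0;
}

int main(void)
{
	return create_entries();
}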
+ void __init cache_initialize(void)
+ {
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index cef623ea150609..9b45fbdc90cabe 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
+ 	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+ 		return;
+ 	trace_rpc_task_signalled(task, task->tk_action);
+-	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+-	smp_mb__after_atomic();
+ 	queue = READ_ONCE(task->tk_waitqueue);
+ 	if (queue)
+ 		rpc_wake_up_queued_task(queue, task);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index cb3bd12f5818ba..72e5a01df3d352 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1541,10 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
+ 	newlen = error;
+ 
+ 	if (protocol == IPPROTO_TCP) {
+-		__netns_tracker_free(net, &sock->sk->ns_tracker, false);
+-		sock->sk->sk_net_refcnt = 1;
+-		get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(net, 1);
++		sk_net_refcnt_upgrade(sock->sk);
+ 		if ((error = kernel_listen(sock, 64)) < 0)
+ 			goto bummer;
+ 	}
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index c60936d8cef71b..83cc095846d356 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ 		goto out;
+ 	}
+ 
+-	if (protocol == IPPROTO_TCP) {
+-		__netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
+-		sock->sk->sk_net_refcnt = 1;
+-		get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
+-		sock_inuse_add(xprt->xprt_net, 1);
+-	}
++	if (protocol == IPPROTO_TCP)
++		sk_net_refcnt_upgrade(sock->sk);
+ 
+ 	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+ 	if (IS_ERR(filp))
+@@ -2581,7 +2577,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
+ 	struct sock_xprt *lower_transport =
+ 				container_of(lower_xprt, struct sock_xprt, xprt);
+ 
+-	lower_transport->xprt_err = status ? -EACCES : 0;
++	switch (status) {
++	case 0:
++	case -EACCES:
++	case -ETIMEDOUT:
++		lower_transport->xprt_err = status;
++		break;
++	default:
++		lower_transport->xprt_err = -EACCES;
++	}
+ 	complete(&lower_transport->handshake_done);
+ 	xprt_put(lower_xprt);
+ }
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index c0d3b716d11fac..1799ea6b1d5843 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
+ #define IMA_CHECK_BLACKLIST	0x40000000
+ #define IMA_VERITY_REQUIRED	0x80000000
+ 
++/* Exclude non-action flags which are not rule-specific. */
++#define IMA_NONACTION_RULE_FLAGS	(IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
++
+ #define IMA_DO_MASK		(IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ 				 IMA_HASH | IMA_APPRAISE_SUBMASK)
+ #define IMA_DONE_MASK		(IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 9b87556b03a7c0..b028c501949cad 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 	mutex_lock(&iint->mutex);
+ 
+ 	if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+-		/* reset appraisal flags if ima_inode_post_setattr was called */
++		/*
++		 * Reset appraisal flags (action and non-action rule-specific)
++		 * if ima_inode_post_setattr was called.
++		 */
+ 		iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ 				 IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+-				 IMA_NONACTION_FLAGS);
++				 IMA_NONACTION_RULE_FLAGS);
+ 
+ 	/*
+ 	 * Re-evaluate the file if either the xattr has changed or the
+diff --git a/security/landlock/net.c b/security/landlock/net.c
+index d5dcc4407a197b..104b6c01fe503b 100644
+--- a/security/landlock/net.c
++++ b/security/landlock/net.c
+@@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock,
+ 	if (WARN_ON_ONCE(dom->num_layers < 1))
+ 		return -EACCES;
+ 
+-	/* Checks if it's a (potential) TCP socket. */
+-	if (sock->type != SOCK_STREAM)
++	if (!sk_is_tcp(sock->sk))
+ 		return 0;
+ 
+ 	/* Checks for minimal header length to safely read sa_family. */
+diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
+index d4ee5bb7c48660..90357846690537 100644
+--- a/sound/pci/hda/cs35l56_hda_spi.c
++++ b/sound/pci/hda/cs35l56_hda_spi.c
+@@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
+ 		return -ENOMEM;
+ 
+ 	cs35l56->base.dev = &spi->dev;
++	ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++	if (ret)
++		return ret;
+ 
+ #ifdef CS35L56_WAKE_HOLD_TIME_US
+ 	cs35l56->base.can_hibernate = true;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ffe3de617d5ddb..0ffb7fa1b88314 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10595,6 +10595,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
+@@ -10628,7 +10629,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+-	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index e0ed4fc11155a5..e28bfefa72f33e 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -10,6 +10,7 @@
+ #include <linux/gpio/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <sound/cs-amp-lib.h>
+ 
+@@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void)
+ }
+ EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, "SND_SOC_CS35L56_SHARED");
+ 
++static const struct {
++	u32 addr;
++	u32 value;
++} cs35l56_spi_system_reset_stages[] = {
++	{ .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET },
++	/* The next write is necessary to delimit the soft reset */
++	{ .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING },
++};
++
++static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base,
++					       struct spi_device *spi)
++{
++	struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf;
++	struct spi_transfer t = {
++		.tx_buf		= buf,
++		.len		= sizeof(*buf),
++	};
++	struct spi_message m;
++	int i, ret;
++
++	for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) {
++		buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr);
++		buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value);
++		spi_message_init_with_transfers(&m, &t, 1);
++		ret = spi_sync_locked(spi, &m);
++		if (ret)
++			dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret);
++
++		usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US,
++			     2 * CS35L56_SPI_RESET_TO_PORT_READY_US);
++	}
++}
++
++static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base)
++{
++	struct spi_device *spi = to_spi_device(cs35l56_base->dev);
++	unsigned int val;
++	int read_ret, ret;
++
++	/*
++	 * There must not be any other SPI bus activity while the amp is
++	 * soft-resetting.
++	 */
++	ret = spi_bus_lock(spi->controller);
++	if (ret) {
++		dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret);
++		return;
++	}
++
++	cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi);
++	spi_bus_unlock(spi->controller);
++
++	/*
++	 * Check firmware boot by testing for a response in MBOX_2.
++	 * HALO_STATE cannot be trusted yet because the reset sequence
++	 * can leave it with stale state. But MBOX is reset.
++	 * The regmap must remain in cache-only until the chip has
++	 * booted, so use a bypassed read.
++	 */
++	ret = read_poll_timeout(regmap_read_bypassed, read_ret,
++				(val > 0) && (val < 0xffffffff),
++				CS35L56_HALO_STATE_POLL_US,
++				CS35L56_HALO_STATE_TIMEOUT_US,
++				false,
++				cs35l56_base->regmap,
++				CS35L56_DSP_VIRTUAL1_MBOX_2,
++				&val);
++	if (ret) {
++		dev_err(cs35l56_base->dev, "SPI reboot timed out (%d): MBOX2=%#x\n",
++			read_ret, val);
++	}
++}
++
+ static const struct reg_sequence cs35l56_system_reset_seq[] = {
+ 	REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
+ 	REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
+@@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
+ 	 * accesses other than the controlled system reset sequence below.
+ 	 */
+ 	regcache_cache_only(cs35l56_base->regmap, true);
++
++	if (cs35l56_is_spi(cs35l56_base)) {
++		cs35l56_spi_system_reset(cs35l56_base);
++		return;
++	}
++
+ 	regmap_multi_reg_write_bypassed(cs35l56_base->regmap,
+ 					cs35l56_system_reset_seq,
+ 					ARRAY_SIZE(cs35l56_system_reset_seq));
+diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
+index c101134e85328e..ca6c03a8766d38 100644
+--- a/sound/soc/codecs/cs35l56-spi.c
++++ b/sound/soc/codecs/cs35l56-spi.c
+@@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi)
+ 
+ 	cs35l56->base.dev = &spi->dev;
+ 	cs35l56->base.can_hibernate = true;
++	ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
++	if (ret)
++		return ret;
+ 
+ 	ret = cs35l56_common_probe(cs35l56);
+ 	if (ret != 0)
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index f3c97da798dc8e..76159c45e6b52e 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
+ 
+ /* Left Mixer */
+ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
+@@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+ static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
+ 	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+ 	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
+ };
+ 
+@@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
+ 	SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
+ 			ES8328_DACPOWER_LDAC_OFF, 1),
+ 
+-	SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
++	SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
+ 		&es8328_left_mixer_controls[0],
+ 		ARRAY_SIZE(es8328_left_mixer_controls)),
+-	SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
++	SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
+ 		&es8328_right_mixer_controls[0],
+ 		ARRAY_SIZE(es8328_right_mixer_controls)),
+ 
+@@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
+ 	{ "Right Line Mux", "PGA", "Right PGA Mux" },
+ 	{ "Right Line Mux", "Differential", "Differential Mux" },
+ 
+-	{ "Left Out 1", NULL, "Left DAC" },
+-	{ "Right Out 1", NULL, "Right DAC" },
+-	{ "Left Out 2", NULL, "Left DAC" },
+-	{ "Right Out 2", NULL, "Right DAC" },
+-
+-	{ "Left Mixer", "Playback Switch", "Left DAC" },
++	{ "Left Mixer", NULL, "Left DAC" },
+ 	{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
+ 	{ "Left Mixer", "Right Playback Switch", "Right DAC" },
+ 	{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
+ 
+ 	{ "Right Mixer", "Left Playback Switch", "Left DAC" },
+ 	{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
+-	{ "Right Mixer", "Playback Switch", "Right DAC" },
++	{ "Right Mixer", NULL, "Right DAC" },
+ 	{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
+ 
+ 	{ "DAC DIG", NULL, "DAC STM" },
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 634168d2bb6e54..c5efbceb06d1fc 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ 	{
+ 		.name = "sai-tx",
+ 		.playback = {
+-			.stream_name = "CPU-Playback",
++			.stream_name = "SAI-Playback",
+ 			.channels_min = 1,
+ 			.channels_max = 32,
+-				.rate_min = 8000,
++			.rate_min = 8000,
+ 			.rate_max = 2822400,
+ 			.rates = SNDRV_PCM_RATE_KNOT,
+ 			.formats = FSL_SAI_FORMATS,
+@@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
+ 	{
+ 		.name = "sai-rx",
+ 		.capture = {
+-			.stream_name = "CPU-Capture",
++			.stream_name = "SAI-Capture",
+ 			.channels_min = 1,
+ 			.channels_max = 32,
+ 			.rate_min = 8000,
+diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
+index 50ecc5f51100ee..dac5d4ddacd6ef 100644
+--- a/sound/soc/fsl/imx-audmix.c
++++ b/sound/soc/fsl/imx-audmix.c
+@@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = {
+ static const char *name[][3] = {
+ 	{"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"},
+ 	{"sai-tx", "sai-tx", "sai-rx"},
+-	{"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"},
+-	{"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"},
++	{"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"},
++	{"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"},
+ };
+ 
+ static int imx_audmix_probe(struct platform_device *pdev)
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 737dd00e97b142..779d97d31f170e 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+ 	struct usbmidi_out_port *port = substream->runtime->private_data;
+ 
+-	cancel_work_sync(&port->ep->work);
++	flush_work(&port->ep->work);
+ 	return substream_open(substream, 0, 0);
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a97efb7b131ea2..09210fb4ac60c1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 	case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+ 		subs->stream_offset_adj = 2;
+ 		break;
++	case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */
+ 	case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
+ 		pioneer_djm_set_format_quirk(subs, 0x0082);
+ 		break;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index e7ec29dfdff22a..6691bd106e4b6e 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -639,47 +639,8 @@ static int add_dead_ends(struct objtool_file *file)
+ 	uint64_t offset;
+ 
+ 	/*
+-	 * Check for manually annotated dead ends.
+-	 */
+-	rsec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+-	if (!rsec)
+-		goto reachable;
+-
+-	for_each_reloc(rsec, reloc) {
+-		if (reloc->sym->type == STT_SECTION) {
+-			offset = reloc_addend(reloc);
+-		} else if (reloc->sym->local_label) {
+-			offset = reloc->sym->offset;
+-		} else {
+-			WARN("unexpected relocation symbol type in %s", rsec->name);
+-			return -1;
+-		}
+-
+-		insn = find_insn(file, reloc->sym->sec, offset);
+-		if (insn)
+-			insn = prev_insn_same_sec(file, insn);
+-		else if (offset == reloc->sym->sec->sh.sh_size) {
+-			insn = find_last_insn(file, reloc->sym->sec);
+-			if (!insn) {
+-				WARN("can't find unreachable insn at %s+0x%" PRIx64,
+-				     reloc->sym->sec->name, offset);
+-				return -1;
+-			}
+-		} else {
+-			WARN("can't find unreachable insn at %s+0x%" PRIx64,
+-			     reloc->sym->sec->name, offset);
+-			return -1;
+-		}
+-
+-		insn->dead_end = true;
+-	}
+-
+-reachable:
+-	/*
+-	 * These manually annotated reachable checks are needed for GCC 4.4,
+-	 * where the Linux unreachable() macro isn't supported.  In that case
+-	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
+-	 * not a dead end.
++	 * UD2 defaults to being a dead-end, allow them to be annotated for
++	 * non-fatal, eg WARN.
+ 	 */
+ 	rsec = find_section_by_name(file->elf, ".rela.discard.reachable");
+ 	if (!rsec)
+@@ -2628,13 +2589,14 @@ static void mark_rodata(struct objtool_file *file)
+ 	 *
+ 	 * - .rodata: can contain GCC switch tables
+ 	 * - .rodata.<func>: same, if -fdata-sections is being used
+-	 * - .rodata..c_jump_table: contains C annotated jump tables
++	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
+ 	 *
+ 	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
+ 	 */
+ 	for_each_sec(file, sec) {
+-		if (!strncmp(sec->name, ".rodata", 7) &&
+-		    !strstr(sec->name, ".str1.")) {
++		if ((!strncmp(sec->name, ".rodata", 7) &&
++		     !strstr(sec->name, ".str1.")) ||
++		    !strncmp(sec->name, ".data.rel.ro", 12)) {
+ 			sec->rodata = true;
+ 			found = true;
+ 		}
+diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
+index 86d4af9c5aa9dc..89ee12b1a13849 100644
+--- a/tools/objtool/include/objtool/special.h
++++ b/tools/objtool/include/objtool/special.h
+@@ -10,7 +10,7 @@
+ #include <objtool/check.h>
+ #include <objtool/elf.h>
+ 
+-#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
++#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table"
+ 
+ struct special_alt {
+ 	struct list_head list;
+diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
+index 38303da957ee56..8a518905a9f9c5 100755
+--- a/tools/testing/selftests/drivers/net/queues.py
++++ b/tools/testing/selftests/drivers/net/queues.py
+@@ -45,10 +45,9 @@ def addremove_queues(cfg, nl) -> None:
+ 
+     netnl = EthtoolFamily()
+     channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
+-    if channels['combined-count'] == 0:
+-        rx_type = 'rx'
+-    else:
+-        rx_type = 'combined'
++    rx_type = 'rx'
++    if channels.get('combined-count', 0) > 0:
++            rx_type = 'combined'
+ 
+     expected = curr_queues - 1
+     cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10)
+diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
+index 61056fa074bb2f..40a2def50b837e 100644
+--- a/tools/testing/selftests/landlock/common.h
++++ b/tools/testing/selftests/landlock/common.h
+@@ -234,6 +234,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
+ struct protocol_variant {
+ 	int domain;
+ 	int type;
++	int protocol;
+ };
+ 
+ struct service_fixture {
+diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
+index 29af19c4e9f981..a8982da4acbdc3 100644
+--- a/tools/testing/selftests/landlock/config
++++ b/tools/testing/selftests/landlock/config
+@@ -3,6 +3,8 @@ CONFIG_CGROUP_SCHED=y
+ CONFIG_INET=y
+ CONFIG_IPV6=y
+ CONFIG_KEYS=y
++CONFIG_MPTCP=y
++CONFIG_MPTCP_IPV6=y
+ CONFIG_NET=y
+ CONFIG_NET_NS=y
+ CONFIG_OVERLAY_FS=y
+diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
+index 4e0aeb53b225a5..376079d70d3fc0 100644
+--- a/tools/testing/selftests/landlock/net_test.c
++++ b/tools/testing/selftests/landlock/net_test.c
+@@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata)
+ 	clear_ambient_cap(_metadata, CAP_NET_ADMIN);
+ }
+ 
++static bool prot_is_tcp(const struct protocol_variant *const prot)
++{
++	return (prot->domain == AF_INET || prot->domain == AF_INET6) &&
++	       prot->type == SOCK_STREAM &&
++	       (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP);
++}
++
+ static bool is_restricted(const struct protocol_variant *const prot,
+ 			  const enum sandbox_type sandbox)
+ {
+-	switch (prot->domain) {
+-	case AF_INET:
+-	case AF_INET6:
+-		switch (prot->type) {
+-		case SOCK_STREAM:
+-			return sandbox == TCP_SANDBOX;
+-		}
+-		break;
+-	}
++	if (sandbox == TCP_SANDBOX)
++		return prot_is_tcp(prot);
+ 	return false;
+ }
+ 
+@@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv)
+ 	int ret;
+ 
+ 	ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
+-		     0);
++		     srv->protocol.protocol);
+ 	if (ret < 0)
+ 		return -errno;
+ 	return ret;
+@@ -290,22 +290,59 @@ FIXTURE_TEARDOWN(protocol)
+ }
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) {
+ 	/* clang-format on */
+ 	.sandbox = NO_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET,
+ 		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
+ 	},
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) {
+ 	/* clang-format on */
+ 	.sandbox = NO_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET6,
+ 		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
+ 	},
+ };
+ 
+@@ -329,6 +366,17 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_udp) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) {
++	/* clang-format on */
++	.sandbox = NO_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_stream) {
+ 	/* clang-format on */
+@@ -350,22 +398,48 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) {
+ 	/* clang-format on */
+ 	.sandbox = TCP_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET,
+ 		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
++	},
++};
++
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		/* IPPROTO_IP == 0 */
++		.protocol = IPPROTO_IP,
+ 	},
+ };
+ 
+ /* clang-format off */
+-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) {
+ 	/* clang-format on */
+ 	.sandbox = TCP_SANDBOX,
+ 	.prot = {
+ 		.domain = AF_INET6,
+ 		.type = SOCK_STREAM,
++		.protocol = IPPROTO_TCP,
+ 	},
+ };
+ 
+@@ -389,6 +463,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_udp) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) {
+ 	/* clang-format on */
+@@ -399,6 +484,17 @@ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_stream) {
+ 	},
+ };
+ 
++/* clang-format off */
++FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) {
++	/* clang-format on */
++	.sandbox = TCP_SANDBOX,
++	.prot = {
++		.domain = AF_INET6,
++		.type = SOCK_STREAM,
++		.protocol = IPPROTO_MPTCP,
++	},
++};
++
+ /* clang-format off */
+ FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_unix_datagram) {
+ 	/* clang-format on */
+diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
+index de31a0143139b7..f02f411d550d18 100644
+--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
++++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
+@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
+ #ifdef RSEQ_COMPARE_TWICE
+ 				  RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ #endif
+-				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
++				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
+ 				  RSEQ_INJECT_ASM(4)
+ 				  RSEQ_ASM_DEFINE_ABORT(4, abort)
+ 				  : /* gcc asm goto does not allow outputs */
+@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
+ 				    [current_cpu_id]		"m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ 				    [rseq_cs]			"m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ 				    [ptr]			"r" (ptr),
+-				    [off]			"er" (off),
+-				    [inc]			"er" (inc)
++				    [off]			"r" (off),
++				    [inc]			"r" (inc)
+ 				    RSEQ_INJECT_INPUT
+ 				  : "memory", RSEQ_ASM_TMP_REG_1
+ 				    RSEQ_INJECT_CLOBBER
+diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
+index 37e598d0a365e2..67d544aaa9a3b0 100644
+--- a/tools/testing/selftests/rseq/rseq-riscv.h
++++ b/tools/testing/selftests/rseq/rseq-riscv.h
+@@ -158,7 +158,7 @@ do {									\
+ 	"bnez	" RSEQ_ASM_TMP_REG_1 ", 222b\n"				\
+ 	"333:\n"
+ 
+-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label)		\
++#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label)	\
+ 	"mv	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n"	\
+ 	RSEQ_ASM_OP_R_ADD(off)						\
+ 	REG_L	  RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n"	\



